#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
#
import os
import sys
import pytorch_sphinx_theme
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../.."))
sys.path.insert(0, target_dir)
print(target_dir)
# -- Project information -----------------------------------------------------
project = "PyTorchExamples"
copyright = "2022, Meta"
author = "Meta"
# The full version, including alpha/beta/rc tags
release = "1.11"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.napoleon", "sphinx.ext.autodoc", "sphinx_panels"]
panels_add_bootstrap_css = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
panels_add_fontawesome_latex = True
html_theme_options = {
'pytorch_project': 'examples',
'collapse_navigation': False,
'display_version': True,
'logo_only': False,
'analytics_id': 'UA-117752657-2',
}
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class Sequence(nn.Module):
def __init__(self):
super(Sequence, self).__init__()
self.lstm1 = nn.LSTMCell(1, 51)
self.lstm2 = nn.LSTMCell(51, 51)
self.linear = nn.Linear(51, 1)
def forward(self, input, future = 0):
outputs = []
h_t = torch.zeros(input.size(0), 51, dtype=torch.double)
c_t = torch.zeros(input.size(0), 51, dtype=torch.double)
h_t2 = torch.zeros(input.size(0), 51, dtype=torch.double)
c_t2 = torch.zeros(input.size(0), 51, dtype=torch.double)
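# feed the ground-truth sequence one timestep at a time through the stacked LSTM cells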
for input_t in input.split(1, dim=1):
h_t, c_t = self.lstm1(input_t, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs += [output]
for i in range(future):  # if we should predict the future
h_t, c_t = self.lstm1(output, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs += [output]
outputs = torch.cat(outputs, dim=1)
return outputs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--steps', type=int, default=15, help='steps to run')
opt = parser.parse_args()
# set random seed to 0
np.random.seed(0)
torch.manual_seed(0)
# load data and make training set
data = torch.load('traindata.pt')
input = torch.from_numpy(data[3:, :-1])
target = torch.from_numpy(data[3:, 1:])
test_input = torch.from_numpy(data[:3, :-1])
test_target = torch.from_numpy(data[:3, 1:])
# build the model
seq = Sequence()
seq.double()
criterion = nn.MSELoss()
# use LBFGS as the optimizer since the whole dataset fits in memory (full-batch training)
optimizer = optim.LBFGS(seq.parameters(), lr=0.8)
# begin to train
for i in range(opt.steps):
print('STEP: ', i)
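# LBFGS may re-evaluate the objective several times per step, so the
# forward and backward passes are wrapped in a closure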
def closure():
optimizer.zero_grad()
out = seq(input)
loss = criterion(out, target)
print('loss:', loss.item())
loss.backward()
return loss
optimizer.step(closure)
# begin to predict, no need to track gradient here
with torch.no_grad():
future = 1000
pred = seq(test_input, future=future)
loss = criterion(pred[:, :-future], test_target)
print('test loss:', loss.item())
y = pred.detach().numpy()
# draw the result
plt.figure(figsize=(30,10))
plt.title('Predict future values for time sequences\n(Dashed lines are predicted values)', fontsize=30)
plt.xlabel('x', fontsize=20)
plt.ylabel('y', fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
def draw(yi, color):
plt.plot(np.arange(input.size(1)), yi[:input.size(1)], color, linewidth=2.0)
plt.plot(np.arange(input.size(1), input.size(1) + future), yi[input.size(1):], color + ':', linewidth=2.0)
draw(y[0], 'r')
draw(y[1], 'g')
draw(y[2], 'b')
plt.savefig('predict%d.pdf'%i)
plt.close()
|
import numpy as np
import torch
np.random.seed(2)
T = 20
L = 1000
N = 100
x = np.empty((N, L), 'int64')
x[:] = np.array(range(L)) + np.random.randint(-4 * T, 4 * T, N).reshape(N, 1)
data = np.sin(x / 1.0 / T).astype('float64')
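# data has shape (N, L) = (100, 1000): 100 sine waves of period 2*pi*T
# (about 126 samples), each shifted by a random integer offset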
torch.save(data, open('traindata.pt', 'wb'))
|
import os
import zipfile
# PyTorch 1.1 moved _download_url_to_file
# from torch.utils.model_zoo to torch.hub.
# PyTorch 1.0 ships a different _download_url_to_file
# with a two-argument signature.
# TODO: once support for PyTorch 1.0 and older is dropped,
# remove the torch.utils.model_zoo fallback.
# Ref. PyTorch #18758
# https://github.com/pytorch/pytorch/pull/18758/commits
try:
from torch.utils.model_zoo import _download_url_to_file
except ImportError:
try:
from torch.hub import download_url_to_file as _download_url_to_file
except ImportError:
from torch.hub import _download_url_to_file
def unzip(source_filename, dest_dir):
with zipfile.ZipFile(source_filename) as zf:
zf.extractall(path=dest_dir)
if __name__ == '__main__':
_download_url_to_file('https://www.dropbox.com/s/lrvwfehqdcxoza8/saved_models.zip?dl=1', 'saved_models.zip', None, True)
unzip('saved_models.zip', '.')
|
import torch
class TransformerNet(torch.nn.Module):
def __init__(self):
super(TransformerNet, self).__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
# Residual layers
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
self.relu = torch.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
return y
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
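# reflection padding of kernel_size // 2 keeps borders artifact-free and,
# at stride 1, preserves the spatial size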
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
|
from collections import namedtuple
import torch
from torchvision import models
class Vgg16(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
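# slice the pretrained VGG16 feature stack at the ReLU activations used for
# perceptual losses: indices [0, 4) end at relu1_2, [4, 9) at relu2_2,
# [9, 16) at relu3_3, and [16, 23) at relu4_3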
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
return out
|
import argparse
import os
import sys
import time
import re
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import torch.onnx
import utils
from transformer_net import TransformerNet
from vgg import Vgg16
def check_paths(args):
try:
if not os.path.exists(args.save_model_dir):
os.makedirs(args.save_model_dir)
if args.checkpoint_model_dir is not None and not (os.path.exists(args.checkpoint_model_dir)):
os.makedirs(args.checkpoint_model_dir)
except OSError as e:
print(e)
sys.exit(1)
def train(args):
if args.cuda:
device = torch.device("cuda")
elif args.mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
transform = transforms.Compose([
transforms.Resize(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size)
transformer = TransformerNet().to(device)
optimizer = Adam(transformer.parameters(), args.lr)
mse_loss = torch.nn.MSELoss()
vgg = Vgg16(requires_grad=False).to(device)
style_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
style = utils.load_image(args.style_image, size=args.style_size)
style = style_transform(style)
style = style.repeat(args.batch_size, 1, 1, 1).to(device)
features_style = vgg(utils.normalize_batch(style))
gram_style = [utils.gram_matrix(y) for y in features_style]
for e in range(args.epochs):
transformer.train()
agg_content_loss = 0.
agg_style_loss = 0.
count = 0
for batch_id, (x, _) in enumerate(train_loader):
n_batch = len(x)
count += n_batch
optimizer.zero_grad()
x = x.to(device)
y = transformer(x)
y = utils.normalize_batch(y)
x = utils.normalize_batch(x)
features_y = vgg(y)
features_x = vgg(x)
content_loss = args.content_weight * mse_loss(features_y.relu2_2, features_x.relu2_2)
style_loss = 0.
for ft_y, gm_s in zip(features_y, gram_style):
gm_y = utils.gram_matrix(ft_y)
style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :])
style_loss *= args.style_weight
total_loss = content_loss + style_loss
total_loss.backward()
optimizer.step()
agg_content_loss += content_loss.item()
agg_style_loss += style_loss.item()
if (batch_id + 1) % args.log_interval == 0:
mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format(
time.ctime(), e + 1, count, len(train_dataset),
agg_content_loss / (batch_id + 1),
agg_style_loss / (batch_id + 1),
(agg_content_loss + agg_style_loss) / (batch_id + 1)
)
print(mesg)
if args.checkpoint_model_dir is not None and (batch_id + 1) % args.checkpoint_interval == 0:
transformer.eval().cpu()
ckpt_model_filename = "ckpt_epoch_" + str(e) + "_batch_id_" + str(batch_id + 1) + ".pth"
ckpt_model_path = os.path.join(args.checkpoint_model_dir, ckpt_model_filename)
torch.save(transformer.state_dict(), ckpt_model_path)
transformer.to(device).train()
# save model
transformer.eval().cpu()
save_model_filename = "epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
args.content_weight) + "_" + str(args.style_weight) + ".model"
save_model_path = os.path.join(args.save_model_dir, save_model_filename)
torch.save(transformer.state_dict(), save_model_path)
print("\nDone, trained model saved at", save_model_path)
def stylize(args):
device = torch.device("cuda" if args.cuda else "cpu")
content_image = utils.load_image(args.content_image, scale=args.content_scale)
content_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
content_image = content_transform(content_image)
content_image = content_image.unsqueeze(0).to(device)
if args.model.endswith(".onnx"):
output = stylize_onnx(content_image, args)
else:
with torch.no_grad():
style_model = TransformerNet()
state_dict = torch.load(args.model)
# remove saved deprecated running_* keys in InstanceNorm from the checkpoint
for k in list(state_dict.keys()):
if re.search(r'in\d+\.running_(mean|var)$', k):
del state_dict[k]
style_model.load_state_dict(state_dict)
style_model.to(device)
style_model.eval()
if args.export_onnx:
assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
output = torch.onnx._export(
style_model, content_image, args.export_onnx, opset_version=11,
).cpu()
else:
output = style_model(content_image).cpu()
utils.save_image(args.output_image, output[0])
def stylize_onnx(content_image, args):
"""
Read ONNX model and run it using onnxruntime
"""
assert not args.export_onnx
import onnxruntime
ort_session = onnxruntime.InferenceSession(args.model)
def to_numpy(tensor):
return (
tensor.detach().cpu().numpy()
if tensor.requires_grad
else tensor.cpu().numpy()
)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(content_image)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
return torch.from_numpy(img_out_y)
def main():
main_arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style")
subparsers = main_arg_parser.add_subparsers(title="subcommands", dest="subcommand")
train_arg_parser = subparsers.add_parser("train", help="parser for training arguments")
train_arg_parser.add_argument("--epochs", type=int, default=2,
help="number of training epochs, default is 2")
train_arg_parser.add_argument("--batch-size", type=int, default=4,
help="batch size for training, default is 4")
train_arg_parser.add_argument("--dataset", type=str, required=True,
help="path to training dataset, the path should point to a folder "
"containing another folder with all the training images")
train_arg_parser.add_argument("--style-image", type=str, default="images/style-images/mosaic.jpg",
help="path to style-image")
train_arg_parser.add_argument("--save-model-dir", type=str, required=True,
help="path to folder where trained model will be saved.")
train_arg_parser.add_argument("--checkpoint-model-dir", type=str, default=None,
help="path to folder where checkpoints of trained models will be saved")
train_arg_parser.add_argument("--image-size", type=int, default=256,
help="size of training images, default is 256 X 256")
train_arg_parser.add_argument("--style-size", type=int, default=None,
help="size of style-image, default is the original size of style image")
train_arg_parser.add_argument("--cuda", type=int, required=True,
help="set it to 1 for running on GPU, 0 for CPU")
train_arg_parser.add_argument("--seed", type=int, default=42,
help="random seed for training")
train_arg_parser.add_argument("--content-weight", type=float, default=1e5,
help="weight for content-loss, default is 1e5")
train_arg_parser.add_argument("--style-weight", type=float, default=1e10,
help="weight for style-loss, default is 1e10")
train_arg_parser.add_argument("--lr", type=float, default=1e-3,
help="learning rate, default is 1e-3")
train_arg_parser.add_argument("--log-interval", type=int, default=500,
help="number of images after which the training loss is logged, default is 500")
train_arg_parser.add_argument("--checkpoint-interval", type=int, default=2000,
help="number of batches after which a checkpoint of the trained model will be created")
eval_arg_parser = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments")
eval_arg_parser.add_argument("--content-image", type=str, required=True,
help="path to content image you want to stylize")
eval_arg_parser.add_argument("--content-scale", type=float, default=None,
help="factor for scaling down the content image")
eval_arg_parser.add_argument("--output-image", type=str, required=True,
help="path for saving the output image")
eval_arg_parser.add_argument("--model", type=str, required=True,
help="saved model to be used for stylizing the image. If file ends in .pth - PyTorch path is used, if in .onnx - Caffe2 path")
eval_arg_parser.add_argument("--cuda", type=int, default=False,
help="set it to 1 for running on cuda, 0 for CPU")
eval_arg_parser.add_argument("--export_onnx", type=str,
help="export ONNX model to a given file")
eval_arg_parser.add_argument('--mps', action='store_true', default=False, help='enable macOS GPU training')
args = main_arg_parser.parse_args()
if args.subcommand is None:
print("ERROR: specify either train or eval")
sys.exit(1)
if args.cuda and not torch.cuda.is_available():
print("ERROR: cuda is not available, try running on CPU")
sys.exit(1)
if not args.mps and torch.backends.mps.is_available():
print("WARNING: mps is available, run with --mps to enable macOS GPU")
if args.subcommand == "train":
check_paths(args)
train(args)
else:
stylize(args)
if __name__ == "__main__":
main()
|
import torch
from PIL import Image
def load_image(filename, size=None, scale=None):
img = Image.open(filename).convert('RGB')
if size is not None:
img = img.resize((size, size), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
elif scale is not None:
img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.LANCZOS)
return img
def save_image(filename, data):
img = data.clone().clamp(0, 255).numpy()
img = img.transpose(1, 2, 0).astype("uint8")
img = Image.fromarray(img)
img.save(filename)
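# gram_matrix: for features y of shape (b, ch, h, w), the result has shape
# (b, ch, ch); entry (i, j) is the normalized inner product of channels i
# and j, capturing feature co-activation (style) independent of spatial layout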
def gram_matrix(y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
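# normalize_batch rescales pixel values from [0, 255] to [0, 1] in place
# (note the in-place div_), then standardizes with the ImageNet channel
# statistics that the pretrained VGG expects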
def normalize_batch(batch):
# normalize using imagenet mean and std
mean = batch.new_tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
std = batch.new_tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
batch = batch.div_(255.0)
return (batch - mean) / std
|
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import os
import sys
import urllib
try:
from urllib.error import URLError
from urllib.request import urlretrieve
except ImportError:
from urllib2 import URLError
from urllib import urlretrieve
RESOURCES = [
'train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz',
]
def report_download_progress(chunk_number, chunk_size, file_size):
if file_size != -1:
percent = min(1, (chunk_number * chunk_size) / file_size)
bar = '#' * int(64 * percent)
sys.stdout.write('\r0% |{:<64}| {}%'.format(bar, int(percent * 100)))
def download(destination_path, url, quiet):
if os.path.exists(destination_path):
if not quiet:
print('{} already exists, skipping ...'.format(destination_path))
else:
print('Downloading {} ...'.format(url))
try:
hook = None if quiet else report_download_progress
urlretrieve(url, destination_path, reporthook=hook)
except URLError:
raise RuntimeError('Error downloading resource!')
finally:
if not quiet:
# Just a newline.
print()
def unzip(zipped_path, quiet):
unzipped_path = os.path.splitext(zipped_path)[0]
if os.path.exists(unzipped_path):
if not quiet:
print('{} already exists, skipping ... '.format(unzipped_path))
return
with gzip.open(zipped_path, 'rb') as zipped_file:
with open(unzipped_path, 'wb') as unzipped_file:
unzipped_file.write(zipped_file.read())
if not quiet:
print('Unzipped {} ...'.format(zipped_path))
def main():
parser = argparse.ArgumentParser(
description='Download the MNIST dataset from the internet')
parser.add_argument(
'-d', '--destination', default='.', help='Destination directory')
parser.add_argument(
'-q',
'--quiet',
action='store_true',
help="Don't report about progress")
options = parser.parse_args()
if not os.path.exists(options.destination):
os.makedirs(options.destination)
try:
for resource in RESOURCES:
path = os.path.join(options.destination, resource)
url = 'http://yann.lecun.com/exdb/mnist/{}'.format(resource)
download(path, url, options.quiet)
unzip(path, options.quiet)
except KeyboardInterrupt:
print('Interrupted')
if __name__ == '__main__':
main()
|
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import matplotlib.pyplot as plt
import torch
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sample-file", required=True)
parser.add_argument("-o", "--out-file", default="out.png")
parser.add_argument("-d", "--dimension", type=int, default=3)
options = parser.parse_args()
module = torch.jit.load(options.sample_file)
images = list(module.parameters())[0]
for index in range(options.dimension * options.dimension):
image = images[index].detach().cpu().reshape(28, 28).mul(255).to(torch.uint8)
array = image.numpy()
axis = plt.subplot(options.dimension, options.dimension, 1 + index)
plt.imshow(array, cmap="gray")
axis.get_xaxis().set_visible(False)
axis.get_yaxis().set_visible(False)
plt.savefig(options.out_file)
print("Saved ", options.out_file)
|
"""
This script converts the network into a TorchScript module by tracing.
"""
import torch
from torchvision import models
# Download and load the pre-trained model
model = models.resnet18(pretrained=True)
# Freeze the parameters; gradients are not needed for tracing
for param in model.parameters():
param.requires_grad = False
# Keep every layer except the final fully connected one
resnet18 = torch.nn.Sequential(*list(model.children())[:-1])
example_input = torch.rand(1, 3, 224, 224)
script_module = torch.jit.trace(resnet18, example_input)
script_module.save('resnet18_without_last_layer.pt')
|
import argparse
import os
import random
import shutil
import time
import warnings
from enum import Enum
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Subset
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR', nargs='?', default='imagenet',
help='path to dataset (default: imagenet)')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--dummy', action='store_true', help="use fake data to benchmark")
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
cudnn.benchmark = False
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
if torch.cuda.is_available():
ngpus_per_node = torch.cuda.device_count()
else:
ngpus_per_node = 1
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if not torch.cuda.is_available() and not torch.backends.mps.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if torch.cuda.is_available():
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs of the current node.
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None and torch.cuda.is_available():
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
elif torch.backends.mps.is_available():
device = torch.device("mps")
model = model.to(device)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
if torch.cuda.is_available():
if args.gpu is not None:
device = torch.device('cuda:{}'.format(args.gpu))
else:
device = torch.device("cuda")
elif torch.backends.mps.is_available():
device = torch.device("mps")
else:
device = torch.device("cpu")
# define loss function (criterion), optimizer, and learning rate scheduler
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
elif torch.cuda.is_available():
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# Data loading code
if args.dummy:
print("=> Dummy data is used!")
train_dataset = datasets.FakeData(1281167, (3, 224, 224), 1000, transforms.ToTensor())
val_dataset = datasets.FakeData(50000, (3, 224, 224), 1000, transforms.ToTensor())
else:
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
val_dataset = datasets.ImageFolder(
valdir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=True)
else:
train_sampler = None
val_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=val_sampler)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, device, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
scheduler.step()
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
'scheduler' : scheduler.state_dict()
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch, device, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# move data to the same device as model
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i + 1)
def validate(val_loader, model, criterion, args):
def run_validate(loader, base_progress=0):
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(loader):
i = base_progress + i
if args.gpu is not None and torch.cuda.is_available():
images = images.cuda(args.gpu, non_blocking=True)
if torch.backends.mps.is_available():
images = images.to('mps')
target = target.to('mps')
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i + 1)
batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
losses = AverageMeter('Loss', ':.4e', Summary.NONE)
top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
progress = ProgressMeter(
len(val_loader) + (args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset))),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
run_validate(val_loader)
if args.distributed:
top1.all_reduce()
top5.all_reduce()
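# when the validation set is not evenly divisible across ranks, the sampler
# drops the tail; evaluate those leftover samples separately so the summary
# covers the full dataset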
if args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset)):
aux_val_dataset = Subset(val_loader.dataset,
range(len(val_loader.sampler) * args.world_size, len(val_loader.dataset)))
aux_val_loader = torch.utils.data.DataLoader(
aux_val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
run_validate(aux_val_loader, len(val_loader))
progress.display_summary()
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class Summary(Enum):
NONE = 0
AVERAGE = 1
SUM = 2
COUNT = 3
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
self.name = name
self.fmt = fmt
self.summary_type = summary_type
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def all_reduce(self):
if torch.cuda.is_available():
device = torch.device("cuda")
elif torch.backends.mps.is_available():
device = torch.device("mps")
else:
device = torch.device("cpu")
total = torch.tensor([self.sum, self.count], dtype=torch.float32, device=device)
dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)
self.sum, self.count = total.tolist()
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def summary(self):
fmtstr = ''
if self.summary_type is Summary.NONE:
fmtstr = ''
elif self.summary_type is Summary.AVERAGE:
fmtstr = '{name} {avg:.3f}'
elif self.summary_type is Summary.SUM:
fmtstr = '{name} {sum:.3f}'
elif self.summary_type is Summary.COUNT:
fmtstr = '{name} {count:.3f}'
else:
raise ValueError('invalid summary type %r' % self.summary_type)
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def display_summary(self):
entries = [" *"]
entries += [meter.summary() for meter in self.meters]
print(' '.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
# This code is based on the implementation of Mohammad Pezeshki available at
# https://github.com/mohammadpz/pytorch_forward_forward and licensed under the MIT License.
# Modifications/Improvements to the original code have been made by Vivek V Patel.
import argparse
import torch
import torch.nn as nn
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize, Lambda
from torch.utils.data import DataLoader
from torch.optim import Adam
def get_y_neg(y):
y_neg = y.clone()
for idx, y_samp in enumerate(y):
allowed_indices = list(range(10))
allowed_indices.remove(y_samp.item())
y_neg[idx] = torch.tensor(allowed_indices)[
torch.randint(len(allowed_indices), size=(1,))
].item()
return y_neg.to(device)
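# The forward-forward algorithm embeds the label in the input itself: zero
# out the first `classes` pixels and set the pixel at the label's index to
# the image maximum, yielding positive (correct label) and negative
# (wrong label) examples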
def overlay_y_on_x(x, y, classes=10):
x_ = x.clone()
x_[:, :classes] *= 0.0
x_[range(x.shape[0]), y] = x.max()
return x_
class Net(torch.nn.Module):
def __init__(self, dims):
super().__init__()
# use nn.ModuleList so the layers are registered and appear in state_dict()
self.layers = nn.ModuleList()
for d in range(len(dims) - 1):
self.layers.append(Layer(dims[d], dims[d + 1]).to(device))
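# classify by trying every candidate label: overlay each label on the input
# and pick the one whose accumulated "goodness" across layers is highest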
def predict(self, x):
goodness_per_label = []
for label in range(10):
h = overlay_y_on_x(x, label)
goodness = []
for layer in self.layers:
h = layer(h)
goodness = goodness + [h.pow(2).mean(1)]
goodness_per_label += [sum(goodness).unsqueeze(1)]
goodness_per_label = torch.cat(goodness_per_label, 1)
return goodness_per_label.argmax(1)
def train(self, x_pos, x_neg):
h_pos, h_neg = x_pos, x_neg
for i, layer in enumerate(self.layers):
print("training layer: ", i)
h_pos, h_neg = layer.train(h_pos, h_neg)
class Layer(nn.Linear):
def __init__(self, in_features, out_features, bias=True, device=None, dtype=None):
super().__init__(in_features, out_features, bias, device, dtype)
self.relu = torch.nn.ReLU()
self.opt = Adam(self.parameters(), lr=args.lr)
self.threshold = args.threshold
self.num_epochs = args.epochs
def forward(self, x):
x_direction = x / (x.norm(2, 1, keepdim=True) + 1e-4)
return self.relu(torch.mm(x_direction, self.weight.T) + self.bias.unsqueeze(0))
def train(self, x_pos, x_neg):
for i in range(self.num_epochs):
g_pos = self.forward(x_pos).pow(2).mean(1)
g_neg = self.forward(x_neg).pow(2).mean(1)
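# forward-forward objective: a softplus of the signed margins pushes the
# goodness of positive samples above the threshold and the goodness of
# negative samples below it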
loss = torch.log(
1
+ torch.exp(
torch.cat([-g_pos + self.threshold, g_neg - self.threshold])
)
).mean()
self.opt.zero_grad()
loss.backward()
self.opt.step()
if i % args.log_interval == 0:
print("Loss: ", loss.item())
return self.forward(x_pos).detach(), self.forward(x_neg).detach()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--epochs",
type=int,
default=1000,
metavar="N",
help="number of epochs to train (default: 1000)",
)
parser.add_argument(
"--lr",
type=float,
default=0.03,
metavar="LR",
help="learning rate (default: 0.03)",
)
parser.add_argument(
"--no_cuda", action="store_true", default=False, help="disables CUDA training"
)
parser.add_argument(
"--no_mps", action="store_true", default=False, help="disables MPS training"
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--save_model",
action="store_true",
default=False,
help="For saving the current Model",
)
parser.add_argument(
"--train_size", type=int, default=50000, help="size of training set"
)
parser.add_argument(
"--threshold", type=float, default=2, help="threshold for training"
)
parser.add_argument("--test_size", type=int, default=10000, help="size of test set")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
use_mps = not args.no_mps and torch.backends.mps.is_available()
if use_cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
train_kwargs = {"batch_size": args.train_size}
test_kwargs = {"batch_size": args.test_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = Compose(
[
ToTensor(),
Normalize((0.1307,), (0.3081,)),
Lambda(lambda x: torch.flatten(x)),
]
)
train_loader = DataLoader(
MNIST("./data/", train=True, download=True, transform=transform), **train_kwargs
)
test_loader = DataLoader(
MNIST("./data/", train=False, download=True, transform=transform), **test_kwargs
)
net = Net([784, 500, 500])
x, y = next(iter(train_loader))
x, y = x.to(device), y.to(device)
x_pos = overlay_y_on_x(x, y)
y_neg = get_y_neg(y)
x_neg = overlay_y_on_x(x, y_neg)
net.train(x_pos, x_neg)
print("train error:", 1.0 - net.predict(x).eq(y).float().mean().item())
x_te, y_te = next(iter(test_loader))
x_te, y_te = x_te.to(device), y_te.to(device)
if args.save_model:
torch.save(net.state_dict(), "mnist_ff.pt")
print("test error:", 1.0 - net.predict(x_te).eq(y_te).float().mean().item())
|
from __future__ import print_function
import argparse, random, copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision import transforms as T
from torch.optim.lr_scheduler import StepLR
class SiameseNetwork(nn.Module):
"""
Siamese network for image similarity estimation.
The network is composed of two identical networks, one for each input.
The output of each network is concatenated and passed to a linear layer.
The output of the linear layer passed through a sigmoid function.
`"FaceNet" <https://arxiv.org/pdf/1503.03832.pdf>`_ is a variant of the Siamese network.
This implementation varies from FaceNet as we use the `ResNet-18` model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ as our feature extractor.
In addition, we aren't using `TripletLoss` as the MNIST dataset is simple, so `BCELoss` can do the trick.
"""
def __init__(self):
super(SiameseNetwork, self).__init__()
# get resnet model
self.resnet = torchvision.models.resnet18(weights=None)
# over-write the first conv layer to be able to read MNIST images
# as resnet18 reads (3,x,x) where 3 is RGB channels
# whereas MNIST has (1,x,x) where 1 is a gray-scale channel
self.resnet.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.fc_in_features = self.resnet.fc.in_features
# remove the last layer of resnet18 (linear layer which is before avgpool layer)
self.resnet = torch.nn.Sequential(*(list(self.resnet.children())[:-1]))
# add linear layers to compare between the features of the two images
self.fc = nn.Sequential(
nn.Linear(self.fc_in_features * 2, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 1),
)
self.sigmoid = nn.Sigmoid()
# initialize the weights
self.resnet.apply(self.init_weights)
self.fc.apply(self.init_weights)
def init_weights(self, m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def forward_once(self, x):
output = self.resnet(x)
output = output.view(output.size()[0], -1)
return output
def forward(self, input1, input2):
# get two images' features
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
# concatenate both images' features
output = torch.cat((output1, output2), 1)
# pass the concatenation to the linear layers
output = self.fc(output)
# pass the out of the linear layers to sigmoid layer
output = self.sigmoid(output)
return output
class APP_MATCHER(Dataset):
def __init__(self, root, train, download=False):
super(APP_MATCHER, self).__init__()
# get MNIST dataset
self.dataset = datasets.MNIST(root, train=train, download=download)
# as `self.dataset.data`'s shape is (Nx28x28), where N is the number of
# examples in MNIST dataset, a single example has the dimensions of
# (28x28) for (WxH), where W and H are the width and the height of the image.
# However, every example should have (CxWxH) dimensions where C is the number
# of channels to be passed to the network. As MNIST contains gray-scale images,
# we add an additional dimension corresponding to the number of channels.
self.data = self.dataset.data.unsqueeze(1).clone()
self.group_examples()
def group_examples(self):
"""
To ease the accessibility of data based on the class, we will use `group_examples` to group
examples based on class.
Every key in `grouped_examples` corresponds to a class in MNIST dataset. For every key in
`grouped_examples`, every value will conform to all of the indices for the MNIST
dataset examples that correspond to that key.
"""
# get the targets from MNIST dataset
np_arr = np.array(self.dataset.targets.clone())
# group examples based on class
self.grouped_examples = {}
for i in range(0,10):
self.grouped_examples[i] = np.where((np_arr==i))[0]
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
"""
For every example, we will select two images. There are two cases,
positive and negative examples. For positive examples, we will have two
images from the same class. For negative examples, we will have two images
from different classes.
Given an index, if the index is even, we will pick the second image from the same class,
but it won't be the same image we chose first. This ensures the positive example
isn't trivial: identical images would be easy to match, whereas two different
images from the same class force the network to learn what the class has in
common. If the index is odd, we will
pick the second image from a different class than the first image.
"""
# pick some random class for the first image
selected_class = random.randint(0, 9)
# pick a random index for the first image in the grouped indices based on the label
# of the class
random_index_1 = random.randint(0, self.grouped_examples[selected_class].shape[0]-1)
# pick the index to get the first image
index_1 = self.grouped_examples[selected_class][random_index_1]
# get the first image
image_1 = self.data[index_1].clone().float()
# same class
if index % 2 == 0:
# pick a random index for the second image
random_index_2 = random.randint(0, self.grouped_examples[selected_class].shape[0]-1)
# ensure that the index of the second image isn't the same as the first image
while random_index_2 == random_index_1:
random_index_2 = random.randint(0, self.grouped_examples[selected_class].shape[0]-1)
# pick the index to get the second image
index_2 = self.grouped_examples[selected_class][random_index_2]
# get the second image
image_2 = self.data[index_2].clone().float()
# set the label for this example to be positive (1)
target = torch.tensor(1, dtype=torch.float)
# different class
else:
# pick a random class
other_selected_class = random.randint(0, 9)
# ensure that the class of the second image isn't the same as the first image
while other_selected_class == selected_class:
other_selected_class = random.randint(0, 9)
# pick a random index for the second image in the grouped indices based on the label
# of the class
random_index_2 = random.randint(0, self.grouped_examples[other_selected_class].shape[0]-1)
# pick the index to get the second image
index_2 = self.grouped_examples[other_selected_class][random_index_2]
# get the second image
image_2 = self.data[index_2].clone().float()
# set the label for this example to be negative (0)
target = torch.tensor(0, dtype=torch.float)
return image_1, image_2, target
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
# we aren't using `TripletLoss` as the MNIST dataset is simple, so `BCELoss` can do the trick.
criterion = nn.BCELoss()
for batch_idx, (images_1, images_2, targets) in enumerate(train_loader):
images_1, images_2, targets = images_1.to(device), images_2.to(device), targets.to(device)
optimizer.zero_grad()
outputs = model(images_1, images_2).squeeze()
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(images_1), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if args.dry_run:
break
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
# we aren't using `TripletLoss` as the MNIST dataset is simple, so `BCELoss` can do the trick.
criterion = nn.BCELoss()
with torch.no_grad():
for (images_1, images_2, targets) in test_loader:
images_1, images_2, targets = images_1.to(device), images_2.to(device), targets.to(device)
outputs = model(images_1, images_2).squeeze()
test_loss += criterion(outputs, targets).sum().item()  # accumulate the mean loss of each batch
pred = torch.where(outputs > 0.5, 1, 0)  # threshold the sigmoid output at 0.5
correct += pred.eq(targets.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
# for the 1st epoch, the average loss is 0.0001 and the accuracy 97-98%
# using default settings. After completing the 10th epoch, the average
# loss is 0.0000 and the accuracy 99.5-100% using default settings.
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Siamese network Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--no-mps', action='store_true', default=False,
help='disables macOS GPU training')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
use_mps = not args.no_mps and torch.backends.mps.is_available()
torch.manual_seed(args.seed)
if use_cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
train_kwargs = {'batch_size': args.batch_size}
test_kwargs = {'batch_size': args.test_batch_size}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
train_dataset = APP_MATCHER('../data', train=True, download=True)
test_dataset = APP_MATCHER('../data', train=False)
train_loader = torch.utils.data.DataLoader(train_dataset,**train_kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, **test_kwargs)
model = SiameseNetwork().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "siamese_network.pt")
if __name__ == '__main__':
main()
|
import os
import torch
import torch.optim as optim
import torch.nn.functional as F
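# These helpers are meant to run in several worker processes that share the
# model's memory (Hogwild-style); each worker gets a distinct seed via
# args.seed + rank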
def train(rank, args, model, device, dataset, dataloader_kwargs):
torch.manual_seed(args.seed + rank)
train_loader = torch.utils.data.DataLoader(dataset, **dataloader_kwargs)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train_epoch(epoch, args, model, device, train_loader, optimizer)
def test(args, model, device, dataset, dataloader_kwargs):
torch.manual_seed(args.seed)
test_loader = torch.utils.data.DataLoader(dataset, **dataloader_kwargs)
test_epoch(model, device, test_loader)
def train_epoch(epoch, args, model, device, data_loader, optimizer):
model.train()
pid = os.getpid()
for batch_idx, (data, target) in enumerate(data_loader):
optimizer.zero_grad()
output = model(data.to(device))
loss = F.nll_loss(output, target.to(device))
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('{}\tTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
pid, epoch, batch_idx * len(data), len(data_loader.dataset),
100. * batch_idx / len(data_loader), loss.item()))
if args.dry_run:
break
def test_epoch(model, device, data_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in data_loader:
output = model(data.to(device))
test_loss += F.nll_loss(output, target.to(device), reduction='sum').item() # sum up batch loss
pred = output.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.to(device)).sum().item()
test_loss /= len(data_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.data.sampler import Sampler
from torchvision import datasets, transforms
from train import train, test
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--num-processes', type=int, default=2, metavar='N',
help='how many training processes to use (default: 2)')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--mps', action='store_true', default=False,
help='enables macOS GPU training')
parser.add_argument('--save_model', action='store_true', default=False,
help='save the trained model to state_dict')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
if __name__ == '__main__':
args = parser.parse_args()
use_cuda = args.cuda and torch.cuda.is_available()
use_mps = args.mps and torch.backends.mps.is_available()
if use_cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('../data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('../data', train=False,
transform=transform)
kwargs = {'batch_size': args.batch_size,
'shuffle': True}
if use_cuda:
kwargs.update({'num_workers': 1,
'pin_memory': True,
})
torch.manual_seed(args.seed)
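    # The 'spawn' start method is needed when sharing CUDA tensors across
    # processes; forked children cannot safely reuse the parent's CUDA context.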
mp.set_start_method('spawn', force=True)
model = Net().to(device)
model.share_memory() # gradients are allocated lazily, so they are not shared here
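    # share_memory() moves the parameter storages into shared memory, so every
    # worker process updates the same underlying tensors (lock-free, Hogwild-style).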
processes = []
    # We first train the model across `num_processes` processes
    for rank in range(args.num_processes):
        p = mp.Process(target=train, args=(rank, args, model, device,
                                           dataset1, kwargs))
        p.start()
        processes.append(p)
for p in processes:
p.join()
if args.save_model:
torch.save(model.state_dict(), "MNIST_hogwild.pt")
# Once training is complete, we can test the model
test(args, model, device, dataset2, kwargs)
|
#!/usr/bin/env python
from __future__ import print_function
from itertools import count
import torch
import torch.nn.functional as F
POLY_DEGREE = 4
W_target = torch.randn(POLY_DEGREE, 1) * 5
b_target = torch.randn(1) * 5
def make_features(x):
"""Builds features i.e. a matrix with columns [x, x^2, x^3, x^4]."""
x = x.unsqueeze(1)
return torch.cat([x ** i for i in range(1, POLY_DEGREE+1)], 1)
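# For example (illustrative only):
# make_features(torch.tensor([2., 3.]))
# -> tensor([[ 2.,  4.,  8., 16.],
#            [ 3.,  9., 27., 81.]])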
def f(x):
"""Approximated function."""
return x.mm(W_target) + b_target.item()
def poly_desc(W, b):
"""Creates a string description of a polynomial."""
result = 'y = '
for i, w in enumerate(W):
result += '{:+.2f} x^{} '.format(w, i + 1)
result += '{:+.2f}'.format(b[0])
return result
def get_batch(batch_size=32):
"""Builds a batch i.e. (x, f(x)) pair."""
random = torch.randn(batch_size)
x = make_features(random)
y = f(x)
return x, y
# Define model
fc = torch.nn.Linear(W_target.size(0), 1)
for batch_idx in count(1):
# Get data
batch_x, batch_y = get_batch()
# Reset gradients
fc.zero_grad()
# Forward pass
output = F.smooth_l1_loss(fc(batch_x), batch_y)
loss = output.item()
# Backward pass
output.backward()
# Apply gradients
for param in fc.parameters():
param.data.add_(-0.1 * param.grad)
# Stop criterion
if loss < 1e-3:
break
print('Loss: {:.6f} after {} batches'.format(loss, batch_idx))
print('==> Learned function:\t' + poly_desc(fc.weight.view(-1), fc.bias))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))
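# A minimal sketch (not part of the original script): the manual update above,
# param <- param - 0.1 * grad, is one step of plain SGD and could equivalently
# be written with torch.optim.SGD. `_sgd_step_demo` is an illustrative helper.
def _sgd_step_demo(model):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    optimizer.zero_grad()
    batch_x, batch_y = get_batch()
    F.smooth_l1_loss(model(batch_x), batch_y).backward()
    optimizer.step()  # performs param -= 0.1 * param.grad for every parameter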
|
import os
import time
import requests
import tarfile
import numpy as np
import argparse
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim import Adam
class GraphConv(nn.Module):
"""
Graph Convolutional Layer described in "Semi-Supervised Classification with Graph Convolutional Networks".
Given an input feature representation for each node in a graph, the Graph Convolutional Layer aims to aggregate
information from the node's neighborhood to update its own representation. This is achieved by applying a graph
convolutional operation that combines the features of a node with the features of its neighboring nodes.
Mathematically, the Graph Convolutional Layer can be described as follows:
H' = f(D^(-1/2) * A * D^(-1/2) * H * W)
    where:
        H: Input feature matrix with shape (N, F_in), where N is the number of nodes and F_in is the number of
        input features per node.
        A: Adjacency matrix of the graph with shape (N, N), representing the relationships between nodes.
        D: Diagonal degree matrix of A, used to symmetrically normalize the adjacency matrix.
        W: Learnable weight matrix with shape (F_in, F_out), where F_out is the number of output features per node.
        f: A non-linear activation function (applied outside of this layer).
"""
def __init__(self, input_dim, output_dim, use_bias=False):
super(GraphConv, self).__init__()
# Initialize the weight matrix W (in this case called `kernel`)
self.kernel = nn.Parameter(torch.Tensor(input_dim, output_dim))
nn.init.xavier_normal_(self.kernel) # Initialize the weights using Xavier initialization
# Initialize the bias (if use_bias is True)
self.bias = None
if use_bias:
self.bias = nn.Parameter(torch.Tensor(output_dim))
nn.init.zeros_(self.bias) # Initialize the bias to zeros
def forward(self, input_tensor, adj_mat):
"""
Performs a graph convolution operation.
Args:
input_tensor (torch.Tensor): Input tensor representing node features.
adj_mat (torch.Tensor): Adjacency matrix representing graph structure.
Returns:
torch.Tensor: Output tensor after the graph convolution operation.
"""
support = torch.mm(input_tensor, self.kernel) # Matrix multiplication between input and weight matrix
output = torch.spmm(adj_mat, support) # Sparse matrix multiplication between adjacency matrix and support
# Add the bias (if bias is not None)
if self.bias is not None:
output = output + self.bias
return output
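# A minimal sketch (not part of the original example) exercising GraphConv on a
# tiny 3-node path graph; the helper name and toy sizes are illustrative only.
def _graph_conv_demo():
    layer = GraphConv(input_dim=4, output_dim=2)
    features = torch.randn(3, 4)  # 3 nodes, 4 input features each
    # Adjacency (with self-loops) for the path graph 0-1-2; normalization omitted
    adj_mat = torch.tensor([[1., 1., 0.],
                            [1., 1., 1.],
                            [0., 1., 1.]]).to_sparse()
    return layer(features, adj_mat)  # -> shape (3, 2)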
class GCN(nn.Module):
"""
Graph Convolutional Network (GCN) as described in the paper `"Semi-Supervised Classification with Graph
Convolutional Networks" <https://arxiv.org/pdf/1609.02907.pdf>`.
The Graph Convolutional Network is a deep learning architecture designed for semi-supervised node
classification tasks on graph-structured data. It leverages the graph structure to learn node representations
by propagating information through the graph using graph convolutional layers.
    The original implementation consists of two stacked graph convolutional layers. The ReLU activation function is
    applied to the hidden representations; the output representations go through a log-softmax, which pairs with the
    negative log-likelihood loss used for training.
"""
def __init__(self, input_dim, hidden_dim, output_dim, use_bias=True, dropout_p=0.1):
super(GCN, self).__init__()
# Define the Graph Convolution layers
self.gc1 = GraphConv(input_dim, hidden_dim, use_bias=use_bias)
self.gc2 = GraphConv(hidden_dim, output_dim, use_bias=use_bias)
# Define the dropout layer
self.dropout = nn.Dropout(dropout_p)
def forward(self, input_tensor, adj_mat):
"""
Performs forward pass of the Graph Convolutional Network (GCN).
Args:
input_tensor (torch.Tensor): Input node feature matrix with shape (N, input_dim), where N is the number of nodes
and input_dim is the number of input features per node.
adj_mat (torch.Tensor): Adjacency matrix of the graph with shape (N, N), representing the relationships between
nodes.
Returns:
            torch.Tensor: Output tensor with shape (N, output_dim), representing the predicted log class probabilities for each node.
"""
# Perform the first graph convolutional layer
x = self.gc1(input_tensor, adj_mat)
x = F.relu(x) # Apply ReLU activation function
x = self.dropout(x) # Apply dropout regularization
# Perform the second graph convolutional layer
x = self.gc2(x, adj_mat)
# Apply log-softmax activation function for classification
return F.log_softmax(x, dim=1)
def load_cora(path='./cora', device='cpu'):
"""
    The graph convolutional operation requires normalizing the adjacency matrix: D^(-1/2) * A * D^(-1/2). This step
scales the adjacency matrix such that the features of neighboring nodes are weighted appropriately during
aggregation. The steps involved in the renormalization trick are as follows:
- Compute the degree matrix.
- Compute the inverse square root of the degree matrix.
- Multiply the inverse square root of the degree matrix with the adjacency matrix.
"""
# Set the paths to the data files
content_path = os.path.join(path, 'cora.content')
cites_path = os.path.join(path, 'cora.cites')
# Load data from files
content_tensor = np.genfromtxt(content_path, dtype=np.dtype(str))
cites_tensor = np.genfromtxt(cites_path, dtype=np.int32)
# Process features
features = torch.FloatTensor(content_tensor[:, 1:-1].astype(np.int32)) # Extract feature values
scale_vector = torch.sum(features, dim=1) # Compute sum of features for each node
scale_vector = 1 / scale_vector # Compute reciprocal of the sums
scale_vector[scale_vector == float('inf')] = 0 # Handle division by zero cases
scale_vector = torch.diag(scale_vector).to_sparse() # Convert the scale vector to a sparse diagonal matrix
features = scale_vector @ features # Scale the features using the scale vector
# Process labels
classes, labels = np.unique(content_tensor[:, -1], return_inverse=True) # Extract unique classes and map labels to indices
labels = torch.LongTensor(labels) # Convert labels to a tensor
# Process adjacency matrix
idx = content_tensor[:, 0].astype(np.int32) # Extract node indices
idx_map = {id: pos for pos, id in enumerate(idx)} # Create a dictionary to map indices to positions
# Map node indices to positions in the adjacency matrix
edges = np.array(
list(map(lambda edge: [idx_map[edge[0]], idx_map[edge[1]]],
cites_tensor)), dtype=np.int32)
V = len(idx) # Number of nodes
E = edges.shape[0] # Number of edges
adj_mat = torch.sparse_coo_tensor(edges.T, torch.ones(E), (V, V), dtype=torch.int64) # Create the initial adjacency matrix as a sparse tensor
adj_mat = torch.eye(V) + adj_mat # Add self-loops to the adjacency matrix
degree_mat = torch.sum(adj_mat, dim=1) # Compute the sum of each row in the adjacency matrix (degree matrix)
degree_mat = torch.sqrt(1 / degree_mat) # Compute the reciprocal square root of the degrees
degree_mat[degree_mat == float('inf')] = 0 # Handle division by zero cases
degree_mat = torch.diag(degree_mat).to_sparse() # Convert the degree matrix to a sparse diagonal matrix
adj_mat = degree_mat @ adj_mat @ degree_mat # Apply the renormalization trick
return features.to_sparse().to(device), labels.to(device), adj_mat.to_sparse().to(device)
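# A minimal sketch (not part of the original example) of the renormalization
# trick D^(-1/2) * A * D^(-1/2) on a tiny dense adjacency matrix; the helper
# name is illustrative only.
def _renormalization_demo():
    adj_mat = torch.eye(3) + torch.tensor([[0., 1., 0.],
                                           [1., 0., 1.],
                                           [0., 1., 0.]])  # A + I (self-loops)
    degree = adj_mat.sum(dim=1)                   # node degrees
    d_inv_sqrt = torch.diag(degree.pow(-0.5))     # D^(-1/2)
    return d_inv_sqrt @ adj_mat @ d_inv_sqrt      # normalized adjacency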
def train_iter(epoch, model, optimizer, criterion, input, target, mask_train, mask_val, print_every=10):
start_t = time.time()
model.train()
optimizer.zero_grad()
# Forward pass
output = model(*input)
loss = criterion(output[mask_train], target[mask_train]) # Compute the loss using the training mask
loss.backward()
optimizer.step()
# Evaluate the model performance on training and validation sets
loss_train, acc_train = test(model, criterion, input, target, mask_train)
loss_val, acc_val = test(model, criterion, input, target, mask_val)
if epoch % print_every == 0:
# Print the training progress at specified intervals
print(f'Epoch: {epoch:04d} ({(time.time() - start_t):.4f}s) loss_train: {loss_train:.4f} acc_train: {acc_train:.4f} loss_val: {loss_val:.4f} acc_val: {acc_val:.4f}')
def test(model, criterion, input, target, mask):
model.eval()
with torch.no_grad():
output = model(*input)
output, target = output[mask], target[mask]
loss = criterion(output, target)
acc = (output.argmax(dim=1) == target).float().sum() / len(target)
return loss.item(), acc.item()
if __name__ == '__main__':
device = 'cuda' if torch.cuda.is_available() else 'cpu'
parser = argparse.ArgumentParser(description='PyTorch Graph Convolutional Network')
parser.add_argument('--epochs', type=int, default=200,
help='number of epochs to train (default: 200)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--l2', type=float, default=5e-4,
help='weight decay (default: 5e-4)')
parser.add_argument('--dropout-p', type=float, default=0.5,
help='dropout probability (default: 0.5)')
parser.add_argument('--hidden-dim', type=int, default=16,
help='dimension of the hidden representation (default: 16)')
parser.add_argument('--val-every', type=int, default=20,
help='epochs to wait for print training and validation evaluation (default: 20)')
parser.add_argument('--include-bias', action='store_true', default=False,
help='use bias term in convolutions (default: False)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--no-mps', action='store_true', default=False,
help='disables macOS GPU training')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
use_mps = not args.no_mps and torch.backends.mps.is_available()
torch.manual_seed(args.seed)
if use_cuda:
device = torch.device('cuda')
elif use_mps:
device = torch.device('mps')
else:
device = torch.device('cpu')
print(f'Using {device} device')
cora_url = 'https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz'
print('Downloading dataset...')
with requests.get(cora_url, stream=True) as tgz_file:
with tarfile.open(fileobj=tgz_file.raw, mode='r:gz') as tgz_object:
tgz_object.extractall()
print('Loading dataset...')
features, labels, adj_mat = load_cora(device=device)
idx = torch.randperm(len(labels)).to(device)
idx_test, idx_val, idx_train = idx[:1000], idx[1000:1500], idx[1500:]
    gcn = GCN(features.shape[1], args.hidden_dim, labels.max().item() + 1, args.include_bias, args.dropout_p).to(device)
optimizer = Adam(gcn.parameters(), lr=args.lr, weight_decay=args.l2)
criterion = nn.NLLLoss()
for epoch in range(args.epochs):
train_iter(epoch + 1, gcn, optimizer, criterion, (features, adj_mat), labels, idx_train, idx_val, args.val_every)
if args.dry_run:
break
loss_test, acc_test = test(gcn, criterion, (features, adj_mat), labels, idx_test)
    print(f'Test set results: loss {loss_test:.4f} accuracy {acc_test:.4f}')
|
from __future__ import print_function
import argparse
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--no-mps', action='store_true', default=False,
help='disables macOS GPU training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
use_mps = not args.no_mps and torch.backends.mps.is_available()
torch.manual_seed(args.seed)
if args.cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=False, **kwargs)
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
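        # Reparameterization trick: write the sample as z = mu + std * eps with
        # eps ~ N(0, I), a deterministic function of (mu, logvar), so gradients
        # can flow back through the sampling step.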
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + eps*std
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
model = VAE().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
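# A minimal sketch (not part of the original script): sanity-check the analytic
# KLD above against torch.distributions for a diagonal Gaussian vs. N(0, I);
# the helper name is illustrative only.
def _kld_check_demo():
    import torch.distributions as dist
    mu, logvar = torch.randn(4, 20), torch.randn(4, 20)
    q = dist.Normal(mu, torch.exp(0.5 * logvar))
    p = dist.Normal(torch.zeros_like(mu), torch.ones_like(mu))
    analytic = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    assert torch.allclose(dist.kl_divergence(q, p).sum(), analytic, atol=1e-4)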
def train(epoch):
model.train()
train_loss = 0
for batch_idx, (data, _) in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
recon_batch, mu, logvar = model(data)
loss = loss_function(recon_batch, data, mu, logvar)
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item() / len(data)))
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(train_loader.dataset)))
def test(epoch):
model.eval()
test_loss = 0
with torch.no_grad():
for i, (data, _) in enumerate(test_loader):
data = data.to(device)
recon_batch, mu, logvar = model(data)
test_loss += loss_function(recon_batch, data, mu, logvar).item()
if i == 0:
n = min(data.size(0), 8)
comparison = torch.cat([data[:n],
recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
save_image(comparison.cpu(),
'results/reconstruction_' + str(epoch) + '.png', nrow=n)
test_loss /= len(test_loader.dataset)
print('====> Test set loss: {:.4f}'.format(test_loss))
if __name__ == "__main__":
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
with torch.no_grad():
sample = torch.randn(64, 20).to(device)
sample = model.decode(sample).cpu()
save_image(sample.view(64, 1, 28, 28),
'results/sample_' + str(epoch) + '.png')
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from botorch import settings
from botorch.logging import LOG_LEVEL_DEFAULT, logger, shape_to_str
from botorch.utils.testing import BotorchTestCase
class TestLogging(BotorchTestCase):
def test_logger(self):
# Verify log statements are properly captured
        # assertLogs() captures all log calls at or above the given severity level
with self.assertLogs(logger="botorch", level="INFO") as logs_cm:
logger.info("Hello World!")
logger.error("Goodbye Universe!")
self.assertEqual(
logs_cm.output,
["INFO:botorch:Hello World!", "ERROR:botorch:Goodbye Universe!"],
)
def test_settings_log_level(self):
# Verify the default level is applied
self.assertEqual(logger.level, LOG_LEVEL_DEFAULT)
        # Next, verify that the level is overwritten within the context manager
with settings.log_level(logging.INFO):
self.assertEqual(logger.level, logging.INFO)
# Finally, verify the original level is set again
self.assertEqual(logger.level, LOG_LEVEL_DEFAULT)
def test_shape_to_str(self):
self.assertEqual("``", shape_to_str(torch.Size([])))
self.assertEqual("`1`", shape_to_str(torch.Size([1])))
self.assertEqual("`1 x 2`", shape_to_str(torch.Size([1, 2])))
self.assertEqual("`1 x 2 x 3`", shape_to_str(torch.Size([1, 2, 3])))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import torch
from botorch.cross_validation import batch_cross_validation, gen_loo_cv_folds
from botorch.exceptions.warnings import OptimizationWarning
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.utils.testing import _get_random_data, BotorchTestCase
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
class TestFitBatchCrossValidation(BotorchTestCase):
def test_single_task_batch_cv(self):
n = 10
for batch_shape, m, dtype in itertools.product(
(torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, n=n, **tkwargs
)
if m == 1:
train_Y = train_Y.squeeze(-1)
train_Yvar = torch.full_like(train_Y, 0.01)
noiseless_cv_folds = gen_loo_cv_folds(train_X=train_X, train_Y=train_Y)
# check shapes
expected_shape_train_X = batch_shape + torch.Size(
[n, n - 1, train_X.shape[-1]]
)
expected_shape_test_X = batch_shape + torch.Size([n, 1, train_X.shape[-1]])
self.assertEqual(noiseless_cv_folds.train_X.shape, expected_shape_train_X)
self.assertEqual(noiseless_cv_folds.test_X.shape, expected_shape_test_X)
expected_shape_train_Y = batch_shape + torch.Size([n, n - 1, m])
expected_shape_test_Y = batch_shape + torch.Size([n, 1, m])
self.assertEqual(noiseless_cv_folds.train_Y.shape, expected_shape_train_Y)
self.assertEqual(noiseless_cv_folds.test_Y.shape, expected_shape_test_Y)
self.assertIsNone(noiseless_cv_folds.train_Yvar)
self.assertIsNone(noiseless_cv_folds.test_Yvar)
# Test SingleTaskGP
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
cv_results = batch_cross_validation(
model_cls=SingleTaskGP,
mll_cls=ExactMarginalLogLikelihood,
cv_folds=noiseless_cv_folds,
fit_args={"optimizer_kwargs": {"options": {"maxiter": 1}}},
)
expected_shape = batch_shape + torch.Size([n, 1, m])
self.assertEqual(cv_results.posterior.mean.shape, expected_shape)
self.assertEqual(cv_results.observed_Y.shape, expected_shape)
# Test FixedNoiseGP
noisy_cv_folds = gen_loo_cv_folds(
train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar
)
# check shapes
self.assertEqual(noisy_cv_folds.train_X.shape, expected_shape_train_X)
self.assertEqual(noisy_cv_folds.test_X.shape, expected_shape_test_X)
self.assertEqual(noisy_cv_folds.train_Y.shape, expected_shape_train_Y)
self.assertEqual(noisy_cv_folds.test_Y.shape, expected_shape_test_Y)
self.assertEqual(noisy_cv_folds.train_Yvar.shape, expected_shape_train_Y)
self.assertEqual(noisy_cv_folds.test_Yvar.shape, expected_shape_test_Y)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
cv_results = batch_cross_validation(
model_cls=FixedNoiseGP,
mll_cls=ExactMarginalLogLikelihood,
cv_folds=noisy_cv_folds,
fit_args={"optimizer_kwargs": {"options": {"maxiter": 1}}},
)
self.assertEqual(cv_results.posterior.mean.shape, expected_shape)
self.assertEqual(cv_results.observed_Y.shape, expected_shape)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from contextlib import ExitStack, nullcontext
from itertools import filterfalse, product
from typing import Callable, Iterable, Optional
from unittest.mock import MagicMock, patch
from warnings import catch_warnings, warn, WarningMessage
import torch
from botorch import fit
from botorch.exceptions.errors import ModelFittingError, UnsupportedError
from botorch.exceptions.warnings import OptimizationWarning
from botorch.models import SingleTaskGP, SingleTaskVariationalGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.optim.closures import get_loss_closure_with_grads
from botorch.optim.fit import fit_gpytorch_mll_scipy, fit_gpytorch_mll_torch
from botorch.optim.utils import get_data_loader
from botorch.settings import debug
from botorch.utils.context_managers import (
module_rollback_ctx,
requires_grad_ctx,
TensorCheckpoint,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel
from gpytorch.mlls import ExactMarginalLogLikelihood, VariationalELBO
from linear_operator.utils.errors import NotPSDError
MAX_ITER_MSG = "TOTAL NO. of ITERATIONS REACHED LIMIT"
class MockOptimizer:
def __init__(
self,
randomize_requires_grad: bool = True,
warnings: Iterable[WarningMessage] = (),
exception: Optional[BaseException] = None,
):
r"""Class used to mock `optimizer` argument to `fit_gpytorch_mll."""
self.randomize_requires_grad = randomize_requires_grad
self.warnings = warnings
self.exception = exception
self.call_count = 0
def __call__(self, mll, closure: Optional[Callable] = None):
self.call_count += 1
for w in self.warnings:
warn(str(w.message), w.category)
if self.randomize_requires_grad:
with torch.no_grad():
for param in mll.parameters():
if param.requires_grad:
param[...] = torch.rand_like(param)
if self.exception is not None:
raise self.exception
return mll, None
class TestFitAPI(BotorchTestCase):
r"""Unit tests for general fitting API"""
def setUp(self) -> None:
super().setUp()
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_F = torch.sin(2 * math.pi * train_X)
train_Y = train_F + 0.1 * torch.randn_like(train_F)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mll = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_mll(self):
# Test that `optimizer` is only passed when non-None
with patch.object(fit, "FitGPyTorchMLL") as mock_dispatcher:
fit.fit_gpytorch_mll(self.mll, optimizer=None)
mock_dispatcher.assert_called_once_with(
self.mll,
type(self.mll.likelihood),
type(self.mll.model),
closure=None,
closure_kwargs=None,
optimizer_kwargs=None,
)
fit.fit_gpytorch_mll(self.mll, optimizer="foo")
mock_dispatcher.assert_called_with(
self.mll,
type(self.mll.likelihood),
type(self.mll.model),
closure=None,
closure_kwargs=None,
optimizer="foo",
optimizer_kwargs=None,
)
    def test_fit_gpytorch_model(self):
r"""Test support for legacy API"""
# Test `option` argument
options = {"foo": 0}
with catch_warnings(), patch.object(
fit,
"fit_gpytorch_mll",
new=lambda mll, optimizer_kwargs=None, **kwargs: optimizer_kwargs,
):
self.assertEqual(
{"options": options, "bar": 1},
fit.fit_gpytorch_model(
self.mll,
options=options,
optimizer_kwargs={"bar": 1},
),
)
# Test `max_retries` argument
with catch_warnings(), patch.object(
fit,
"fit_gpytorch_mll",
new=lambda mll, max_attempts=None, **kwargs: max_attempts,
):
self.assertEqual(100, fit.fit_gpytorch_model(self.mll, max_retries=100))
# Test `exclude` argument
self.assertTrue(self.mll.model.mean_module.constant.requires_grad)
with catch_warnings(), patch.object(
fit,
"fit_gpytorch_mll",
new=lambda mll, **kwargs: mll.model.mean_module.constant.requires_grad,
):
self.assertFalse(
fit.fit_gpytorch_model(
self.mll,
options=options,
exclude=["model.mean_module.constant"],
)
)
self.assertTrue(self.mll.model.mean_module.constant.requires_grad)
# Test collisions
with catch_warnings(record=True) as ws, self.assertRaises(SyntaxError):
fit.fit_gpytorch_model(
self.mll,
options=options,
optimizer_kwargs={"options": {"bar": 1}},
)
self.assertTrue(any("marked for deprecation" in str(w.message) for w in ws))
# Test that ModelFittingErrors are rethrown as warnings
def mock_fit_gpytorch_mll(*args, **kwargs):
raise ModelFittingError("foo")
with catch_warnings(record=True) as ws, patch.object(
fit, "fit_gpytorch_mll", new=mock_fit_gpytorch_mll
):
fit.fit_gpytorch_model(self.mll)
self.assertTrue(any("foo" in str(w.message) for w in ws))
class TestFitFallback(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_F = torch.sin(2 * math.pi * train_X)
self.mlls = {}
self.checkpoints = {}
for model_type, output_dim in product([SingleTaskGP], [1, 2]):
train_Y = train_F.repeat(1, output_dim)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = model_type(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=output_dim),
**(
{}
if model_type is SingleTaskGP
else {"train_Yvar": torch.full_like(train_Y, 0.1)}
),
)
self.assertIsInstance(model.covar_module.base_kernel, MaternKernel)
model.covar_module.base_kernel.nu = 2.5
mll = ExactMarginalLogLikelihood(model.likelihood, model)
for dtype in (torch.float32, torch.float64):
key = model_type, output_dim
self.mlls[key] = mll.to(dtype=dtype)
self.checkpoints[key] = {
k: TensorCheckpoint(
values=v.detach().clone(), device=v.device, dtype=v.dtype
)
for k, v in mll.state_dict().items()
}
def test_main(self):
for case, mll in self.mlls.items():
self._test_main(mll, self.checkpoints[case])
def test_warnings(self):
for case, mll in self.mlls.items():
self._test_warnings(mll, self.checkpoints[case])
def test_exceptions(self):
for case, mll in self.mlls.items():
self._test_exceptions(mll, self.checkpoints[case])
def _test_main(self, mll, ckpt):
r"""Main test for `_fit_fallback`."""
optimizer = MockOptimizer()
optimizer.warnings = [
WarningMessage("test_runtime_warning", RuntimeWarning, __file__, 0),
]
for should_fail in (True, False):
optimizer.call_count = 0
with catch_warnings(), requires_grad_ctx(
module=mll, assignments={"model.mean_module.constant": False}
), module_rollback_ctx(mll, checkpoint=ckpt):
try:
fit._fit_fallback(
mll,
None,
None,
max_attempts=2,
optimizer=optimizer,
warning_handler=lambda w: not should_fail,
)
except ModelFittingError:
failed = True
else:
failed = False
# Test control flow
self.assertEqual(failed, should_fail)
self.assertEqual(optimizer.call_count, 2 if should_fail else 1)
# Test terminal state
self.assertEqual(failed, mll.training)
for key, vals in mll.state_dict().items():
if failed:
self.assertTrue(vals.equal(ckpt[key].values))
else:
try:
param = mll.get_parameter(key)
self.assertNotEqual(
param.equal(ckpt[key].values), param.requires_grad
)
except AttributeError:
pass
# Test `closure_kwargs`
with self.subTest("closure_kwargs"):
mock_closure = MagicMock(side_effect=StopIteration("foo"))
with self.assertRaisesRegex(StopIteration, "foo"):
fit._fit_fallback(
mll, None, None, closure=mock_closure, closure_kwargs={"ab": "cd"}
)
mock_closure.assert_called_once_with(ab="cd")
def _test_warnings(self, mll, ckpt):
r"""Test warning handling for `_fit_fallback`."""
optimizer = MockOptimizer(randomize_requires_grad=False)
optimizer.warnings = [
WarningMessage("test_runtime_warning", RuntimeWarning, __file__, 0),
WarningMessage(MAX_ITER_MSG, OptimizationWarning, __file__, 0),
WarningMessage(
"Optimization timed out after X", OptimizationWarning, __file__, 0
),
]
warning_handlers = {
"default": fit.DEFAULT_WARNING_HANDLER,
"none": lambda w: False,
"all": lambda w: True,
}
for case, warning_handler in warning_handlers.items():
with ExitStack() as es:
logs = es.enter_context(
self.assertLogs(level="DEBUG")
if case == "default"
else nullcontext()
)
ws = es.enter_context(catch_warnings(record=True))
es.enter_context(debug(True))
try:
fit._fit_fallback(
mll,
None,
None,
max_attempts=2,
optimizer=optimizer,
warning_handler=warning_handler,
)
except ModelFittingError:
failed = True
else:
failed = False
# Test that warnings were resolved in the expected fashion
self.assertEqual(failed, case == "none")
with catch_warnings(record=True) as rethrown:
unresolved = list(filterfalse(warning_handler, optimizer.warnings))
self.assertEqual(failed, len(unresolved) > 0)
self.assertEqual(
{str(w.message) for w in ws},
{str(w.message) for w in rethrown + unresolved},
)
if logs: # test that default filter logs certain warnings
self.assertTrue(any(MAX_ITER_MSG in log for log in logs.output))
# Test default of retrying upon encountering an uncaught OptimizationWarning
optimizer.warnings.append(
WarningMessage("test_optim_warning", OptimizationWarning, __file__, 0)
)
with self.assertRaises(ModelFittingError), catch_warnings():
fit._fit_fallback(
mll,
None,
None,
max_attempts=1,
optimizer=optimizer,
)
def _test_exceptions(self, mll, ckpt):
r"""Test exception handling for `_fit_fallback`."""
optimizer = MockOptimizer(exception=NotPSDError("not_psd"))
with catch_warnings():
# Test behavior when encountering a caught exception
with self.assertLogs(level="DEBUG") as logs, self.assertRaises(
ModelFittingError
):
fit._fit_fallback(
mll,
None,
None,
max_attempts=1,
optimizer=optimizer,
)
self.assertTrue(any("not_psd" in log for log in logs.output))
self.assertTrue( # test state rollback
all(v.equal(ckpt[k].values) for k, v in mll.state_dict().items())
)
# Test behavior when encountering an uncaught exception
with self.assertRaisesRegex(NotPSDError, "not_psd"):
fit._fit_fallback(
mll,
None,
None,
max_attempts=1,
optimizer=optimizer,
caught_exception_types=(),
)
self.assertTrue( # test state rollback
all(v.equal(ckpt[k].values) for k, v in mll.state_dict().items())
)
class TestFitFallbackApproximate(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_F = torch.sin(2 * math.pi * train_X)
train_Y = train_F + 0.1 * torch.randn_like(train_F)
model = SingleTaskVariationalGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mll = mll = VariationalELBO(model.likelihood, model.model, num_data=10)
self.data_loader = get_data_loader(mll.model, batch_size=1)
self.closure = get_loss_closure_with_grads(
mll=mll,
parameters={n: p for n, p in mll.named_parameters() if p.requires_grad},
data_loader=self.data_loader,
)
def test_main(self):
# Test parameter updates
with module_rollback_ctx(self.mll) as ckpt:
fit._fit_fallback_approximate(
self.mll,
None,
None,
closure=self.closure,
optimizer_kwargs={"step_limit": 3},
)
for name, param in self.mll.named_parameters():
self.assertFalse(param.equal(ckpt[name].values))
# Test dispatching pattern
kwargs = {"full_batch_limit": float("inf")}
with patch.object(fit, "_fit_fallback") as mock_fallback:
fit._fit_fallback_approximate(self.mll, None, None, full_batch_limit=1)
mock_fallback.assert_called_once_with(
self.mll,
None,
None,
closure=None,
optimizer=fit_gpytorch_mll_torch,
)
with patch.object(fit, "_fit_fallback") as mock_fallback:
fit._fit_fallback_approximate(self.mll, None, None, **kwargs)
mock_fallback.assert_called_once_with(
self.mll,
None,
None,
closure=None,
optimizer=fit_gpytorch_mll_scipy,
)
with patch.object(fit, "_fit_fallback") as mock_fallback:
fit._fit_fallback_approximate(
self.mll, None, None, closure=self.closure, **kwargs
)
mock_fallback.assert_called_once_with(
self.mll,
None,
None,
closure=self.closure,
optimizer=fit_gpytorch_mll_torch,
)
with patch.object(fit, "_fit_fallback") as mock_fallback, patch.object(
fit, "get_loss_closure_with_grads"
) as mock_get_closure:
mock_get_closure.return_value = "foo"
fit._fit_fallback_approximate(
self.mll,
None,
None,
data_loader=self.data_loader,
**kwargs,
)
params = {n: p for n, p in self.mll.named_parameters() if p.requires_grad}
mock_get_closure.assert_called_once_with(
mll=self.mll,
data_loader=self.data_loader,
parameters=params,
)
mock_fallback.assert_called_once_with(
self.mll,
None,
None,
closure="foo",
optimizer=fit_gpytorch_mll_torch,
)
# Test exception handling
with self.assertRaisesRegex(
UnsupportedError, "Only one of `data_loader` or `closure` may be passed."
):
fit._fit_fallback_approximate(
self.mll,
None,
None,
closure=self.closure,
data_loader=self.data_loader,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import warnings
import torch
from botorch.acquisition import ExpectedImprovement, qExpectedImprovement
from botorch.exceptions.warnings import OptimizationWarning
from botorch.fit import fit_gpytorch_mll
from botorch.models import FixedNoiseGP, SingleTaskGP
from botorch.optim import optimize_acqf
from botorch.utils.testing import BotorchTestCase
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
EPS = 1e-8
NOISE = [
[0.127],
[-0.113],
[-0.345],
[-0.034],
[-0.069],
[-0.272],
[0.013],
[0.056],
[0.087],
[-0.081],
]
class TestEndToEnd(BotorchTestCase):
def _setUp(self, double=False):
dtype = torch.double if double else torch.float
train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).view(-1, 1)
train_y = torch.sin(train_x * (2 * math.pi))
train_yvar = torch.tensor(0.1**2, device=self.device, dtype=dtype)
noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
self.train_x = train_x
self.train_y = train_y + noise
self.train_yvar = train_yvar
self.bounds = torch.tensor([[0.0], [1.0]], device=self.device, dtype=dtype)
model_st = SingleTaskGP(self.train_x, self.train_y)
self.model_st = model_st.to(device=self.device, dtype=dtype)
self.mll_st = ExactMarginalLogLikelihood(
self.model_st.likelihood, self.model_st
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
self.mll_st = fit_gpytorch_mll(
self.mll_st,
optimizer_kwargs={"options": {"maxiter": 5}},
max_attempts=1,
)
model_fn = FixedNoiseGP(
self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y)
)
self.model_fn = model_fn.to(device=self.device, dtype=dtype)
self.mll_fn = ExactMarginalLogLikelihood(
self.model_fn.likelihood, self.model_fn
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
self.mll_fn = fit_gpytorch_mll(
self.mll_fn,
optimizer_kwargs={"options": {"maxiter": 5}},
max_attempts=1,
)
def test_qEI(self):
for double in (True, False):
self._setUp(double=double)
qEI = qExpectedImprovement(self.model_st, best_f=0.0)
candidates, _ = optimize_acqf(
acq_function=qEI,
bounds=self.bounds,
q=3,
num_restarts=10,
raw_samples=20,
options={"maxiter": 5},
)
self.assertTrue(torch.all(-EPS <= candidates))
self.assertTrue(torch.all(candidates <= 1 + EPS))
qEI = qExpectedImprovement(self.model_fn, best_f=0.0)
candidates, _ = optimize_acqf(
acq_function=qEI,
bounds=self.bounds,
q=3,
num_restarts=10,
raw_samples=20,
options={"maxiter": 5},
)
self.assertTrue(torch.all(-EPS <= candidates))
self.assertTrue(torch.all(candidates <= 1 + EPS))
candidates_batch_limit, _ = optimize_acqf(
acq_function=qEI,
bounds=self.bounds,
q=3,
num_restarts=10,
raw_samples=20,
options={"maxiter": 5, "batch_limit": 5},
)
self.assertTrue(torch.all(-EPS <= candidates_batch_limit))
self.assertTrue(torch.all(candidates_batch_limit <= 1 + EPS))
def test_EI(self):
for double in (True, False):
self._setUp(double=double)
EI = ExpectedImprovement(self.model_st, best_f=0.0)
candidates, _ = optimize_acqf(
acq_function=EI,
bounds=self.bounds,
q=1,
num_restarts=10,
raw_samples=20,
options={"maxiter": 5},
)
self.assertTrue(-EPS <= candidates <= 1 + EPS)
EI = ExpectedImprovement(self.model_fn, best_f=0.0)
candidates, _ = optimize_acqf(
acq_function=EI,
bounds=self.bounds,
q=1,
num_restarts=10,
raw_samples=20,
options={"maxiter": 5},
)
self.assertTrue(-EPS <= candidates <= 1 + EPS)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Monolithic CUDA tests. This implements a single monolithic test for all
CUDA functionality. The main reason for doing this is that if individual tests
are run in separate processes, the overhead of initializing the GPU can vastly
outweigh the speedup from parallelization, and, in addition, this can lead
to the GPU running out of memory.
"""
import unittest
from itertools import chain
from pathlib import Path
from typing import Union
import torch
from botorch.utils.testing import BotorchTestCase
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
class TestBotorchCUDA(unittest.TestCase):
def test_cuda(self):
test_dir = Path(__file__).parent.resolve()
tests = unittest.TestLoader().discover(test_dir)
self.assertTrue(run_cuda_tests(tests))
def run_cuda_tests(tests: Union[unittest.TestCase, unittest.TestSuite]) -> bool:
"""Function for running all tests on cuda (except TestBotorchCUDA itself)"""
if isinstance(tests, BotorchTestCase):
tests.device = torch.device("cuda")
test_result = tests.run()
if test_result is None:
# some test runners may return None on skipped tests
return True
passed = test_result.wasSuccessful()
if not passed:
# print test name
print(f"test: {tests}")
for error in chain(test_result.errors, test_result.failures):
# print traceback
print(f"error: {error[1]}")
return passed
elif isinstance(tests, unittest.TestSuite):
return all(run_cuda_tests(tests_) for tests_ in tests)
elif (
isinstance(tests, unittest.TestCase)
and tests.id() == "test_cuda.TestBotorchCUDA.test_cuda"
):
# ignore TestBotorchCUDA
return True
elif isinstance(tests, unittest.loader._FailedTest):
# test failed to load, often import error
print(f"test: {tests}")
print(f"exception: {tests._exception}")
return False
else:
raise ValueError(f"Unexpected type for test: {tests}")
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import gpytorch.settings as gp_settings
import linear_operator.settings as linop_settings
from botorch import settings
from botorch.exceptions import BotorchWarning
from botorch.utils.testing import BotorchTestCase
class TestSettings(BotorchTestCase):
def test_flags(self):
for flag in (settings.debug, settings.propagate_grads):
self.assertFalse(flag.on())
self.assertTrue(flag.off())
with flag(True):
self.assertTrue(flag.on())
self.assertFalse(flag.off())
self.assertFalse(flag.on())
self.assertTrue(flag.off())
def test_debug(self):
# Turn on debug.
settings.debug._set_state(True)
# Check that debug warnings are suppressed when it is turned off.
with settings.debug(False):
with warnings.catch_warnings(record=True) as ws:
if settings.debug.on():
warnings.warn("test", BotorchWarning)
self.assertEqual(len(ws), 0)
# Check that warnings are not suppressed outside of context manager.
with warnings.catch_warnings(record=True) as ws:
if settings.debug.on():
warnings.warn("test", BotorchWarning)
self.assertEqual(len(ws), 1)
# Turn off debug.
settings.debug._set_state(False)
# Check that warnings are not suppressed within debug.
with settings.debug(True):
with warnings.catch_warnings(record=True) as ws:
if settings.debug.on():
warnings.warn("test", BotorchWarning)
self.assertEqual(len(ws), 1)
# Check that warnings are suppressed outside of context manager.
with warnings.catch_warnings(record=True) as ws:
if settings.debug.on():
warnings.warn("test", BotorchWarning)
self.assertEqual(len(ws), 0)
class TestDefaultGPyTorchLinOpSettings(BotorchTestCase):
def test_default_gpytorch_linop_settings(self):
self.assertTrue(linop_settings._fast_covar_root_decomposition.off())
self.assertTrue(linop_settings._fast_log_prob.off())
self.assertTrue(linop_settings._fast_solves.off())
self.assertEqual(linop_settings.cholesky_max_tries.value(), 6)
self.assertEqual(linop_settings.max_cholesky_size.value(), 4096)
self.assertEqual(gp_settings.max_eager_kernel_size.value(), 4096)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.test_functions.multi_fidelity import (
AugmentedBranin,
AugmentedHartmann,
AugmentedRosenbrock,
)
from botorch.utils.testing import (
BaseTestProblemTestCaseMixIn,
BotorchTestCase,
SyntheticTestFunctionTestCaseMixin,
)
class TestAugmentedBranin(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
AugmentedBranin(),
AugmentedBranin(negate=True),
AugmentedBranin(noise_std=0.1),
]
class TestAugmentedHartmann(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
AugmentedHartmann(),
AugmentedHartmann(negate=True),
AugmentedHartmann(noise_std=0.1),
]
class TestAugmentedRosenbrock(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
AugmentedRosenbrock(),
AugmentedRosenbrock(negate=True),
AugmentedRosenbrock(noise_std=0.1),
AugmentedRosenbrock(dim=4),
AugmentedRosenbrock(dim=4, negate=True),
AugmentedRosenbrock(dim=4, noise_std=0.1),
]
def test_min_dimension(self):
with self.assertRaises(ValueError):
AugmentedRosenbrock(dim=2)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.test_functions.multi_objective_multi_fidelity import (
MOMFBraninCurrin,
MOMFPark,
)
from botorch.utils.testing import (
BaseTestProblemTestCaseMixIn,
BotorchTestCase,
MultiObjectiveTestProblemTestCaseMixin,
)
class TestMOMFBraninCurrin(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
functions = [MOMFBraninCurrin()]
bounds = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
def test_init(self):
for f in self.functions:
self.assertEqual(f.num_objectives, 2)
self.assertEqual(f.dim, 3)
self.assertTrue(
torch.equal(f.bounds, torch.tensor(self.bounds).to(f.bounds))
)
class TestMOMFPark(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
functions = [MOMFPark()]
bounds = [[0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0]]
def test_init(self):
for f in self.functions:
self.assertEqual(f.num_objectives, 2)
self.assertEqual(f.dim, 5)
self.assertTrue(
torch.equal(f.bounds, torch.tensor(self.bounds).to(f.bounds))
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions.errors import InputDataError
from botorch.test_functions.synthetic import (
Ackley,
Beale,
Branin,
Bukin,
Cosine8,
DixonPrice,
DropWave,
EggHolder,
Griewank,
Hartmann,
HolderTable,
Levy,
Michalewicz,
Powell,
PressureVessel,
Rastrigin,
Rosenbrock,
Shekel,
SixHumpCamel,
SpeedReducer,
StyblinskiTang,
SyntheticTestFunction,
TensionCompressionString,
ThreeHumpCamel,
WeldedBeamSO,
)
from botorch.utils.testing import (
BaseTestProblemTestCaseMixIn,
BotorchTestCase,
ConstrainedTestProblemTestCaseMixin,
SyntheticTestFunctionTestCaseMixin,
)
from torch import Tensor
class DummySyntheticTestFunction(SyntheticTestFunction):
dim = 2
_bounds = [(-1, 1), (-1, 1)]
_optimal_value = 0
def evaluate_true(self, X: Tensor) -> Tensor:
return -X.pow(2).sum(dim=-1)
class DummySyntheticTestFunctionWithOptimizers(DummySyntheticTestFunction):
_optimizers = [(0, 0)]
class TestCustomBounds(BotorchTestCase):
functions_with_custom_bounds = [ # Function name and the default dimension.
(Ackley, 2),
(Beale, 2),
(Branin, 2),
(Bukin, 2),
(Cosine8, 8),
(DropWave, 2),
(DixonPrice, 2),
(EggHolder, 2),
(Griewank, 2),
(Hartmann, 6),
(HolderTable, 2),
(Levy, 2),
(Michalewicz, 2),
(Powell, 4),
(Rastrigin, 2),
(Rosenbrock, 2),
(Shekel, 4),
(SixHumpCamel, 2),
(StyblinskiTang, 2),
(ThreeHumpCamel, 2),
]
def test_custom_bounds(self):
with self.assertRaisesRegex(
InputDataError,
"Expected the bounds to match the dimensionality of the domain. ",
):
DummySyntheticTestFunctionWithOptimizers(bounds=[(0, 0)])
with self.assertRaisesRegex(
ValueError, "No global optimum found within custom bounds"
):
DummySyntheticTestFunctionWithOptimizers(bounds=[(1, 2), (3, 4)])
dummy = DummySyntheticTestFunctionWithOptimizers(bounds=[(-2, 2), (-3, 3)])
self.assertEqual(dummy._bounds[0], (-2, 2))
self.assertEqual(dummy._bounds[1], (-3, 3))
self.assertAllClose(
dummy.bounds, torch.tensor([[-2, -3], [2, 3]], dtype=torch.double)
)
# Test each function with custom bounds.
for func_class, dim in self.functions_with_custom_bounds:
bounds = [(-1e5, 1e5) for _ in range(dim)]
bounds_tensor = torch.tensor(bounds, dtype=torch.double).T
func = func_class(bounds=bounds)
self.assertEqual(func._bounds, bounds)
self.assertAllClose(func.bounds, bounds_tensor)
class TestAckley(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [Ackley(), Ackley(negate=True), Ackley(noise_std=0.1), Ackley(dim=3)]
class TestBeale(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [Beale(), Beale(negate=True), Beale(noise_std=0.1)]
class TestBranin(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [Branin(), Branin(negate=True), Branin(noise_std=0.1)]
class TestBukin(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [Bukin(), Bukin(negate=True), Bukin(noise_std=0.1)]
class TestCosine8(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [Cosine8(), Cosine8(negate=True), Cosine8(noise_std=0.1)]
class TestDropWave(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [DropWave(), DropWave(negate=True), DropWave(noise_std=0.1)]
class TestDixonPrice(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
DixonPrice(),
DixonPrice(negate=True),
DixonPrice(noise_std=0.1),
DixonPrice(dim=3),
]
class TestEggHolder(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [EggHolder(), EggHolder(negate=True), EggHolder(noise_std=0.1)]
class TestGriewank(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
Griewank(),
Griewank(negate=True),
Griewank(noise_std=0.1),
Griewank(dim=4),
]
class TestHartmann(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
Hartmann(),
Hartmann(negate=True),
Hartmann(noise_std=0.1),
Hartmann(dim=3),
Hartmann(dim=3, negate=True),
Hartmann(dim=3, noise_std=0.1),
Hartmann(dim=4),
Hartmann(dim=4, negate=True),
Hartmann(dim=4, noise_std=0.1),
]
def test_dimension(self):
with self.assertRaises(ValueError):
Hartmann(dim=2)
class TestHolderTable(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [HolderTable(), HolderTable(negate=True), HolderTable(noise_std=0.1)]
class TestLevy(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
Levy(),
Levy(negate=True),
Levy(noise_std=0.1),
Levy(dim=3),
Levy(dim=3, negate=True),
Levy(dim=3, noise_std=0.1),
]
class TestMichalewicz(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
Michalewicz(),
Michalewicz(negate=True),
Michalewicz(noise_std=0.1),
Michalewicz(dim=5),
Michalewicz(dim=5, negate=True),
Michalewicz(dim=5, noise_std=0.1),
Michalewicz(dim=10),
Michalewicz(dim=10, negate=True),
Michalewicz(dim=10, noise_std=0.1),
]
class TestPowell(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [Powell(), Powell(negate=True), Powell(noise_std=0.1)]
class TestRastrigin(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
Rastrigin(),
Rastrigin(negate=True),
Rastrigin(noise_std=0.1),
Rastrigin(dim=3),
Rastrigin(dim=3, negate=True),
Rastrigin(dim=3, noise_std=0.1),
]
class TestRosenbrock(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
Rosenbrock(),
Rosenbrock(negate=True),
Rosenbrock(noise_std=0.1),
Rosenbrock(dim=3),
Rosenbrock(dim=3, negate=True),
Rosenbrock(dim=3, noise_std=0.1),
]
class TestShekel(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [Shekel(), Shekel(negate=True), Shekel(noise_std=0.1)]
class TestSixHumpCamel(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [SixHumpCamel(), SixHumpCamel(negate=True), SixHumpCamel(noise_std=0.1)]
class TestStyblinskiTang(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
StyblinskiTang(),
StyblinskiTang(negate=True),
StyblinskiTang(noise_std=0.1),
StyblinskiTang(dim=3),
StyblinskiTang(dim=3, negate=True),
StyblinskiTang(dim=3, noise_std=0.1),
]
class TestThreeHumpCamel(
BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin
):
functions = [
ThreeHumpCamel(),
ThreeHumpCamel(negate=True),
ThreeHumpCamel(noise_std=0.1),
]
# ------------------ Constrained synthetic test problems ------------------ #
class TestPressureVessel(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
ConstrainedTestProblemTestCaseMixin,
):
functions = [PressureVessel()]
class TestSpeedReducer(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
ConstrainedTestProblemTestCaseMixin,
):
functions = [SpeedReducer()]
class TestTensionCompressionString(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
ConstrainedTestProblemTestCaseMixin,
):
functions = [TensionCompressionString()]
class TestWeldedBeamSO(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
ConstrainedTestProblemTestCaseMixin,
):
functions = [WeldedBeamSO()]
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.test_functions.base import BaseTestProblem
from botorch.test_functions.multi_objective import (
BNH,
BraninCurrin,
C2DTLZ2,
CarSideImpact,
CONSTR,
ConstrainedBraninCurrin,
DH1,
DH2,
DH3,
DH4,
DiscBrake,
DTLZ1,
DTLZ2,
DTLZ3,
DTLZ4,
DTLZ5,
DTLZ7,
GMM,
MultiObjectiveTestProblem,
MW7,
OSY,
Penicillin,
SRN,
ToyRobust,
VehicleSafety,
WeldedBeam,
ZDT1,
ZDT2,
ZDT3,
)
from botorch.utils.testing import (
BaseTestProblemTestCaseMixIn,
BotorchTestCase,
ConstrainedTestProblemTestCaseMixin,
MultiObjectiveTestProblemTestCaseMixin,
)
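# Minimal two-objective problem used to exercise the MultiObjectiveTestProblem
# base class below.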
class DummyMOProblem(MultiObjectiveTestProblem):
_ref_point = [0.0, 0.0]
_num_objectives = 2
_bounds = [(0.0, 1.0)] * 2
dim = 2
def evaluate_true(self, X):
f_X = X + 2
return -f_X if self.negate else f_X
class TestBaseTestMultiObjectiveProblem(BotorchTestCase):
def test_base_mo_problem(self):
for negate in (True, False):
for noise_std in (None, 1.0):
f = DummyMOProblem(noise_std=noise_std, negate=negate)
self.assertEqual(f.noise_std, noise_std)
self.assertEqual(f.negate, negate)
for dtype in (torch.float, torch.double):
f.to(dtype=dtype, device=self.device)
X = torch.rand(3, 2, dtype=dtype, device=self.device)
f_X = f.evaluate_true(X)
expected_f_X = -(X + 2) if negate else X + 2
self.assertTrue(torch.equal(f_X, expected_f_X))
with self.assertRaises(NotImplementedError):
f.gen_pareto_front(1)
class TestBraninCurrin(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [BraninCurrin()]
def test_init(self):
for f in self.functions:
self.assertEqual(f.num_objectives, 2)
self.assertEqual(f.dim, 2)
class TestDH(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
def setUp(self, suppress_input_warnings: bool = True) -> None:
super().setUp(suppress_input_warnings=suppress_input_warnings)
self.dims = [2, 3, 4, 5]
self.bounds = [
[[0.0, -1], [1, 1]],
[[0.0, -1, -1], [1, 1, 1]],
[[0.0, 0, -1, -1], [1, 1, 1, 1]],
[[0.0, -0.15, -1, -1, -1], [1, 1, 1, 1, 1]],
]
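        # expected objective values at the two test points X = 0 and X = 1,
        # checked per problem in test_function_values below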
self.expected = [
[[0.0, 1.0], [1.0, 1.0 / 1.2 + 1.0]],
[[0.0, 1.0], [1.0, 2.0 / 1.2 + 20.0]],
[[0.0, 1.88731], [1.0, 1.9990726 * 100]],
[[0.0, 1.88731], [1.0, 150.0]],
]
@property
def functions(self) -> List[BaseTestProblem]:
return [DH1(dim=2), DH2(dim=3), DH3(dim=4), DH4(dim=5)]
def test_init(self):
for i, f in enumerate(self.functions):
with self.assertRaises(ValueError):
f.__class__(dim=1)
self.assertEqual(f.num_objectives, 2)
self.assertEqual(f.dim, self.dims[i])
self.assertTrue(
torch.equal(
f.bounds,
torch.tensor(
self.bounds[i], dtype=f.bounds.dtype, device=f.bounds.device
),
)
)
def test_function_values(self):
for i, f in enumerate(self.functions):
test_X = torch.zeros(2, self.dims[i], device=self.device)
test_X[1] = 1.0
actual = f(test_X)
expected = torch.tensor(self.expected[i], device=self.device)
self.assertAllClose(actual, expected)
class TestDTLZ(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [
DTLZ1(dim=5, num_objectives=2),
DTLZ2(dim=5, num_objectives=2),
DTLZ3(dim=5, num_objectives=2),
DTLZ4(dim=5, num_objectives=2),
DTLZ5(dim=5, num_objectives=2),
DTLZ7(dim=5, num_objectives=2),
]
def test_init(self):
for f in self.functions:
with self.assertRaises(ValueError):
f.__class__(dim=1, num_objectives=2)
self.assertEqual(f.num_objectives, 2)
self.assertEqual(f.dim, 5)
self.assertEqual(f.k, 4)
def test_gen_pareto_front(self):
for dtype in (torch.float, torch.double):
for f in self.functions:
for negate in (True, False):
f.negate = negate
f = f.to(dtype=dtype, device=self.device)
if isinstance(f, (DTLZ5, DTLZ7)):
with self.assertRaises(NotImplementedError):
f.gen_pareto_front(n=1)
else:
pareto_f = f.gen_pareto_front(n=10)
if negate:
pareto_f *= -1
self.assertEqual(pareto_f.dtype, dtype)
self.assertEqual(pareto_f.device.type, self.device.type)
self.assertTrue((pareto_f > 0).all())
if isinstance(f, DTLZ1):
                            # assert the points lie on the hyperplane sum_i f_i(x) = 0.5
self.assertTrue(
torch.allclose(
pareto_f.sum(dim=-1),
torch.full(
pareto_f.shape[0:1],
0.5,
dtype=dtype,
device=self.device,
),
)
)
elif isinstance(f, (DTLZ2, DTLZ3, DTLZ4)):
# assert the points lie on the surface
# of the unit hypersphere
self.assertTrue(
torch.allclose(
pareto_f.pow(2).sum(dim=-1),
torch.ones(
pareto_f.shape[0],
dtype=dtype,
device=self.device,
),
)
)
class TestGMM(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [GMM(num_objectives=4)]
def test_init(self):
f = self.functions[0]
with self.assertRaises(UnsupportedError):
f.__class__(num_objectives=5)
self.assertEqual(f.num_objectives, 4)
self.assertEqual(f.dim, 2)
def test_result(self):
x = torch.tensor(
[
[[0.0342, 0.8055], [0.7844, 0.4831]],
[[0.5236, 0.3158], [0.0992, 0.9873]],
[[0.4693, 0.5792], [0.5357, 0.9451]],
],
device=self.device,
)
expected_f_x = -torch.tensor(
[
[
[3.6357e-03, 5.9030e-03, 5.8958e-03, 1.0309e-04],
[1.6304e-02, 3.1430e-04, 4.7323e-04, 2.0691e-04],
],
[
[1.2251e-01, 3.2309e-02, 3.7199e-02, 5.4211e-03],
[1.9378e-04, 1.5290e-03, 3.5051e-04, 3.6924e-07],
],
[
[3.5550e-01, 5.9409e-02, 1.7352e-01, 8.5574e-02],
[3.2686e-02, 9.7298e-02, 7.2311e-02, 1.5613e-03],
],
],
device=self.device,
)
f = self.functions[0]
f.to(device=self.device)
for dtype in (torch.float, torch.double):
f.to(dtype=dtype)
f_x = f(x.to(dtype=dtype))
self.assertTrue(
torch.allclose(f_x, expected_f_x.to(dtype=dtype), rtol=1e-4, atol=1e-4)
)
class TestMW7(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [MW7(dim=3)]
def test_init(self):
for f in self.functions:
with self.assertRaises(ValueError):
f.__class__(dim=1)
self.assertEqual(f.num_objectives, 2)
self.assertEqual(f.dim, 3)
class TestZDT(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [
ZDT1(dim=3, num_objectives=2),
ZDT2(dim=3, num_objectives=2),
ZDT3(dim=3, num_objectives=2),
]
def test_init(self):
for f in self.functions:
with self.assertRaises(NotImplementedError):
f.__class__(dim=3, num_objectives=3)
with self.assertRaises(NotImplementedError):
f.__class__(dim=3, num_objectives=1)
with self.assertRaises(ValueError):
f.__class__(dim=1, num_objectives=2)
self.assertEqual(f.num_objectives, 2)
self.assertEqual(f.dim, 3)
def test_gen_pareto_front(self):
for dtype in (torch.float, torch.double):
for f in self.functions:
for negate in (True, False):
f.negate = negate
f = f.to(dtype=dtype, device=self.device)
pareto_f = f.gen_pareto_front(n=11)
if negate:
pareto_f *= -1
self.assertEqual(pareto_f.dtype, dtype)
self.assertEqual(pareto_f.device.type, self.device.type)
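                    # analytic Pareto fronts: ZDT1 has f_2 = 1 - sqrt(f_1),
                    # ZDT2 has f_2 = 1 - f_1^2, and ZDT3 is disconnected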
if isinstance(f, ZDT1):
self.assertTrue(
torch.equal(pareto_f[:, 1], 1 - pareto_f[:, 0].sqrt())
)
elif isinstance(f, ZDT2):
self.assertTrue(
torch.equal(pareto_f[:, 1], 1 - pareto_f[:, 0].pow(2))
)
elif isinstance(f, ZDT3):
f_0 = pareto_f[:, 0]
f_1 = pareto_f[:, 1]
                        # check that f_0 lies in the expected discontinuous
                        # parts of the Pareto front
self.assertTrue(
(
(f_0[:3] >= f._parts[0][0])
& (f_0[:3] <= f._parts[0][1])
).all()
)
for i in range(0, 4):
f_0_i = f_0[3 + 2 * i : 3 + 2 * (i + 1)]
comparison = f_0_i > torch.tensor(
f._parts[i + 1], dtype=dtype, device=self.device
)
self.assertTrue((comparison[..., 0]).all())
self.assertTrue((~comparison[..., 1]).all())
self.assertTrue(
((comparison[..., 0]) & (~comparison[..., 1])).all()
)
# check f_1
self.assertTrue(
torch.equal(
f_1,
1 - f_0.sqrt() - f_0 * torch.sin(10 * math.pi * f_0),
)
)
# ------------------ Unconstrained Multi-objective test problems ------------------ #
class TestCarSideImpact(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [CarSideImpact()]
class TestPenicillin(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [Penicillin()]
class TestToyRobust(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [ToyRobust()]
class TestVehicleSafety(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [VehicleSafety()]
# ------------------ Constrained Multi-objective test problems ------------------ #
class TestBNH(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [BNH()]
class TestSRN(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [SRN()]
class TestCONSTR(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [CONSTR()]
class TestConstrainedBraninCurrin(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [ConstrainedBraninCurrin()]
class TestC2DTLZ2(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [C2DTLZ2(dim=3, num_objectives=2)]
def test_batch_exception(self):
f = C2DTLZ2(dim=3, num_objectives=2)
with self.assertRaises(NotImplementedError):
f.evaluate_slack_true(torch.empty(1, 1, 3))
class TestDiscBrake(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [DiscBrake()]
class TestWeldedBeam(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [WeldedBeam()]
class TestOSY(
BotorchTestCase,
BaseTestProblemTestCaseMixIn,
MultiObjectiveTestProblemTestCaseMixin,
ConstrainedTestProblemTestCaseMixin,
):
@property
def functions(self) -> List[BaseTestProblem]:
return [OSY()]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.test_functions.sensitivity_analysis import Gsobol, Ishigami, Morris
from botorch.utils.testing import BotorchTestCase
class TestIshigami(BotorchTestCase):
def testFunction(self):
with self.assertRaises(ValueError):
Ishigami(b=0.33)
f = Ishigami(b=0.1)
self.assertEqual(f.b, 0.1)
f = Ishigami(b=0.05)
self.assertEqual(f.b, 0.05)
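        # only b = 0.1 and b = 0.05 are accepted (hence the ValueError for b=0.33)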
X = torch.tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
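        # derivative-based global sensitivity measures; one value per input dimension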
m1, m2, m3 = f.compute_dgsm(X)
for m in [m1, m2, m3]:
self.assertEqual(len(m), 3)
Z = f.evaluate_true(X)
Ztrue = torch.tensor([5.8401, 7.4245])
self.assertAllClose(Z, Ztrue, atol=1e-3)
self.assertIsNone(f._optimizers)
with self.assertRaises(NotImplementedError):
f.optimal_value
class TestGsobol(BotorchTestCase):
def testFunction(self):
for dim in [6, 8, 15]:
f = Gsobol(dim=dim)
self.assertIsNotNone(f.a)
self.assertEqual(len(f.a), dim)
f = Gsobol(dim=3, a=[1, 2, 3])
self.assertEqual(f.a, [1, 2, 3])
X = torch.tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
Z = f.evaluate_true(X)
Ztrue = torch.tensor([2.5, 21.0])
self.assertAllClose(Z, Ztrue, atol=1e-3)
self.assertIsNone(f._optimizers)
with self.assertRaises(NotImplementedError):
f.optimal_value
class TestMorris(BotorchTestCase):
def testFunction(self):
f = Morris()
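        # Morris is 20-dimensional; evaluate at the all-zeros and all-ones corners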
X = torch.stack((torch.zeros(20), torch.ones(20)))
Z = f.evaluate_true(X)
Ztrue = torch.tensor([5163.0, -8137.0])
self.assertAllClose(Z, Ztrue, atol=1e-3)
self.assertIsNone(f._optimizers)
with self.assertRaises(NotImplementedError):
f.optimal_value
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.test_functions.base import BaseTestProblem, ConstrainedBaseTestProblem
from botorch.utils.testing import BotorchTestCase
from torch import Tensor
class DummyTestProblem(BaseTestProblem):
dim = 2
_bounds = [(0, 1), (2, 3)]
def evaluate_true(self, X: Tensor) -> Tensor:
return -X.pow(2).sum(dim=-1)
class DummyConstrainedTestProblem(DummyTestProblem, ConstrainedBaseTestProblem):
num_constraints = 1
def evaluate_slack_true(self, X: Tensor) -> Tensor:
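        # positive slack means feasible: here feasibility requires x_1 + x_2 <= 0.25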
return 0.25 - X.sum(dim=-1, keepdim=True)
class TestBaseTestProblems(BotorchTestCase):
def test_base_test_problem(self):
for dtype in (torch.float, torch.double):
problem = DummyTestProblem()
self.assertIsNone(problem.noise_std)
self.assertFalse(problem.negate)
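            # `bounds` is a 2 x d tensor: row 0 holds lower, row 1 upper bounds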
bnds_expected = torch.tensor([(0, 2), (1, 3)], dtype=torch.float)
self.assertTrue(torch.equal(problem.bounds, bnds_expected))
problem = problem.to(device=self.device, dtype=dtype)
bnds_expected = bnds_expected.to(device=self.device, dtype=dtype)
self.assertTrue(torch.equal(problem.bounds, bnds_expected))
X = torch.rand(2, 2, device=self.device, dtype=dtype)
Y = problem(X)
self.assertAllClose(Y, -X.pow(2).sum(dim=-1))
problem = DummyTestProblem(negate=True, noise_std=0.1)
self.assertEqual(problem.noise_std, 0.1)
self.assertTrue(problem.negate)
def test_constrained_base_test_problem(self):
for dtype in (torch.float, torch.double):
problem = DummyConstrainedTestProblem().to(device=self.device, dtype=dtype)
X = torch.tensor([[0.4, 0.6], [0.1, 0.1]])
feas = problem.is_feasible(X=X)
self.assertFalse(feas[0].item())
self.assertTrue(feas[1].item())
problem = DummyConstrainedTestProblem(noise_std=0.0).to(
device=self.device, dtype=dtype
)
feas = problem.is_feasible(X=X)
self.assertFalse(feas[0].item())
self.assertTrue(feas[1].item())
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from botorch.acquisition import LinearMCObjective, ScalarizedPosteriorTransform
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.acquisition.proximal import ProximalAcquisitionFunction
from botorch.exceptions.errors import UnsupportedError
from botorch.models import ModelListGP, SingleTaskGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import Model
from botorch.models.transforms.input import Normalize
from botorch.utils.testing import BotorchTestCase
from torch.distributions.multivariate_normal import MultivariateNormal
class DummyModel(GPyTorchModel):
num_outputs = 1
def __init__(self): # noqa: D107
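        # intentionally bypass GPyTorchModel.__init__ so this model has no
        # train_inputs (used to trigger UnsupportedError below)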
super(GPyTorchModel, self).__init__()
def subset_output(self, idcs: List[int]) -> Model:
pass
class DummyAcquisitionFunction(AcquisitionFunction):
def forward(self, X):
pass
class NegativeAcquisitionFunction(AcquisitionFunction):
def forward(self, X):
return torch.ones(*X.shape[:-1]) * -1.0
class TestProximalAcquisitionFunction(BotorchTestCase):
def test_proximal(self):
for dtype in (torch.float, torch.double):
# test single point evaluation with and without input transform
normalize = Normalize(
3, bounds=torch.tensor(((0.0, 0.0, 0.0), (2.0, 2.0, 2.0)))
)
for input_transform, x_scale in [(None, 1), (normalize, 2)]:
train_X = torch.rand(5, 3, device=self.device, dtype=dtype) * x_scale
train_Y = train_X.norm(dim=-1, keepdim=True)
# test with and without transformed weights
for transformed_weighting in [True, False]:
# test with single outcome model
model = SingleTaskGP(
train_X, train_Y, input_transform=input_transform
)
model = model.to(device=self.device, dtype=dtype).eval()
EI = ExpectedImprovement(model, best_f=0.0)
proximal_weights = torch.ones(3, device=self.device, dtype=dtype)
last_X = train_X[-1]
test_X = torch.rand(1, 3, device=self.device, dtype=dtype)
EI_prox = ProximalAcquisitionFunction(
EI,
proximal_weights=proximal_weights,
transformed_weighting=transformed_weighting,
)
                    # raw (untransformed) value of the acquisition function
ei = EI(test_X)
# modify last_X/test_X depending on transformed_weighting
proximal_test_X = test_X.clone()
if transformed_weighting:
if input_transform is not None:
last_X = input_transform(train_X[-1])
proximal_test_X = input_transform(test_X)
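                    # the proximal weight is a Gaussian density ratio centered at
                    # the most recent training point:
                    # exp(log N(x; x_last, diag(w)) - log N(x_last; x_last, diag(w)))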
mv_normal = MultivariateNormal(last_X, torch.diag(proximal_weights))
test_prox_weight = torch.exp(
mv_normal.log_prob(proximal_test_X)
) / torch.exp(mv_normal.log_prob(last_X))
ei_prox = EI_prox(test_X)
self.assertAllClose(ei_prox, ei * test_prox_weight)
self.assertEqual(ei_prox.shape, torch.Size([1]))
# test with beta specified
EI_prox_beta = ProximalAcquisitionFunction(
EI,
proximal_weights=proximal_weights,
transformed_weighting=transformed_weighting,
beta=1.0,
)
# SoftPlus transformed value of the acquisition function
ei = torch.nn.functional.softplus(EI(test_X), beta=1.0)
# modify last_X/test_X depending on transformed_weighting
proximal_test_X = test_X.clone()
if transformed_weighting:
if input_transform is not None:
last_X = input_transform(train_X[-1])
proximal_test_X = input_transform(test_X)
mv_normal = MultivariateNormal(last_X, torch.diag(proximal_weights))
test_prox_weight = torch.exp(
mv_normal.log_prob(proximal_test_X) - mv_normal.log_prob(last_X)
)
ei_prox_beta = EI_prox_beta(test_X)
self.assertAllClose(ei_prox_beta, ei * test_prox_weight)
self.assertEqual(ei_prox_beta.shape, torch.Size([1]))
# test t-batch with broadcasting
test_X = torch.rand(4, 1, 3, device=self.device, dtype=dtype)
proximal_test_X = test_X.clone()
if transformed_weighting:
if input_transform is not None:
last_X = input_transform(train_X[-1])
proximal_test_X = input_transform(test_X)
ei = EI(test_X)
mv_normal = MultivariateNormal(last_X, torch.diag(proximal_weights))
test_prox_weight = torch.exp(
mv_normal.log_prob(proximal_test_X)
) / torch.exp(mv_normal.log_prob(last_X))
ei_prox = EI_prox(test_X)
self.assertTrue(
torch.allclose(ei_prox, ei * test_prox_weight.flatten())
)
self.assertEqual(ei_prox.shape, torch.Size([4]))
# test q-based MC acquisition function
qEI = qExpectedImprovement(model, best_f=0.0)
test_X = torch.rand(4, 1, 3, device=self.device, dtype=dtype)
proximal_test_X = test_X.clone()
if transformed_weighting:
if input_transform is not None:
last_X = input_transform(train_X[-1])
proximal_test_X = input_transform(test_X)
qEI_prox = ProximalAcquisitionFunction(
qEI,
proximal_weights=proximal_weights,
transformed_weighting=transformed_weighting,
)
qei = qEI(test_X)
mv_normal = MultivariateNormal(last_X, torch.diag(proximal_weights))
test_prox_weight = torch.exp(
mv_normal.log_prob(proximal_test_X)
) / torch.exp(mv_normal.log_prob(last_X))
qei_prox = qEI_prox(test_X)
self.assertTrue(
torch.allclose(qei_prox, qei * test_prox_weight.flatten())
)
self.assertEqual(qei_prox.shape, torch.Size([4]))
# test acquisition function with
# negative values w/o SoftPlus transform specified
negative_acqf = NegativeAcquisitionFunction(model)
bad_neg_prox = ProximalAcquisitionFunction(
negative_acqf, proximal_weights=proximal_weights
)
with self.assertRaisesRegex(
RuntimeError, "Cannot use proximal biasing for negative"
):
bad_neg_prox(test_X)
# test gradient
test_X = torch.rand(
1, 3, device=self.device, dtype=dtype, requires_grad=True
)
ei_prox = EI_prox(test_X)
ei_prox.backward()
# test model without train_inputs
bad_model = DummyModel()
with self.assertRaises(UnsupportedError):
ProximalAcquisitionFunction(
ExpectedImprovement(bad_model, 0.0), proximal_weights
)
# test proximal weights that do not match training_inputs
train_X = torch.rand(5, 1, 3, device=self.device, dtype=dtype)
train_Y = train_X.norm(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y).to(device=self.device).eval()
with self.assertRaises(ValueError):
ProximalAcquisitionFunction(
ExpectedImprovement(model, 0.0), proximal_weights[:1]
)
with self.assertRaises(ValueError):
ProximalAcquisitionFunction(
ExpectedImprovement(model, 0.0),
torch.rand(3, 3, device=self.device, dtype=dtype),
)
# test for x_pending points
pending_acq = DummyAcquisitionFunction(model)
pending_acq.set_X_pending(torch.rand(3, 3, device=self.device, dtype=dtype))
with self.assertRaises(UnsupportedError):
ProximalAcquisitionFunction(pending_acq, proximal_weights)
# test model with multi-batch training inputs
train_X = torch.rand(5, 2, 3, device=self.device, dtype=dtype)
train_Y = train_X.norm(dim=-1, keepdim=True)
bad_single_task = (
SingleTaskGP(train_X, train_Y).to(device=self.device).eval()
)
with self.assertRaises(UnsupportedError):
ProximalAcquisitionFunction(
ExpectedImprovement(bad_single_task, 0.0), proximal_weights
)
# test a multi-output SingleTaskGP model
train_X = torch.rand(5, 3, device=self.device, dtype=dtype)
train_Y = torch.rand(5, 2, device=self.device, dtype=dtype)
multi_output_model = SingleTaskGP(train_X, train_Y).to(device=self.device)
ptransform = ScalarizedPosteriorTransform(
weights=torch.ones(2, dtype=dtype, device=self.device)
)
ei = ExpectedImprovement(
multi_output_model, 0.0, posterior_transform=ptransform
)
acq = ProximalAcquisitionFunction(ei, proximal_weights)
acq(test_X)
def test_proximal_model_list(self):
for dtype in (torch.float, torch.double):
proximal_weights = torch.ones(3, device=self.device, dtype=dtype)
# test with model-list model for complex objective optimization
train_X = torch.rand(5, 3, device=self.device, dtype=dtype)
train_Y = train_X.norm(dim=-1, keepdim=True)
gp = SingleTaskGP(train_X, train_Y).to(device=self.device)
model = ModelListGP(gp, gp)
scalarized_posterior_transform = ScalarizedPosteriorTransform(
torch.ones(2, device=self.device, dtype=dtype)
)
mc_linear_objective = LinearMCObjective(
torch.ones(2, device=self.device, dtype=dtype)
)
EI = ExpectedImprovement(
model, best_f=0.0, posterior_transform=scalarized_posterior_transform
)
test_X = torch.rand(1, 3, device=self.device, dtype=dtype)
EI_prox = ProximalAcquisitionFunction(EI, proximal_weights=proximal_weights)
ei = EI(test_X)
mv_normal = MultivariateNormal(train_X[-1], torch.diag(proximal_weights))
test_prox_weight = torch.exp(mv_normal.log_prob(test_X)) / torch.exp(
mv_normal.log_prob(train_X[-1])
)
# test calculation
ei_prox = EI_prox(test_X)
self.assertAllClose(ei_prox, ei * test_prox_weight)
self.assertEqual(ei_prox.shape, torch.Size([1]))
# test MC acquisition function
qEI = qExpectedImprovement(model, best_f=0.0, objective=mc_linear_objective)
test_X = torch.rand(4, 1, 3, device=self.device, dtype=dtype)
qEI_prox = ProximalAcquisitionFunction(
qEI, proximal_weights=proximal_weights
)
qei = qEI(test_X)
mv_normal = MultivariateNormal(train_X[-1], torch.diag(proximal_weights))
test_prox_weight = torch.exp(mv_normal.log_prob(test_X)) / torch.exp(
mv_normal.log_prob(train_X[-1])
)
qei_prox = qEI_prox(test_X)
self.assertAllClose(qei_prox, qei * test_prox_weight.flatten())
self.assertEqual(qei_prox.shape, torch.Size([4]))
# test gradient
test_X = torch.rand(
1, 3, device=self.device, dtype=dtype, requires_grad=True
)
ei_prox = EI_prox(test_X)
ei_prox.backward()
# test proximal weights that do not match training_inputs
expected_err_msg = (
"`proximal_weights` must be a one dimensional tensor with "
"same feature dimension as model."
)
with self.assertRaisesRegex(ValueError, expected_err_msg):
ProximalAcquisitionFunction(
ExpectedImprovement(
model, 0.0, posterior_transform=scalarized_posterior_transform
),
proximal_weights[:1],
)
with self.assertRaisesRegex(ValueError, expected_err_msg):
ProximalAcquisitionFunction(
ExpectedImprovement(
model, 0.0, posterior_transform=scalarized_posterior_transform
),
torch.rand(3, 3, device=self.device, dtype=dtype),
)
# test for x_pending points
pending_acq = DummyAcquisitionFunction(model)
pending_acq.set_X_pending(torch.rand(3, 3, device=self.device, dtype=dtype))
with self.assertRaises(UnsupportedError):
ProximalAcquisitionFunction(pending_acq, proximal_weights)
# test model with multi-batch training inputs
train_X = torch.rand(5, 2, 3, device=self.device, dtype=dtype)
train_Y = train_X.norm(dim=-1, keepdim=True)
bad_model = ModelListGP(
SingleTaskGP(train_X, train_Y).to(device=self.device),
SingleTaskGP(train_X, train_Y).to(device=self.device),
)
with self.assertRaises(UnsupportedError):
ProximalAcquisitionFunction(
ExpectedImprovement(
bad_model,
0.0,
posterior_transform=scalarized_posterior_transform,
),
proximal_weights,
)
# try using unequal training sets
train_X = torch.rand(5, 3, device=self.device, dtype=dtype)
train_Y = train_X.norm(dim=-1, keepdim=True)
bad_model = ModelListGP(
SingleTaskGP(train_X[:-1], train_Y[:-1]).to(device=self.device),
SingleTaskGP(train_X, train_Y).to(device=self.device),
)
with self.assertRaises(UnsupportedError):
ProximalAcquisitionFunction(
ExpectedImprovement(
bad_model,
0.0,
posterior_transform=scalarized_posterior_transform,
),
proximal_weights,
)
# try with unequal input transforms
train_X = torch.rand(5, 3, device=self.device, dtype=dtype)
train_Y = train_X.norm(dim=-1, keepdim=True)
bad_model = ModelListGP(
SingleTaskGP(train_X, train_Y, input_transform=Normalize(3)).to(
device=self.device
),
SingleTaskGP(train_X, train_Y).to(device=self.device),
)
with self.assertRaises(UnsupportedError):
ProximalAcquisitionFunction(
ExpectedImprovement(
bad_model,
0.0,
posterior_transform=scalarized_posterior_transform,
),
proximal_weights,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import torch
from botorch.acquisition.predictive_entropy_search import qPredictiveEntropySearch
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.utils.testing import BotorchTestCase
def get_model(train_X, train_Y, use_model_list, standardize_model):
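    """Build a SingleTaskGP, or a ModelListGP of per-objective GPs, optionally
    with a Standardize outcome transform."""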
num_objectives = train_Y.shape[-1]
if standardize_model:
if use_model_list:
outcome_transform = Standardize(m=1)
else:
outcome_transform = Standardize(m=num_objectives)
else:
outcome_transform = None
if use_model_list:
model = ModelListGP(
*[
SingleTaskGP(
train_X=train_X,
train_Y=train_Y[:, i : i + 1],
outcome_transform=outcome_transform,
)
for i in range(num_objectives)
]
)
else:
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
outcome_transform=outcome_transform,
)
return model
class TestQPredictiveEntropySearch(BotorchTestCase):
def test_predictive_entropy_search(self):
torch.manual_seed(1)
tkwargs = {"device": self.device}
num_objectives = 1
        for dtype, use_model_list, standardize_model, maximize in product(
(torch.float, torch.double),
(False, True),
(False, True),
(False, True),
):
tkwargs["dtype"] = dtype
input_dim = 2
train_X = torch.rand(4, input_dim, **tkwargs)
train_Y = torch.rand(4, num_objectives, **tkwargs)
model = get_model(train_X, train_Y, use_model_list, standardize_model)
num_samples = 20
optimal_inputs = torch.rand(num_samples, input_dim, **tkwargs)
# test acquisition
X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)]
            for X_pending in X_pending_list:
acq = qPredictiveEntropySearch(
model=model,
optimal_inputs=optimal_inputs,
maximize=maximize,
X_pending=X_pending,
)
test_Xs = [
torch.rand(4, 1, input_dim, **tkwargs),
torch.rand(4, 3, input_dim, **tkwargs),
torch.rand(4, 5, 1, input_dim, **tkwargs),
torch.rand(4, 5, 3, input_dim, **tkwargs),
]
                for test_X in test_Xs:
                    acq_X = acq(test_X)
                    # assess shape
                    self.assertEqual(acq_X.shape, test_X.shape[:-2])
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from unittest import mock
import torch
from botorch.acquisition.objective import GenericMCObjective
from botorch.acquisition.utils import (
compute_best_feasible_objective,
expand_trace_observations,
get_acquisition_function,
get_infeasible_cost,
get_optimal_samples,
project_to_sample_points,
project_to_target_fidelity,
prune_inferior_points,
)
from botorch.exceptions.errors import DeprecationError, UnsupportedError
from botorch.models import SingleTaskGP
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestGetAcquisitionFunctionDeprecation(BotorchTestCase):
def test_get_acquisition_function_deprecation(self):
msg = (
"`get_acquisition_function` has been moved to"
" `botorch.acquisition.factory`."
)
with self.assertRaisesRegex(DeprecationError, msg):
get_acquisition_function()
class TestConstraintUtils(BotorchTestCase):
def test_compute_best_feasible_objective(self):
for dtype in (torch.float, torch.double):
with self.subTest(dtype=dtype):
tkwargs = {"dtype": dtype, "device": self.device}
n = 5
X = torch.arange(n, **tkwargs).view(-1, 1)
for batch_shape, sample_shape in itertools.product(
(torch.Size([]), torch.Size([2])),
(torch.Size([1]), torch.Size([3])),
):
means = torch.arange(n, **tkwargs).view(-1, 1)
if len(batch_shape) > 0:
view_means = means.view(1, *means.shape)
means = view_means.expand(batch_shape + means.shape)
if sample_shape[0] == 1:
samples = means.unsqueeze(0)
else:
samples = torch.stack([means, means + 1, means + 4], dim=0)
variances = torch.tensor(
[0.09, 0.25, 0.36, 0.25, 0.09], **tkwargs
).view(-1, 1)
mm = MockModel(MockPosterior(mean=means, variance=variances))
# testing all feasible points
obj = samples.squeeze(-1)
constraints = [lambda samples: -torch.ones_like(samples[..., 0])]
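                    # (negative constraint values denote feasibility)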
best_f = compute_best_feasible_objective(
samples=samples, obj=obj, constraints=constraints
)
self.assertAllClose(best_f, obj.amax(dim=-1, keepdim=True))
# testing with some infeasible points
con_cutoff = 3.0
best_f = compute_best_feasible_objective(
samples=samples,
obj=obj,
constraints=[
lambda samples: samples[..., 0] - (con_cutoff + 1 / 2)
],
model=mm,
X_baseline=X,
)
if sample_shape[0] == 3:
                        # under some samples, all baseline points are infeasible,
                        # so best_f is set to the negative infeasible cost for
                        # samples where no point is feasible
expected_best_f = torch.tensor(
[
3.0,
3.0,
-get_infeasible_cost(
X=X,
model=mm,
).item(),
],
**tkwargs,
).view(-1, 1)
if len(batch_shape) > 0:
expected_best_f = expected_best_f.unsqueeze(1)
expected_best_f = expected_best_f.expand(
*sample_shape, *batch_shape, 1
)
else:
expected_best_f = torch.full(
sample_shape + batch_shape + torch.Size([1]),
con_cutoff,
**tkwargs,
)
self.assertAllClose(best_f, expected_best_f)
                    # test some feasible points with infeasible_obj specified
if sample_shape[0] == 3:
best_f = compute_best_feasible_objective(
samples=samples,
obj=obj,
constraints=[
lambda samples: samples[..., 0] - (con_cutoff + 1 / 2)
],
infeasible_obj=torch.ones(1, **tkwargs),
)
expected_best_f[-1] = 1
self.assertAllClose(best_f, expected_best_f)
# testing with no feasible points and infeasible obj
infeasible_obj = torch.tensor(torch.pi, **tkwargs)
expected_best_f = torch.full(
sample_shape + batch_shape + torch.Size([1]),
torch.pi,
**tkwargs,
)
best_f = compute_best_feasible_objective(
samples=samples,
obj=obj,
constraints=[lambda X: torch.ones_like(X[..., 0])],
infeasible_obj=infeasible_obj,
)
self.assertAllClose(best_f, expected_best_f)
                    # testing with no feasible points and no infeasible_obj
def objective(Y, X):
return Y.squeeze(-1) - 5.0
best_f = compute_best_feasible_objective(
samples=samples,
obj=obj,
constraints=[lambda X: torch.ones_like(X[..., 0])],
model=mm,
X_baseline=X,
objective=objective,
)
expected_best_f = torch.full(
sample_shape + batch_shape + torch.Size([1]),
-get_infeasible_cost(X=X, model=mm, objective=objective).item(),
**tkwargs,
)
self.assertAllClose(best_f, expected_best_f)
with self.assertRaisesRegex(ValueError, "Must specify `model`"):
best_f = compute_best_feasible_objective(
samples=means,
obj=obj,
constraints=[lambda X: torch.ones_like(X[..., 0])],
X_baseline=X,
)
with self.assertRaisesRegex(
ValueError, "Must specify `X_baseline`"
):
best_f = compute_best_feasible_objective(
samples=means,
obj=obj,
constraints=[lambda X: torch.ones_like(X[..., 0])],
model=mm,
)
def test_get_infeasible_cost(self):
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
X = torch.ones(5, 1, **tkwargs)
means = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], **tkwargs).view(-1, 1)
variances = torch.tensor([0.09, 0.25, 0.36, 0.25, 0.09], **tkwargs).view(
-1, 1
)
mm = MockModel(MockPosterior(mean=means, variance=variances))
# means - 6 * std = [-0.8, -1, -0.6, 1, 3.2]. After applying the
# objective, the minimum becomes -6.0, so 6.0 should be returned.
M = get_infeasible_cost(
X=X, model=mm, objective=lambda Y, X: Y.squeeze(-1) - 5.0
)
self.assertAllClose(M, torch.tensor([6.0], **tkwargs))
            # test an objective that also uses X
            M = get_infeasible_cost(
                X=X, model=mm, objective=lambda Y, X: Y.squeeze(-1) - 5.0 - X[0, 0]
            )
            self.assertAllClose(M, torch.tensor([7.0], **tkwargs))
            # test default objective (squeeze last dim)
M2 = get_infeasible_cost(X=X, model=mm)
self.assertAllClose(M2, torch.tensor([1.0], **tkwargs))
# Test multi-output.
m_ = means.repeat(1, 2)
m_[:, 1] -= 10
mm = MockModel(MockPosterior(mean=m_, variance=variances.expand(-1, 2)))
M3 = get_infeasible_cost(X=X, model=mm)
self.assertAllClose(M3, torch.tensor([1.0, 11.0], **tkwargs))
# With a batched model.
means = means.expand(2, 4, -1, -1)
variances = variances.expand(2, 4, -1, -1)
mm = MockModel(MockPosterior(mean=means, variance=variances))
M4 = get_infeasible_cost(X=X, model=mm)
self.assertAllClose(M4, torch.tensor([1.0], **tkwargs))
class TestPruneInferiorPoints(BotorchTestCase):
def test_prune_inferior_points(self):
for dtype in (torch.float, torch.double):
X = torch.rand(3, 2, device=self.device, dtype=dtype)
# the event shape is `q x t` = 3 x 1
samples = torch.tensor(
[[-1.0], [0.0], [1.0]], device=self.device, dtype=dtype
)
mm = MockModel(MockPosterior(samples=samples))
# test that a batched X raises errors
with self.assertRaises(UnsupportedError):
prune_inferior_points(model=mm, X=X.expand(2, 3, 2))
# test marginalize_dim
mm2 = MockModel(MockPosterior(samples=samples.expand(2, 3, 1)))
X_pruned = prune_inferior_points(model=mm2, X=X, marginalize_dim=-3)
with self.assertRaises(UnsupportedError):
# test error raised when marginalize_dim is not specified with
# a batch model
prune_inferior_points(model=mm2, X=X)
self.assertTrue(torch.equal(X_pruned, X[[-1]]))
# test that a batched model raises errors when there are multiple batch dims
mm2 = MockModel(MockPosterior(samples=samples.expand(1, 2, 3, 1)))
with self.assertRaises(UnsupportedError):
prune_inferior_points(model=mm2, X=X)
# test that invalid max_frac is checked properly
with self.assertRaises(ValueError):
prune_inferior_points(model=mm, X=X, max_frac=1.1)
# test basic behaviour
X_pruned = prune_inferior_points(model=mm, X=X)
self.assertTrue(torch.equal(X_pruned, X[[-1]]))
# test custom objective
neg_id_obj = GenericMCObjective(lambda Y, X: -(Y.squeeze(-1)))
X_pruned = prune_inferior_points(model=mm, X=X, objective=neg_id_obj)
self.assertTrue(torch.equal(X_pruned, X[[0]]))
# test non-repeated samples (requires mocking out MockPosterior's rsample)
samples = torch.tensor(
[[[3.0], [0.0], [0.0]], [[0.0], [2.0], [0.0]], [[0.0], [0.0], [1.0]]],
device=self.device,
dtype=dtype,
)
with mock.patch.object(MockPosterior, "rsample", return_value=samples):
mm = MockModel(MockPosterior(samples=samples))
X_pruned = prune_inferior_points(model=mm, X=X)
self.assertTrue(torch.equal(X_pruned, X))
# test max_frac limiting
with mock.patch.object(MockPosterior, "rsample", return_value=samples):
mm = MockModel(MockPosterior(samples=samples))
X_pruned = prune_inferior_points(model=mm, X=X, max_frac=2 / 3)
if self.device.type == "cuda":
# sorting has different order on cuda
self.assertTrue(torch.equal(X_pruned, torch.stack([X[2], X[1]], dim=0)))
else:
self.assertTrue(torch.equal(X_pruned, X[:2]))
# test that zero-probability is in fact pruned
samples[2, 0, 0] = 10
with mock.patch.object(MockPosterior, "rsample", return_value=samples):
mm = MockModel(MockPosterior(samples=samples))
X_pruned = prune_inferior_points(model=mm, X=X)
self.assertTrue(torch.equal(X_pruned, X[:2]))
class TestFidelityUtils(BotorchTestCase):
def test_project_to_target_fidelity(self):
for batch_shape, dtype in itertools.product(
([], [2]), (torch.float, torch.double)
):
X = torch.rand(*batch_shape, 3, 4, device=self.device, dtype=dtype)
# test default behavior
X_proj = project_to_target_fidelity(X)
ones = torch.ones(*X.shape[:-1], 1, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(X_proj[..., :, [-1]], ones))
self.assertTrue(torch.equal(X_proj[..., :-1], X[..., :-1]))
# test custom target fidelity
target_fids = {2: 0.5}
X_proj = project_to_target_fidelity(X, target_fidelities=target_fids)
self.assertTrue(torch.equal(X_proj[..., :, [2]], 0.5 * ones))
# test multiple target fidelities
target_fids = {2: 0.5, 0: 0.1}
X_proj = project_to_target_fidelity(X, target_fidelities=target_fids)
self.assertTrue(torch.equal(X_proj[..., :, [0]], 0.1 * ones))
self.assertTrue(torch.equal(X_proj[..., :, [2]], 0.5 * ones))
# test gradients
X.requires_grad_(True)
X_proj = project_to_target_fidelity(X, target_fidelities=target_fids)
out = (X_proj**2).sum()
out.backward()
self.assertTrue(torch.all(X.grad[..., [0, 2]] == 0))
self.assertTrue(torch.equal(X.grad[..., [1, 3]], 2 * X[..., [1, 3]]))
def test_expand_trace_observations(self):
for batch_shape, dtype in itertools.product(
([], [2]), (torch.float, torch.double)
):
q, d = 3, 4
X = torch.rand(*batch_shape, q, d, device=self.device, dtype=dtype)
# test nullop behavior
self.assertTrue(torch.equal(expand_trace_observations(X), X))
self.assertTrue(
torch.equal(expand_trace_observations(X, fidelity_dims=[1]), X)
)
# test default behavior
num_tr = 2
X_expanded = expand_trace_observations(X, num_trace_obs=num_tr)
self.assertEqual(
X_expanded.shape, torch.Size(batch_shape + [q * (1 + num_tr), d])
)
for i in range(num_tr):
X_sub = X_expanded[..., q * i : q * (i + 1), :]
self.assertTrue(torch.equal(X_sub[..., :-1], X[..., :-1]))
X_sub_expected = (1 - i / (num_tr + 1)) * X[..., :q, -1]
self.assertTrue(torch.equal(X_sub[..., -1], X_sub_expected))
# test custom fidelity dims
fdims = [0, 2]
num_tr = 3
X_expanded = expand_trace_observations(
X, fidelity_dims=fdims, num_trace_obs=num_tr
)
self.assertEqual(
X_expanded.shape, torch.Size(batch_shape + [q * (1 + num_tr), d])
)
for j, i in itertools.product([1, 3], range(num_tr)):
X_sub = X_expanded[..., q * i : q * (i + 1), j]
self.assertTrue(torch.equal(X_sub, X[..., j]))
for j, i in itertools.product(fdims, range(num_tr)):
X_sub = X_expanded[..., q * i : q * (i + 1), j]
X_sub_expected = (1 - i / (1 + num_tr)) * X[..., :q, j]
self.assertTrue(torch.equal(X_sub, X_sub_expected))
# test gradients
num_tr = 2
fdims = [1]
X.requires_grad_(True)
X_expanded = expand_trace_observations(
X, fidelity_dims=fdims, num_trace_obs=num_tr
)
out = X_expanded.sum()
out.backward()
grad_exp = torch.full_like(X, 1 + num_tr)
grad_exp[..., fdims] = 1 + sum(
(i + 1) / (num_tr + 1) for i in range(num_tr)
)
self.assertAllClose(X.grad, grad_exp)
def test_project_to_sample_points(self):
for batch_shape, dtype in itertools.product(
([], [2]), (torch.float, torch.double)
):
q, d, p, d_prime = 1, 12, 7, 4
X = torch.rand(*batch_shape, q, d, device=self.device, dtype=dtype)
sample_points = torch.rand(p, d_prime, device=self.device, dtype=dtype)
X_augmented = project_to_sample_points(X=X, sample_points=sample_points)
self.assertEqual(X_augmented.shape, torch.Size(batch_shape + [p, d]))
if batch_shape == [2]:
self.assertAllClose(X_augmented[0, :, -d_prime:], sample_points)
else:
self.assertAllClose(X_augmented[:, -d_prime:], sample_points)
class TestGetOptimalSamples(BotorchTestCase):
def test_get_optimal_samples(self):
dims = 3
dtype = torch.float64
for_testing_speed_kwargs = {"raw_samples": 50, "num_restarts": 3}
num_optima = 7
batch_shape = (3,)
bounds = torch.tensor([[0, 1]] * dims, dtype=dtype).T
X = torch.rand(*batch_shape, 4, dims, dtype=dtype)
Y = torch.sin(X).sum(dim=-1, keepdim=True).to(dtype)
model = SingleTaskGP(X, Y)
X_opt, f_opt = get_optimal_samples(
model, bounds, num_optima=num_optima, **for_testing_speed_kwargs
)
X_opt, f_opt_min = get_optimal_samples(
model,
bounds,
num_optima=num_optima,
maximize=False,
**for_testing_speed_kwargs,
)
correct_X_shape = (num_optima,) + batch_shape + (dims,)
correct_f_shape = (num_optima,) + batch_shape + (1,)
self.assertEqual(X_opt.shape, correct_X_shape)
self.assertEqual(f_opt.shape, correct_f_shape)
        # assert that the optima found by minimizing the samples are smaller
        # than those found by maximizing
self.assertTrue(torch.all(f_opt_min < f_opt))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.acquisition import (
ExpectedImprovement,
qExpectedImprovement,
qMultiStepLookahead,
)
from botorch.acquisition.multi_step_lookahead import make_best_f, warmstart_multistep
from botorch.acquisition.objective import IdentityMCObjective
from botorch.exceptions.errors import UnsupportedError
from botorch.models import SingleTaskGP
from botorch.sampling import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase
class TestMultiStepLookahead(BotorchTestCase):
def test_qMS_init(self):
d = 2
q = 1
num_data = 3
q_batch_sizes = [1, 1, 1]
num_fantasies = [2, 2, 1]
t_batch_size = [2]
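        # three lookahead steps plus the current step, hence the length-4 lists below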
for dtype in (torch.float, torch.double):
bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype)
bounds = bounds.repeat(1, d)
train_X = torch.rand(num_data, d, device=self.device, dtype=dtype)
train_Y = torch.rand(num_data, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X, train_Y)
            # must provide exactly one of `samplers` or `num_fantasies`
with self.assertRaises(UnsupportedError):
qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
valfunc_cls=[qExpectedImprovement] * 4,
valfunc_argfacs=[make_best_f] * 4,
inner_mc_samples=[2] * 4,
)
# cannot use qMS as its own valfunc_cls
with self.assertRaises(UnsupportedError):
qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
valfunc_cls=[qMultiStepLookahead] * 4,
valfunc_argfacs=[make_best_f] * 4,
num_fantasies=num_fantasies,
inner_mc_samples=[2] * 4,
)
# construct using samplers
samplers = [
SobolQMCNormalSampler(sample_shape=torch.Size([nf]))
for nf in num_fantasies
]
qMS = qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
valfunc_cls=[qExpectedImprovement] * 4,
valfunc_argfacs=[make_best_f] * 4,
inner_mc_samples=[2] * 4,
samplers=samplers,
)
self.assertEqual(qMS.num_fantasies, num_fantasies)
# use default valfunc_cls, valfun_argfacs, inner_mc_samples
qMS = qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
samplers=samplers,
)
self.assertEqual(len(qMS._valfunc_cls), 4)
self.assertEqual(len(qMS.inner_samplers), 4)
self.assertEqual(len(qMS._valfunc_argfacs), 4)
# _construct_inner_samplers error catching tests below
# AnalyticAcquisitionFunction with MCAcquisitionObjective
with self.assertRaises(UnsupportedError):
qMultiStepLookahead(
model=model,
objective=IdentityMCObjective(),
batch_sizes=q_batch_sizes,
valfunc_cls=[ExpectedImprovement] * 4,
valfunc_argfacs=[make_best_f] * 4,
num_fantasies=num_fantasies,
)
# AnalyticAcquisitionFunction and q > 1
with self.assertRaises(UnsupportedError):
qMultiStepLookahead(
model=model,
batch_sizes=[2, 2, 2],
valfunc_cls=[ExpectedImprovement] * 4,
valfunc_argfacs=[make_best_f] * 4,
num_fantasies=num_fantasies,
inner_mc_samples=[2] * 4,
)
# AnalyticAcquisitionFunction and inner_mc_samples
with self.assertWarns(Warning):
qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
valfunc_cls=[ExpectedImprovement] * 4,
valfunc_argfacs=[make_best_f] * 4,
num_fantasies=num_fantasies,
inner_mc_samples=[2] * 4,
)
# test warmstarting
qMS = qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
samplers=samplers,
)
q_prime = qMS.get_augmented_q_batch_size(q)
eval_X = torch.rand(
t_batch_size + [q_prime, d], device=self.device, dtype=dtype
)
warmstarted_X = warmstart_multistep(
acq_function=qMS,
bounds=bounds,
num_restarts=5,
raw_samples=10,
full_optimizer=eval_X,
)
self.assertEqual(warmstarted_X.shape, torch.Size([5, q_prime, d]))
with self.assertRaisesRegex(
UnsupportedError,
"`qMultiStepLookahead` got a non-MC `objective`. This is not supported."
" Use `posterior_transform` and `objective=None` instead.",
):
qMultiStepLookahead(model=model, batch_sizes=q_batch_sizes, objective="cat")
def test_qMS(self):
d = 2
q = 1
num_data = 3
q_batch_sizes = [1, 1, 1]
num_fantasies = [2, 2, 1]
t_batch_size = [2]
for dtype in (torch.float, torch.double):
bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype)
bounds = bounds.repeat(1, d)
train_X = torch.rand(num_data, d, device=self.device, dtype=dtype)
train_Y = torch.rand(num_data, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X, train_Y)
            # default evaluation tests
qMS = qMultiStepLookahead(
model=model,
batch_sizes=[1, 1, 1],
num_fantasies=num_fantasies,
)
q_prime = qMS.get_augmented_q_batch_size(q)
eval_X = torch.rand(
t_batch_size + [q_prime, d], device=self.device, dtype=dtype
)
result = qMS(eval_X)
self.assertEqual(result.shape, torch.Size(t_batch_size))
qMS = qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
valfunc_cls=[qExpectedImprovement] * 4,
valfunc_argfacs=[make_best_f] * 4,
num_fantasies=num_fantasies,
inner_mc_samples=[2] * 4,
)
result = qMS(eval_X)
self.assertEqual(result.shape, torch.Size(t_batch_size))
# get induced fantasy model, with collapse_fantasy_base_samples
fant_model = qMS.get_induced_fantasy_model(eval_X)
self.assertEqual(
fant_model.train_inputs[0].shape,
torch.Size(
num_fantasies[::-1]
+ t_batch_size
+ [num_data + sum(q_batch_sizes), d]
),
)
            # do not collapse fantasy base samples
qMS = qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
valfunc_cls=[qExpectedImprovement] * 4,
valfunc_argfacs=[make_best_f] * 4,
num_fantasies=num_fantasies,
inner_mc_samples=[2] * 4,
collapse_fantasy_base_samples=False,
)
q_prime = qMS.get_augmented_q_batch_size(q)
eval_X = torch.rand(
t_batch_size + [q_prime, d], device=self.device, dtype=dtype
)
result = qMS(eval_X)
self.assertEqual(result.shape, torch.Size(t_batch_size))
self.assertEqual(
qMS.samplers[0]._get_batch_range(model.posterior(eval_X)), (-3, -2)
)
# get induced fantasy model, without collapse_fantasy_base_samples
fant_model = qMS.get_induced_fantasy_model(eval_X)
self.assertEqual(
fant_model.train_inputs[0].shape,
torch.Size(
num_fantasies[::-1]
+ t_batch_size
+ [num_data + sum(q_batch_sizes), d]
),
)
# X_pending
X_pending = torch.rand(5, d, device=self.device, dtype=dtype)
qMS = qMultiStepLookahead(
model=model,
batch_sizes=q_batch_sizes,
valfunc_cls=[qExpectedImprovement] * 4,
valfunc_argfacs=[make_best_f] * 4,
num_fantasies=num_fantasies,
inner_mc_samples=[2] * 4,
X_pending=X_pending,
)
q_prime = qMS.get_augmented_q_batch_size(q)
eval_X = torch.rand(
t_batch_size + [q_prime, d], device=self.device, dtype=dtype
)
result = qMS(eval_X)
self.assertEqual(result.shape, torch.Size(t_batch_size))
# add dummy base_weights to samplers
samplers = [
SobolQMCNormalSampler(sample_shape=torch.Size([nf]))
for nf in num_fantasies
]
for s in samplers:
s.base_weights = torch.ones(
s.sample_shape[0], 1, device=self.device, dtype=dtype
)
qMS = qMultiStepLookahead(
model=model,
batch_sizes=[1, 1, 1],
samplers=samplers,
)
q_prime = qMS.get_augmented_q_batch_size(q)
eval_X = torch.rand(
t_batch_size + [q_prime, d], device=self.device, dtype=dtype
)
result = qMS(eval_X)
self.assertEqual(result.shape, torch.Size(t_batch_size))
# extract candidates
cand = qMS.extract_candidates(eval_X)
self.assertEqual(cand.shape, torch.Size(t_batch_size + [q, d]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from copy import deepcopy
from functools import partial
from itertools import product
from math import pi
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition.monte_carlo import (
MCAcquisitionFunction,
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
SampleReducingMCAcquisitionFunction,
)
from botorch.acquisition.objective import (
ConstrainedMCObjective,
GenericMCObjective,
IdentityMCObjective,
PosteriorTransform,
ScalarizedPosteriorTransform,
)
from botorch.exceptions import BotorchWarning, UnsupportedError
from botorch.models import SingleTaskGP
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.low_rank import sample_cached_cholesky
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from botorch.utils.transforms import standardize
from torch import Tensor
class DummyMCAcquisitionFunction(MCAcquisitionFunction):
def forward(self, X):
pass
class DummyReducingMCAcquisitionFunction(SampleReducingMCAcquisitionFunction):
def _sample_forward(self, X):
pass
class DummyNonScalarizingPosteriorTransform(PosteriorTransform):
scalarize = False
def evaluate(self, Y):
pass # pragma: no cover
def forward(self, posterior):
pass # pragma: no cover
def infeasible_con(samples: Tensor) -> Tensor:
return torch.ones_like(samples[..., 0])
def feasible_con(samples: Tensor) -> Tensor:
return -torch.ones_like(samples[..., 0])
class TestMCAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
for acqf_class in (MCAcquisitionFunction, SampleReducingMCAcquisitionFunction):
with self.assertRaises(TypeError):
acqf_class()
        # raise if model is multi-output, but no posterior transform or
        # objective is given
no = "botorch.utils.testing.MockModel.num_outputs"
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
mm = MockModel(MockPosterior())
for dummy in (
DummyMCAcquisitionFunction,
DummyReducingMCAcquisitionFunction,
):
with self.assertRaises(UnsupportedError):
dummy(model=mm)
        # raise if model is multi-output, but the posterior transform does not
        # scalarize and no objective is given
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
mm = MockModel(MockPosterior())
ptf = DummyNonScalarizingPosteriorTransform()
with self.assertRaises(UnsupportedError):
dummy(model=mm, posterior_transform=ptf)
class TestQExpectedImprovement(BotorchTestCase):
def test_q_expected_improvement(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, **tkwargs)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
# test initialization
for k in ["objective", "sampler"]:
self.assertIn(k, acqf._modules)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test shifting best_f value
acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
# TODO: Test batched best_f, batched model, batched evaluation
# basic test, no resample
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
mm._posterior._samples = torch.zeros(1, 2, 1, **tkwargs)
res = acqf(X)
X2 = torch.zeros(1, 1, 1, **tkwargs, requires_grad=True)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
def test_q_expected_improvement_batch(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 2 x 2 x 1
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch model, batched best_f values
sampler = IIDNormalSampler(sample_shape=torch.Size([3]))
acqf = qExpectedImprovement(
model=mm, best_f=torch.Tensor([0, 0]), sampler=sampler
)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test shifting best_f value
acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 2.0)
self.assertEqual(res[1].item(), 1.0)
            # test batch mode, no resample
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 2, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 2, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# TODO: Test different objectives (incl. constraints)
class TestQNoisyExpectedImprovement(BotorchTestCase):
def test_q_noisy_expected_improvement(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 2 x 1
samples_noisy = torch.tensor([0.0, 1.0], device=self.device, dtype=dtype)
samples_noisy = samples_noisy.view(1, 2, 1)
# X_baseline is `q' x d` = 1 x 1
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
# X is `q x d` = 1 x 1
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qNoisyExpectedImprovement(
model=mm_noisy,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
            # basic test, no resample
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qNoisyExpectedImprovement(
model=mm_noisy,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qNoisyExpectedImprovement(
model=mm_noisy,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
samples_noisy_pending = torch.tensor(
[1.0, 0.0, 0.0], device=self.device, dtype=dtype
)
samples_noisy_pending = samples_noisy_pending.view(1, 3, 1)
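# the three sample slots presumably cover the candidate, the pending point,
# and the baseline point, which qNEI evaluates jointly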
mm_noisy_pending = MockModel(MockPosterior(samples=samples_noisy_pending))
acqf = qNoisyExpectedImprovement(
model=mm_noisy_pending,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
def test_q_noisy_expected_improvement_batch(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 2 x 3 x 1
samples_noisy = torch.zeros(2, 3, 1, device=self.device, dtype=dtype)
samples_noisy[0, -1, 0] = 1.0
mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
# X is `b x q x d` = 2 x 2 x 1
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qNoisyExpectedImprovement(
model=mm_noisy,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qNoisyExpectedImprovement(
model=mm_noisy,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 2, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 2, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qNoisyExpectedImprovement(
model=mm_noisy,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
def test_prune_baseline(self):
no = "botorch.utils.testing.MockModel.num_outputs"
prune = "botorch.acquisition.monte_carlo.prune_inferior_points"
for dtype in (torch.float, torch.double):
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype)
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(MockPosterior(samples=X_baseline))
with mock.patch(prune, return_value=X_pruned) as mock_prune:
acqf = qNoisyExpectedImprovement(
model=mm,
X_baseline=X_baseline,
prune_baseline=True,
cache_root=False,
)
mock_prune.assert_called_once()
self.assertTrue(torch.equal(acqf.X_baseline, X_pruned))
with mock.patch(prune, return_value=X_pruned) as mock_prune:
acqf = qNoisyExpectedImprovement(
model=mm,
X_baseline=X_baseline,
prune_baseline=True,
marginalize_dim=-3,
cache_root=False,
)
_, kwargs = mock_prune.call_args
self.assertEqual(kwargs["marginalize_dim"], -3)
def test_cache_root(self):
sample_cached_path = (
"botorch.acquisition.cached_cholesky.sample_cached_cholesky"
)
raw_state_dict = {
"likelihood.noise_covar.raw_noise": torch.tensor(
[[0.0895], [0.2594]], dtype=torch.float64
),
"mean_module.raw_constant": torch.tensor(
[-0.4545, -0.1285], dtype=torch.float64
),
"covar_module.raw_outputscale": torch.tensor(
[1.4876, 1.4897], dtype=torch.float64
),
"covar_module.base_kernel.raw_lengthscale": torch.tensor(
[[[-0.7202, -0.2868]], [[-0.8794, -1.2877]]], dtype=torch.float64
),
}
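# NOTE: these raw hyperparameters carry a leading output/batch dim of 2;
# below they are sliced (m == 1) or expanded (batched training) to match
# each configuration under test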
# test batched models (e.g. for MCMC)
for train_batch_shape, m, dtype in product(
(torch.Size([]), torch.Size([3])), (1, 2), (torch.float, torch.double)
):
state_dict = deepcopy(raw_state_dict)
for k, v in state_dict.items():
if m == 1:
v = v[0]
if len(train_batch_shape) > 0:
v = v.unsqueeze(0).expand(*train_batch_shape, *v.shape)
state_dict[k] = v
tkwargs = {"device": self.device, "dtype": dtype}
if m == 2:
objective = GenericMCObjective(lambda Y, X: Y.sum(dim=-1))
else:
objective = None
for k, v in state_dict.items():
state_dict[k] = v.to(**tkwargs)
all_close_kwargs = (
{
"atol": 1e-1,
"rtol": 0.0,
}
if dtype == torch.float
else {"atol": 1e-4, "rtol": 0.0}
)
torch.manual_seed(1234)
train_X = torch.rand(*train_batch_shape, 3, 2, **tkwargs)
train_Y = (
torch.sin(train_X * 2 * pi)
+ torch.randn(*train_batch_shape, 3, 2, **tkwargs)
)[..., :m]
train_Y = standardize(train_Y)
model = SingleTaskGP(
train_X,
train_Y,
)
if len(train_batch_shape) > 0:
X_baseline = train_X[0]
else:
X_baseline = train_X
model.load_state_dict(state_dict, strict=False)
sampler = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0)
torch.manual_seed(0)
acqf = qNoisyExpectedImprovement(
model=model,
X_baseline=X_baseline,
sampler=sampler,
objective=objective,
prune_baseline=False,
cache_root=True,
)
orig_base_samples = acqf.base_sampler.base_samples.detach().clone()
sampler2 = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0)
sampler2.base_samples = orig_base_samples
torch.manual_seed(0)
acqf_no_cache = qNoisyExpectedImprovement(
model=model,
X_baseline=X_baseline,
sampler=sampler2,
objective=objective,
prune_baseline=False,
cache_root=False,
)
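# the cached and uncached acquisition functions share identical base
# samples, so their values and gradients should agree up to the
# dtype-dependent tolerances defined above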
for q, batch_shape in product(
(1, 3), (torch.Size([]), torch.Size([3]), torch.Size([4, 3]))
):
acqf.q_in = -1
acqf_no_cache.q_in = -1
test_X = (
0.3 + 0.05 * torch.randn(*batch_shape, q, 2, **tkwargs)
).requires_grad_(True)
with mock.patch(
sample_cached_path, wraps=sample_cached_cholesky
) as mock_sample_cached:
torch.manual_seed(0)
val = acqf(test_X)
mock_sample_cached.assert_called_once()
val.sum().backward()
base_samples = acqf.sampler.base_samples.detach().clone()
X_grad = test_X.grad.clone()
test_X2 = test_X.detach().clone().requires_grad_(True)
acqf_no_cache.sampler.base_samples = base_samples
with mock.patch(
sample_cached_path, wraps=sample_cached_cholesky
) as mock_sample_cached:
torch.manual_seed(0)
val2 = acqf_no_cache(test_X2)
mock_sample_cached.assert_not_called()
self.assertAllClose(val, val2, **all_close_kwargs)
val2.sum().backward()
self.assertAllClose(X_grad, test_X2.grad, **all_close_kwargs)
# test we fall back to standard sampling for
# ill-conditioned covariances
acqf._baseline_L = torch.zeros_like(acqf._baseline_L)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
with torch.no_grad():
acqf(test_X)
self.assertEqual(sum(issubclass(w.category, BotorchWarning) for w in ws), 1)
# test w/ posterior transform
X_baseline = torch.rand(2, 1)
model = SingleTaskGP(X_baseline, torch.randn(2, 1))
pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
with mock.patch.object(
qNoisyExpectedImprovement,
"_compute_root_decomposition",
) as mock_cache_root:
acqf = qNoisyExpectedImprovement(
model=model,
X_baseline=X_baseline,
sampler=IIDNormalSampler(sample_shape=torch.Size([1])),
posterior_transform=pt,
prune_baseline=False,
cache_root=True,
)
tf_post = model.posterior(X_baseline, posterior_transform=pt)
self.assertTrue(
torch.allclose(
tf_post.mean, mock_cache_root.call_args[-1]["posterior"].mean
)
)
# testing constraints
n, d, m = 8, 1, 3
X_baseline = torch.rand(n, d)
model = SingleTaskGP(X_baseline, torch.randn(n, m)) # batched model
nei_args = {
"model": model,
"X_baseline": X_baseline,
"prune_baseline": False,
"cache_root": True,
"posterior_transform": ScalarizedPosteriorTransform(weights=torch.ones(m)),
"sampler": SobolQMCNormalSampler(sample_shape=torch.Size([5])),
}
acqf = qNoisyExpectedImprovement(**nei_args)
X = torch.randn_like(X_baseline)
for con in [feasible_con, infeasible_con]:
with self.subTest(con=con):
target = "botorch.acquisition.utils.get_infeasible_cost"
infcost = torch.tensor([3], device=self.device, dtype=dtype)
with mock.patch(target, return_value=infcost):
cacqf = qNoisyExpectedImprovement(**nei_args, constraints=[con])
_, obj = cacqf._get_samples_and_objectives(X)
best_feas_f = cacqf.compute_best_f(obj)
if con is feasible_con:
self.assertAllClose(best_feas_f, acqf.compute_best_f(obj))
else:
self.assertAllClose(
best_feas_f, torch.full_like(obj[..., [0]], -infcost.item())
)
# TODO: Test different objectives (incl. constraints)
class TestQProbabilityOfImprovement(BotorchTestCase):
def test_q_probability_of_improvement(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.5)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
mm._posterior._samples = mm._posterior._samples.expand(-1, 2, -1)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
def test_q_probability_of_improvement_batch(self):
# the event shape is `b x q x t` = 2 x 2 x 1
for dtype in (torch.float, torch.double):
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
# test batch model, batched best_f values
sampler = IIDNormalSampler(sample_shape=torch.Size([3]))
acqf = qProbabilityOfImprovement(
model=mm, best_f=torch.Tensor([0, 0]), sampler=sampler
)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, -1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, -1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# TODO: Test different objectives (incl. constraints)
class TestQSimpleRegret(BotorchTestCase):
def test_q_simple_regret(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
mm._posterior._samples = mm._posterior._samples.expand(1, 2, 1)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
def test_q_simple_regret_batch(self):
# the event shape is `b x q x t` = 2 x 2 x 1
for dtype in (torch.float, torch.double):
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, -1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, -1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# TODO: Test different objectives (incl. constraints)
class TestQUpperConfidenceBound(BotorchTestCase):
def test_q_upper_confidence_bound(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
mm._posterior._samples = mm._posterior._samples.expand(1, 2, 1)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
def test_q_upper_confidence_bound_batch(self):
# TODO: T41739913 Implement tests for all MCAcquisitionFunctions
for dtype in (torch.float, torch.double):
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, -1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, -1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertTrue(torch.equal(acqf.X_pending, X))
mm._posterior._samples = torch.zeros(
2, 4, 1, device=self.device, dtype=dtype
)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
# TODO: Test different objectives (incl. constraints)
class TestMCAcquisitionFunctionWithConstraints(BotorchTestCase):
def test_mc_acquisition_function_with_constraints(self):
for dtype in (torch.float, torch.double):
with self.subTest(dtype=dtype):
num_samples, n, q, d, m = 5, 4, 1, 3, 1
X = torch.randn(n, q, d, device=self.device, dtype=dtype)
samples = torch.randn(
num_samples, n, q, m, device=self.device, dtype=dtype
)
mm = MockModel(MockPosterior(samples=samples))
nei_args = {
"model": mm,
"X_baseline": X,
"prune_baseline": False,
}
for acqf_constructor in [
partial(qProbabilityOfImprovement, model=mm, best_f=0.0),
partial(qExpectedImprovement, model=mm, best_f=0.0),
# cache_root=True not supported by MockModel, see test_cache_root
partial(qNoisyExpectedImprovement, cache_root=False, **nei_args),
partial(qNoisyExpectedImprovement, cache_root=True, **nei_args),
]:
acqf = acqf_constructor()
mm._posterior._samples = (
torch.cat((samples, samples), dim=-2)
if isinstance(acqf, qNoisyExpectedImprovement)
else samples
)
with self.subTest(acqf_class=type(acqf)):
for con in [feasible_con, infeasible_con]:
cacqf = acqf_constructor(constraints=[con])
# for NEI test
target = "botorch.acquisition.utils.get_infeasible_cost"
inf_cost = torch.tensor(3, device=self.device, dtype=dtype)
with mock.patch(target, return_value=inf_cost):
vals = cacqf(X)
# NOTE: this is only true for q = 1
expected_vals = acqf(X) * (con(samples) < 0).squeeze()
self.assertAllClose(vals, expected_vals)
with self.assertRaisesRegex(
ValueError,
"ConstrainedMCObjective as well as constraints passed",
):
acqf_constructor(
constraints=[feasible_con],
objective=ConstrainedMCObjective(
objective=IdentityMCObjective(),
constraints=[feasible_con],
),
)
# Forcing negative samples, which will throw an error with simple
# regret because the acquisition utility is negative.
samples = -torch.rand(n, q, m, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
cacqf = qSimpleRegret(model=mm, constraints=[feasible_con])
with self.assertRaisesRegex(
ValueError,
"Constraint-weighting requires unconstrained "
"acquisition values to be non-negative",
):
cacqf(X)
# Test highlighting both common and different behavior of the old
# `ConstrainedMCObjective` and new `constraints` implementation.
# 1. Highlighting difference:
q = 1
samples = torch.randn(n, q, m, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
constrained_objective = ConstrainedMCObjective(
objective=IdentityMCObjective(),
constraints=[infeasible_con],
infeasible_cost=0.0,
)
# The old `ConstrainedMCObjective`-based implementation does not scale
# the best_f value by the feasibility indicator, while the new
# `constraints`-based implementation does. Therefore, the old version
# yields an acquisition value of 1, even though the constraint is not
# satisfied.
best_f = -1.0
old_acqf = qExpectedImprovement(
model=mm, best_f=best_f, objective=constrained_objective
)
new_acqf = qExpectedImprovement(
model=mm, best_f=best_f, constraints=[infeasible_con]
)
old_val = old_acqf(X)
self.assertAllClose(old_val, torch.ones_like(old_val))
new_val = new_acqf(X)
self.assertAllClose(new_val, torch.zeros_like(new_val))
# 2. Highlighting commonality:
# When best_f = 0 and infeasible_cost = 0, both implementations yield
# the same results.
constrained_objective = ConstrainedMCObjective(
objective=IdentityMCObjective(),
constraints=[feasible_con],
infeasible_cost=0.0,
)
best_f = 0.0
old_acqf = qExpectedImprovement(
model=mm, best_f=best_f, objective=constrained_objective
)
new_acqf = qExpectedImprovement(
model=mm, best_f=best_f, constraints=[feasible_con]
)
old_val = old_acqf(X)
new_val = new_acqf(X)
self.assertAllClose(new_val, old_val)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
from unittest import mock
import torch
from botorch.acquisition.cost_aware import InverseCostWeightedUtility
from botorch.acquisition.max_value_entropy_search import (
_sample_max_value_Gumbel,
_sample_max_value_Thompson,
qLowerBoundMaxValueEntropy,
qMaxValueEntropy,
qMultiFidelityLowerBoundMaxValueEntropy,
qMultiFidelityMaxValueEntropy,
)
from botorch.acquisition.objective import (
PosteriorTransform,
ScalarizedPosteriorTransform,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from torch import Tensor
class MESMockModel(MockModel):
r"""Mock object that implements dummy methods and feeds through specified outputs"""
def __init__(self, num_outputs=1, batch_shape=None):
r"""
Args:
num_outputs: The number of outputs.
batch_shape: The batch shape of the model. For details see
`botorch.models.model.Model.batch_shape`.
"""
super().__init__(None)
self._num_outputs = num_outputs
self._batch_shape = torch.Size() if batch_shape is None else batch_shape
def posterior(
self,
X: Tensor,
observation_noise: bool = False,
posterior_transform: Optional[PosteriorTransform] = None,
) -> MockPosterior:
m_shape = X.shape[:-1]
r_shape = list(X.shape[:-2]) + [1, 1]
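# build a standard-normal posterior with identity covariance over the q
# points, so downstream code receives well-formed (if meaningless)
# uncertainty estimates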
mvn = MultivariateNormal(
mean=torch.zeros(m_shape, dtype=X.dtype, device=X.device),
covariance_matrix=torch.eye(
m_shape[-1], dtype=X.dtype, device=X.device
).repeat(r_shape),
)
if self.num_outputs > 1:
mvn = MultitaskMultivariateNormal.from_independent_mvns(
mvns=[mvn] * self.num_outputs
)
posterior = GPyTorchPosterior(mvn)
if posterior_transform is not None:
return posterior_transform(posterior)
return posterior
def forward(self, X: Tensor) -> MultivariateNormal:
return self.posterior(X).distribution
@property
def batch_shape(self) -> torch.Size:
return self._batch_shape
@property
def num_outputs(self) -> int:
return self._num_outputs
class NoBatchShapeMESMockModel(MESMockModel):
# For some reason it's really hard to mock this property to raise a
# NotImplementedError, so let's just make a class for it.
@property
def batch_shape(self) -> torch.Size:
raise NotImplementedError
class TestMaxValueEntropySearch(BotorchTestCase):
def test_q_max_value_entropy(self):
for dtype in (torch.float, torch.double):
torch.manual_seed(7)
mm = MESMockModel()
with self.assertRaises(TypeError):
qMaxValueEntropy(mm)
candidate_set = torch.rand(1000, 2, device=self.device, dtype=dtype)
# test error in case of batch GP model
mm = MESMockModel(batch_shape=torch.Size([2]))
with self.assertRaises(NotImplementedError):
qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
mm = MESMockModel()
train_inputs = torch.rand(5, 10, 2, device=self.device, dtype=dtype)
with self.assertRaises(NotImplementedError):
qMaxValueEntropy(
mm, candidate_set, num_mv_samples=10, train_inputs=train_inputs
)
# test that init works if batch_shape is not implemented on the model
mm = NoBatchShapeMESMockModel()
qMaxValueEntropy(
mm,
candidate_set,
num_mv_samples=10,
)
# test error when number of outputs > 1 and no transform is given.
mm = MESMockModel()
mm._num_outputs = 2
with self.assertRaises(UnsupportedError):
qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
# test with X_pending is None
mm = MESMockModel()
train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
mm.train_inputs = (train_inputs,)
qMVE = qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
# test initialization
self.assertEqual(qMVE.num_fantasies, 16)
self.assertEqual(qMVE.num_mv_samples, 10)
self.assertIsInstance(qMVE.sampler, SobolQMCNormalSampler)
self.assertEqual(qMVE.sampler.sample_shape, torch.Size([128]))
self.assertIsInstance(qMVE.fantasies_sampler, SobolQMCNormalSampler)
self.assertEqual(qMVE.fantasies_sampler.sample_shape, torch.Size([16]))
self.assertEqual(qMVE.use_gumbel, True)
self.assertEqual(qMVE.posterior_max_values.shape, torch.Size([10, 1]))
# test evaluation
X = torch.rand(1, 2, device=self.device, dtype=dtype)
self.assertEqual(qMVE(X).shape, torch.Size([1]))
# test set X pending to None in case of _init_model exists
qMVE.set_X_pending(None)
self.assertEqual(qMVE.model, qMVE._init_model)
# test with use_gumbel = False
qMVE = qMaxValueEntropy(
mm, candidate_set, num_mv_samples=10, use_gumbel=False
)
self.assertEqual(qMVE(X).shape, torch.Size([1]))
# test with X_pending is not None
with mock.patch.object(
MESMockModel, "fantasize", return_value=mm
) as patch_f:
qMVE = qMaxValueEntropy(
mm,
candidate_set,
num_mv_samples=10,
X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
)
patch_f.assert_called_once()
# Test with multi-output model w/ transform.
mm = MESMockModel(num_outputs=2)
pt = ScalarizedPosteriorTransform(
weights=torch.ones(2, device=self.device, dtype=dtype)
)
for gumbel in (True, False):
qMVE = qMaxValueEntropy(
mm,
candidate_set,
num_mv_samples=10,
use_gumbel=gumbel,
posterior_transform=pt,
)
self.assertEqual(qMVE(X).shape, torch.Size([1]))
def test_q_lower_bound_max_value_entropy(self):
for dtype in (torch.float, torch.double):
torch.manual_seed(7)
mm = MESMockModel()
with self.assertRaises(TypeError):
qLowerBoundMaxValueEntropy(mm)
candidate_set = torch.rand(1000, 2, device=self.device, dtype=dtype)
# test error in case of batch GP model
mm = MESMockModel(batch_shape=torch.Size([2]))
with self.assertRaises(NotImplementedError):
qLowerBoundMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
# test error when number of outputs > 1 and no transform
mm = MESMockModel()
mm._num_outputs = 2
with self.assertRaises(UnsupportedError):
qLowerBoundMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
mm._num_outputs = 1
# test with X_pending is None
mm = MESMockModel()
train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
mm.train_inputs = (train_inputs,)
qGIBBON = qLowerBoundMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
# test initialization
self.assertEqual(qGIBBON.num_mv_samples, 10)
self.assertEqual(qGIBBON.use_gumbel, True)
self.assertEqual(qGIBBON.posterior_max_values.shape, torch.Size([10, 1]))
# test evaluation
X = torch.rand(1, 2, device=self.device, dtype=dtype)
self.assertEqual(qGIBBON(X).shape, torch.Size([1]))
# test with use_gumbel = False
qGIBBON = qLowerBoundMaxValueEntropy(
mm, candidate_set, num_mv_samples=10, use_gumbel=False
)
self.assertEqual(qGIBBON(X).shape, torch.Size([1]))
# test with X_pending is not None
qGIBBON = qLowerBoundMaxValueEntropy(
mm,
candidate_set,
num_mv_samples=10,
use_gumbel=False,
X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
)
self.assertEqual(qGIBBON(X).shape, torch.Size([1]))
# Test with multi-output model w/ transform.
mm = MESMockModel(num_outputs=2)
pt = ScalarizedPosteriorTransform(
weights=torch.ones(2, device=self.device, dtype=dtype)
)
qGIBBON = qLowerBoundMaxValueEntropy(
mm,
candidate_set,
num_mv_samples=10,
use_gumbel=False,
X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
posterior_transform=pt,
)
with self.assertRaisesRegex(UnsupportedError, "X_pending is not None"):
qGIBBON(X)
def test_q_multi_fidelity_max_value_entropy(
self, acqf_class=qMultiFidelityMaxValueEntropy
):
for dtype in (torch.float, torch.double):
torch.manual_seed(7)
mm = MESMockModel()
train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
mm.train_inputs = (train_inputs,)
candidate_set = torch.rand(10, 2, device=self.device, dtype=dtype)
qMF_MVE = acqf_class(
model=mm, candidate_set=candidate_set, num_mv_samples=10
)
# test initialization
self.assertEqual(qMF_MVE.num_fantasies, 16)
self.assertEqual(qMF_MVE.num_mv_samples, 10)
self.assertIsInstance(qMF_MVE.sampler, SobolQMCNormalSampler)
self.assertIsInstance(qMF_MVE.cost_sampler, SobolQMCNormalSampler)
self.assertEqual(qMF_MVE.sampler.sample_shape, torch.Size([128]))
self.assertIsInstance(qMF_MVE.fantasies_sampler, SobolQMCNormalSampler)
self.assertEqual(qMF_MVE.fantasies_sampler.sample_shape, torch.Size([16]))
self.assertIsInstance(qMF_MVE.expand, Callable)
self.assertIsInstance(qMF_MVE.project, Callable)
self.assertIsNone(qMF_MVE.X_pending)
self.assertEqual(qMF_MVE.posterior_max_values.shape, torch.Size([10, 1]))
self.assertIsInstance(
qMF_MVE.cost_aware_utility, InverseCostWeightedUtility
)
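# (when no cost_aware_utility is passed, the multi-fidelity variants are
# expected to default to an InverseCostWeightedUtility, per the assertion
# above)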
# test evaluation
X = torch.rand(1, 2, device=self.device, dtype=dtype)
self.assertEqual(qMF_MVE(X).shape, torch.Size([1]))
# Test with multi-output model w/ transform.
mm = MESMockModel(num_outputs=2)
pt = ScalarizedPosteriorTransform(
weights=torch.ones(2, device=self.device, dtype=dtype)
)
qMF_MVE = acqf_class(
model=mm,
candidate_set=candidate_set,
num_mv_samples=10,
posterior_transform=pt,
)
X = torch.rand(1, 2, device=self.device, dtype=dtype)
self.assertEqual(qMF_MVE(X).shape, torch.Size([1]))
def test_q_multi_fidelity_lower_bound_max_value_entropy(self):
# Same test as for MF-MES since GIBBON only changes in the way it computes the
# information gain.
self.test_q_multi_fidelity_max_value_entropy(
acqf_class=qMultiFidelityLowerBoundMaxValueEntropy
)
def test_sample_max_value_Gumbel(self):
for dtype in (torch.float, torch.double):
torch.manual_seed(7)
mm = MESMockModel()
candidate_set = torch.rand(3, 10, 2, device=self.device, dtype=dtype)
samples = _sample_max_value_Gumbel(mm, candidate_set, 5)
self.assertEqual(samples.shape, torch.Size([5, 3]))
# Test with multi-output model w/ transform.
mm = MESMockModel(num_outputs=2)
pt = ScalarizedPosteriorTransform(
weights=torch.ones(2, device=self.device, dtype=dtype)
)
samples = _sample_max_value_Gumbel(
mm, candidate_set, 5, posterior_transform=pt
)
self.assertEqual(samples.shape, torch.Size([5, 3]))
def test_sample_max_value_Thompson(self):
for dtype in (torch.float, torch.double):
torch.manual_seed(7)
mm = MESMockModel()
candidate_set = torch.rand(3, 10, 2, device=self.device, dtype=dtype)
samples = _sample_max_value_Thompson(mm, candidate_set, 5)
self.assertEqual(samples.shape, torch.Size([5, 3]))
# Test with multi-output model w/ transform.
mm = MESMockModel(num_outputs=2)
pt = ScalarizedPosteriorTransform(
weights=torch.ones(2, device=self.device, dtype=dtype)
)
samples = _sample_max_value_Thompson(
mm, candidate_set, 5, posterior_transform=pt
)
self.assertEqual(samples.shape, torch.Size([5, 3]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import torch
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.acquisition.prior_guided import PriorGuidedAcquisitionFunction
from botorch.models import SingleTaskGP
from botorch.utils.testing import BotorchTestCase
from botorch.utils.transforms import match_batch_shape
from torch.nn import Module
class DummyPrior(Module):
def forward(self, X):
p = torch.distributions.Normal(0, 1)
# sum over d dimensions
return p.log_prob(X).sum(dim=-1).exp()
def get_val_prob(test_X, test_X_exp, af, prior):
with torch.no_grad():
val = af(test_X)
prob = prior(test_X_exp)
return val, prob
def get_weighted_val(ei_val, prob, exponent, use_log):
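# the weighting is multiplicative (prob ** exponent) in value space and
# additive (prob * exponent) when the acquisition value is in log space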
if use_log:
return prob * exponent + ei_val
return prob.pow(exponent) * ei_val
class TestPriorGuidedAcquisitionFunction(BotorchTestCase):
def setUp(self):
super().setUp()
self.prior = DummyPrior()
self.train_X = torch.rand(5, 3, dtype=torch.double, device=self.device)
self.train_Y = self.train_X.norm(dim=-1, keepdim=True)
def test_prior_guided_analytic_acquisition_function(self):
for dtype in (torch.float, torch.double):
model = SingleTaskGP(
self.train_X.to(dtype=dtype), self.train_Y.to(dtype=dtype)
)
ei = ExpectedImprovement(model, best_f=0.0)
for batch_shape, use_log, exponent in product(
([], [2]),
(False, True),
(1.0, 2.0),
):
af = PriorGuidedAcquisitionFunction(
acq_function=ei,
prior_module=self.prior,
log=use_log,
prior_exponent=exponent,
)
test_X = torch.rand(*batch_shape, 1, 3, dtype=dtype, device=self.device)
test_X_exp = test_X.unsqueeze(0) if batch_shape == [] else test_X
with torch.no_grad():
ei_val = ei(test_X_exp).unsqueeze(-1)
val, prob = get_val_prob(test_X, test_X_exp, af, self.prior)
weighted_val = get_weighted_val(ei_val, prob, exponent, use_log)
expected_val = weighted_val.squeeze(-1)
self.assertTrue(torch.allclose(val, expected_val))
# test that q>1 and a non SampleReducing AF raises an exception
msg = (
"q-batches with q>1 are only supported using "
"SampleReducingMCAcquisitionFunction."
)
test_X = torch.rand(2, 3, dtype=dtype, device=self.device)
with self.assertRaisesRegex(NotImplementedError, msg):
af(test_X)
def test_prior_guided_mc_acquisition_function(self):
for dtype in (torch.float, torch.double):
model = SingleTaskGP(
self.train_X.to(dtype=dtype), self.train_Y.to(dtype=dtype)
)
ei = qExpectedImprovement(model, best_f=0.0)
for batch_shape, q, use_log, exponent in product(
([], [2]),
(1, 2),
(False, True),
(1.0, 2.0),
):
af = PriorGuidedAcquisitionFunction(
acq_function=ei,
prior_module=self.prior,
log=use_log,
prior_exponent=exponent,
)
test_X = torch.rand(*batch_shape, q, 3, dtype=dtype, device=self.device)
test_X_exp = test_X.unsqueeze(0) if batch_shape == [] else test_X
val, prob = get_val_prob(test_X, test_X_exp, af, self.prior)
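# reconstruct the expected value by weighting the non-reduced per-sample
# EI values and then applying the AF's own sample- and q-reductions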
ei_val = ei._non_reduced_forward(test_X_exp)
weighted_val = get_weighted_val(ei_val, prob, exponent, use_log)
expected_val = ei._sample_reduction(ei._q_reduction(weighted_val))
self.assertTrue(torch.allclose(val, expected_val))
# test set_X_pending
X_pending = torch.rand(2, 3, dtype=dtype, device=self.device)
af.X_pending = X_pending
self.assertTrue(torch.equal(X_pending, af.X_pending))
# append the pending points along the q-dim, matching batch shapes
test_X_exp_with_pending = torch.cat(
[test_X_exp, match_batch_shape(X_pending, test_X_exp)], dim=-2
)
with torch.no_grad():
val = af(test_X)
prob = self.prior(test_X_exp_with_pending)
ei_val = ei._non_reduced_forward(test_X_exp_with_pending)
if use_log:
weighted_val = prob * exponent + ei_val
else:
weighted_val = prob.pow(exponent) * ei_val
expected_val = ei._sample_reduction(ei._q_reduction(weighted_val))
self.assertTrue(torch.equal(val, expected_val))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from copy import deepcopy
from itertools import product
from math import pi
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition import (
LogImprovementMCAcquisitionFunction,
qLogExpectedImprovement,
qLogNoisyExpectedImprovement,
)
from botorch.acquisition.input_constructors import ACQF_INPUT_CONSTRUCTOR_REGISTRY
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
)
from botorch.acquisition.objective import (
ConstrainedMCObjective,
GenericMCObjective,
IdentityMCObjective,
PosteriorTransform,
ScalarizedPosteriorTransform,
)
from botorch.exceptions import BotorchWarning, UnsupportedError
from botorch.exceptions.errors import BotorchError
from botorch.models import SingleTaskGP
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.low_rank import sample_cached_cholesky
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from botorch.utils.transforms import standardize
from torch import Tensor
def infeasible_con(samples: Tensor) -> Tensor:
return torch.ones_like(samples[..., 0])
def feasible_con(samples: Tensor) -> Tensor:
return -torch.ones_like(samples[..., 0])
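# NOTE: per BoTorch's constraint convention, a constraint is satisfied
# where the callable returns non-positive values, so `infeasible_con` is
# violated everywhere and `feasible_con` is satisfied everywhere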
class DummyLogImprovementAcquisitionFunction(LogImprovementMCAcquisitionFunction):
def _sample_forward(self, X):
pass
class DummyNonScalarizingPosteriorTransform(PosteriorTransform):
scalarize = False
def evaluate(self, Y):
pass # pragma: no cover
def forward(self, posterior):
pass # pragma: no cover
class TestLogImprovementAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
LogImprovementMCAcquisitionFunction()
# raise if model is multi-output, but no posterior transform or objective
# is given
no = "botorch.utils.testing.MockModel.num_outputs"
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
mm = MockModel(MockPosterior())
with self.assertRaises(UnsupportedError):
DummyLogImprovementAcquisitionFunction(model=mm)
# raise if model is multi-output, but the posterior transform does not
# scalarize and no objective is given
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
mm = MockModel(MockPosterior())
ptf = DummyNonScalarizingPosteriorTransform()
with self.assertRaises(UnsupportedError):
DummyLogImprovementAcquisitionFunction(
model=mm, posterior_transform=ptf
)
mm = MockModel(MockPosterior())
objective = ConstrainedMCObjective(
IdentityMCObjective(),
constraints=[lambda samples: torch.zeros_like(samples[..., 0])],
)
with self.assertRaisesRegex(
BotorchError,
"Log-Improvement should not be used with `ConstrainedMCObjective`.",
):
DummyLogImprovementAcquisitionFunction(model=mm, objective=objective)
class TestQLogExpectedImprovement(BotorchTestCase):
def test_q_log_expected_improvement(self):
self.assertIn(qLogExpectedImprovement, ACQF_INPUT_CONSTRUCTOR_REGISTRY.keys())
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, **tkwargs)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
log_acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
self.assertFalse(acqf._fat) # different default behavior
self.assertTrue(log_acqf._fat)
# test initialization
for k in ["objective", "sampler"]:
self.assertIn(k, acqf._modules)
self.assertIn(k, log_acqf._modules)
res = acqf(X).item()
self.assertEqual(res, 0.0)
exp_log_res = log_acqf(X).exp().item()
# Due to the smooth approximation, the value at zero should be close to,
# but not exactly, zero; it is upper-bounded by the tau_relu hyperparameter.
self.assertTrue(0 < exp_log_res)
self.assertTrue(exp_log_res <= log_acqf.tau_relu)
# test shifting best_f value downward to see non-zero improvement
best_f = -1
acqf = qExpectedImprovement(model=mm, best_f=best_f, sampler=sampler)
log_acqf = qLogExpectedImprovement(model=mm, best_f=best_f, sampler=sampler)
res, exp_log_res = acqf(X), log_acqf(X).exp()
expected_val = -best_f
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, self.device.type)
self.assertEqual(res.item(), expected_val)
# Further away from zero, the value is numerically indistinguishable from
# the exact improvement in single-precision arithmetic.
self.assertEqual(exp_log_res.dtype, dtype)
self.assertEqual(exp_log_res.device.type, self.device.type)
self.assertTrue(expected_val <= exp_log_res.item())
self.assertTrue(exp_log_res.item() <= expected_val + log_acqf.tau_relu)
# test shifting best_f value upward to see advantage of LogEI
best_f = 1
acqf = qExpectedImprovement(model=mm, best_f=best_f, sampler=sampler)
log_acqf = qLogExpectedImprovement(model=mm, best_f=best_f, sampler=sampler)
res, log_res = acqf(X), log_acqf(X)
exp_log_res = log_res.exp()
expected_val = 0
self.assertEqual(res.item(), expected_val)
self.assertTrue(expected_val <= exp_log_res.item())
self.assertTrue(exp_log_res.item() <= expected_val + log_acqf.tau_relu)
# However, the log value is large and negative with non-vanishing gradients
self.assertGreater(-1, log_res.item())
self.assertGreater(log_res.item(), -100)
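# this is the key advantage of the log formulation: where qEI is exactly
# zero (and hence flat), qLogEI remains finite with informative gradients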
# NOTE: The following tests are adapted from the qEI tests.
# basic test, no resample
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertTrue(0 < res.exp().item())
self.assertTrue(res.exp().item() < acqf.tau_relu)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertTrue(0 < res.exp().item())
self.assertTrue(res.exp().item() < acqf.tau_relu)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
mm._posterior._samples = torch.zeros(1, 2, 1, **tkwargs)
res = acqf(X)
X2 = torch.zeros(1, 1, 1, **tkwargs, requires_grad=True)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
# testing with illegal taus
with self.assertRaisesRegex(ValueError, "tau_max is not a scalar:"):
qLogExpectedImprovement(
model=mm, best_f=0, tau_max=torch.tensor([1, 2])
)
with self.assertRaisesRegex(ValueError, "tau_relu is non-positive:"):
qLogExpectedImprovement(model=mm, best_f=0, tau_relu=-2)
def test_q_log_expected_improvement_batch(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 2 x 2 x 1
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
exp_log_res = acqf(X).exp()
# with no approximations (qEI): self.assertEqual(res[0].item(), 1.0)
# in the batch case, the smooth approximations shift the values slightly
self.assertEqual(exp_log_res.dtype, dtype)
self.assertEqual(exp_log_res.device.type, self.device.type)
self.assertTrue(1.0 <= exp_log_res[0].item())
self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
# self.assertAllClose(exp_log_res[0], torch.ones_like(exp_log_res[0]), )
# with no approximations (qEI): self.assertEqual(res[1].item(), 0.0)
self.assertTrue(0 < exp_log_res[1].item())
self.assertTrue(exp_log_res[1].item() <= acqf.tau_relu)
# test batch model, batched best_f values
sampler = IIDNormalSampler(sample_shape=torch.Size([3]))
acqf = qLogExpectedImprovement(
model=mm, best_f=torch.Tensor([0, 0]), sampler=sampler
)
exp_log_res = acqf(X).exp()
# with no approximations (qEI): self.assertEqual(res[0].item(), 1.0)
self.assertTrue(1.0 <= exp_log_res[0].item())
self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
# with no approximations (qEI): self.assertEqual(res[1].item(), 0.0)
self.assertTrue(0 < exp_log_res[1].item())
self.assertTrue(exp_log_res[1].item() <= acqf.tau_relu)
# test shifting best_f value
acqf = qLogExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
exp_log_res = acqf(X).exp()
# with no approximations (qEI): self.assertEqual(res[0].item(), 2.0)
# TODO: figure out numerically stable tests and principled tolerances
# With q > 1, maximum value can get moved down due to L_q-norm approximation
# of the maximum over the q-batch.
safe_upper_lower_bound = 1.999
self.assertTrue(safe_upper_lower_bound <= exp_log_res[0].item())
self.assertTrue(exp_log_res[0].item() <= 2.0 + acqf.tau_relu + acqf.tau_max)
# with no approximations (qEI): self.assertEqual(res[1].item(), 1.0)
self.assertTrue(1.0 <= exp_log_res[1].item())
# an error of ~tau_max can occur when all candidates in a q-batch have
# the same acquisition value
self.assertTrue(exp_log_res[1].item() <= 1.0 + acqf.tau_relu + acqf.tau_max)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
# res = acqf(X) # 1-dim batch
exp_log_res = acqf(X).exp() # 1-dim batch
# with no approximations (qEI): self.assertEqual(res[0].item(), 1.0)
safe_upper_lower_bound = 0.999
self.assertTrue(safe_upper_lower_bound <= exp_log_res[0].item())
self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
# with no approximations (qEI): self.assertEqual(res[1].item(), 0.0)
self.assertTrue(0.0 <= exp_log_res[1].item())
self.assertTrue(exp_log_res[1].item() <= 0.0 + acqf.tau_relu)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
exp_log_res = acqf(X.expand(2, 2, 1)).exp() # 2-dim batch
# self.assertEqual(res[0].item(), 1.0)
safe_upper_lower_bound = 0.999
self.assertTrue(safe_upper_lower_bound <= exp_log_res[0].item())
self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
# self.assertEqual(res[1].item(), 0.0)
self.assertTrue(0.0 <= exp_log_res[1].item())
self.assertTrue(exp_log_res[1].item() <= 0.0 + acqf.tau_relu)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 2, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
exp_log_res = acqf(X).exp()
# self.assertEqual(res[0].item(), 1.0)
safe_upper_lower_bound = 0.999
self.assertTrue(safe_upper_lower_bound <= exp_log_res[0].item())
self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
# self.assertEqual(res[1].item(), 0.0)
self.assertTrue(0.0 <= exp_log_res[1].item())
self.assertTrue(exp_log_res[1].item() <= 0.0 + acqf.tau_relu)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# TODO: Test different objectives (incl. constraints)
class TestQLogNoisyExpectedImprovement(BotorchTestCase):
def test_q_log_noisy_expected_improvement(self):
self.assertIn(
qLogNoisyExpectedImprovement, ACQF_INPUT_CONSTRUCTOR_REGISTRY.keys()
)
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 2 x 1
samples_noisy = torch.tensor([0.0, 1.0], device=self.device, dtype=dtype)
samples_noisy = samples_noisy.view(1, 2, 1)
# X_baseline is `q' x d` = 1 x 1
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
# X is `q x d` = 1 x 1
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
kwargs = {
"model": mm_noisy,
"X_baseline": X_baseline,
"sampler": sampler,
"prune_baseline": False,
"cache_root": False,
}
acqf = qNoisyExpectedImprovement(**kwargs)
log_acqf = qLogNoisyExpectedImprovement(**kwargs)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
log_res = log_acqf(X)
self.assertEqual(log_res.dtype, dtype)
self.assertEqual(log_res.device.type, self.device.type)
self.assertAllClose(log_res.exp().item(), 1.0)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
kwargs = {
"model": mm_noisy,
"X_baseline": X_baseline,
"sampler": sampler,
"prune_baseline": False,
"cache_root": False,
}
log_acqf = qLogNoisyExpectedImprovement(**kwargs)
log_res = log_acqf(X)
self.assertEqual(log_res.exp().item(), 1.0)
self.assertEqual(
log_acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])
)
bs = log_acqf.sampler.base_samples.clone()
log_acqf(X)
self.assertTrue(torch.equal(log_acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
kwargs = {
"model": mm_noisy,
"X_baseline": X_baseline,
"sampler": sampler,
"prune_baseline": False,
"cache_root": False,
}
log_acqf = qLogNoisyExpectedImprovement(**kwargs)
log_res = log_acqf(X)
self.assertEqual(log_res.exp().item(), 1.0)
self.assertEqual(
log_acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])
)
bs = log_acqf.sampler.base_samples.clone()
log_acqf(X)
self.assertTrue(torch.equal(log_acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
samples_noisy_pending = torch.tensor(
[1.0, 0.0, 0.0], device=self.device, dtype=dtype
)
samples_noisy_pending = samples_noisy_pending.view(1, 3, 1)
mm_noisy_pending = MockModel(MockPosterior(samples=samples_noisy_pending))
kwargs = {
"model": mm_noisy_pending,
"X_baseline": X_baseline,
"sampler": sampler,
"prune_baseline": False,
"cache_root": False,
}
# copy for log version
log_acqf = qLogNoisyExpectedImprovement(**kwargs)
log_acqf.set_X_pending()
self.assertIsNone(log_acqf.X_pending)
log_acqf.set_X_pending(None)
self.assertIsNone(log_acqf.X_pending)
log_acqf.set_X_pending(X)
self.assertEqual(log_acqf.X_pending, X)
log_acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
log_acqf.set_X_pending(X2)
self.assertEqual(log_acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
def test_q_log_noisy_expected_improvement_batch(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 2 x 3 x 1
samples_noisy = torch.zeros(2, 3, 1, device=self.device, dtype=dtype)
samples_noisy[0, -1, 0] = 1.0
mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
# X is `b x q x d` = 2 x 2 x 1
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
kwargs = {
"model": mm_noisy,
"X_baseline": X_baseline,
"sampler": sampler,
"prune_baseline": False,
"cache_root": False,
}
acqf = qLogNoisyExpectedImprovement(**kwargs)
res = acqf(X).exp()
expected_res = torch.tensor([1.0, 0.0], dtype=dtype, device=self.device)
self.assertAllClose(res, expected_res, atol=acqf.tau_relu)
self.assertGreater(res[1].item(), 0.0)
self.assertGreater(acqf.tau_relu, res[1].item())
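# the comparison tolerance is tied to tau_relu: the smooth ReLU
# approximation lifts exact zeros by at most ~tau_relu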
# test batch mode
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qLogNoisyExpectedImprovement(
model=mm_noisy,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
res = acqf(X).exp() # 1-dim batch
expected_res = torch.tensor([1.0, 0.0], dtype=dtype, device=self.device)
self.assertAllClose(res, expected_res, atol=acqf.tau_relu)
self.assertGreater(res[1].item(), 0.0)
self.assertGreater(acqf.tau_relu, res[1].item())
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 2, 1)).exp() # 2-dim batch
expected_res = torch.tensor([1.0, 0.0], dtype=dtype, device=self.device)
self.assertAllClose(res, expected_res, atol=acqf.tau_relu)
self.assertGreater(res[1].item(), 0.0)
self.assertGreater(acqf.tau_relu, res[1].item())
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 2, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qLogNoisyExpectedImprovement(
model=mm_noisy,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=False,
)
res = acqf(X).exp()
expected_res = torch.tensor([1.0, 0.0], dtype=dtype, device=self.device)
self.assertAllClose(res, expected_res, atol=acqf.tau_relu)
self.assertGreater(res[1].item(), 0.0)
self.assertGreater(acqf.tau_relu, res[1].item())
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
def test_prune_baseline(self):
no = "botorch.utils.testing.MockModel.num_outputs"
prune = "botorch.acquisition.logei.prune_inferior_points"
for dtype in (torch.float, torch.double):
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype)
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(MockPosterior(samples=X_baseline))
with mock.patch(prune, return_value=X_pruned) as mock_prune:
acqf = qLogNoisyExpectedImprovement(
model=mm,
X_baseline=X_baseline,
prune_baseline=True,
cache_root=False,
)
mock_prune.assert_called_once()
self.assertTrue(torch.equal(acqf.X_baseline, X_pruned))
with mock.patch(prune, return_value=X_pruned) as mock_prune:
acqf = qLogNoisyExpectedImprovement(
model=mm,
X_baseline=X_baseline,
prune_baseline=True,
marginalize_dim=-3,
cache_root=False,
)
_, kwargs = mock_prune.call_args
self.assertEqual(kwargs["marginalize_dim"], -3)
def test_cache_root(self):
sample_cached_path = (
"botorch.acquisition.cached_cholesky.sample_cached_cholesky"
)
raw_state_dict = {
"likelihood.noise_covar.raw_noise": torch.tensor(
[[0.0895], [0.2594]], dtype=torch.float64
),
"mean_module.raw_constant": torch.tensor(
[-0.4545, -0.1285], dtype=torch.float64
),
"covar_module.raw_outputscale": torch.tensor(
[1.4876, 1.4897], dtype=torch.float64
),
"covar_module.base_kernel.raw_lengthscale": torch.tensor(
[[[-0.7202, -0.2868]], [[-0.8794, -1.2877]]], dtype=torch.float64
),
}
# test batched models (e.g. for MCMC)
for train_batch_shape, m, dtype in product(
(torch.Size([]), torch.Size([3])), (1, 2), (torch.float, torch.double)
):
state_dict = deepcopy(raw_state_dict)
for k, v in state_dict.items():
if m == 1:
v = v[0]
if len(train_batch_shape) > 0:
v = v.unsqueeze(0).expand(*train_batch_shape, *v.shape)
state_dict[k] = v
tkwargs = {"device": self.device, "dtype": dtype}
if m == 2:
objective = GenericMCObjective(lambda Y, X: Y.sum(dim=-1))
else:
objective = None
for k, v in state_dict.items():
state_dict[k] = v.to(**tkwargs)
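            # single precision needs a much looser tolerance than double for
            # comparing the cached and uncached acquisition values below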
all_close_kwargs = (
{
"atol": 1e-1,
"rtol": 0.0,
}
if dtype == torch.float
else {"atol": 1e-4, "rtol": 0.0}
)
torch.manual_seed(1234)
train_X = torch.rand(*train_batch_shape, 3, 2, **tkwargs)
train_Y = (
torch.sin(train_X * 2 * pi)
+ torch.randn(*train_batch_shape, 3, 2, **tkwargs)
)[..., :m]
train_Y = standardize(train_Y)
model = SingleTaskGP(
train_X,
train_Y,
)
if len(train_batch_shape) > 0:
X_baseline = train_X[0]
else:
X_baseline = train_X
model.load_state_dict(state_dict, strict=False)
sampler = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0)
torch.manual_seed(0)
acqf = qLogNoisyExpectedImprovement(
model=model,
X_baseline=X_baseline,
sampler=sampler,
objective=objective,
prune_baseline=False,
cache_root=True,
)
orig_base_samples = acqf.base_sampler.base_samples.detach().clone()
sampler2 = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0)
sampler2.base_samples = orig_base_samples
torch.manual_seed(0)
acqf_no_cache = qLogNoisyExpectedImprovement(
model=model,
X_baseline=X_baseline,
sampler=sampler2,
objective=objective,
prune_baseline=False,
cache_root=False,
)
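            # with identical base samples, the cached-root and uncached
            # acquisition functions should produce matching values and gradients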
for q, batch_shape in product(
(1, 3), (torch.Size([]), torch.Size([3]), torch.Size([4, 3]))
):
acqf.q_in = -1
acqf_no_cache.q_in = -1
test_X = (
0.3 + 0.05 * torch.randn(*batch_shape, q, 2, **tkwargs)
).requires_grad_(True)
with mock.patch(
sample_cached_path, wraps=sample_cached_cholesky
) as mock_sample_cached:
torch.manual_seed(0)
val = acqf(test_X).exp()
mock_sample_cached.assert_called_once()
val.sum().backward()
base_samples = acqf.sampler.base_samples.detach().clone()
X_grad = test_X.grad.clone()
test_X2 = test_X.detach().clone().requires_grad_(True)
acqf_no_cache.sampler.base_samples = base_samples
with mock.patch(
sample_cached_path, wraps=sample_cached_cholesky
) as mock_sample_cached:
torch.manual_seed(0)
val2 = acqf_no_cache(test_X2).exp()
mock_sample_cached.assert_not_called()
self.assertAllClose(val, val2, **all_close_kwargs)
val2.sum().backward()
self.assertAllClose(X_grad, test_X2.grad, **all_close_kwargs)
# test we fall back to standard sampling for
# ill-conditioned covariances
acqf._baseline_L = torch.zeros_like(acqf._baseline_L)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
with torch.no_grad():
acqf(test_X)
self.assertEqual(sum(issubclass(w.category, BotorchWarning) for w in ws), 1)
# test w/ posterior transform
X_baseline = torch.rand(2, 1)
model = SingleTaskGP(X_baseline, torch.randn(2, 1))
pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
with mock.patch.object(
qLogNoisyExpectedImprovement,
"_compute_root_decomposition",
) as mock_cache_root:
acqf = qLogNoisyExpectedImprovement(
model=model,
X_baseline=X_baseline,
sampler=IIDNormalSampler(sample_shape=torch.Size([1])),
posterior_transform=pt,
prune_baseline=False,
cache_root=True,
)
tf_post = model.posterior(X_baseline, posterior_transform=pt)
self.assertTrue(
torch.allclose(
tf_post.mean, mock_cache_root.call_args[-1]["posterior"].mean
)
)
# testing constraints
n, d, m = 8, 1, 3
X_baseline = torch.rand(n, d)
model = SingleTaskGP(X_baseline, torch.randn(n, m)) # batched model
nei_args = {
"model": model,
"X_baseline": X_baseline,
"prune_baseline": False,
"cache_root": True,
"posterior_transform": ScalarizedPosteriorTransform(weights=torch.ones(m)),
"sampler": SobolQMCNormalSampler(torch.Size([5])),
}
acqf = qLogNoisyExpectedImprovement(**nei_args)
X = torch.randn_like(X_baseline)
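        # if no baseline point is feasible, compute_best_f should fall back to
        # the negative of the infeasible cost returned by get_infeasible_cost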
for con in [feasible_con, infeasible_con]:
with self.subTest(con=con):
target = "botorch.acquisition.utils.get_infeasible_cost"
infcost = torch.tensor([3], device=self.device, dtype=dtype)
with mock.patch(target, return_value=infcost):
cacqf = qLogNoisyExpectedImprovement(**nei_args, constraints=[con])
_, obj = cacqf._get_samples_and_objectives(X)
best_feas_f = cacqf.compute_best_f(obj)
if con is feasible_con:
self.assertAllClose(best_feas_f, acqf.compute_best_f(obj))
else:
self.assertAllClose(
best_feas_f, torch.full_like(obj[..., [0]], -infcost.item())
)
# TODO: Test different objectives (incl. constraints)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import torch
from botorch import settings
from botorch.acquisition.decoupled import DecoupledAcquisitionFunction
from botorch.exceptions import BotorchTensorDimensionError, BotorchWarning
from botorch.logging import shape_to_str
from botorch.models import ModelListGP, SingleTaskGP
from botorch.utils.testing import BotorchTestCase
class DummyDecoupledAcquisitionFunction(DecoupledAcquisitionFunction):
def forward(self, X):
pass
class TestDecoupledAcquisitionFunction(BotorchTestCase):
def test_decoupled_acquisition_function(self):
msg = (
"Can't instantiate abstract class DecoupledAcquisitionFunction"
" with abstract method forward"
)
with self.assertRaisesRegex(TypeError, msg):
DecoupledAcquisitionFunction()
# test raises error if model is not ModelList
msg = "DummyDecoupledAcquisitionFunction requires using a ModelList."
model = SingleTaskGP(
torch.rand(1, 3, device=self.device), torch.rand(1, 2, device=self.device)
)
with self.assertRaisesRegex(ValueError, msg):
DummyDecoupledAcquisitionFunction(model=model)
m = SingleTaskGP(
torch.rand(1, 3, device=self.device), torch.rand(1, 1, device=self.device)
)
model = ModelListGP(m, m)
# basic test
af = DummyDecoupledAcquisitionFunction(model=model)
self.assertIs(af.model, model)
self.assertIsNone(af.X_evaluation_mask)
self.assertIsNone(af.X_pending)
# test set X_evaluation_mask
# test wrong number of outputs
eval_mask = torch.randint(0, 2, (2, 3), device=self.device).bool()
msg = (
"Expected X_evaluation_mask to be `q x m`, but got shape"
f" {shape_to_str(eval_mask.shape)}."
)
with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
af.X_evaluation_mask = eval_mask
# test more than 2 dimensions
eval_mask.unsqueeze_(0)
msg = (
"Expected X_evaluation_mask to be `q x m`, but got shape"
f" {shape_to_str(eval_mask.shape)}."
)
with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
af.X_evaluation_mask = eval_mask
# set eval_mask
eval_mask = eval_mask[0, :, :2]
af.X_evaluation_mask = eval_mask
self.assertIs(af.X_evaluation_mask, eval_mask)
# test set_X_pending
X_pending = torch.rand(1, 1, device=self.device)
msg = (
"If `self.X_evaluation_mask` is not None, then "
"`X_pending_evaluation_mask` must be provided."
)
with self.assertRaisesRegex(ValueError, msg):
af.set_X_pending(X_pending=X_pending)
af.X_evaluation_mask = None
X_pending = X_pending.requires_grad_(True)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
af.set_X_pending(X_pending)
self.assertEqual(af.X_pending, X_pending)
self.assertEqual(sum(issubclass(w.category, BotorchWarning) for w in ws), 1)
self.assertIsNone(af.X_evaluation_mask)
# test setting X_pending with X_pending_evaluation_mask
X_pending = torch.rand(3, 1, device=self.device)
# test raises exception
# wrong number of outputs, wrong number of dims, wrong number of rows
for shape in ([3, 1], [1, 3, 2], [1, 2]):
eval_mask = torch.randint(0, 2, shape, device=self.device).bool()
msg = (
f"Expected `X_pending_evaluation_mask` of shape `{X_pending.shape[0]} "
f"x {model.num_outputs}`, but got "
f"{shape_to_str(eval_mask.shape)}."
)
with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
af.set_X_pending(
X_pending=X_pending, X_pending_evaluation_mask=eval_mask
)
eval_mask = torch.randint(0, 2, (3, 2), device=self.device).bool()
af.set_X_pending(X_pending=X_pending, X_pending_evaluation_mask=eval_mask)
self.assertTrue(torch.equal(af.X_pending, X_pending))
self.assertIs(af.X_pending_evaluation_mask, eval_mask)
# test construct_evaluation_mask
# X_evaluation_mask is None
X = torch.rand(4, 5, 2, device=self.device)
X_eval_mask = af.construct_evaluation_mask(X=X)
expected_eval_mask = torch.cat(
[torch.ones(X.shape[1:], dtype=torch.bool, device=self.device), eval_mask],
dim=0,
)
self.assertTrue(torch.equal(X_eval_mask, expected_eval_mask))
# test X_evaluation_mask is not None
# test wrong shape
        af.X_evaluation_mask = torch.zeros(1, 2, dtype=torch.bool, device=self.device)
msg = "Expected the -2 dimension of X and X_evaluation_mask to match."
with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
af.construct_evaluation_mask(X=X)
af.X_evaluation_mask = torch.randint(0, 2, (5, 2), device=self.device).bool()
X_eval_mask = af.construct_evaluation_mask(X=X)
expected_eval_mask = torch.cat([af.X_evaluation_mask, eval_mask], dim=0)
self.assertTrue(torch.equal(X_eval_mask, expected_eval_mask))
# test setting X_pending as None
af.set_X_pending(X_pending=None, X_pending_evaluation_mask=None)
self.assertIsNone(af.X_pending)
self.assertIsNone(af.X_pending_evaluation_mask)
# test construct_evaluation_mask when X_pending is None
self.assertTrue(
torch.equal(af.construct_evaluation_mask(X=X), af.X_evaluation_mask)
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import torch
from botorch import settings
from botorch.acquisition.cost_aware import (
CostAwareUtility,
GenericCostAwareUtility,
InverseCostWeightedUtility,
)
from botorch.exceptions.warnings import CostAwareWarning
from botorch.models.deterministic import GenericDeterministicModel
from botorch.sampling import IIDNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestCostAwareUtilities(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
CostAwareUtility()
def test_GenericCostAwareUtility(self):
def cost(X, deltas, **kwargs):
return deltas.mean(dim=-1) / X[..., 1].sum(dim=-1)
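        # GenericCostAwareUtility simply wraps the callable above, so
        # u(X, deltas) should match cost(X, deltas) exactly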
for dtype in (torch.float, torch.double):
u = GenericCostAwareUtility(cost)
X = torch.rand(3, 2, device=self.device, dtype=dtype)
deltas = torch.rand(5, 3, device=self.device, dtype=dtype)
self.assertIsInstance(u, GenericCostAwareUtility)
self.assertTrue(torch.equal(u(X, deltas), cost(X, deltas)))
X = torch.rand(4, 3, 2, device=self.device, dtype=dtype)
deltas = torch.rand(5, 4, 3, device=self.device, dtype=dtype)
self.assertIsInstance(u, GenericCostAwareUtility)
self.assertTrue(torch.equal(u(X, deltas), cost(X, deltas)))
def test_InverseCostWeightedUtility(self):
for batch_shape in ([], [2]):
for dtype in (torch.float, torch.double):
# the event shape is `batch_shape x q x t`
mean = 1 + torch.rand(
*batch_shape, 2, 1, device=self.device, dtype=dtype
)
mm = MockModel(MockPosterior(mean=mean))
X = torch.randn(*batch_shape, 3, 2, device=self.device, dtype=dtype)
deltas = torch.rand(4, *batch_shape, device=self.device, dtype=dtype)
# test that sampler is required if use_mean=False
icwu = InverseCostWeightedUtility(mm, use_mean=False)
with self.assertRaises(RuntimeError):
icwu(X, deltas)
# check warning for negative cost
mm = MockModel(MockPosterior(mean=mean.clamp_max(-1e-6)))
icwu = InverseCostWeightedUtility(mm)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
icwu(X, deltas)
self.assertTrue(
any(issubclass(w.category, CostAwareWarning) for w in ws)
)
# basic test
mm = MockModel(MockPosterior(mean=mean))
icwu = InverseCostWeightedUtility(mm)
ratios = icwu(X, deltas)
self.assertTrue(
torch.equal(ratios, deltas / mean.squeeze(-1).sum(dim=-1))
)
# sampling test
samples = 1 + torch.rand( # event shape is q x m
*batch_shape, 3, 1, device=self.device, dtype=dtype
)
mm = MockModel(MockPosterior(samples=samples))
icwu = InverseCostWeightedUtility(mm, use_mean=False)
ratios = icwu(
X, deltas, sampler=IIDNormalSampler(sample_shape=torch.Size([4]))
)
self.assertTrue(
torch.equal(ratios, deltas / samples.squeeze(-1).sum(dim=-1))
)
# test min cost
mm = MockModel(MockPosterior(mean=mean))
icwu = InverseCostWeightedUtility(mm, min_cost=1.5)
ratios = icwu(X, deltas)
self.assertTrue(
torch.equal(
ratios, deltas / mean.clamp_min(1.5).squeeze(-1).sum(dim=-1)
)
)
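                # clamping with min_cost guards the inverse weighting against
                # vanishing (or negative) posterior costs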
# test evaluation_mask
multi_output_mean = torch.cat([mean, 2 * mean], dim=-1)
def cost_fn(X):
return multi_output_mean
mm = GenericDeterministicModel(f=cost_fn, num_outputs=2)
icwu = InverseCostWeightedUtility(mm)
eval_mask = torch.zeros(3, 2, dtype=torch.bool, device=self.device)
                eval_mask[:, 1] = True  # only the second output is evaluated
ratios = icwu(X, deltas, X_evaluation_mask=eval_mask)
self.assertTrue(
torch.equal(ratios, deltas / multi_output_mean[..., 1].sum(dim=-1))
)
eval_mask[:, 0] = True # both objectives are evaluated
ratios = icwu(X, deltas, X_evaluation_mask=eval_mask)
self.assertAllClose(
ratios, deltas / multi_output_mean.sum(dim=(-1, -2))
)
# test eval_mask where not all rows are the same
eval_mask[0, 1] = False
msg = (
"Currently, all candidates must be evaluated "
"on the same outputs."
)
with self.assertRaisesRegex(NotImplementedError, msg):
icwu(X, deltas, X_evaluation_mask=eval_mask)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import math
from typing import Any, Callable, Sequence, Type
from unittest import mock
from unittest.mock import MagicMock
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import (
ExpectedImprovement,
LogExpectedImprovement,
LogNoisyExpectedImprovement,
LogProbabilityOfImprovement,
NoisyExpectedImprovement,
PosteriorMean,
ProbabilityOfImprovement,
UpperConfidenceBound,
)
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.input_constructors import (
_field_is_shared,
_register_acqf_input_constructor,
acqf_input_constructor,
ACQF_INPUT_CONSTRUCTOR_REGISTRY,
construct_inputs_mf_base,
get_acqf_input_constructor,
get_best_f_analytic,
get_best_f_mc,
)
from botorch.acquisition.joint_entropy_search import qJointEntropySearch
from botorch.acquisition.knowledge_gradient import (
qKnowledgeGradient,
qMultiFidelityKnowledgeGradient,
)
from botorch.acquisition.logei import (
qLogExpectedImprovement,
qLogNoisyExpectedImprovement,
TAU_MAX,
TAU_RELU,
)
from botorch.acquisition.max_value_entropy_search import (
qMaxValueEntropy,
qMultiFidelityMaxValueEntropy,
)
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
)
from botorch.acquisition.multi_objective import (
ExpectedHypervolumeImprovement,
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.acquisition.multi_objective.multi_output_risk_measures import (
MultiOutputExpectation,
)
from botorch.acquisition.multi_objective.objective import (
IdentityAnalyticMultiOutputObjective,
IdentityMCMultiOutputObjective,
WeightedMCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import get_default_partitioning_alpha
from botorch.acquisition.objective import (
LinearMCObjective,
ScalarizedPosteriorTransform,
)
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
from botorch.acquisition.utils import (
expand_trace_observations,
project_to_target_fidelity,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models import FixedNoiseGP, MultiTaskGP, SingleTaskGP
from botorch.models.deterministic import FixedSingleSampleModel
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.constraints import get_outcome_constraint_transforms
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class DummyAcquisitionFunction(AcquisitionFunction):
...
class InputConstructorBaseTestCase(BotorchTestCase):
def setUp(self, suppress_input_warnings: bool = True) -> None:
super().setUp(suppress_input_warnings=suppress_input_warnings)
self.mock_model = MockModel(
posterior=MockPosterior(mean=None, variance=None, base_shape=(1,))
)
X1 = torch.rand(3, 2)
X2 = torch.rand(3, 2)
Y1 = torch.rand(3, 1)
Y2 = torch.rand(3, 1)
self.blockX_blockY = SupervisedDataset.dict_from_iter(X1, Y1)
self.blockX_multiY = SupervisedDataset.dict_from_iter(X1, (Y1, Y2))
self.multiX_multiY = SupervisedDataset.dict_from_iter((X1, X2), (Y1, Y2))
self.bounds = 2 * [(0.0, 1.0)]
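        # blockX_blockY shares a single X across outcomes (a block design),
        # while multiX_multiY uses a different X for each outcome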
class TestInputConstructorUtils(InputConstructorBaseTestCase):
def test_field_is_shared(self) -> None:
self.assertTrue(_field_is_shared(self.blockX_multiY, "X"))
self.assertFalse(_field_is_shared(self.blockX_multiY, "Y"))
with self.assertRaisesRegex(AttributeError, "has no field"):
self.assertFalse(_field_is_shared(self.blockX_multiY, "foo"))
def test_get_best_f_analytic(self) -> None:
with self.assertRaisesRegex(
NotImplementedError, "Currently only block designs are supported."
):
get_best_f_analytic(training_data=self.multiX_multiY)
best_f = get_best_f_analytic(training_data=self.blockX_blockY)
self.assertEqual(best_f, get_best_f_analytic(self.blockX_blockY[0]))
best_f_expected = self.blockX_blockY[0].Y.squeeze().max()
self.assertEqual(best_f, best_f_expected)
with self.assertRaisesRegex(
NotImplementedError,
"Analytic acquisition functions currently only work with "
"multi-output models if provided with a",
):
get_best_f_analytic(training_data=self.blockX_multiY)
weights = torch.rand(2)
post_tf = ScalarizedPosteriorTransform(weights=weights)
best_f_tf = get_best_f_analytic(
training_data=self.blockX_multiY, posterior_transform=post_tf
)
multi_Y = torch.cat([d.Y for d in self.blockX_multiY.values()], dim=-1)
best_f_expected = post_tf.evaluate(multi_Y).max()
self.assertEqual(best_f_tf, best_f_expected)
def test_get_best_f_mc(self) -> None:
with self.assertRaisesRegex(
NotImplementedError, "Currently only block designs are supported."
):
get_best_f_mc(training_data=self.multiX_multiY)
best_f = get_best_f_mc(training_data=self.blockX_blockY)
self.assertEqual(best_f, get_best_f_mc(self.blockX_blockY[0]))
best_f_expected = self.blockX_blockY[0].Y.max(dim=0).values
self.assertAllClose(best_f, best_f_expected)
with self.assertRaisesRegex(UnsupportedError, "require an objective"):
get_best_f_mc(training_data=self.blockX_multiY)
obj = LinearMCObjective(weights=torch.rand(2))
best_f = get_best_f_mc(training_data=self.blockX_multiY, objective=obj)
multi_Y = torch.cat([d.Y for d in self.blockX_multiY.values()], dim=-1)
best_f_expected = (multi_Y @ obj.weights).amax(dim=-1, keepdim=True)
self.assertAllClose(best_f, best_f_expected)
post_tf = ScalarizedPosteriorTransform(weights=torch.ones(2))
best_f = get_best_f_mc(
training_data=self.blockX_multiY, posterior_transform=post_tf
)
best_f_expected = (multi_Y.sum(dim=-1)).amax(dim=-1, keepdim=True)
self.assertAllClose(best_f, best_f_expected)
@mock.patch("botorch.acquisition.input_constructors.optimize_acqf")
def test_optimize_objective(self, mock_optimize_acqf):
from botorch.acquisition.input_constructors import optimize_objective
mock_model = self.mock_model
bounds = torch.rand(2, len(self.bounds))
A = torch.rand(1, bounds.shape[-1])
b = torch.zeros([1, 1])
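        # optimize_acqf encodes each inequality constraint as an
        # (indices, coefficients, rhs) triple meaning
        # sum_i coefficients[i] * X[indices[i]] >= rhs, so A @ x <= b is
        # rewritten as -A @ x >= -b below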
idx = A[0].nonzero(as_tuple=False).squeeze()
inequality_constraints = ((idx, -A[0, idx], -b[0, 0]),)
with self.subTest("scalarObjective_linearConstraints"):
post_tf = ScalarizedPosteriorTransform(weights=torch.rand(bounds.shape[-1]))
_ = optimize_objective(
model=mock_model,
bounds=bounds,
q=1,
posterior_transform=post_tf,
linear_constraints=(A, b),
fixed_features=None,
)
kwargs = mock_optimize_acqf.call_args[1]
self.assertIsInstance(kwargs["acq_function"], PosteriorMean)
self.assertTrue(torch.equal(kwargs["bounds"], bounds))
self.assertEqual(len(kwargs["inequality_constraints"]), 1)
for a, b in zip(
kwargs["inequality_constraints"][0], inequality_constraints[0]
):
self.assertTrue(torch.equal(a, b))
with self.subTest("mcObjective_fixedFeatures"):
_ = optimize_objective(
model=mock_model,
bounds=bounds,
q=1,
objective=LinearMCObjective(weights=torch.rand(bounds.shape[-1])),
fixed_features={0: 0.5},
)
kwargs = mock_optimize_acqf.call_args[1]
self.assertIsInstance(
kwargs["acq_function"], FixedFeatureAcquisitionFunction
)
self.assertIsInstance(kwargs["acq_function"].acq_func, qSimpleRegret)
self.assertTrue(torch.equal(kwargs["bounds"], bounds[:, 1:]))
def test__allow_only_specific_variable_kwargs__raises(self) -> None:
input_constructor = get_acqf_input_constructor(ExpectedImprovement)
with self.assertRaisesRegex(
TypeError,
"Unexpected keyword argument `hat` when constructing input arguments",
):
input_constructor(
model=self.mock_model, training_data=self.blockX_blockY, hat="car"
)
def test__register_acqf_input_constructor(self) -> None:
with self.assertRaisesRegex(RuntimeError, "not registered"):
get_acqf_input_constructor(DummyAcquisitionFunction)
dummy_constructor = MagicMock()
_register_acqf_input_constructor(
acqf_cls=DummyAcquisitionFunction,
input_constructor=dummy_constructor,
)
input_constructor = get_acqf_input_constructor(DummyAcquisitionFunction)
self.assertIs(input_constructor, dummy_constructor)
# Clean up changes to the global registry (leads to failure of other tests).
ACQF_INPUT_CONSTRUCTOR_REGISTRY.pop(DummyAcquisitionFunction)
class TestAnalyticAcquisitionFunctionInputConstructors(InputConstructorBaseTestCase):
def test_acqf_input_constructor(self) -> None:
with self.assertRaisesRegex(RuntimeError, "not registered"):
get_acqf_input_constructor(DummyAcquisitionFunction)
with self.assertRaisesRegex(ValueError, "duplicate"):
acqf_input_constructor(ExpectedImprovement)(lambda x: x)
def test_construct_inputs_posterior_mean(self) -> None:
c = get_acqf_input_constructor(PosteriorMean)
mock_model = self.mock_model
kwargs = c(model=mock_model, training_data=self.blockX_blockY)
self.assertIs(kwargs["model"], mock_model)
self.assertIsNone(kwargs["posterior_transform"])
# test instantiation
acqf = PosteriorMean(**kwargs)
self.assertIs(acqf.model, mock_model)
post_tf = ScalarizedPosteriorTransform(weights=torch.rand(1))
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
posterior_transform=post_tf,
)
self.assertIs(kwargs["model"], mock_model)
self.assertIs(kwargs["posterior_transform"], post_tf)
# test instantiation
acqf = PosteriorMean(**kwargs)
self.assertIs(acqf.model, mock_model)
def test_construct_inputs_best_f(self) -> None:
for acqf_cls in [
ExpectedImprovement,
LogExpectedImprovement,
ProbabilityOfImprovement,
LogProbabilityOfImprovement,
]:
with self.subTest(acqf_cls=acqf_cls):
c = get_acqf_input_constructor(acqf_cls)
mock_model = self.mock_model
kwargs = c(
model=mock_model, training_data=self.blockX_blockY, maximize=False
)
best_f_expected = self.blockX_blockY[0].Y.squeeze().max()
self.assertIs(kwargs["model"], mock_model)
self.assertIsNone(kwargs["posterior_transform"])
self.assertEqual(kwargs["best_f"], best_f_expected)
self.assertFalse(kwargs["maximize"])
acqf = acqf_cls(**kwargs)
self.assertIs(acqf.model, mock_model)
kwargs = c(
model=mock_model, training_data=self.blockX_blockY, best_f=0.1
)
self.assertIs(kwargs["model"], mock_model)
self.assertIsNone(kwargs["posterior_transform"])
self.assertEqual(kwargs["best_f"], 0.1)
self.assertTrue(kwargs["maximize"])
acqf = acqf_cls(**kwargs)
self.assertIs(acqf.model, mock_model)
def test_construct_inputs_ucb(self) -> None:
c = get_acqf_input_constructor(UpperConfidenceBound)
mock_model = self.mock_model
kwargs = c(model=mock_model, training_data=self.blockX_blockY)
self.assertIs(kwargs["model"], mock_model)
self.assertIsNone(kwargs["posterior_transform"])
self.assertEqual(kwargs["beta"], 0.2)
self.assertTrue(kwargs["maximize"])
acqf = UpperConfidenceBound(**kwargs)
self.assertIs(mock_model, acqf.model)
kwargs = c(
model=mock_model, training_data=self.blockX_blockY, beta=0.1, maximize=False
)
self.assertIs(kwargs["model"], mock_model)
self.assertIsNone(kwargs["posterior_transform"])
self.assertEqual(kwargs["beta"], 0.1)
self.assertFalse(kwargs["maximize"])
acqf = UpperConfidenceBound(**kwargs)
self.assertIs(mock_model, acqf.model)
def test_construct_inputs_noisy_ei(self) -> None:
for acqf_cls in [NoisyExpectedImprovement, LogNoisyExpectedImprovement]:
with self.subTest(acqf_cls=acqf_cls):
c = get_acqf_input_constructor(acqf_cls)
mock_model = FixedNoiseGP(
train_X=torch.rand((2, 2)),
train_Y=torch.rand((2, 1)),
train_Yvar=torch.rand((2, 1)),
)
kwargs = c(model=mock_model, training_data=self.blockX_blockY)
self.assertEqual(kwargs["model"], mock_model)
self.assertTrue(
torch.equal(kwargs["X_observed"], self.blockX_blockY[0].X)
)
self.assertEqual(kwargs["num_fantasies"], 20)
self.assertTrue(kwargs["maximize"])
acqf = acqf_cls(**kwargs)
self.assertTrue(acqf.maximize)
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
num_fantasies=10,
maximize=False,
)
self.assertEqual(kwargs["model"], mock_model)
self.assertTrue(
torch.equal(kwargs["X_observed"], self.blockX_blockY[0].X)
)
self.assertEqual(kwargs["num_fantasies"], 10)
self.assertFalse(kwargs["maximize"])
acqf = acqf_cls(**kwargs)
self.assertFalse(acqf.maximize)
with self.assertRaisesRegex(ValueError, "Field `X` must be shared"):
c(model=mock_model, training_data=self.multiX_multiY)
def test_construct_inputs_constrained_analytic_eubo(self) -> None:
# create dummy modellist gp
n = 10
X = torch.linspace(0, 0.95, n).unsqueeze(dim=-1)
Y1, Y2 = torch.sin(X * (2 * math.pi)), torch.cos(X * (2 * math.pi))
# 3 tasks
train_X = torch.cat(
[torch.nn.functional.pad(X, (1, 0), value=i) for i in range(3)]
)
        train_Y = torch.cat([Y1, Y2, Y1])  # one outcome block per task, shape (3n, 1)
        # model list of 2 multi-task GPs with 3 tasks each, so model.num_outputs is 6
model = ModelListGP(
*[MultiTaskGP(train_X, train_Y, task_feature=0) for i in range(2)]
)
self.assertEqual(model.num_outputs, 6)
c = get_acqf_input_constructor(AnalyticExpectedUtilityOfBestOption)
mock_pref_model = self.mock_model
# assume we only have a preference model with 2 outcomes
mock_pref_model.dim = 2
mock_pref_model.datapoints = torch.tensor([])
# test basic construction
kwargs = c(model=model, pref_model=mock_pref_model)
self.assertIsInstance(kwargs["outcome_model"], FixedSingleSampleModel)
self.assertIs(kwargs["pref_model"], mock_pref_model)
self.assertIsNone(kwargs["previous_winner"])
# test instantiation
AnalyticExpectedUtilityOfBestOption(**kwargs)
# test previous_winner
previous_winner = torch.randn(mock_pref_model.dim)
kwargs = c(
model=model,
pref_model=mock_pref_model,
previous_winner=previous_winner,
)
self.assertTrue(torch.equal(kwargs["previous_winner"], previous_winner))
# test instantiation
AnalyticExpectedUtilityOfBestOption(**kwargs)
# test sample_multiplier
torch.manual_seed(123)
kwargs = c(
model=model,
pref_model=mock_pref_model,
sample_multiplier=1e6,
)
        # by default w is drawn from a standard normal, so without the large
        # sample_multiplier values of |w| > 10.0 would be very unlikely
self.assertTrue((kwargs["outcome_model"].w.abs() > 10.0).all())
# Check w has the right dimension that agrees with the preference model
self.assertEqual(kwargs["outcome_model"].w.shape[-1], mock_pref_model.dim)
class TestMCAcquisitionFunctionInputConstructors(InputConstructorBaseTestCase):
def test_construct_inputs_mc_base(self) -> None:
c = get_acqf_input_constructor(qSimpleRegret)
mock_model = self.mock_model
kwargs = c(model=mock_model, training_data=self.blockX_blockY)
self.assertIs(kwargs["model"], mock_model)
self.assertIsNone(kwargs["objective"])
self.assertIsNone(kwargs["X_pending"])
self.assertIsNone(kwargs["sampler"])
acqf = qSimpleRegret(**kwargs)
self.assertIs(acqf.model, mock_model)
X_pending = torch.rand(2, 2)
objective = LinearMCObjective(torch.rand(2))
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective=objective,
X_pending=X_pending,
)
self.assertIs(kwargs["model"], mock_model)
self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
self.assertIsNone(kwargs["sampler"])
acqf = qSimpleRegret(**kwargs)
self.assertIs(acqf.model, mock_model)
# TODO: Test passing through of sampler
def test_construct_inputs_qEI(self) -> None:
c = get_acqf_input_constructor(qExpectedImprovement)
mock_model = self.mock_model
kwargs = c(model=mock_model, training_data=self.blockX_blockY)
self.assertIs(kwargs["model"], mock_model)
self.assertIsNone(kwargs["objective"])
self.assertIsNone(kwargs["X_pending"])
self.assertIsNone(kwargs["sampler"])
self.assertIsNone(kwargs["constraints"])
self.assertIsInstance(kwargs["eta"], float)
self.assertLess(kwargs["eta"], 1)
acqf = qExpectedImprovement(**kwargs)
self.assertIs(acqf.model, mock_model)
X_pending = torch.rand(2, 2)
objective = LinearMCObjective(torch.rand(2))
kwargs = c(
model=mock_model,
training_data=self.blockX_multiY,
objective=objective,
X_pending=X_pending,
)
self.assertIs(kwargs["model"], mock_model)
self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
self.assertIsNone(kwargs["sampler"])
self.assertIsInstance(kwargs["eta"], float)
self.assertLess(kwargs["eta"], 1)
acqf = qExpectedImprovement(**kwargs)
self.assertIs(acqf.model, mock_model)
multi_Y = torch.cat([d.Y for d in self.blockX_multiY.values()], dim=-1)
best_f_expected = objective(multi_Y).max()
self.assertEqual(kwargs["best_f"], best_f_expected)
# Check explicitly specifying `best_f`.
best_f_expected = best_f_expected - 1 # Random value.
kwargs = c(
model=mock_model,
training_data=self.blockX_multiY,
objective=objective,
X_pending=X_pending,
best_f=best_f_expected,
)
self.assertEqual(kwargs["best_f"], best_f_expected)
acqf = qExpectedImprovement(**kwargs)
self.assertIs(acqf.model, mock_model)
self.assertEqual(acqf.best_f, best_f_expected)
# test passing constraints
outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
constraints = get_outcome_constraint_transforms(
outcome_constraints=outcome_constraints
)
kwargs = c(
model=mock_model,
training_data=self.blockX_multiY,
objective=objective,
X_pending=X_pending,
best_f=best_f_expected,
constraints=constraints,
)
self.assertIs(kwargs["constraints"], constraints)
acqf = qExpectedImprovement(**kwargs)
self.assertEqual(acqf.best_f, best_f_expected)
# testing qLogEI input constructor
log_constructor = get_acqf_input_constructor(qLogExpectedImprovement)
log_kwargs = log_constructor(
model=mock_model,
training_data=self.blockX_blockY,
objective=objective,
X_pending=X_pending,
best_f=best_f_expected,
constraints=constraints,
)
# includes strict superset of kwargs tested above
self.assertLessEqual(kwargs.items(), log_kwargs.items())
self.assertIn("fat", log_kwargs)
self.assertIn("tau_max", log_kwargs)
self.assertEqual(log_kwargs["tau_max"], TAU_MAX)
self.assertIn("tau_relu", log_kwargs)
self.assertEqual(log_kwargs["tau_relu"], TAU_RELU)
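        # TAU_MAX and TAU_RELU are the default temperatures of the smooth max
        # and smooth ReLU approximations used by the log acquisition functions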
self.assertIs(log_kwargs["constraints"], constraints)
acqf = qLogExpectedImprovement(**log_kwargs)
self.assertIs(acqf.model, mock_model)
self.assertIs(acqf.objective, objective)
def test_construct_inputs_qNEI(self) -> None:
c = get_acqf_input_constructor(qNoisyExpectedImprovement)
mock_model = SingleTaskGP(
train_X=torch.rand((2, 2)), train_Y=torch.rand((2, 1))
)
kwargs = c(model=mock_model, training_data=self.blockX_blockY)
self.assertIs(kwargs["model"], mock_model)
self.assertIsNone(kwargs["objective"])
self.assertIsNone(kwargs["X_pending"])
self.assertIsNone(kwargs["sampler"])
self.assertTrue(kwargs["prune_baseline"])
self.assertTrue(torch.equal(kwargs["X_baseline"], self.blockX_blockY[0].X))
self.assertIsNone(kwargs["constraints"])
self.assertIsInstance(kwargs["eta"], float)
self.assertLess(kwargs["eta"], 1)
acqf = qNoisyExpectedImprovement(**kwargs)
self.assertIs(acqf.model, mock_model)
with self.assertRaisesRegex(ValueError, "Field `X` must be shared"):
c(model=mock_model, training_data=self.multiX_multiY)
X_baseline = torch.rand(2, 2)
outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
constraints = get_outcome_constraint_transforms(
outcome_constraints=outcome_constraints
)
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
X_baseline=X_baseline,
prune_baseline=False,
constraints=constraints,
)
self.assertEqual(kwargs["model"], mock_model)
self.assertIsNone(kwargs["objective"])
self.assertIsNone(kwargs["X_pending"])
self.assertIsNone(kwargs["sampler"])
self.assertFalse(kwargs["prune_baseline"])
self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))
self.assertIsInstance(kwargs["eta"], float)
self.assertLess(kwargs["eta"], 1)
self.assertIs(kwargs["constraints"], constraints)
acqf = qNoisyExpectedImprovement(**kwargs)
self.assertIs(acqf.model, mock_model)
# testing qLogNEI input constructor
log_constructor = get_acqf_input_constructor(qLogNoisyExpectedImprovement)
log_kwargs = log_constructor(
model=mock_model,
training_data=self.blockX_blockY,
X_baseline=X_baseline,
prune_baseline=False,
constraints=constraints,
)
# includes strict superset of kwargs tested above
self.assertLessEqual(kwargs.items(), log_kwargs.items())
self.assertIn("fat", log_kwargs)
self.assertIn("tau_max", log_kwargs)
self.assertEqual(log_kwargs["tau_max"], TAU_MAX)
self.assertIn("tau_relu", log_kwargs)
self.assertEqual(log_kwargs["tau_relu"], TAU_RELU)
self.assertIs(log_kwargs["constraints"], constraints)
acqf = qLogNoisyExpectedImprovement(**log_kwargs)
self.assertIs(acqf.model, mock_model)
def test_construct_inputs_qPI(self) -> None:
c = get_acqf_input_constructor(qProbabilityOfImprovement)
mock_model = self.mock_model
kwargs = c(model=mock_model, training_data=self.blockX_blockY)
self.assertEqual(kwargs["model"], mock_model)
self.assertIsNone(kwargs["objective"])
self.assertIsNone(kwargs["X_pending"])
self.assertIsNone(kwargs["sampler"])
self.assertEqual(kwargs["tau"], 1e-3)
self.assertIsNone(kwargs["constraints"])
self.assertIsInstance(kwargs["eta"], float)
self.assertLess(kwargs["eta"], 1)
acqf = qProbabilityOfImprovement(**kwargs)
self.assertIs(acqf.model, mock_model)
X_pending = torch.rand(2, 2)
objective = LinearMCObjective(torch.rand(2))
kwargs = c(
model=mock_model,
training_data=self.blockX_multiY,
objective=objective,
X_pending=X_pending,
tau=1e-2,
)
self.assertEqual(kwargs["model"], mock_model)
self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
self.assertIsNone(kwargs["sampler"])
self.assertEqual(kwargs["tau"], 1e-2)
self.assertIsInstance(kwargs["eta"], float)
self.assertLess(kwargs["eta"], 1)
multi_Y = torch.cat([d.Y for d in self.blockX_multiY.values()], dim=-1)
best_f_expected = objective(multi_Y).max()
self.assertEqual(kwargs["best_f"], best_f_expected)
acqf = qProbabilityOfImprovement(**kwargs)
self.assertIs(acqf.model, mock_model)
self.assertIs(acqf.objective, objective)
# Check explicitly specifying `best_f`.
best_f_expected = best_f_expected - 1 # Random value.
outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
constraints = get_outcome_constraint_transforms(
outcome_constraints=outcome_constraints
)
kwargs = c(
model=mock_model,
training_data=self.blockX_multiY,
objective=objective,
X_pending=X_pending,
tau=1e-2,
best_f=best_f_expected,
constraints=constraints,
)
self.assertEqual(kwargs["best_f"], best_f_expected)
self.assertIs(kwargs["constraints"], constraints)
acqf = qProbabilityOfImprovement(**kwargs)
self.assertIs(acqf.model, mock_model)
self.assertIs(acqf.objective, objective)
def test_construct_inputs_qUCB(self) -> None:
c = get_acqf_input_constructor(qUpperConfidenceBound)
mock_model = self.mock_model
kwargs = c(model=mock_model, training_data=self.blockX_blockY)
self.assertEqual(kwargs["model"], mock_model)
self.assertIsNone(kwargs["objective"])
self.assertIsNone(kwargs["X_pending"])
self.assertIsNone(kwargs["sampler"])
self.assertEqual(kwargs["beta"], 0.2)
acqf = qUpperConfidenceBound(**kwargs)
self.assertIs(acqf.model, mock_model)
X_pending = torch.rand(2, 2)
objective = LinearMCObjective(torch.rand(2))
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective=objective,
X_pending=X_pending,
beta=0.1,
)
self.assertEqual(kwargs["model"], mock_model)
self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
self.assertIsNone(kwargs["sampler"])
self.assertEqual(kwargs["beta"], 0.1)
acqf = qUpperConfidenceBound(**kwargs)
self.assertIs(acqf.model, mock_model)
class TestMultiObjectiveAcquisitionFunctionInputConstructors(
InputConstructorBaseTestCase
):
def test_construct_inputs_EHVI(self) -> None:
c = get_acqf_input_constructor(ExpectedHypervolumeImprovement)
mock_model = mock.Mock()
objective_thresholds = torch.rand(6)
# test error on non-block designs
with self.assertRaisesRegex(ValueError, "Field `X` must be shared"):
c(
model=mock_model,
training_data=self.multiX_multiY,
objective_thresholds=objective_thresholds,
)
# test error on unsupported outcome constraints
with self.assertRaises(NotImplementedError):
c(
model=mock_model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
constraints=mock.Mock(),
)
# test with Y_pmean supplied explicitly
Y_pmean = torch.rand(3, 6)
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
Y_pmean=Y_pmean,
)
self.assertEqual(kwargs["model"], mock_model)
self.assertIsInstance(kwargs["objective"], IdentityAnalyticMultiOutputObjective)
self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
partitioning = kwargs["partitioning"]
alpha_expected = get_default_partitioning_alpha(6)
self.assertIsInstance(partitioning, NondominatedPartitioning)
self.assertEqual(partitioning.alpha, alpha_expected)
self.assertTrue(torch.equal(partitioning._neg_ref_point, -objective_thresholds))
Y_pmean = torch.rand(3, 2)
objective_thresholds = torch.rand(2)
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
Y_pmean=Y_pmean,
)
partitioning = kwargs["partitioning"]
self.assertIsInstance(partitioning, FastNondominatedPartitioning)
self.assertTrue(torch.equal(partitioning.ref_point, objective_thresholds))
# test with custom objective
weights = torch.rand(2)
obj = WeightedMCMultiOutputObjective(weights=weights)
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
objective=obj,
Y_pmean=Y_pmean,
alpha=0.05,
)
self.assertEqual(kwargs["model"], mock_model)
self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective)
ref_point_expected = objective_thresholds * weights
self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
partitioning = kwargs["partitioning"]
self.assertIsInstance(partitioning, NondominatedPartitioning)
self.assertEqual(partitioning.alpha, 0.05)
self.assertTrue(torch.equal(partitioning._neg_ref_point, -ref_point_expected))
# Test without providing Y_pmean (computed from model)
mean = torch.rand(1, 2)
variance = torch.ones(1, 1)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
kwargs = c(
model=mm,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
)
self.assertIsInstance(kwargs["objective"], IdentityAnalyticMultiOutputObjective)
self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
partitioning = kwargs["partitioning"]
self.assertIsInstance(partitioning, FastNondominatedPartitioning)
self.assertTrue(torch.equal(partitioning.ref_point, objective_thresholds))
self.assertTrue(torch.equal(partitioning._neg_Y, -mean))
# Test with risk measures.
for use_preprocessing in (True, False):
obj = MultiOutputExpectation(
n_w=3,
preprocessing_function=WeightedMCMultiOutputObjective(
torch.tensor([-1.0, -1.0])
)
if use_preprocessing
else None,
)
kwargs = c(
model=mm,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
objective=obj,
)
expected_obj_t = (
-objective_thresholds if use_preprocessing else objective_thresholds
)
self.assertIs(kwargs["objective"], obj)
self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t))
partitioning = kwargs["partitioning"]
self.assertIsInstance(partitioning, FastNondominatedPartitioning)
self.assertTrue(torch.equal(partitioning.ref_point, expected_obj_t))
def test_construct_inputs_qEHVI(self) -> None:
c = get_acqf_input_constructor(qExpectedHypervolumeImprovement)
objective_thresholds = torch.rand(2)
# Test defaults
mm = SingleTaskGP(torch.rand(1, 2), torch.rand(1, 2))
mean = mm.posterior(self.blockX_blockY[0].X).mean
kwargs = c(
model=mm,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
)
self.assertIsInstance(kwargs["objective"], IdentityMCMultiOutputObjective)
ref_point_expected = objective_thresholds
self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
partitioning = kwargs["partitioning"]
self.assertIsInstance(partitioning, FastNondominatedPartitioning)
self.assertTrue(torch.equal(partitioning.ref_point, ref_point_expected))
self.assertTrue(torch.equal(partitioning._neg_Y, -mean))
sampler = kwargs["sampler"]
self.assertIsInstance(sampler, SobolQMCNormalSampler)
self.assertEqual(sampler.sample_shape, torch.Size([128]))
self.assertIsNone(kwargs["X_pending"])
self.assertIsNone(kwargs["constraints"])
self.assertEqual(kwargs["eta"], 1e-3)
# Test IID sampler
kwargs = c(
model=mm,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
qmc=False,
mc_samples=64,
)
sampler = kwargs["sampler"]
self.assertIsInstance(sampler, IIDNormalSampler)
self.assertEqual(sampler.sample_shape, torch.Size([64]))
# Test outcome constraints and custom inputs
mean = torch.tensor([[1.0, 0.25], [0.5, 1.0]])
variance = torch.ones(1, 1)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
weights = torch.rand(2)
obj = WeightedMCMultiOutputObjective(weights=weights)
outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
constraints = get_outcome_constraint_transforms(
outcome_constraints=outcome_constraints
)
X_pending = torch.rand(1, 2)
kwargs = c(
model=mm,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
objective=obj,
constraints=constraints,
X_pending=X_pending,
alpha=0.05,
eta=1e-2,
)
self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective)
ref_point_expected = objective_thresholds * weights
self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
partitioning = kwargs["partitioning"]
self.assertIsInstance(partitioning, NondominatedPartitioning)
self.assertEqual(partitioning.alpha, 0.05)
self.assertTrue(torch.equal(partitioning._neg_ref_point, -ref_point_expected))
Y_expected = mean[:1] * weights
self.assertTrue(torch.equal(partitioning._neg_Y, -Y_expected))
self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
self.assertIs(kwargs["constraints"], constraints)
self.assertEqual(kwargs["eta"], 1e-2)
# Test check for block designs
with self.assertRaisesRegex(ValueError, "Field `X` must be shared"):
c(
model=mm,
training_data=self.multiX_multiY,
objective_thresholds=objective_thresholds,
objective=obj,
constraints=constraints,
X_pending=X_pending,
alpha=0.05,
eta=1e-2,
)
# Test custom sampler
custom_sampler = SobolQMCNormalSampler(sample_shape=torch.Size([16]), seed=1234)
kwargs = c(
model=mm,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
sampler=custom_sampler,
)
sampler = kwargs["sampler"]
self.assertIsInstance(sampler, SobolQMCNormalSampler)
self.assertEqual(sampler.sample_shape, torch.Size([16]))
self.assertEqual(sampler.seed, 1234)
def test_construct_inputs_qNEHVI(self) -> None:
c = get_acqf_input_constructor(qNoisyExpectedHypervolumeImprovement)
objective_thresholds = torch.rand(2)
# Test defaults
kwargs = c(
model=SingleTaskGP(torch.rand(1, 2), torch.rand(1, 2)),
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
)
ref_point_expected = objective_thresholds
self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
self.assertTrue(torch.equal(kwargs["X_baseline"], self.blockX_blockY[0].X))
self.assertIsInstance(kwargs["sampler"], SobolQMCNormalSampler)
self.assertEqual(kwargs["sampler"].sample_shape, torch.Size([128]))
self.assertIsInstance(kwargs["objective"], IdentityMCMultiOutputObjective)
self.assertIsNone(kwargs["constraints"])
self.assertIsNone(kwargs["X_pending"])
self.assertEqual(kwargs["eta"], 1e-3)
self.assertTrue(kwargs["prune_baseline"])
self.assertEqual(kwargs["alpha"], 0.0)
self.assertTrue(kwargs["cache_pending"])
self.assertEqual(kwargs["max_iep"], 0)
self.assertTrue(kwargs["incremental_nehvi"])
self.assertTrue(kwargs["cache_root"])
# Test check for block designs
mock_model = mock.Mock()
mock_model.num_outputs = 2
with self.assertRaisesRegex(ValueError, "Field `X` must be shared"):
c(
model=mock_model,
training_data=self.multiX_multiY,
objective_thresholds=objective_thresholds,
)
# Test custom inputs
weights = torch.rand(2)
objective = WeightedMCMultiOutputObjective(weights=weights)
X_baseline = torch.rand(2, 2)
sampler = IIDNormalSampler(sample_shape=torch.Size([4]))
outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
constraints = get_outcome_constraint_transforms(
outcome_constraints=outcome_constraints
)
X_pending = torch.rand(1, 2)
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
objective=objective,
X_baseline=X_baseline,
sampler=sampler,
constraints=constraints,
X_pending=X_pending,
eta=1e-2,
prune_baseline=True,
alpha=0.0,
cache_pending=False,
max_iep=1,
incremental_nehvi=False,
cache_root=False,
)
ref_point_expected = objective(objective_thresholds)
self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))
sampler_ = kwargs["sampler"]
self.assertIsInstance(sampler_, IIDNormalSampler)
self.assertEqual(sampler_.sample_shape, torch.Size([4]))
self.assertEqual(kwargs["objective"], objective)
self.assertIs(kwargs["constraints"], constraints)
self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
self.assertEqual(kwargs["eta"], 1e-2)
self.assertTrue(kwargs["prune_baseline"])
self.assertEqual(kwargs["alpha"], 0.0)
self.assertFalse(kwargs["cache_pending"])
self.assertEqual(kwargs["max_iep"], 1)
self.assertFalse(kwargs["incremental_nehvi"])
self.assertFalse(kwargs["cache_root"])
# Test with risk measures.
with self.assertRaisesRegex(UnsupportedError, "feasibility-weighted"):
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
objective=MultiOutputExpectation(n_w=3),
constraints=constraints,
)
for use_preprocessing in (True, False):
obj = MultiOutputExpectation(
n_w=3,
preprocessing_function=WeightedMCMultiOutputObjective(
torch.tensor([-1.0, -1.0])
)
if use_preprocessing
else None,
)
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
objective=obj,
)
expected_obj_t = (
-objective_thresholds if use_preprocessing else objective_thresholds
)
self.assertIs(kwargs["objective"], obj)
self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t))
        # Test default alpha for many objectives.
mock_model.num_outputs = 5
kwargs = c(
model=mock_model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
)
self.assertEqual(kwargs["alpha"], 0.0)
def test_construct_inputs_kg(self) -> None:
current_value = torch.tensor(1.23)
with mock.patch(
target="botorch.acquisition.input_constructors.optimize_objective",
return_value=(None, current_value),
):
from botorch.acquisition import input_constructors
func = input_constructors.get_acqf_input_constructor(qKnowledgeGradient)
kwargs = func(
model=mock.Mock(),
training_data=self.blockX_blockY,
objective=LinearMCObjective(torch.rand(2)),
bounds=self.bounds,
num_fantasies=33,
)
self.assertEqual(kwargs["num_fantasies"], 33)
self.assertEqual(kwargs["current_value"], current_value)
def test_construct_inputs_mes(self) -> None:
func = get_acqf_input_constructor(qMaxValueEntropy)
model = SingleTaskGP(train_X=torch.ones((3, 2)), train_Y=torch.zeros((3, 1)))
kwargs = func(
model=model,
training_data=self.blockX_blockY,
objective=LinearMCObjective(torch.rand(2)),
bounds=self.bounds,
candidate_size=17,
maximize=False,
)
self.assertFalse(kwargs["maximize"])
self.assertGreaterEqual(kwargs["candidate_set"].min(), 0.0)
self.assertLessEqual(kwargs["candidate_set"].max(), 1.0)
self.assertEqual(
[int(s) for s in kwargs["candidate_set"].shape], [17, len(self.bounds)]
)
acqf = qMaxValueEntropy(**kwargs)
self.assertIs(acqf.model, model)
def test_construct_inputs_mf_base(self) -> None:
target_fidelities = {0: 0.123}
fidelity_weights = {0: 0.456}
cost_intercept = 0.789
num_trace_observations = 0
with self.subTest("test_fully_specified"):
kwargs = construct_inputs_mf_base(
target_fidelities=target_fidelities,
fidelity_weights=fidelity_weights,
cost_intercept=cost_intercept,
num_trace_observations=num_trace_observations,
)
X = torch.rand(3, 2)
self.assertIsInstance(kwargs["expand"], Callable)
self.assertTrue(
torch.equal(
kwargs["expand"](X),
expand_trace_observations(
X=X,
fidelity_dims=sorted(target_fidelities),
num_trace_obs=num_trace_observations,
),
)
)
self.assertIsInstance(kwargs["project"], Callable)
self.assertTrue(
torch.equal(
kwargs["project"](X),
project_to_target_fidelity(X, target_fidelities=target_fidelities),
)
)
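            # the cost model is affine in the fidelity parameters, assuming the
            # botorch default: cost(X) = fixed_cost + sum_f weights[f] * X[..., f]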
cm = kwargs["cost_aware_utility"].cost_model
w = torch.tensor(list(fidelity_weights.values()), dtype=cm.weights.dtype)
self.assertEqual(cm.fixed_cost, cost_intercept)
self.assertAllClose(cm.weights, w)
with self.subTest("test_missing_fidelity_weights"):
kwargs = construct_inputs_mf_base(
target_fidelities=target_fidelities,
cost_intercept=cost_intercept,
)
cm = kwargs["cost_aware_utility"].cost_model
self.assertAllClose(cm.weights, torch.ones_like(cm.weights))
with self.subTest("test_mismatched_weights"):
with self.assertRaisesRegex(
RuntimeError, "Must provide the same indices for"
):
construct_inputs_mf_base(
target_fidelities={0: 1.0},
fidelity_weights={1: 0.5},
cost_intercept=cost_intercept,
)
def test_construct_inputs_mfkg(self) -> None:
constructor_args = {
"model": None,
"training_data": self.blockX_blockY,
"objective": None,
"bounds": self.bounds,
"num_fantasies": 123,
"target_fidelities": {0: 0.987},
"fidelity_weights": {0: 0.654},
"cost_intercept": 0.321,
}
with mock.patch(
target="botorch.acquisition.input_constructors.construct_inputs_mf_base",
return_value={"foo": 0},
), mock.patch(
target="botorch.acquisition.input_constructors.construct_inputs_qKG",
return_value={"bar": 1},
):
from botorch.acquisition import input_constructors
input_constructor = input_constructors.get_acqf_input_constructor(
qMultiFidelityKnowledgeGradient
)
inputs_mfkg = input_constructor(**constructor_args)
inputs_test = {"foo": 0, "bar": 1}
self.assertEqual(inputs_mfkg, inputs_test)
def test_construct_inputs_mfmes(self) -> None:
target_fidelities = {0: 0.987}
constructor_args = {
"model": None,
"training_data": self.blockX_blockY,
"objective": None,
"bounds": self.bounds,
"num_fantasies": 123,
"candidate_size": 17,
"target_fidelities": target_fidelities,
"fidelity_weights": {0: 0.654},
"cost_intercept": 0.321,
}
current_value = torch.tensor(1.23)
with mock.patch(
target="botorch.acquisition.input_constructors.construct_inputs_mf_base",
return_value={"foo": 0},
), mock.patch(
target="botorch.acquisition.input_constructors.construct_inputs_qMES",
return_value={"bar": 1},
), mock.patch(
target="botorch.acquisition.input_constructors.optimize_objective",
return_value=(None, current_value),
):
from botorch.acquisition import input_constructors
input_constructor = input_constructors.get_acqf_input_constructor(
qMultiFidelityMaxValueEntropy
)
inputs_mfmes = input_constructor(**constructor_args)
inputs_test = {
"foo": 0,
"bar": 1,
"current_value": current_value,
"target_fidelities": target_fidelities,
}
self.assertEqual(inputs_mfmes, inputs_test)
def test_construct_inputs_jes(self) -> None:
func = get_acqf_input_constructor(qJointEntropySearch)
        # constructing qJES runs optimize_posterior_samples, which requires a
        # real model rather than a mock
model = SingleTaskGP(self.blockX_blockY[0].X, self.blockX_blockY[0].Y)
kwargs = func(
model=model,
training_data=self.blockX_blockY,
objective=LinearMCObjective(torch.rand(2)),
bounds=self.bounds,
num_optima=17,
maximize=False,
)
self.assertFalse(kwargs["maximize"])
self.assertEqual(self.blockX_blockY[0].X.dtype, kwargs["optimal_inputs"].dtype)
self.assertEqual(len(kwargs["optimal_inputs"]), 17)
self.assertEqual(len(kwargs["optimal_outputs"]), 17)
# asserting that, for the non-batch case, the optimal inputs are
# of shape N x D and outputs are N x 1
self.assertEqual(len(kwargs["optimal_inputs"].shape), 2)
self.assertEqual(len(kwargs["optimal_outputs"].shape), 2)
qJointEntropySearch(**kwargs)
class TestInstantiationFromInputConstructor(InputConstructorBaseTestCase):
def _test_constructor_base(
self,
classes: Sequence[Type[AcquisitionFunction]],
**input_constructor_kwargs: Any,
) -> None:
for cls_ in classes:
with self.subTest(cls_.__name__, cls_=cls_):
acqf_kwargs = get_acqf_input_constructor(cls_)(
**input_constructor_kwargs
)
# no assertions; we are just testing that this doesn't error
cls_(**acqf_kwargs)
def test_constructors_like_PosteriorMean(self) -> None:
classes = [PosteriorMean, UpperConfidenceBound, qUpperConfidenceBound]
self._test_constructor_base(classes=classes, model=self.mock_model)
def test_constructors_like_ExpectedImprovement(self) -> None:
classes = [
ExpectedImprovement,
LogExpectedImprovement,
ProbabilityOfImprovement,
LogProbabilityOfImprovement,
NoisyExpectedImprovement,
LogNoisyExpectedImprovement,
qExpectedImprovement,
qLogExpectedImprovement,
qNoisyExpectedImprovement,
qLogNoisyExpectedImprovement,
qProbabilityOfImprovement,
]
model = FixedNoiseGP(
train_X=torch.rand((4, 2)),
train_Y=torch.rand((4, 1)),
train_Yvar=torch.ones((4, 1)),
)
self._test_constructor_base(
classes=classes, model=model, training_data=self.blockX_blockY
)
def test_constructors_like_qNEHVI(self) -> None:
objective_thresholds = torch.tensor([0.1, 0.2])
model = SingleTaskGP(train_X=torch.rand((3, 2)), train_Y=torch.rand((3, 2)))
# The EHVI and qEHVI input constructors are not currently working, so the
# corresponding classes are commented out below
classes = [
qNoisyExpectedHypervolumeImprovement,
# ExpectedHypervolumeImprovement,
# qExpectedHypervolumeImprovement,
]
self._test_constructor_base(
classes=classes,
model=model,
training_data=self.blockX_blockY,
objective_thresholds=objective_thresholds,
)
def test_constructors_like_qMaxValueEntropy(self) -> None:
bounds = torch.ones((1, 2))
classes = [qMaxValueEntropy, qKnowledgeGradient]
self._test_constructor_base(
classes=classes,
model=SingleTaskGP(train_X=torch.rand((3, 1)), train_Y=torch.rand((3, 1))),
training_data=self.blockX_blockY,
bounds=bounds,
)
def test_constructors_like_qMultiFidelityKnowledgeGradient(self) -> None:
classes = [
qMultiFidelityKnowledgeGradient,
# currently the input constructor for qMultiFidelityMaxValueEntropy
# is not working
# qMultiFidelityMaxValueEntropy
]
self._test_constructor_base(
classes=classes,
model=SingleTaskGP(train_X=torch.rand((3, 1)), train_Y=torch.rand((3, 1))),
training_data=self.blockX_blockY,
bounds=torch.ones((1, 2)),
target_fidelities={0: 0.987},
)
def test_eubo(self) -> None:
model = SingleTaskGP(train_X=torch.rand((3, 2)), train_Y=torch.rand((3, 2)))
pref_model = self.mock_model
pref_model.dim = 2
pref_model.datapoints = torch.tensor([])
classes = [AnalyticExpectedUtilityOfBestOption]
self._test_constructor_base(
classes=classes,
model=model,
pref_model=pref_model,
)
def test_qjes(self) -> None:
model = SingleTaskGP(self.blockX_blockY[0].X, self.blockX_blockY[0].Y)
self._test_constructor_base(
classes=[qJointEntropySearch],
model=model,
bounds=self.bounds,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from unittest import mock
import torch
from botorch.acquisition import logei, monte_carlo
from botorch.acquisition.factory import get_acquisition_function
from botorch.acquisition.multi_objective import (
MCMultiOutputObjective,
monte_carlo as moo_monte_carlo,
)
from botorch.acquisition.objective import (
MCAcquisitionObjective,
ScalarizedPosteriorTransform,
)
from botorch.acquisition.utils import compute_best_feasible_objective
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultivariateNormal
from torch import Tensor
class DummyMCObjective(MCAcquisitionObjective):
def forward(self, samples: Tensor, X=None) -> Tensor:
return samples.sum(-1)
class DummyMCMultiOutputObjective(MCMultiOutputObjective):
def forward(self, samples: Tensor, X=None) -> Tensor:
return samples
class TestGetAcquisitionFunction(BotorchTestCase):
def setUp(self):
super().setUp()
self.model = MockModel(MockPosterior())
self.objective = DummyMCObjective()
self.X_observed = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
self.X_pending = torch.tensor([[1.0, 3.0, 4.0]])
self.mc_samples = 250
self.qmc = True
self.ref_point = [0.0, 0.0]
self.mo_objective = DummyMCMultiOutputObjective()
self.Y = torch.tensor([[1.0, 2.0]]) # (1 x 2)-dim multi-objective outcomes
self.seed = 1
@mock.patch(f"{monte_carlo.__name__}.qExpectedImprovement")
def test_GetQEI(self, mock_acqf):
n = len(self.X_observed)
mean = torch.arange(n, dtype=torch.double).view(-1, 1)
var = torch.ones_like(mean)
self.model = MockModel(MockPosterior(mean=mean, variance=var))
common_kwargs = {
"model": self.model,
"objective": self.objective,
"X_observed": self.X_observed,
"X_pending": self.X_pending,
"mc_samples": self.mc_samples,
"seed": self.seed,
}
acqf = get_acquisition_function(
acquisition_function_name="qEI",
**common_kwargs,
marginalize_dim=0,
)
self.assertEqual(acqf, mock_acqf.return_value)
best_f = self.objective(self.model.posterior(self.X_observed).mean).max().item()
mock_acqf.assert_called_once_with(
model=self.model,
best_f=best_f,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
constraints=None,
eta=1e-3,
)
# test batched model
self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
common_kwargs.update({"model": self.model})
acqf = get_acquisition_function(
acquisition_function_name="qEI", **common_kwargs
)
self.assertEqual(acqf, mock_acqf.return_value)
# test batched model without marginalize dim
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
# test w/ posterior transform
pm = torch.tensor([1.0, 2.0])
mvn = MultivariateNormal(pm, torch.eye(2))
self.model._posterior.distribution = mvn
self.model._posterior._mean = pm.unsqueeze(-1)
common_kwargs.update({"model": self.model})
pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
acqf = get_acquisition_function(
acquisition_function_name="qEI",
**common_kwargs,
posterior_transform=pt,
marginalize_dim=0,
)
self.assertEqual(mock_acqf.call_args[-1]["best_f"].item(), -1.0)
# with constraints
upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5
constraints = [lambda samples: samples[..., 0] - upper_bound]
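# under the BoTorch convention a constraint callable is feasible where it
# is non-positive, so this constraint is satisfied when samples[..., 0] <= 1.5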
eta = math.pi * 1e-2 # testing non-standard eta
acqf = get_acquisition_function(
acquisition_function_name="qEI",
**common_kwargs,
marginalize_dim=0,
constraints=constraints,
eta=eta,
)
self.assertEqual(acqf, mock_acqf.return_value)
best_feasible_f = compute_best_feasible_objective(
samples=mean,
obj=self.objective(mean),
constraints=constraints,
model=self.model,
objective=self.objective,
X_baseline=self.X_observed,
)
mock_acqf.assert_called_with(
model=self.model,
best_f=best_feasible_f,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
constraints=constraints,
eta=eta,
)
@mock.patch(f"{logei.__name__}.qLogExpectedImprovement")
def test_GetQLogEI(self, mock_acqf):
n = len(self.X_observed)
mean = torch.arange(n, dtype=torch.double).view(-1, 1)
var = torch.ones_like(mean)
self.model = MockModel(MockPosterior(mean=mean, variance=var))
common_kwargs = {
"model": self.model,
"objective": self.objective,
"X_observed": self.X_observed,
"X_pending": self.X_pending,
"mc_samples": self.mc_samples,
"seed": self.seed,
}
acqf = get_acquisition_function(
acquisition_function_name="qLogEI",
**common_kwargs,
marginalize_dim=0,
)
self.assertEqual(acqf, mock_acqf.return_value)
best_f = self.objective(self.model.posterior(self.X_observed).mean).max().item()
mock_acqf.assert_called_once_with(
model=self.model,
best_f=best_f,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
constraints=None,
eta=1e-3,
)
# test batched model
self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
common_kwargs.update({"model": self.model})
acqf = get_acquisition_function(
acquisition_function_name="qLogEI", **common_kwargs
)
self.assertEqual(acqf, mock_acqf.return_value)
# test batched model without marginalize dim
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
# test w/ posterior transform
pm = torch.tensor([1.0, 2.0])
mvn = MultivariateNormal(pm, torch.eye(2))
self.model._posterior.distribution = mvn
self.model._posterior._mean = pm.unsqueeze(-1)
common_kwargs.update({"model": self.model})
pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
acqf = get_acquisition_function(
acquisition_function_name="qLogEI",
**common_kwargs,
posterior_transform=pt,
marginalize_dim=0,
)
self.assertEqual(mock_acqf.call_args[-1]["best_f"].item(), -1.0)
# with constraints
upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5
constraints = [lambda samples: samples[..., 0] - upper_bound]
eta = math.pi * 1e-2 # testing non-standard eta
acqf = get_acquisition_function(
acquisition_function_name="qLogEI",
**common_kwargs,
marginalize_dim=0,
constraints=constraints,
eta=eta,
)
self.assertEqual(acqf, mock_acqf.return_value)
best_feasible_f = compute_best_feasible_objective(
samples=mean,
obj=self.objective(mean),
constraints=constraints,
model=self.model,
objective=self.objective,
X_baseline=self.X_observed,
)
mock_acqf.assert_called_with(
model=self.model,
best_f=best_feasible_f,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
constraints=constraints,
eta=eta,
)
@mock.patch(f"{monte_carlo.__name__}.qProbabilityOfImprovement")
def test_GetQPI(self, mock_acqf):
# basic test
n = len(self.X_observed)
mean = torch.arange(n, dtype=torch.double).view(-1, 1)
var = torch.ones_like(mean)
self.model = MockModel(MockPosterior(mean=mean, variance=var))
acqf = get_acquisition_function(
acquisition_function_name="qPI",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
)
self.assertEqual(acqf, mock_acqf.return_value)
best_f = self.objective(self.model.posterior(self.X_observed).mean).max().item()
mock_acqf.assert_called_once_with(
model=self.model,
best_f=best_f,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
tau=1e-3,
constraints=None,
eta=1e-3,
)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
# test with different tau and seed
acqf = get_acquisition_function(
acquisition_function_name="qPI",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=2,
tau=1.0,
)
self.assertEqual(mock_acqf.call_count, 2)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs["tau"], 1.0)
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 2)
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
# test batched model
self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
acqf = get_acquisition_function(
acquisition_function_name="qPI",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
)
self.assertEqual(acqf, mock_acqf.return_value)
# with constraints
n = len(self.X_observed)
mean = torch.arange(n, dtype=torch.double).view(-1, 1)
var = torch.ones_like(mean)
self.model = MockModel(MockPosterior(mean=mean, variance=var))
upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5
constraints = [lambda samples: samples[..., 0] - upper_bound]
eta = math.pi * 1e-2 # testing non-standard eta
acqf = get_acquisition_function(
acquisition_function_name="qPI",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
marginalize_dim=0,
constraints=constraints,
eta=eta,
)
self.assertEqual(acqf, mock_acqf.return_value)
best_feasible_f = compute_best_feasible_objective(
samples=mean,
obj=self.objective(mean),
constraints=constraints,
model=self.model,
objective=self.objective,
X_baseline=self.X_observed,
)
mock_acqf.assert_called_with(
model=self.model,
best_f=best_feasible_f,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
tau=1e-3,
constraints=constraints,
eta=eta,
)
@mock.patch(f"{monte_carlo.__name__}.qNoisyExpectedImprovement")
def test_GetQNEI(self, mock_acqf):
# basic test
n = len(self.X_observed)
mean = torch.arange(n, dtype=torch.double).view(-1, 1)
var = torch.ones_like(mean)
self.model = MockModel(MockPosterior(mean=mean, variance=var))
common_kwargs = {
"model": self.model,
"objective": self.objective,
"X_observed": self.X_observed,
"X_pending": self.X_pending,
"mc_samples": self.mc_samples,
"seed": self.seed,
}
acqf = get_acquisition_function(
acquisition_function_name="qNEI",
**common_kwargs,
marginalize_dim=0,
)
self.assertEqual(acqf, mock_acqf.return_value)
self.assertEqual(mock_acqf.call_count, 1)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertEqual(kwargs["marginalize_dim"], 0)
self.assertEqual(kwargs["cache_root"], True)
# test with cache_root = False
acqf = get_acquisition_function(
acquisition_function_name="qNEI",
**common_kwargs,
marginalize_dim=0,
cache_root=False,
)
self.assertEqual(acqf, mock_acqf.return_value)
self.assertEqual(mock_acqf.call_count, 2)
args, kwargs = mock_acqf.call_args
self.assertEqual(kwargs["cache_root"], False)
# test with no X_pending
common_kwargs.update({"X_pending": None})
acqf = get_acquisition_function(
acquisition_function_name="qNEI",
**common_kwargs,
)
self.assertEqual(mock_acqf.call_count, 3)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
self.assertEqual(kwargs["X_pending"], None)
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
# with constraints
upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5
constraints = [lambda samples: samples[..., 0] - upper_bound]
eta = math.pi * 1e-2 # testing non-standard eta
common_kwargs.update({"X_pending": self.X_pending})
acqf = get_acquisition_function(
acquisition_function_name="qNEI",
**common_kwargs,
marginalize_dim=0,
constraints=constraints,
eta=eta,
)
self.assertEqual(acqf, mock_acqf.return_value)
mock_acqf.assert_called_with(
model=self.model,
X_baseline=self.X_observed,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
prune_baseline=True,
marginalize_dim=0,
cache_root=True,
constraints=constraints,
eta=eta,
)
@mock.patch(f"{logei.__name__}.qLogNoisyExpectedImprovement")
def test_GetQLogNEI(self, mock_acqf):
# basic test
n = len(self.X_observed)
mean = torch.arange(n, dtype=torch.double).view(-1, 1)
var = torch.ones_like(mean)
self.model = MockModel(MockPosterior(mean=mean, variance=var))
common_kwargs = {
"model": self.model,
"objective": self.objective,
"X_observed": self.X_observed,
"X_pending": self.X_pending,
"mc_samples": self.mc_samples,
"seed": self.seed,
}
acqf = get_acquisition_function(
acquisition_function_name="qLogNEI",
**common_kwargs,
marginalize_dim=0,
)
self.assertEqual(acqf, mock_acqf.return_value)
self.assertEqual(mock_acqf.call_count, 1)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertEqual(kwargs["marginalize_dim"], 0)
self.assertEqual(kwargs["cache_root"], True)
# test with cache_root = False
acqf = get_acquisition_function(
acquisition_function_name="qLogNEI",
**common_kwargs,
marginalize_dim=0,
cache_root=False,
)
self.assertEqual(acqf, mock_acqf.return_value)
self.assertEqual(mock_acqf.call_count, 2)
args, kwargs = mock_acqf.call_args
self.assertEqual(kwargs["cache_root"], False)
# test with no X_pending
common_kwargs.update({"X_pending": None})
acqf = get_acquisition_function(
acquisition_function_name="qLogNEI",
**common_kwargs,
)
self.assertEqual(mock_acqf.call_count, 3)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
self.assertEqual(kwargs["X_pending"], None)
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
# with constraints
upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5
constraints = [lambda samples: samples[..., 0] - upper_bound]
eta = math.pi * 1e-2 # testing non-standard eta
common_kwargs.update({"X_pending": self.X_pending})
acqf = get_acquisition_function(
acquisition_function_name="qLogNEI",
**common_kwargs,
marginalize_dim=0,
constraints=constraints,
eta=eta,
)
self.assertEqual(acqf, mock_acqf.return_value)
mock_acqf.assert_called_with(
model=self.model,
X_baseline=self.X_observed,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
prune_baseline=True,
marginalize_dim=0,
cache_root=True,
constraints=constraints,
eta=eta,
)
@mock.patch(f"{monte_carlo.__name__}.qSimpleRegret")
def test_GetQSR(self, mock_acqf):
# basic test
acqf = get_acquisition_function(
acquisition_function_name="qSR",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
)
self.assertEqual(acqf, mock_acqf.return_value)
mock_acqf.assert_called_once_with(
model=self.model,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
# test with a different seed
acqf = get_acquisition_function(
acquisition_function_name="qSR",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=2,
)
self.assertEqual(mock_acqf.call_count, 2)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 2)
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
@mock.patch(f"{monte_carlo.__name__}.qUpperConfidenceBound")
def test_GetQUCB(self, mock_acqf):
# make sure beta is specified
with self.assertRaises(ValueError):
acqf = get_acquisition_function(
acquisition_function_name="qUCB",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
)
acqf = get_acquisition_function(
acquisition_function_name="qUCB",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
beta=0.3,
)
self.assertEqual(acqf, mock_acqf.return_value)
mock_acqf.assert_called_once_with(
model=self.model,
beta=0.3,
sampler=mock.ANY,
objective=self.objective,
posterior_transform=None,
X_pending=self.X_pending,
)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
# test with different beta and seed
acqf = get_acquisition_function(
acquisition_function_name="qUCB",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=2,
beta=0.2,
)
self.assertEqual(mock_acqf.call_count, 2)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs["beta"], 0.2)
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 2)
self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
@mock.patch(f"{moo_monte_carlo.__name__}.qExpectedHypervolumeImprovement")
def test_GetQEHVI(self, mock_acqf):
# make sure ref_point is specified
with self.assertRaises(ValueError):
acqf = get_acquisition_function(
acquisition_function_name="qEHVI",
model=self.model,
objective=self.mo_objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
Y=self.Y,
)
# make sure Y is specified
with self.assertRaises(ValueError):
acqf = get_acquisition_function(
acquisition_function_name="qEHVI",
model=self.model,
objective=self.mo_objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
ref_point=self.ref_point,
)
# posterior transforms are not supported
with self.assertRaises(NotImplementedError):
acqf = get_acquisition_function(
acquisition_function_name="qEHVI",
model=self.model,
objective=self.mo_objective,
posterior_transform=ScalarizedPosteriorTransform(weights=torch.rand(2)),
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
ref_point=self.ref_point,
)
acqf = get_acquisition_function(
acquisition_function_name="qEHVI",
model=self.model,
objective=self.mo_objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
ref_point=self.ref_point,
Y=self.Y,
)
self.assertEqual(acqf, mock_acqf.return_value)
mock_acqf.assert_called_once_with(
constraints=None,
eta=1e-3,
model=self.model,
objective=self.mo_objective,
ref_point=self.ref_point,
partitioning=mock.ANY,
sampler=mock.ANY,
X_pending=self.X_pending,
)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
acqf = get_acquisition_function(
acquisition_function_name="qEHVI",
model=self.model,
objective=self.mo_objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=2,
ref_point=self.ref_point,
Y=self.Y,
)
self.assertEqual(mock_acqf.call_count, 2)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs["ref_point"], self.ref_point)
sampler = kwargs["sampler"]
self.assertIsInstance(kwargs["objective"], DummyMCMultiOutputObjective)
partitioning = kwargs["partitioning"]
self.assertIsInstance(partitioning, FastNondominatedPartitioning)
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 2)
# test that approximate partitioning is used when alpha > 0
acqf = get_acquisition_function(
acquisition_function_name="qEHVI",
model=self.model,
objective=self.mo_objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=2,
ref_point=self.ref_point,
Y=self.Y,
alpha=0.1,
)
_, kwargs = mock_acqf.call_args
partitioning = kwargs["partitioning"]
self.assertIsInstance(partitioning, NondominatedPartitioning)
self.assertEqual(partitioning.alpha, 0.1)
# test constraints
acqf = get_acquisition_function(
acquisition_function_name="qEHVI",
model=self.model,
objective=self.mo_objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
constraints=[lambda Y: Y[..., -1]],
eta=1e-2,
seed=2,
ref_point=self.ref_point,
Y=self.Y,
)
_, kwargs = mock_acqf.call_args
partitioning = kwargs["partitioning"]
self.assertEqual(partitioning.pareto_Y.shape[0], 0)
self.assertEqual(kwargs["eta"], 1e-2)
@mock.patch(f"{moo_monte_carlo.__name__}.qNoisyExpectedHypervolumeImprovement")
def test_GetQNEHVI(self, mock_acqf):
# make sure ref_point is specified
with self.assertRaises(ValueError):
acqf = get_acquisition_function(
acquisition_function_name="qNEHVI",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
)
acqf = get_acquisition_function(
acquisition_function_name="qNEHVI",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
ref_point=self.ref_point,
)
self.assertEqual(acqf, mock_acqf.return_value)
mock_acqf.assert_called_once_with(
constraints=None,
eta=1e-3,
model=self.model,
X_baseline=self.X_observed,
objective=self.objective,
ref_point=self.ref_point,
sampler=mock.ANY,
prune_baseline=True,
alpha=0.0,
X_pending=self.X_pending,
marginalize_dim=None,
cache_root=True,
)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
sampler = kwargs["sampler"]
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 1)
# test with a different seed
acqf = get_acquisition_function(
acquisition_function_name="qNEHVI",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=2,
ref_point=self.ref_point,
)
self.assertEqual(mock_acqf.call_count, 2)
args, kwargs = mock_acqf.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs["ref_point"], self.ref_point)
sampler = kwargs["sampler"]
ref_point = kwargs["ref_point"]
self.assertEqual(ref_point, self.ref_point)
self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
self.assertEqual(sampler.seed, 2)
# test passing alpha
acqf = get_acquisition_function(
acquisition_function_name="qNEHVI",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=2,
ref_point=self.ref_point,
alpha=0.01,
)
self.assertEqual(mock_acqf.call_count, 3)
args, kwargs = mock_acqf.call_args
self.assertEqual(kwargs["alpha"], 0.01)
def test_GetUnknownAcquisitionFunction(self):
with self.assertRaises(NotImplementedError):
get_acquisition_function(
acquisition_function_name="foo",
model=self.model,
objective=self.objective,
X_observed=self.X_observed,
X_pending=self.X_pending,
mc_samples=self.mc_samples,
seed=self.seed,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from botorch.acquisition.objective import LinearMCObjective
from botorch.acquisition.risk_measures import (
CVaR,
Expectation,
RiskMeasureMCObjective,
VaR,
WorstCase,
)
from botorch.utils.testing import BotorchTestCase
from torch import Tensor
class NotSoAbstractRiskMeasure(RiskMeasureMCObjective):
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
prepared_samples = self._prepare_samples(samples)
return prepared_samples.sum(dim=-1)
class TestRiskMeasureMCObjective(BotorchTestCase):
def test_risk_measure_mc_objective(self):
# abstract raises
with self.assertRaises(TypeError):
RiskMeasureMCObjective(n_w=3)
for dtype in (torch.float, torch.double):
samples = torch.tensor(
[[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]],
device=self.device,
dtype=dtype,
)
obj = NotSoAbstractRiskMeasure(n_w=3)
# MO samples without weights
with self.assertRaises(RuntimeError):
obj(torch.ones(3, 2, device=self.device, dtype=dtype))
# test _prepare_samples
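# with n_w=3, the (1 x 6 x 1) sample tensor is reshaped into (1 x 2 x 3):
# two q-points, each with three w-realizations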
expected_samples = torch.tensor(
[[[1.0, 0.5, 2.0], [3.0, 1.0, 5.0]]],
device=self.device,
dtype=dtype,
)
prepared_samples = obj._prepare_samples(samples)
self.assertTrue(torch.equal(prepared_samples, expected_samples))
# test batches
samples = torch.rand(5, 3, 6, 1, device=self.device, dtype=dtype)
expected_samples = samples.view(5, 3, 2, 3)
prepared_samples = obj._prepare_samples(samples)
self.assertTrue(torch.equal(prepared_samples, expected_samples))
# negating with preprocessing function.
obj = NotSoAbstractRiskMeasure(
n_w=3,
preprocessing_function=LinearMCObjective(
weights=torch.tensor([-1.0], device=self.device, dtype=dtype)
),
)
prepared_samples = obj._prepare_samples(samples)
self.assertTrue(torch.equal(prepared_samples, -expected_samples))
# MO with weights
obj = NotSoAbstractRiskMeasure(
n_w=2,
preprocessing_function=LinearMCObjective(
weights=torch.tensor([1.0, 2.0], device=self.device, dtype=dtype)
),
)
samples = torch.tensor(
[
[
[1.0, 2.0],
[0.5, 0.7],
[2.0, 1.5],
[3.0, 4.0],
[1.0, 0.0],
[5.0, 3.0],
]
],
device=self.device,
dtype=dtype,
)
expected_samples = torch.tensor(
[[[5.0, 1.9], [5.0, 11.0], [1.0, 11.0]]],
device=self.device,
dtype=dtype,
)
prepared_samples = obj._prepare_samples(samples)
self.assertTrue(torch.equal(prepared_samples, expected_samples))
class TestCVaR(BotorchTestCase):
def test_cvar(self):
obj = CVaR(alpha=0.5, n_w=3)
self.assertEqual(obj.alpha_idx, 1)
with self.assertRaises(ValueError):
CVaR(alpha=3, n_w=3)
for dtype in (torch.float, torch.double):
obj = CVaR(alpha=0.5, n_w=3)
samples = torch.tensor(
[[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]],
device=self.device,
dtype=dtype,
)
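# sanity check: with n_w=3 and alpha=0.5 (alpha_idx=1), CVaR averages the
# two smallest values of each group of three:
# (0.5 + 1.0) / 2 = 0.75 and (1.0 + 3.0) / 2 = 2.0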
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor([[0.75, 2.0]], device=self.device, dtype=dtype),
)
)
# w/ preprocessing function
obj = CVaR(
alpha=0.5,
n_w=3,
preprocessing_function=LinearMCObjective(
weights=torch.tensor([-1.0], device=self.device, dtype=dtype)
),
)
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor([[-1.5, -4.0]], device=self.device, dtype=dtype),
)
)
class TestVaR(BotorchTestCase):
def test_var(self):
for dtype in (torch.float, torch.double):
obj = VaR(alpha=0.5, n_w=3)
samples = torch.tensor(
[[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]],
device=self.device,
dtype=dtype,
)
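# sanity check: VaR with alpha=0.5, n_w=3 picks the second-smallest value
# of each group of three, i.e. 1.0 and 3.0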
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor([[1.0, 3.0]], device=self.device, dtype=dtype),
)
)
# w/ preprocessing function
obj = VaR(
alpha=0.5,
n_w=3,
preprocessing_function=LinearMCObjective(
weights=torch.tensor([-1.0], device=self.device, dtype=dtype)
),
)
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor([[-1.0, -3.0]], device=self.device, dtype=dtype),
)
)
class TestWorstCase(BotorchTestCase):
def test_worst_case(self):
for dtype in (torch.float, torch.double):
obj = WorstCase(n_w=3)
samples = torch.tensor(
[[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]],
device=self.device,
dtype=dtype,
)
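# sanity check: WorstCase takes the minimum of each group of three:
# min(1.0, 0.5, 2.0) = 0.5 and min(3.0, 1.0, 5.0) = 1.0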
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor([[0.5, 1.0]], device=self.device, dtype=dtype),
)
)
# w/ preprocessing function
obj = WorstCase(
n_w=3,
preprocessing_function=LinearMCObjective(
weights=torch.tensor([-1.0], device=self.device, dtype=dtype)
),
)
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor([[-2.0, -5.0]], device=self.device, dtype=dtype),
)
)
class TestExpectation(BotorchTestCase):
def test_expectation(self):
for dtype in (torch.float, torch.double):
obj = Expectation(n_w=3)
samples = torch.tensor(
[[[1.0], [0.5], [1.5], [3.0], [1.0], [5.0]]],
device=self.device,
dtype=dtype,
)
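# sanity check: Expectation averages each group of three:
# (1.0 + 0.5 + 1.5) / 3 = 1.0 and (3.0 + 1.0 + 5.0) / 3 = 3.0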
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor([[1.0, 3.0]], device=self.device, dtype=dtype),
)
)
# w/ preprocessing function
samples = torch.tensor(
[
[
[1.0, 3.0],
[0.5, 1.0],
[1.5, 2.0],
[3.0, 1.0],
[1.0, 2.0],
[5.0, 3.0],
]
],
device=self.device,
dtype=dtype,
)
obj = Expectation(
n_w=3,
preprocessing_function=LinearMCObjective(
weights=torch.tensor([-1.0, 2.0], device=self.device, dtype=dtype)
),
)
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor([[3.0, 1.0]], device=self.device, dtype=dtype),
)
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.acquisition.acquisition import (
AcquisitionFunction,
MCSamplerMixin,
MultiModelAcquisitionFunction,
OneShotAcquisitionFunction,
)
from botorch.models.model import ModelDict
from botorch.sampling.normal import IIDNormalSampler
from botorch.sampling.stochastic_samplers import StochasticSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class DummyMCAcqf(AcquisitionFunction, MCSamplerMixin):
def __init__(self, model, sampler):
r"""Dummy acqf for testing MCSamplerMixin."""
super().__init__(model)
MCSamplerMixin.__init__(self, sampler)
def forward(self, X):
raise NotImplementedError
class DummyMultiModelAcqf(MultiModelAcquisitionFunction):
def forward(self, X):
raise NotImplementedError
class TestAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
AcquisitionFunction()
class TestOneShotAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
OneShotAcquisitionFunction()
class TestMCSamplerMixin(BotorchTestCase):
def test_mc_sampler_mixin(self):
mm = MockModel(MockPosterior(samples=torch.rand(1, 2)))
acqf = DummyMCAcqf(model=mm, sampler=None)
self.assertIsNone(acqf.sampler)
samples = acqf.get_posterior_samples(mm._posterior)
self.assertEqual(samples.shape, torch.Size([512, 1, 2]))
self.assertIsInstance(acqf.sampler, StochasticSampler)
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
acqf.sampler = sampler
self.assertIs(acqf.sampler, sampler)
class TestMultiModelAcquisitionFunction(BotorchTestCase):
def test_multi_model_acquisition_function(self):
model_dict = ModelDict(
m1=MockModel(MockPosterior()),
m2=MockModel(MockPosterior()),
)
with self.assertRaises(TypeError):
MultiModelAcquisitionFunction(model_dict=model_dict)
acqf = DummyMultiModelAcqf(model_dict=model_dict)
self.assertIs(acqf.model_dict, model_dict)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from botorch.acquisition import qAnalyticProbabilityOfImprovement
from botorch.acquisition.analytic import (
_compute_log_prob_feas,
_ei_helper,
_log_ei_helper,
AnalyticAcquisitionFunction,
ConstrainedExpectedImprovement,
ExpectedImprovement,
LogConstrainedExpectedImprovement,
LogExpectedImprovement,
LogNoisyExpectedImprovement,
LogProbabilityOfImprovement,
NoisyExpectedImprovement,
PosteriorMean,
ProbabilityOfImprovement,
ScalarizedPosteriorMean,
UpperConfidenceBound,
)
from botorch.acquisition.objective import (
IdentityMCObjective,
ScalarizedPosteriorTransform,
)
from botorch.exceptions import UnsupportedError
from botorch.models import FixedNoiseGP, SingleTaskGP
from botorch.posteriors import GPyTorchPosterior
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
NEI_NOISE = [
[-0.099],
[-0.004],
[0.227],
[-0.182],
[0.018],
[0.334],
[-0.270],
[0.156],
[-0.237],
[0.052],
]
class DummyAnalyticAcquisitionFunction(AnalyticAcquisitionFunction):
def forward(self, X):
pass
class TestAnalyticAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
AnalyticAcquisitionFunction()
# raise if model is multi-output, but no posterior transform is given
mean = torch.zeros(1, 2)
variance = torch.ones(1, 2)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
with self.assertRaises(UnsupportedError):
DummyAnalyticAcquisitionFunction(model=mm)
class TestExpectedImprovement(BotorchTestCase):
def test_expected_improvement(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor([[-0.5]], device=self.device, dtype=dtype)
variance = torch.ones(1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
# basic test
module = ExpectedImprovement(model=mm, best_f=0.0)
log_module = LogExpectedImprovement(model=mm, best_f=0.0)
X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy
ei, log_ei = module(X), log_module(X)
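# sanity check: with phi/Phi the standard normal pdf/cdf and
# z = (mean - best_f) / sigma = -0.5, analytic EI is
# sigma * (phi(z) + z * Phi(z)) = 0.3521 - 0.5 * 0.3085 ~= 0.1978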
ei_expected = torch.tensor(0.19780, device=self.device, dtype=dtype)
self.assertAllClose(ei, ei_expected, atol=1e-4)
self.assertAllClose(log_ei, ei_expected.log(), atol=1e-4)
# test maximize
module = ExpectedImprovement(model=mm, best_f=0.0, maximize=False)
log_module = LogExpectedImprovement(model=mm, best_f=0.0, maximize=False)
X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy
ei, log_ei = module(X), log_module(X)
ei_expected = torch.tensor(0.6978, device=self.device, dtype=dtype)
self.assertAllClose(ei, ei_expected, atol=1e-4)
self.assertAllClose(log_ei, ei_expected.log(), atol=1e-4)
with self.assertRaises(UnsupportedError):
module.set_X_pending(None)
with self.assertRaises(UnsupportedError):
log_module.set_X_pending(None)
# test posterior transform (single-output)
mean = torch.tensor([0.5], device=self.device, dtype=dtype)
covar = torch.tensor([[0.16]], device=self.device, dtype=dtype)
mvn = MultivariateNormal(mean, covar)
p = GPyTorchPosterior(mvn)
mm = MockModel(p)
weights = torch.tensor([0.5], device=self.device, dtype=dtype)
transform = ScalarizedPosteriorTransform(weights)
ei = ExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
log_ei = LogExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
X = torch.rand(1, 2, device=self.device, dtype=dtype)
ei_expected = torch.tensor(0.2601, device=self.device, dtype=dtype)
self.assertAllClose(ei(X), ei_expected, atol=1e-4)
self.assertAllClose(log_ei(X), ei_expected.log(), atol=1e-4)
# test posterior transform (multi-output)
mean = torch.tensor([[-0.25, 0.5]], device=self.device, dtype=dtype)
covar = torch.tensor(
[[[0.5, 0.125], [0.125, 0.5]]], device=self.device, dtype=dtype
)
mvn = MultitaskMultivariateNormal(mean, covar)
p = GPyTorchPosterior(mvn)
mm = MockModel(p)
weights = torch.tensor([2.0, 1.0], device=self.device, dtype=dtype)
transform = ScalarizedPosteriorTransform(weights)
ei = ExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
log_ei = LogExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
X = torch.rand(1, 2, device=self.device, dtype=dtype)
ei_expected = torch.tensor([0.6910], device=self.device, dtype=dtype)
self.assertAllClose(ei(X), ei_expected, atol=1e-4)
self.assertAllClose(log_ei(X), ei_expected.log(), atol=1e-4)
# making sure we compare the lower branch of _log_ei_helper to _ei_helper
z = torch.tensor(-2.13, dtype=dtype, device=self.device)
self.assertAllClose(_log_ei_helper(z), _ei_helper(z).log(), atol=1e-6)
# numerical stress test for log EI
digits = 100 if dtype == torch.float64 else 20
zero = torch.tensor([0], dtype=dtype, device=self.device)
ten = torch.tensor(10, dtype=dtype, device=self.device)
digits_tensor = torch.arange(0, digits, dtype=dtype, device=self.device)
large_z = ten ** (digits_tensor)
small_z = ten ** (-digits_tensor)
# flipping the appropriate tensors so that elements are in increasing order
test_z = [-large_z.flip(-1), -small_z, zero, small_z.flip(-1), large_z]
for z in test_z:
z.requires_grad = True
y = _log_ei_helper(z) # noqa
# check that y isn't NaN or Inf
self.assertFalse(y.isnan().any())
self.assertFalse(y.isinf().any())
# function values should increase with z
self.assertTrue((y.diff() >= 0).all())
# let's check the backward pass
y.sum().backward()
# check that gradients aren't NaN or Inf
g = z.grad
self.assertFalse(g.isnan().any())
self.assertFalse(g.isinf().any())
self.assertTrue((g >= 0).all()) # gradient is non-negative for all z
with self.assertRaises(TypeError):
_log_ei_helper(z.to(dtype=torch.float16))
def test_expected_improvement_batch(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor([-0.5, 0.0, 0.5], device=self.device, dtype=dtype).view(
3, 1, 1
)
variance = torch.ones(3, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
module = ExpectedImprovement(model=mm, best_f=0.0)
log_module = LogExpectedImprovement(model=mm, best_f=0.0)
X = torch.empty(3, 1, 1, device=self.device, dtype=dtype) # dummy
ei, log_ei = module(X), log_module(X)
ei_expected = torch.tensor(
[0.19780, 0.39894, 0.69780], device=self.device, dtype=dtype
)
self.assertAllClose(ei, ei_expected, atol=1e-4)
self.assertAllClose(log_ei, ei_expected.log(), atol=1e-4)
# check for proper error if multi-output model
mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
variance2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
with self.assertRaises(UnsupportedError):
ExpectedImprovement(model=mm2, best_f=0.0)
with self.assertRaises(UnsupportedError):
LogExpectedImprovement(model=mm2, best_f=0.0)
# test posterior transform (single-output)
mean = torch.tensor([[[0.5]], [[0.25]]], device=self.device, dtype=dtype)
covar = torch.tensor(
[[[[0.16]]], [[[0.125]]]], device=self.device, dtype=dtype
)
mvn = MultivariateNormal(mean, covar)
p = GPyTorchPosterior(mvn)
mm = MockModel(p)
weights = torch.tensor([0.5], device=self.device, dtype=dtype)
transform = ScalarizedPosteriorTransform(weights)
ei = ExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
log_ei = LogExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
X = torch.rand(2, 1, 2, device=self.device, dtype=dtype)
ei_expected = torch.tensor(
[[0.2601], [0.1500]], device=self.device, dtype=dtype
)
self.assertAllClose(ei(X), ei_expected, atol=1e-4)
self.assertAllClose(log_ei(X), ei(X).log(), atol=1e-4)
# test posterior transform (multi-output)
mean = torch.tensor(
[[[-0.25, 0.5]], [[0.2, -0.1]]], device=self.device, dtype=dtype
)
covar = torch.tensor(
[[[0.5, 0.125], [0.125, 0.5]], [[0.25, -0.1], [-0.1, 0.25]]],
device=self.device,
dtype=dtype,
)
mvn = MultitaskMultivariateNormal(mean, covar)
p = GPyTorchPosterior(mvn)
mm = MockModel(p)
weights = torch.tensor([2.0, 1.0], device=self.device, dtype=dtype)
transform = ScalarizedPosteriorTransform(weights)
ei = ExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
log_ei = LogExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
X = torch.rand(2, 1, 2, device=self.device, dtype=dtype)
ei_expected = torch.tensor(
[0.6910, 0.5371], device=self.device, dtype=dtype
)
self.assertAllClose(ei(X), ei_expected, atol=1e-4)
self.assertAllClose(log_ei(X), ei_expected.log(), atol=1e-4)
# test bad posterior transform class
with self.assertRaises(UnsupportedError):
ExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=IdentityMCObjective()
)
with self.assertRaises(UnsupportedError):
LogExpectedImprovement(
model=mm, best_f=0.0, posterior_transform=IdentityMCObjective()
)
class TestPosteriorMean(BotorchTestCase):
def test_posterior_mean(self):
for dtype in (torch.float, torch.double):
mean = torch.rand(3, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean))
module = PosteriorMean(model=mm)
X = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
pm = module(X)
self.assertTrue(torch.equal(pm, mean.view(-1)))
module = PosteriorMean(model=mm, maximize=False)
X = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
pm = module(X)
self.assertTrue(torch.equal(pm, -mean.view(-1)))
# check for proper error if multi-output model
mean2 = torch.rand(1, 2, device=self.device, dtype=dtype)
mm2 = MockModel(MockPosterior(mean=mean2))
with self.assertRaises(UnsupportedError):
PosteriorMean(model=mm2)
def test_posterior_mean_batch(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor([-0.5, 0.0, 0.5], device=self.device, dtype=dtype).view(
3, 1, 1
)
mm = MockModel(MockPosterior(mean=mean))
module = PosteriorMean(model=mm)
X = torch.empty(3, 1, 1, device=self.device, dtype=dtype)
pm = module(X)
self.assertTrue(torch.equal(pm, mean.view(-1)))
# check for proper error if multi-output model
mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
mm2 = MockModel(MockPosterior(mean=mean2))
with self.assertRaises(UnsupportedError):
PosteriorMean(model=mm2)
class TestProbabilityOfImprovement(BotorchTestCase):
def test_probability_of_improvement(self):
for dtype in (torch.float, torch.double):
mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
variance = torch.ones(1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
kwargs = {"model": mm, "best_f": 1.96}
module = ProbabilityOfImprovement(**kwargs)
log_module = LogProbabilityOfImprovement(**kwargs)
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
pi, log_pi = module(X), log_module(X)
pi_expected = torch.tensor(0.0250, device=self.device, dtype=dtype)
self.assertAllClose(pi, pi_expected, atol=1e-4)
self.assertAllClose(log_pi.exp(), pi)
kwargs = {"model": mm, "best_f": 1.96, "maximize": False}
module = ProbabilityOfImprovement(**kwargs)
log_module = LogProbabilityOfImprovement(**kwargs)
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
pi, log_pi = module(X), log_module(X)
pi_expected = torch.tensor(0.9750, device=self.device, dtype=dtype)
self.assertAllClose(pi, pi_expected, atol=1e-4)
self.assertAllClose(log_pi.exp(), pi)
# check for proper error if multi-output model
mean2 = torch.rand(1, 2, device=self.device, dtype=dtype)
variance2 = torch.ones_like(mean2)
mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
with self.assertRaises(UnsupportedError):
ProbabilityOfImprovement(model=mm2, best_f=0.0)
with self.assertRaises(UnsupportedError):
LogProbabilityOfImprovement(model=mm2, best_f=0.0)
def test_probability_of_improvement_batch(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor([0.0, 0.67449], device=self.device, dtype=dtype).view(
2, 1, 1
)
variance = torch.ones_like(mean)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
module = ProbabilityOfImprovement(model=mm, best_f=0.0)
log_module = LogProbabilityOfImprovement(model=mm, best_f=0.0)
X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype)
pi, log_pi = module(X), log_module(X)
pi_expected = torch.tensor([0.5, 0.75], device=self.device, dtype=dtype)
self.assertAllClose(pi, pi_expected, atol=1e-4)
self.assertAllClose(log_pi.exp(), pi)
# check for proper error if multi-output model
mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
variance2 = torch.ones_like(mean2)
mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
with self.assertRaises(UnsupportedError):
ProbabilityOfImprovement(model=mm2, best_f=0.0)
with self.assertRaises(UnsupportedError):
LogProbabilityOfImprovement(model=mm2, best_f=0.0)
class TestqAnalyticProbabilityOfImprovement(BotorchTestCase):
def test_q_analytic_probability_of_improvement(self):
for dtype in (torch.float, torch.double):
mean = torch.zeros(1, device=self.device, dtype=dtype)
cov = torch.eye(n=1, device=self.device, dtype=dtype)
mvn = MultivariateNormal(mean=mean, covariance_matrix=cov)
posterior = GPyTorchPosterior(mvn)
mm = MockModel(posterior)
# basic test
module = qAnalyticProbabilityOfImprovement(model=mm, best_f=1.96)
X = torch.rand(1, 2, device=self.device, dtype=dtype)
pi = module(X)
pi_expected = torch.tensor(0.0250, device=self.device, dtype=dtype)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
# basic test, maximize
module = qAnalyticProbabilityOfImprovement(
model=mm, best_f=1.96, maximize=False
)
X = torch.rand(1, 2, device=self.device, dtype=dtype)
pi = module(X)
pi_expected = torch.tensor(0.9750, device=self.device, dtype=dtype)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
# basic test, posterior transform (single-output)
mean = torch.ones(1, device=self.device, dtype=dtype)
cov = torch.eye(n=1, device=self.device, dtype=dtype)
mvn = MultivariateNormal(mean=mean, covariance_matrix=cov)
posterior = GPyTorchPosterior(mvn)
mm = MockModel(posterior)
weights = torch.tensor([0.5], device=self.device, dtype=dtype)
transform = ScalarizedPosteriorTransform(weights)
module = qAnalyticProbabilityOfImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
X = torch.rand(1, 2, device=self.device, dtype=dtype)
pi = module(X)
pi_expected = torch.tensor(0.8413, device=self.device, dtype=dtype)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
# basic test, posterior transform (multi-output)
mean = torch.ones(1, 2, device=self.device, dtype=dtype)
cov = torch.eye(n=2, device=self.device, dtype=dtype).unsqueeze(0)
mvn = MultitaskMultivariateNormal(mean=mean, covariance_matrix=cov)
posterior = GPyTorchPosterior(mvn)
mm = MockModel(posterior)
weights = torch.ones(2, device=self.device, dtype=dtype)
transform = ScalarizedPosteriorTransform(weights)
module = qAnalyticProbabilityOfImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
X = torch.rand(1, 1, device=self.device, dtype=dtype)
pi = module(X)
pi_expected = torch.tensor(0.9214, device=self.device, dtype=dtype)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
# basic test, q = 2
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.eye(n=2, device=self.device, dtype=dtype)
mvn = MultivariateNormal(mean=mean, covariance_matrix=cov)
posterior = GPyTorchPosterior(mvn)
mm = MockModel(posterior)
module = qAnalyticProbabilityOfImprovement(model=mm, best_f=1.96)
X = torch.zeros(2, 2, device=self.device, dtype=dtype)
pi = module(X)
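# with q=2 i.i.d. standard normal points, the probability that at least one
# exceeds best_f=1.96 is 1 - Phi(1.96)^2 = 1 - 0.975^2 = 0.049375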
pi_expected = torch.tensor(0.049375, device=self.device, dtype=dtype)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
def test_batch_q_analytic_probability_of_improvement(self):
for dtype in (torch.float, torch.double):
# test batch mode
mean = torch.tensor([[0.0], [1.0]], device=self.device, dtype=dtype)
cov = (
torch.eye(n=1, device=self.device, dtype=dtype)
.unsqueeze(0)
.repeat(2, 1, 1)
)
mvn = MultivariateNormal(mean=mean, covariance_matrix=cov)
posterior = GPyTorchPosterior(mvn)
mm = MockModel(posterior)
module = qAnalyticProbabilityOfImprovement(model=mm, best_f=0)
X = torch.rand(2, 1, 1, device=self.device, dtype=dtype)
pi = module(X)
pi_expected = torch.tensor([0.5, 0.8413], device=self.device, dtype=dtype)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
# test batched model and best_f values
mean = torch.zeros(2, 1, device=self.device, dtype=dtype)
cov = (
torch.eye(n=1, device=self.device, dtype=dtype)
.unsqueeze(0)
.repeat(2, 1, 1)
)
mvn = MultivariateNormal(mean=mean, covariance_matrix=cov)
posterior = GPyTorchPosterior(mvn)
mm = MockModel(posterior)
best_f = torch.tensor([0.0, -1.0], device=self.device, dtype=dtype)
module = qAnalyticProbabilityOfImprovement(model=mm, best_f=best_f)
X = torch.rand(2, 1, 1, device=self.device, dtype=dtype)
pi = module(X)
pi_expected = torch.tensor([[0.5, 0.8413]], device=self.device, dtype=dtype)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
# test batched model, output transform (single output)
mean = torch.tensor([[0.0], [1.0]], device=self.device, dtype=dtype)
cov = (
torch.eye(n=1, device=self.device, dtype=dtype)
.unsqueeze(0)
.repeat(2, 1, 1)
)
mvn = MultivariateNormal(mean=mean, covariance_matrix=cov)
posterior = GPyTorchPosterior(mvn)
mm = MockModel(posterior)
weights = torch.tensor([0.5], device=self.device, dtype=dtype)
transform = ScalarizedPosteriorTransform(weights)
module = qAnalyticProbabilityOfImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
X = torch.rand(2, 1, 2, device=self.device, dtype=dtype)
pi = module(X)
pi_expected = torch.tensor([0.5, 0.8413], device=self.device, dtype=dtype)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
# test batched model, output transform (multiple output)
mean = torch.tensor(
[[[1.0, 1.0]], [[0.0, 1.0]]], device=self.device, dtype=dtype
)
cov = (
torch.eye(n=2, device=self.device, dtype=dtype)
.unsqueeze(0)
.repeat(2, 1, 1)
)
mvn = MultitaskMultivariateNormal(mean=mean, covariance_matrix=cov)
posterior = GPyTorchPosterior(mvn)
mm = MockModel(posterior)
weights = torch.ones(2, device=self.device, dtype=dtype)
transform = ScalarizedPosteriorTransform(weights)
module = qAnalyticProbabilityOfImprovement(
model=mm, best_f=0.0, posterior_transform=transform
)
X = torch.rand(2, 1, 2, device=self.device, dtype=dtype)
pi = module(X)
pi_expected = torch.tensor(
[0.9214, 0.7602], device=self.device, dtype=dtype
)
self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))
# test bad posterior transform class
with self.assertRaises(UnsupportedError):
qAnalyticProbabilityOfImprovement(
model=mm, best_f=0.0, posterior_transform=IdentityMCObjective()
)
class TestUpperConfidenceBound(BotorchTestCase):
def test_upper_confidence_bound(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor([[0.5]], device=self.device, dtype=dtype)
variance = torch.tensor([[1.0]], device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
module = UpperConfidenceBound(model=mm, beta=1.0)
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
ucb = module(X)
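# UCB = mean + sqrt(beta) * sigma = 0.5 + 1.0 * 1.0 = 1.5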
ucb_expected = torch.tensor(1.5, device=self.device, dtype=dtype)
self.assertAllClose(ucb, ucb_expected, atol=1e-4)
module = UpperConfidenceBound(model=mm, beta=1.0, maximize=False)
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
ucb = module(X)
ucb_expected = torch.tensor(0.5, device=self.device, dtype=dtype)
self.assertAllClose(ucb, ucb_expected, atol=1e-4)
# check for proper error if multi-output model
mean2 = torch.rand(1, 2, device=self.device, dtype=dtype)
variance2 = torch.rand(1, 2, device=self.device, dtype=dtype)
mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
with self.assertRaises(UnsupportedError):
UpperConfidenceBound(model=mm2, beta=1.0)
def test_upper_confidence_bound_batch(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor([0.0, 0.5], device=self.device, dtype=dtype).view(
2, 1, 1
)
variance = torch.tensor([1.0, 4.0], device=self.device, dtype=dtype).view(
2, 1, 1
)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
module = UpperConfidenceBound(model=mm, beta=1.0)
X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype)
ucb = module(X)
ucb_expected = torch.tensor([1.0, 2.5], device=self.device, dtype=dtype)
self.assertAllClose(ucb, ucb_expected, atol=1e-4)
# check for proper error if multi-output model
mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
variance2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
with self.assertRaises(UnsupportedError):
UpperConfidenceBound(model=mm2, beta=1.0)
class TestConstrainedExpectedImprovement(BotorchTestCase):
def test_constrained_expected_improvement(self):
for dtype in (torch.float, torch.double):
# one constraint
mean = torch.tensor(
[[-0.5, 0.0]], device=self.device, dtype=dtype
).unsqueeze(dim=-2)
variance = torch.ones(1, 2, device=self.device, dtype=dtype).unsqueeze(
dim=-2
)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
kwargs = {
"model": mm,
"best_f": 0.0,
"objective_index": 0,
"constraints": {1: [None, 0]},
}
module = ConstrainedExpectedImprovement(**kwargs)
log_module = LogConstrainedExpectedImprovement(**kwargs)
# test initialization
for k in [
"con_lower_inds",
"con_upper_inds",
"con_both_inds",
"con_both",
"con_lower",
"con_upper",
]:
self.assertIn(k, module._buffers)
self.assertIn(k, log_module._buffers)
X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy
ei = module(X)
ei_expected_unconstrained = torch.tensor(
[0.19780], device=self.device, dtype=dtype
)
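# the constraint output has zero mean and unit variance with upper bound 0,
# so it is feasible with probability 0.5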
ei_expected = ei_expected_unconstrained * 0.5
self.assertAllClose(ei, ei_expected, atol=1e-4)
log_ei = log_module(X)
self.assertAllClose(log_ei, ei.log(), atol=1e-5)
            # testing LogCEI against CEI for lower, upper, and two-sided bounds
for bounds in [[None, 0], [0, None], [0, 1]]:
kwargs["constraints"] = {1: bounds}
module = ConstrainedExpectedImprovement(**kwargs)
log_module = LogConstrainedExpectedImprovement(**kwargs)
ei, log_ei = module(X), log_module(X)
self.assertAllClose(log_ei, ei.log(), atol=1e-5)
constructors = [
ConstrainedExpectedImprovement,
LogConstrainedExpectedImprovement,
]
for constructor in constructors:
# check that error raised if no constraints
with self.assertRaises(ValueError):
module = constructor(
model=mm, best_f=0.0, objective_index=0, constraints={}
)
# check that error raised if objective is a constraint
with self.assertRaises(ValueError):
module = constructor(
model=mm,
best_f=0.0,
objective_index=0,
constraints={0: [None, 0]},
)
# check that error raised if constraint lower > upper
with self.assertRaises(ValueError):
module = constructor(
model=mm, best_f=0.0, objective_index=0, constraints={0: [1, 0]}
)
# three constraints
N = torch.distributions.Normal(loc=0.0, scale=1.0)
a = N.icdf(torch.tensor(0.75)) # get a so that P(-a <= N <= a) = 0.5
mean = torch.tensor(
[[-0.5, 0.0, 5.0, 0.0]], device=self.device, dtype=dtype
).unsqueeze(dim=-2)
variance = torch.ones(1, 4, device=self.device, dtype=dtype).unsqueeze(
dim=-2
)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
kwargs = {
"model": mm,
"best_f": 0.0,
"objective_index": 0,
"constraints": {1: [None, 0], 2: [5.0, None], 3: [-a, a]},
}
module = ConstrainedExpectedImprovement(**kwargs)
log_module = LogConstrainedExpectedImprovement(**kwargs)
X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy
ei = module(X)
ei_expected_unconstrained = torch.tensor(
[0.19780], device=self.device, dtype=dtype
)
ei_expected = ei_expected_unconstrained * 0.5 * 0.5 * 0.5
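            # Each of the three constraints is feasible with probability 0.5:
            # Phi(0) = 0.5 for the upper bound at 0 (mean 0), P(Y >= 5) = 0.5 for the
            # lower bound at the mean, and P(-a <= N <= a) = 0.5 by the choice of a,
            # hence the 0.5 ** 3 factor.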
self.assertAllClose(ei, ei_expected, atol=1e-4)
# testing log module with regular implementation
log_ei = log_module(X)
self.assertAllClose(log_ei, ei_expected.log(), atol=1e-4)
# test maximize
kwargs = {
"model": mm,
"best_f": 0.0,
"objective_index": 0,
"constraints": {1: [None, 0]},
"maximize": False,
}
module_min = ConstrainedExpectedImprovement(**kwargs)
log_module_min = LogConstrainedExpectedImprovement(**kwargs)
ei_min = module_min(X)
ei_expected_unconstrained_min = torch.tensor(
[0.6978], device=self.device, dtype=dtype
)
ei_expected_min = ei_expected_unconstrained_min * 0.5
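            # Sketch: with maximize=False, improvement is measured below best_f, so
            # u = (best_f - mu) / sigma = 0.5 and
            # EI = sigma * (u * Phi(u) + phi(u)) ≈ 0.5 * 0.6915 + 0.3521 ≈ 0.6978.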
self.assertAllClose(ei_min, ei_expected_min, atol=1e-4)
log_ei_min = log_module_min(X)
self.assertAllClose(log_ei_min, ei_min.log(), atol=1e-4)
            # test invalid constraints
for constructor in constructors:
with self.assertRaises(ValueError):
constructor(
model=mm,
best_f=0.0,
objective_index=0,
constraints={1: [1.0, -1.0]},
)
            # numerical stress test for _compute_log_prob_feas, which gets added to
            # log_ei in the forward pass, a quantity we already tested above.
            # The limits here are determined by the largest power of ten x such that
            # x - (b - a) < x
            # evaluates to True in floating point. In this test, the constraint
            # bounds are a, b = -5, 5.
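            # For intuition (illustrative example, not from the test itself): in
            # float32, torch.tensor(1e8) + 1.0 == 1e8, because adjacent representable
            # values near 1e8 are 8 apart, so the +1 is absorbed; the same absorption
            # bounds the range of means probed below.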
digits = 10 if dtype == torch.float64 else 5
zero = torch.tensor([0], dtype=dtype, device=self.device)
ten = torch.tensor(10, dtype=dtype, device=self.device)
digits_tensor = 1 + torch.arange(
-digits, digits, dtype=dtype, device=self.device
)
X_positive = ten ** (digits_tensor)
# flipping -X_positive so that elements are in increasing order
means = torch.cat((-X_positive.flip(-1), zero, X_positive)).unsqueeze(-1)
means.requires_grad = True
log_module = LogConstrainedExpectedImprovement(
model=mm,
best_f=0.0,
objective_index=1,
constraints={0: [-5, 5]},
)
log_prob = _compute_log_prob_feas(
log_module, means=means, sigmas=torch.ones_like(means)
)
log_prob.sum().backward()
self.assertFalse(log_prob.isnan().any())
self.assertFalse(log_prob.isinf().any())
self.assertFalse(means.grad.isnan().any())
self.assertFalse(means.grad.isinf().any())
# probability of feasibility increases until X = 0, decreases from there on
prob_diff = log_prob.diff()
k = len(X_positive)
eps = 1e-6 if dtype == torch.float32 else 1e-15
self.assertTrue((prob_diff[:k] > -eps).all())
self.assertTrue((means.grad[:k] > -eps).all())
# probability has stationary point at zero
mean_grad_at_zero = means.grad[len(X_positive)]
self.assertTrue(
torch.allclose(mean_grad_at_zero, torch.zeros_like(mean_grad_at_zero))
)
            # probability of feasibility decreases after zero
self.assertTrue((prob_diff[-k:] < eps).all())
self.assertTrue((means.grad[-k:] < eps).all())
def test_constrained_expected_improvement_batch(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor(
[[-0.5, 0.0, 5.0, 0.0], [0.0, 0.0, 5.0, 0.0], [0.5, 0.0, 5.0, 0.0]],
device=self.device,
dtype=dtype,
).unsqueeze(dim=-2)
variance = torch.ones(3, 4, device=self.device, dtype=dtype).unsqueeze(
dim=-2
)
N = torch.distributions.Normal(loc=0.0, scale=1.0)
a = N.icdf(torch.tensor(0.75)) # get a so that P(-a <= N <= a) = 0.5
mm = MockModel(MockPosterior(mean=mean, variance=variance))
kwargs = {
"model": mm,
"best_f": 0.0,
"objective_index": 0,
"constraints": {1: [None, 0], 2: [5.0, None], 3: [-a, a]},
}
module = ConstrainedExpectedImprovement(**kwargs)
log_module = LogConstrainedExpectedImprovement(**kwargs)
X = torch.empty(3, 1, 1, device=self.device, dtype=dtype) # dummy
ei, log_ei = module(X), log_module(X)
self.assertTrue(ei.shape == torch.Size([3]))
self.assertTrue(log_ei.shape == torch.Size([3]))
ei_expected_unconstrained = torch.tensor(
[0.19780, 0.39894, 0.69780], device=self.device, dtype=dtype
)
ei_expected = ei_expected_unconstrained * 0.5 * 0.5 * 0.5
self.assertAllClose(ei, ei_expected, atol=1e-4)
self.assertAllClose(log_ei, ei.log(), atol=1e-4)
class TestNoisyExpectedImprovement(BotorchTestCase):
def _get_model(self, dtype=torch.float):
state_dict = {
"mean_module.raw_constant": torch.tensor([-0.0066]),
"covar_module.raw_outputscale": torch.tensor(1.0143),
"covar_module.base_kernel.raw_lengthscale": torch.tensor([[-0.99]]),
"covar_module.base_kernel.lengthscale_prior.concentration": torch.tensor(
3.0
),
"covar_module.base_kernel.lengthscale_prior.rate": torch.tensor(6.0),
"covar_module.outputscale_prior.concentration": torch.tensor(2.0),
"covar_module.outputscale_prior.rate": torch.tensor(0.1500),
}
train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).unsqueeze(
-1
)
train_y = torch.sin(train_x * (2 * math.pi))
noise = torch.tensor(NEI_NOISE, device=self.device, dtype=dtype)
train_y += noise
train_yvar = torch.full_like(train_y, 0.25**2)
model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)
model.load_state_dict(state_dict)
model.to(train_x)
model.eval()
return model
def test_noisy_expected_improvement(self):
for dtype in (torch.float, torch.double):
model = self._get_model(dtype=dtype)
X_observed = model.train_inputs[0]
nfan = 5
nEI = NoisyExpectedImprovement(model, X_observed, num_fantasies=nfan)
LogNEI = LogNoisyExpectedImprovement(model, X_observed, num_fantasies=nfan)
# before assigning, check that the attributes exist
self.assertTrue(hasattr(LogNEI, "model"))
self.assertTrue(hasattr(LogNEI, "best_f"))
self.assertTrue(isinstance(LogNEI.model, FixedNoiseGP))
LogNEI.model = nEI.model # let the two share their values and fantasies
LogNEI.best_f = nEI.best_f
X_test = torch.tensor(
[[[0.25]], [[0.75]]],
device=X_observed.device,
dtype=dtype,
)
X_test_log = X_test.clone()
X_test.requires_grad = True
X_test_log.requires_grad = True
val = nEI(X_test)
# testing logNEI yields the same result (also checks dtype)
log_val = LogNEI(X_test_log)
exp_log_val = log_val.exp()
            # notably, val[1] is usually zero in this test, which is precisely what
            # gives rise to problems during optimization; logNEI avoids this, since
            # it instead takes a large negative value (< -2000) and retains strong
            # gradient signals in this regime.
rtol = 1e-12 if dtype == torch.double else 1e-6
atol = rtol
self.assertAllClose(exp_log_val, val, atol=atol, rtol=rtol)
# test basics
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.device.type, X_observed.device.type)
self.assertEqual(val.shape, torch.Size([2]))
# test values
self.assertGreater(val[0].item(), 8e-5)
self.assertLess(val[1].item(), 1e-6)
# test gradient
val.sum().backward()
self.assertGreater(X_test.grad[0].abs().item(), 8e-6)
# testing gradient through exp of log computation
exp_log_val.sum().backward()
            # testing that the first gradient elements coincide. The second is in the
            # regime where the naive implementation loses accuracy.
atol = 2e-5 if dtype == torch.float32 else 1e-12
rtol = atol
self.assertTrue(
torch.allclose(X_test.grad[0], X_test_log.grad[0], atol=atol, rtol=rtol)
)
# test non-FixedNoiseGP model
other_model = SingleTaskGP(X_observed, model.train_targets.unsqueeze(-1))
for constructor in (
NoisyExpectedImprovement,
LogNoisyExpectedImprovement,
):
with self.assertRaises(UnsupportedError):
constructor(other_model, X_observed, num_fantasies=5)
# Test constructor with minimize
acqf = constructor(model, X_observed, num_fantasies=5, maximize=False)
# test evaluation without gradients enabled
with torch.no_grad():
acqf(X_test)
# testing gradients are only propagated if X_observed requires them
# i.e. kernel hyper-parameters are not tracked through to best_f
X_observed.requires_grad = False
acqf = constructor(model, X_observed, num_fantasies=5)
self.assertFalse(acqf.best_f.requires_grad)
X_observed.requires_grad = True
acqf = constructor(model, X_observed, num_fantasies=5)
self.assertTrue(acqf.best_f.requires_grad)
class TestScalarizedPosteriorMean(BotorchTestCase):
def test_scalarized_posterior_mean(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor([[0.25], [0.5]], device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean))
weights = torch.tensor([0.5, 1.0], device=self.device, dtype=dtype)
module = ScalarizedPosteriorMean(model=mm, weights=weights)
X = torch.empty(1, 1, device=self.device, dtype=dtype)
pm = module(X)
self.assertTrue(
torch.allclose(pm, (mean.squeeze(-1) * module.weights).sum(dim=-1))
)
def test_scalarized_posterior_mean_batch(self):
for dtype in (torch.float, torch.double):
mean = torch.tensor(
[[-0.5, 1.0], [0.0, 1.0], [0.5, 1.0]], device=self.device, dtype=dtype
).view(3, 2, 1)
mm = MockModel(MockPosterior(mean=mean))
weights = torch.tensor([0.5, 1.0], device=self.device, dtype=dtype)
module = ScalarizedPosteriorMean(model=mm, weights=weights)
X = torch.empty(3, 1, 1, device=self.device, dtype=dtype)
pm = module(X)
self.assertTrue(
torch.allclose(pm, (mean.squeeze(-1) * module.weights).sum(dim=-1))
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
from typing import Optional
import torch
from botorch import settings
from botorch.acquisition import LearnedObjective
from botorch.acquisition.objective import (
ConstrainedMCObjective,
ExpectationPosteriorTransform,
GenericMCObjective,
IdentityMCObjective,
LEARNED_OBJECTIVE_PREF_MODEL_MIXED_DTYPE_WARN,
LinearMCObjective,
MCAcquisitionObjective,
PosteriorTransform,
ScalarizedPosteriorTransform,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import _get_single_precision_warning, InputDataWarning
from botorch.models.deterministic import PosteriorMeanModel
from botorch.models.pairwise_gp import PairwiseGP
from botorch.models.transforms.input import Normalize
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils import apply_constraints
from botorch.utils.testing import _get_test_posterior, BotorchTestCase
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from linear_operator.operators.dense_linear_operator import to_linear_operator
from torch import Tensor
def generic_obj_deprecated(samples: Tensor) -> Tensor:
return torch.log(torch.sum(samples**2, dim=-1))
def generic_obj(samples: Tensor, X=None) -> Tensor:
return generic_obj_deprecated(samples)
def infeasible_con(samples: Tensor) -> Tensor:
return torch.ones(samples.shape[0:-1], device=samples.device, dtype=samples.dtype)
def feasible_con(samples: Tensor) -> Tensor:
return -(
torch.ones(samples.shape[0:-1], device=samples.device, dtype=samples.dtype)
)
class TestPosteriorTransform(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
PosteriorTransform()
class TestScalarizedPosteriorTransform(BotorchTestCase):
def test_scalarized_posterior_transform(self):
for batch_shape, m, dtype in itertools.product(
([], [3]), (1, 2), (torch.float, torch.double)
):
offset = torch.rand(1).item()
weights = torch.randn(m, device=self.device, dtype=dtype)
obj = ScalarizedPosteriorTransform(weights=weights, offset=offset)
posterior = _get_test_posterior(
batch_shape, m=m, device=self.device, dtype=dtype
)
mean, covar = (
posterior.distribution.mean,
posterior.distribution.covariance_matrix,
)
new_posterior = obj(posterior)
exp_size = torch.Size(batch_shape + [1, 1])
self.assertEqual(new_posterior.mean.shape, exp_size)
new_mean_exp = offset + mean @ weights
self.assertAllClose(new_posterior.mean[..., -1], new_mean_exp)
self.assertEqual(new_posterior.variance.shape, exp_size)
new_covar_exp = ((covar @ weights) @ weights).unsqueeze(-1)
self.assertTrue(
torch.allclose(new_posterior.variance[..., -1], new_covar_exp)
)
# test error
with self.assertRaises(ValueError):
ScalarizedPosteriorTransform(weights=torch.rand(2, m))
# test evaluate
Y = torch.rand(2, m, device=self.device, dtype=dtype)
val = obj.evaluate(Y)
val_expected = offset + Y @ weights
self.assertTrue(torch.equal(val, val_expected))
class TestExpectationPosteriorTransform(BotorchTestCase):
def test_init(self):
# Without weights.
tf = ExpectationPosteriorTransform(n_w=5)
self.assertEqual(tf.n_w, 5)
self.assertAllClose(tf.weights, torch.ones(5, 1) * 0.2)
# Errors with weights.
with self.assertRaisesRegex(ValueError, "a tensor of size"):
ExpectationPosteriorTransform(n_w=3, weights=torch.ones(5, 1))
with self.assertRaisesRegex(ValueError, "non-negative"):
ExpectationPosteriorTransform(n_w=3, weights=-torch.ones(3, 1))
# Successful init with weights.
weights = torch.tensor([[1.0, 2.0], [2.0, 4.0], [3.0, 6.0]])
tf = ExpectationPosteriorTransform(n_w=3, weights=weights)
self.assertAllClose(tf.weights, weights / torch.tensor([6.0, 12.0]))
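        # i.e., each column of `weights` (one per outcome) is normalized by its
        # column sum (6.0 and 12.0 here), so the per-outcome weights sum to one.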
def test_evaluate(self):
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
# Without weights.
tf = ExpectationPosteriorTransform(n_w=3)
Y = torch.rand(3, 6, 2, **tkwargs)
self.assertTrue(
torch.allclose(tf.evaluate(Y), Y.view(3, 2, 3, 2).mean(dim=-2))
)
            # With weights - the weights tensor intentionally does not use tkwargs.
weights = torch.tensor([[1.0, 2.0], [2.0, 1.0]])
tf = ExpectationPosteriorTransform(n_w=2, weights=weights)
expected = (Y.view(3, 3, 2, 2) * weights.to(Y)).sum(dim=-2) / 3.0
self.assertAllClose(tf.evaluate(Y), expected)
def test_expectation_posterior_transform(self):
tkwargs = {"dtype": torch.float, "device": self.device}
# Without weights, simple expectation, single output, no batch.
# q = 2, n_w = 3.
org_loc = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], **tkwargs)
org_covar = torch.tensor(
[
[1.0, 0.8, 0.7, 0.3, 0.2, 0.1],
[0.8, 1.0, 0.9, 0.25, 0.15, 0.1],
[0.7, 0.9, 1.0, 0.2, 0.2, 0.05],
[0.3, 0.25, 0.2, 1.0, 0.7, 0.6],
[0.2, 0.15, 0.2, 0.7, 1.0, 0.7],
[0.1, 0.1, 0.05, 0.6, 0.7, 1.0],
],
**tkwargs,
)
org_mvn = MultivariateNormal(org_loc, to_linear_operator(org_covar))
org_post = GPyTorchPosterior(distribution=org_mvn)
tf = ExpectationPosteriorTransform(n_w=3)
tf_post = tf(org_post)
self.assertIsInstance(tf_post, GPyTorchPosterior)
self.assertEqual(tf_post.sample().shape, torch.Size([1, 2, 1]))
tf_mvn = tf_post.distribution
self.assertIsInstance(tf_mvn, MultivariateNormal)
expected_loc = torch.tensor([2.0, 5.0], **tkwargs)
# This is the average of each 3 x 3 block.
expected_covar = torch.tensor([[0.8667, 0.1722], [0.1722, 0.7778]], **tkwargs)
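        # Spot-check of the block average: the top-left 3 x 3 block of org_covar
        # sums to 7.8, and 7.8 / 9 ≈ 0.8667; the off-diagonal block sums to 1.55,
        # and 1.55 / 9 ≈ 0.1722.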
self.assertAllClose(tf_mvn.loc, expected_loc)
self.assertAllClose(tf_mvn.covariance_matrix, expected_covar, atol=1e-3)
# With weights, 2 outputs, batched.
tkwargs = {"dtype": torch.double, "device": self.device}
# q = 2, n_w = 2, m = 2, leading to 8 values for loc and 8x8 cov.
org_loc = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], **tkwargs)
# We have 2 4x4 matrices with 0s as filler. Each block is for one outcome.
# Each 2x2 sub block corresponds to `n_w`.
org_covar = torch.tensor(
[
[1.0, 0.8, 0.3, 0.2, 0.0, 0.0, 0.0, 0.0],
[0.8, 1.4, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0],
[0.3, 0.2, 1.2, 0.5, 0.0, 0.0, 0.0, 0.0],
[0.2, 0.1, 0.5, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.7, 0.4, 0.3],
[0.0, 0.0, 0.0, 0.0, 0.7, 0.8, 0.3, 0.2],
[0.0, 0.0, 0.0, 0.0, 0.4, 0.3, 1.4, 0.5],
[0.0, 0.0, 0.0, 0.0, 0.3, 0.2, 0.5, 1.2],
],
**tkwargs,
)
# Making it batched by adding two more batches, mostly the same.
org_loc = org_loc.repeat(3, 1)
org_loc[1] += 100
org_loc[2] += 1000
org_covar = org_covar.repeat(3, 1, 1)
# Construct the transform with weights.
weights = torch.tensor([[1.0, 3.0], [2.0, 1.0]])
tf = ExpectationPosteriorTransform(n_w=2, weights=weights)
# Construct the posterior.
org_mvn = MultitaskMultivariateNormal(
# The return of mvn.loc and the required input are different.
# We constructed it according to the output of mvn.loc,
# reshaping here to have the required `b x n x t` shape.
org_loc.view(3, 2, 4).transpose(-2, -1),
to_linear_operator(org_covar),
interleaved=True, # To test the error.
)
org_post = GPyTorchPosterior(distribution=org_mvn)
# Error if interleaved.
with self.assertRaisesRegex(UnsupportedError, "interleaved"):
tf(org_post)
# Construct the non-interleaved posterior.
org_mvn = MultitaskMultivariateNormal(
org_loc.view(3, 2, 4).transpose(-2, -1),
to_linear_operator(org_covar),
interleaved=False,
)
org_post = GPyTorchPosterior(distribution=org_mvn)
self.assertTrue(torch.equal(org_mvn.loc, org_loc))
tf_post = tf(org_post)
self.assertIsInstance(tf_post, GPyTorchPosterior)
self.assertEqual(tf_post.sample().shape, torch.Size([1, 3, 2, 2]))
tf_mvn = tf_post.distribution
self.assertIsInstance(tf_mvn, MultitaskMultivariateNormal)
expected_loc = torch.tensor([[1.6667, 3.6667, 5.25, 7.25]], **tkwargs).repeat(
3, 1
)
expected_loc[1] += 100
expected_loc[2] += 1000
# This is the weighted average of each 2 x 2 block.
expected_covar = torch.tensor(
[
[1.0889, 0.1667, 0.0, 0.0],
[0.1667, 0.8, 0.0, 0.0],
[0.0, 0.0, 0.875, 0.35],
[0.0, 0.0, 0.35, 1.05],
],
**tkwargs,
).repeat(3, 1, 1)
self.assertAllClose(tf_mvn.loc, expected_loc, atol=1e-3)
self.assertAllClose(tf_mvn.covariance_matrix, expected_covar, atol=1e-3)
class TestMCAcquisitionObjective(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
MCAcquisitionObjective()
def test_verify_output_shape(self):
obj = IdentityMCObjective()
self.assertTrue(obj._verify_output_shape)
samples = torch.zeros(2, 3, 1)
X = torch.ones(2, 1)
# No error if X is not given.
obj(samples=samples)
# Error if X is given, 2 != 3
with self.assertRaises(RuntimeError):
obj(samples=samples, X=X)
# No error if _verify_output_shape=False
obj._verify_output_shape = False
obj(samples=samples, X=X)
class TestGenericMCObjective(BotorchTestCase):
def test_generic_mc_objective(self):
for dtype in (torch.float, torch.double):
obj = GenericMCObjective(generic_obj)
samples = torch.randn(1, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), generic_obj(samples)))
samples = torch.randn(2, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), generic_obj(samples)))
samples = torch.randn(3, 1, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), generic_obj(samples)))
samples = torch.randn(3, 2, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), generic_obj(samples)))
def test_generic_mc_objective_deprecated(self):
for dtype in (torch.float, torch.double):
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
obj = GenericMCObjective(generic_obj_deprecated)
warning_msg = (
"The `objective` callable of `GenericMCObjective` is expected to "
"take two arguments. Passing a callable that expects a single "
"argument will result in an error in future versions."
)
self.assertTrue(
any(issubclass(w.category, DeprecationWarning) for w in ws)
)
self.assertTrue(any(warning_msg in str(w.message) for w in ws))
samples = torch.randn(1, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), generic_obj(samples)))
samples = torch.randn(2, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), generic_obj(samples)))
samples = torch.randn(3, 1, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), generic_obj(samples)))
samples = torch.randn(3, 2, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), generic_obj(samples)))
class TestConstrainedMCObjective(BotorchTestCase):
def test_constrained_mc_objective(self):
for dtype in (torch.float, torch.double):
# one feasible constraint
obj = ConstrainedMCObjective(
objective=generic_obj, constraints=[feasible_con]
)
samples = torch.randn(1, device=self.device, dtype=dtype)
constrained_obj = apply_constraints(
obj=generic_obj(samples),
constraints=[feasible_con],
samples=samples,
infeasible_cost=0.0,
)
self.assertTrue(torch.equal(obj(samples), constrained_obj))
# one infeasible constraint
obj = ConstrainedMCObjective(
objective=generic_obj, constraints=[infeasible_con]
)
samples = torch.randn(2, device=self.device, dtype=dtype)
constrained_obj = apply_constraints(
obj=generic_obj(samples),
constraints=[infeasible_con],
samples=samples,
infeasible_cost=0.0,
)
self.assertTrue(torch.equal(obj(samples), constrained_obj))
# one feasible, one infeasible
obj = ConstrainedMCObjective(
objective=generic_obj, constraints=[feasible_con, infeasible_con]
)
samples = torch.randn(2, 1, device=self.device, dtype=dtype)
constrained_obj = apply_constraints(
obj=generic_obj(samples),
constraints=[feasible_con, infeasible_con],
samples=samples,
infeasible_cost=torch.tensor([0.0], device=self.device, dtype=dtype),
            )
            self.assertTrue(torch.equal(obj(samples), constrained_obj))
# one feasible, one infeasible different etas
obj = ConstrainedMCObjective(
objective=generic_obj,
constraints=[feasible_con, infeasible_con],
eta=torch.tensor([1, 10]),
)
samples = torch.randn(2, 1, device=self.device, dtype=dtype)
constrained_obj = apply_constraints(
obj=generic_obj(samples),
constraints=[feasible_con, infeasible_con],
samples=samples,
eta=torch.tensor([1, 10]),
infeasible_cost=torch.tensor([0.0], device=self.device, dtype=dtype),
)
self.assertTrue(torch.equal(obj(samples), constrained_obj))
# one feasible, one infeasible, infeasible_cost
obj = ConstrainedMCObjective(
objective=generic_obj,
constraints=[feasible_con, infeasible_con],
infeasible_cost=5.0,
)
samples = torch.randn(3, 2, device=self.device, dtype=dtype)
constrained_obj = apply_constraints(
obj=generic_obj(samples),
constraints=[feasible_con, infeasible_con],
samples=samples,
infeasible_cost=5.0,
)
self.assertTrue(torch.equal(obj(samples), constrained_obj))
# one feasible, one infeasible, infeasible_cost, different eta
obj = ConstrainedMCObjective(
objective=generic_obj,
constraints=[feasible_con, infeasible_con],
infeasible_cost=5.0,
eta=torch.tensor([1, 10]),
)
samples = torch.randn(3, 2, device=self.device, dtype=dtype)
constrained_obj = apply_constraints(
obj=generic_obj(samples),
constraints=[feasible_con, infeasible_con],
samples=samples,
infeasible_cost=5.0,
eta=torch.tensor([1, 10]),
)
self.assertTrue(torch.equal(obj(samples), constrained_obj))
# one feasible, one infeasible, infeasible_cost, higher dimension
obj = ConstrainedMCObjective(
objective=generic_obj,
constraints=[feasible_con, infeasible_con],
infeasible_cost=torch.tensor([5.0], device=self.device, dtype=dtype),
)
samples = torch.randn(4, 3, 2, device=self.device, dtype=dtype)
constrained_obj = apply_constraints(
obj=generic_obj(samples),
constraints=[feasible_con, infeasible_con],
samples=samples,
infeasible_cost=5.0,
)
self.assertTrue(torch.equal(obj(samples), constrained_obj))
class TestIdentityMCObjective(BotorchTestCase):
def test_identity_mc_objective(self):
for dtype in (torch.float, torch.double):
obj = IdentityMCObjective()
# single-element tensor
samples = torch.randn(1, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), samples[0]))
# single-dimensional non-squeezable tensor
samples = torch.randn(2, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), samples))
# two-dimensional squeezable tensor
samples = torch.randn(3, 1, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), samples.squeeze(-1)))
# two-dimensional non-squeezable tensor
samples = torch.randn(3, 2, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(obj(samples), samples))
class TestLinearMCObjective(BotorchTestCase):
def test_linear_mc_objective(self) -> None:
# Test passes for each seed
torch.manual_seed(torch.randint(high=1000, size=(1,)))
for dtype in (torch.float, torch.double):
weights = torch.rand(3, device=self.device, dtype=dtype)
obj = LinearMCObjective(weights=weights)
samples = torch.randn(4, 2, 3, device=self.device, dtype=dtype)
atol = 1e-8 if dtype == torch.double else 3e-8
rtol = 1e-5 if dtype == torch.double else 4e-5
self.assertAllClose(obj(samples), samples @ weights, atol=atol, rtol=rtol)
samples = torch.randn(5, 4, 2, 3, device=self.device, dtype=dtype)
self.assertAllClose(
obj(samples),
samples @ weights,
atol=atol,
rtol=rtol,
)
# make sure this errors if sample output dimensions are incompatible
shape_mismatch_msg = "Output shape of samples not equal to that of weights"
with self.assertRaisesRegex(RuntimeError, shape_mismatch_msg):
obj(samples=torch.randn(2, device=self.device, dtype=dtype))
with self.assertRaisesRegex(RuntimeError, shape_mismatch_msg):
obj(samples=torch.randn(1, device=self.device, dtype=dtype))
# make sure we can't construct objectives with multi-dim. weights
weights_1d_msg = "weights must be a one-dimensional tensor."
with self.assertRaisesRegex(ValueError, expected_regex=weights_1d_msg):
LinearMCObjective(
weights=torch.rand(2, 3, device=self.device, dtype=dtype)
)
with self.assertRaisesRegex(ValueError, expected_regex=weights_1d_msg):
LinearMCObjective(
weights=torch.tensor(1.0, device=self.device, dtype=dtype)
)
class TestLearnedObjective(BotorchTestCase):
def setUp(self, suppress_input_warnings: bool = False) -> None:
super().setUp(suppress_input_warnings=suppress_input_warnings)
self.x_dim = 2
def _get_pref_model(
self,
dtype: Optional[torch.dtype] = None,
input_transform: Optional[Normalize] = None,
) -> PairwiseGP:
train_X = torch.rand((2, self.x_dim), dtype=dtype)
train_comps = torch.LongTensor([[0, 1]])
pref_model = PairwiseGP(train_X, train_comps, input_transform=input_transform)
return pref_model
def test_learned_preference_objective(self) -> None:
pref_model = self._get_pref_model(dtype=torch.float64)
og_sample_shape = 3
batch_size = 2
n = 8
test_X = torch.rand(
torch.Size((og_sample_shape, batch_size, n, self.x_dim)),
dtype=torch.float64,
)
# test default setting where sampler =
# IIDNormalSampler(sample_shape=torch.Size([1]))
with self.subTest("default sampler"):
pref_obj = LearnedObjective(pref_model=pref_model)
first_call_output = pref_obj(test_X)
self.assertEqual(
first_call_output.shape, torch.Size([og_sample_shape, batch_size, n])
)
# test when sampler has num_samples = 16
with self.subTest("SobolQMCNormalSampler"):
num_samples = 16
pref_obj = LearnedObjective(
pref_model=pref_model,
sampler=SobolQMCNormalSampler(sample_shape=torch.Size([num_samples])),
)
self.assertEqual(
pref_obj(test_X).shape,
torch.Size([num_samples * og_sample_shape, batch_size, n]),
)
# test posterior mean
with self.subTest("PosteriorMeanModel"):
mean_pref_model = PosteriorMeanModel(model=pref_model)
pref_obj = LearnedObjective(pref_model=mean_pref_model)
self.assertEqual(
pref_obj(test_X).shape, torch.Size([og_sample_shape, batch_size, n])
)
# cannot use a deterministic model together with a sampler
with self.subTest("deterministic model"), self.assertRaises(AssertionError):
LearnedObjective(
pref_model=mean_pref_model,
sampler=SobolQMCNormalSampler(sample_shape=torch.Size([num_samples])),
)
def test_dtype_compatibility_with_PairwiseGP(self) -> None:
og_sample_shape = 3
batch_size = 2
n = 8
test_X = torch.rand(
torch.Size((og_sample_shape, batch_size, n, self.x_dim)),
)
for pref_model_dtype, test_x_dtype, expected_output_dtype in [
(torch.float64, torch.float64, torch.float64),
(torch.float32, torch.float32, torch.float32),
(torch.float64, torch.float32, torch.float64),
]:
with self.subTest(
"numerical behavior",
pref_model_dtype=pref_model_dtype,
test_x_dtype=test_x_dtype,
expected_output_dtype=expected_output_dtype,
):
# Ignore a single-precision warning in PairwiseGP
# and mixed-precision warning tested below
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=InputDataWarning,
message=_get_single_precision_warning(str(torch.float32)),
)
pref_model = self._get_pref_model(
dtype=pref_model_dtype,
input_transform=Normalize(d=2),
)
pref_obj = LearnedObjective(pref_model=pref_model)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=InputDataWarning,
message=LEARNED_OBJECTIVE_PREF_MODEL_MIXED_DTYPE_WARN,
)
first_call_output = pref_obj(test_X.to(dtype=test_x_dtype))
second_call_output = pref_obj(test_X.to(dtype=test_x_dtype))
self.assertEqual(first_call_output.dtype, expected_output_dtype)
self.assertTrue(torch.equal(first_call_output, second_call_output))
with self.subTest("mixed precision warning"):
# should warn and test should pass
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=InputDataWarning)
pref_model = self._get_pref_model(
dtype=torch.float64, input_transform=Normalize(d=2)
)
pref_obj = LearnedObjective(pref_model=pref_model)
with self.assertWarnsRegex(
InputDataWarning, LEARNED_OBJECTIVE_PREF_MODEL_MIXED_DTYPE_WARN
):
first_call_output = pref_obj(test_X)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.acquisition.penalized import (
GaussianPenalty,
group_lasso_regularizer,
GroupLassoPenalty,
L0Approximation,
L0PenaltyApprox,
L0PenaltyApproxObjective,
L1Penalty,
L1PenaltyObjective,
L2Penalty,
PenalizedAcquisitionFunction,
PenalizedMCObjective,
)
from botorch.exceptions import UnsupportedError
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from torch import Tensor
def generic_obj(samples: Tensor, X=None) -> Tensor:
return torch.log(torch.sum(samples**2, dim=-1))
class TestL2Penalty(BotorchTestCase):
    def test_l2_penalty(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
init_point = torch.tensor([1.0, 1.0, 1.0], **tkwargs)
l2_module = L2Penalty(init_point=init_point)
# testing a batch of two points
sample_point = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], **tkwargs)
diff_norm_squared = (
torch.linalg.norm((sample_point - init_point), ord=2, dim=-1) ** 2
)
real_value = diff_norm_squared.max(dim=-1).values
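            # worked numbers: squared L2 distances are 0 + 1 + 4 = 5 and
            # 1 + 4 + 9 = 14; the penalty takes the max, 14.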
computed_value = l2_module(sample_point)
self.assertEqual(computed_value.item(), real_value.item())
class TestL1Penalty(BotorchTestCase):
def test_l1_penalty(self):
for dtype in (torch.float, torch.double):
init_point = torch.tensor([1.0, 1.0, 1.0], device=self.device, dtype=dtype)
l1_module = L1Penalty(init_point=init_point)
# testing a batch of two points
sample_point = torch.tensor(
[[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], device=self.device, dtype=dtype
)
diff_l1_norm = torch.linalg.norm((sample_point - init_point), ord=1, dim=-1)
real_value = diff_l1_norm.max(dim=-1).values
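            # worked numbers: L1 distances are 0 + 1 + 2 = 3 and 1 + 2 + 3 = 6;
            # the penalty takes the max, 6.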
computed_value = l1_module(sample_point)
self.assertEqual(computed_value.item(), real_value.item())
class TestGaussianPenalty(BotorchTestCase):
def test_gaussian_penalty(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
init_point = torch.tensor([1.0, 1.0, 1.0], **tkwargs)
sigma = 0.1
gaussian_module = GaussianPenalty(init_point=init_point, sigma=sigma)
# testing a batch of two points
sample_point = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], **tkwargs)
diff_norm_squared = (
torch.linalg.norm((sample_point - init_point), ord=2, dim=-1) ** 2
)
max_l2_distance = diff_norm_squared.max(dim=-1).values
real_value = torch.exp(max_l2_distance / 2 / sigma**2)
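            # worked numbers: the max squared L2 distance is 14 (as in the L2 test),
            # so the reference value is exp(14 / (2 * 0.01)) = exp(700), finite in
            # float64 but inf in float32; both sides are computed identically, so
            # the equality check holds either way.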
computed_value = gaussian_module(sample_point)
self.assertEqual(computed_value.item(), real_value.item())
class TestGroupLassoPenalty(BotorchTestCase):
def test_group_lasso_penalty(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
init_point = torch.tensor([0.5, 0.5, 0.5], **tkwargs)
groups = [[0, 2], [1]]
group_lasso_module = GroupLassoPenalty(init_point=init_point, groups=groups)
# testing a single point
sample_point = torch.tensor([[1.0, 2.0, 3.0]], **tkwargs)
real_value = group_lasso_regularizer(
sample_point - init_point, groups
) # torch.tensor([5.105551242828369], **tkwargs)
computed_value = group_lasso_module(sample_point)
self.assertEqual(computed_value.item(), real_value.item())
# testing unsupported input dim: X.shape[-2] > 1
sample_point_2 = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], **tkwargs)
with self.assertRaises(NotImplementedError):
group_lasso_module(sample_point_2)
class TestL0Approximation(BotorchTestCase):
def test_L0Approximation(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
target_point = torch.zeros(2, **tkwargs)
# test init
l0 = L0Approximation(target_point=target_point, **tkwargs)
self.assertTrue(torch.equal(l0.target_point, target_point))
self.assertAllClose(l0.a.data, torch.tensor(1.0, **tkwargs))
# verify L0 norm
self.assertTrue(
torch.equal(
l0(torch.zeros(2, **tkwargs)).data, torch.tensor([0], **tkwargs)
)
)
# check two-dim input tensors X
self.assertTrue(
torch.equal(
l0(torch.zeros(3, 2, **tkwargs)).data, torch.zeros(3, 1, **tkwargs)
)
)
# test raise when X and target_point have mismatched shape
with self.assertRaises(ValueError):
l0(torch.zeros(3, **tkwargs))
# test init with different a
l0 = L0Approximation(target_point=target_point, a=2.0, **tkwargs)
self.assertAllClose(l0.a.data, torch.tensor(2.0, **tkwargs))
self.assertAllClose(
l0(torch.ones(2, **tkwargs)).data,
torch.tensor([0.2350], **tkwargs),
rtol=1e-04,
)
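            # These reference values are consistent with a smoothed L0 of the form
            # sum_i (1 - exp(-(x_i - t_i)^2 / (2 * a^2))) (an assumption about the
            # implementation): with a = 2, 2 * (1 - exp(-1 / 8)) ≈ 0.2350, and with
            # a = 0.5 (set below), 2 * (1 - exp(-2)) ≈ 1.7293.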
# reset a
l0.a.data.fill_(0.5)
self.assertTrue(torch.equal(l0.a.data, torch.tensor(0.5, **tkwargs)))
self.assertAllClose(
l0(torch.ones(2, **tkwargs)).data,
torch.tensor([1.7293], **tkwargs),
rtol=1e-04,
)
def test_L0PenaltyApproxObjective(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
target_point = torch.zeros(2, **tkwargs)
# test init
l0_obj = L0PenaltyApproxObjective(target_point=target_point, **tkwargs)
self.assertTrue(torch.equal(l0_obj.target_point, target_point))
self.assertAllClose(l0_obj.a.data, torch.tensor(1.0, **tkwargs))
# check two-dim input tensors X
self.assertTrue(
torch.equal(
l0_obj(torch.zeros(3, 2, **tkwargs)).data,
torch.zeros(1, 3, **tkwargs),
)
)
# check "batch_shape x q x dim" input tensors X
batch_shape = 16
self.assertTrue(
torch.equal(
l0_obj(torch.zeros(batch_shape, 3, 2, **tkwargs)).data,
torch.zeros(1, batch_shape, 3, **tkwargs),
)
)
def test_L0PenaltyApprox(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
target_point = torch.zeros(2, **tkwargs)
# test init
l0_acqf = L0PenaltyApprox(target_point=target_point, **tkwargs)
self.assertTrue(torch.equal(l0_acqf.target_point, target_point))
self.assertAllClose(l0_acqf.a.data, torch.tensor(1.0, **tkwargs))
# check two-dim input tensors X
self.assertTrue(
torch.equal(
l0_acqf(torch.zeros(3, 2, **tkwargs)).data,
torch.tensor(0, **tkwargs),
)
)
# check "batch_shape x q x dim" input tensors X
batch_shape = 16
self.assertTrue(
torch.equal(
l0_acqf(torch.zeros(batch_shape, 3, 2, **tkwargs)).data,
torch.zeros(batch_shape, **tkwargs),
)
)
class TestPenalizedAcquisitionFunction(BotorchTestCase):
def test_penalized_acquisition_function(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
mock_model = MockModel(
MockPosterior(
mean=torch.tensor([[1.0]], **tkwargs),
variance=torch.tensor([[1.0]], **tkwargs),
)
)
init_point = torch.tensor([0.5, 0.5, 0.5], **tkwargs)
groups = [[0, 2], [1]]
raw_acqf = ExpectedImprovement(model=mock_model, best_f=1.0)
penalty = GroupLassoPenalty(init_point=init_point, groups=groups)
lmbda = 0.1
acqf = PenalizedAcquisitionFunction(
raw_acqf=raw_acqf, penalty_func=penalty, regularization_parameter=lmbda
)
sample_point = torch.tensor([[1.0, 2.0, 3.0]], **tkwargs)
raw_value = raw_acqf(sample_point)
penalty_value = penalty(sample_point)
real_value = raw_value - lmbda * penalty_value
computed_value = acqf(sample_point)
self.assertTrue(torch.equal(real_value, computed_value))
# testing X_pending for analytic raw_acqfn (EI)
X_pending = torch.tensor([0.1, 0.2, 0.3], **tkwargs)
with self.assertRaises(UnsupportedError):
acqf.set_X_pending(X_pending)
            # testing X_pending for non-analytic raw_acqfn (qEI)
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
raw_acqf_2 = qExpectedImprovement(
model=mock_model, best_f=0, sampler=sampler
)
init_point = torch.tensor([1.0, 1.0, 1.0], **tkwargs)
l2_module = L2Penalty(init_point=init_point)
acqf_2 = PenalizedAcquisitionFunction(
raw_acqf=raw_acqf_2,
penalty_func=l2_module,
regularization_parameter=lmbda,
)
X_pending = torch.tensor([0.1, 0.2, 0.3], **tkwargs)
acqf_2.set_X_pending(X_pending)
self.assertTrue(torch.equal(acqf_2.X_pending, X_pending))
class TestL1PenaltyObjective(BotorchTestCase):
def test_l1_penalty(self):
for dtype in (torch.float, torch.double):
init_point = torch.tensor([1.0, 1.0, 1.0], device=self.device, dtype=dtype)
l1_module = L1PenaltyObjective(init_point=init_point)
# testing a batch of two points
sample_point = torch.tensor(
[[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], device=self.device, dtype=dtype
)
real_values = torch.linalg.norm(
(sample_point - init_point), ord=1, dim=-1
).unsqueeze(dim=0)
computed_values = l1_module(sample_point)
self.assertTrue(torch.equal(real_values, computed_values))
class TestPenalizedMCObjective(BotorchTestCase):
def test_penalized_mc_objective(self):
for dtype in (torch.float, torch.double):
init_point = torch.tensor(
[0.0, 0.0, 0.0, 0.0, 0.0], device=self.device, dtype=dtype
)
l1_penalty_obj = L1PenaltyObjective(init_point=init_point)
obj = PenalizedMCObjective(
objective=generic_obj,
penalty_objective=l1_penalty_obj,
regularization_parameter=0.1,
)
# test self.expand_dim
self.assertIsNone(obj.expand_dim)
# test 'd' Tensor X
samples = torch.randn(4, 3, device=self.device, dtype=dtype)
X = torch.randn(4, 5, device=self.device, dtype=dtype)
penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X)
self.assertTrue(torch.equal(obj(samples, X), penalized_obj))
# test 'q x d' Tensor X
samples = torch.randn(4, 2, 3, device=self.device, dtype=dtype)
X = torch.randn(2, 5, device=self.device, dtype=dtype)
penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X)
self.assertTrue(torch.equal(obj(samples, X), penalized_obj))
# test 'batch-shape x q x d' Tensor X
samples = torch.randn(4, 3, 2, 3, device=self.device, dtype=dtype)
X = torch.randn(3, 2, 5, device=self.device, dtype=dtype)
penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X)
self.assertTrue(torch.equal(obj(samples, X), penalized_obj))
# test passing expand_dim
expand_dim = -2
obj2 = PenalizedMCObjective(
objective=generic_obj,
penalty_objective=l1_penalty_obj,
regularization_parameter=0.1,
expand_dim=expand_dim,
)
self.assertEqual(obj2.expand_dim, -2)
# test 'd' Tensor X
mcmc_samples = 8
# MCMC_dim = -3
samples = torch.randn(mcmc_samples, 4, 3, device=self.device, dtype=dtype)
X = torch.randn(4, 5, device=self.device, dtype=dtype)
penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X).unsqueeze(
expand_dim
)
self.assertTrue(torch.equal(obj2(samples, X), penalized_obj))
# test 'q x d' Tensor X
# MCMC_dim = -3
samples = torch.randn(
4, mcmc_samples, 2, 3, device=self.device, dtype=dtype
)
X = torch.randn(2, 5, device=self.device, dtype=dtype)
penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X).unsqueeze(
expand_dim
)
self.assertTrue(torch.equal(obj2(samples, X), penalized_obj))
# test 'batch-shape x q x d' Tensor X
# MCMC_dim = -3
samples = torch.randn(
4, 3, mcmc_samples, 2, 3, device=self.device, dtype=dtype
)
X = torch.randn(3, 2, 5, device=self.device, dtype=dtype)
penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X).unsqueeze(
expand_dim
)
self.assertTrue(torch.equal(obj2(samples, X), penalized_obj))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import ExitStack
from unittest import mock
import torch
from botorch.acquisition.analytic import PosteriorMean, ScalarizedPosteriorMean
from botorch.acquisition.cost_aware import GenericCostAwareUtility
from botorch.acquisition.knowledge_gradient import (
_get_value_function,
_split_fantasy_points,
ProjectedAcquisitionFunction,
qKnowledgeGradient,
qMultiFidelityKnowledgeGradient,
)
from botorch.acquisition.monte_carlo import qExpectedImprovement, qSimpleRegret
from botorch.acquisition.objective import (
GenericMCObjective,
ScalarizedPosteriorTransform,
)
from botorch.acquisition.utils import project_to_sample_points
from botorch.exceptions.errors import UnsupportedError
from botorch.generation.gen import gen_candidates_scipy
from botorch.models import SingleTaskGP
from botorch.optim.optimize import optimize_acqf
from botorch.optim.utils import _filter_kwargs
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultitaskMultivariateNormal
from .test_monte_carlo import DummyNonScalarizingPosteriorTransform
NO = "botorch.utils.testing.MockModel.num_outputs"
def mock_util(X, deltas):
return 0.5 * deltas.sum(dim=0)
class TestQKnowledgeGradient(BotorchTestCase):
def test_initialize_q_knowledge_gradient(self):
for dtype in (torch.float, torch.double):
mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean))
            # test error when specifying neither a sampler nor num_fantasies
with self.assertRaises(ValueError):
qKnowledgeGradient(model=mm, num_fantasies=None)
# test error when sampler and num_fantasies arg are inconsistent
sampler = IIDNormalSampler(sample_shape=torch.Size([16]))
with self.assertRaises(ValueError):
qKnowledgeGradient(model=mm, num_fantasies=32, sampler=sampler)
# test default construction
qKG = qKnowledgeGradient(model=mm, num_fantasies=32)
self.assertEqual(qKG.num_fantasies, 32)
self.assertIsInstance(qKG.sampler, SobolQMCNormalSampler)
self.assertEqual(qKG.sampler.sample_shape, torch.Size([32]))
self.assertIsNone(qKG.objective)
self.assertIsNone(qKG.inner_sampler)
self.assertIsNone(qKG.X_pending)
self.assertIsNone(qKG.current_value)
self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 32 + 3)
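            # The augmented q-batch appends one point per fantasy sample, so its
            # size is q + num_fantasies (3 + 32 here).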
# test custom construction
obj = GenericMCObjective(lambda Y, X: Y.mean(dim=-1))
sampler = IIDNormalSampler(sample_shape=torch.Size([16]))
X_pending = torch.zeros(2, 2, device=self.device, dtype=dtype)
qKG = qKnowledgeGradient(
model=mm,
num_fantasies=16,
sampler=sampler,
objective=obj,
X_pending=X_pending,
)
self.assertEqual(qKG.num_fantasies, 16)
self.assertEqual(qKG.sampler, sampler)
self.assertEqual(qKG.sampler.sample_shape, torch.Size([16]))
self.assertEqual(qKG.objective, obj)
self.assertIsInstance(qKG.inner_sampler, SobolQMCNormalSampler)
self.assertEqual(qKG.inner_sampler.sample_shape, torch.Size([128]))
self.assertTrue(torch.equal(qKG.X_pending, X_pending))
self.assertIsNone(qKG.current_value)
self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 16 + 3)
# test assignment of num_fantasies from sampler if not provided
qKG = qKnowledgeGradient(model=mm, num_fantasies=None, sampler=sampler)
self.assertEqual(qKG.sampler.sample_shape, torch.Size([16]))
# test custom construction with inner sampler and current value
inner_sampler = SobolQMCNormalSampler(sample_shape=torch.Size([256]))
current_value = torch.zeros(1, device=self.device, dtype=dtype)
qKG = qKnowledgeGradient(
model=mm,
num_fantasies=8,
objective=obj,
inner_sampler=inner_sampler,
current_value=current_value,
)
self.assertEqual(qKG.num_fantasies, 8)
self.assertEqual(qKG.sampler.sample_shape, torch.Size([8]))
self.assertEqual(qKG.objective, obj)
self.assertIsInstance(qKG.inner_sampler, SobolQMCNormalSampler)
self.assertEqual(qKG.inner_sampler, inner_sampler)
self.assertIsNone(qKG.X_pending)
self.assertTrue(torch.equal(qKG.current_value, current_value))
self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 8 + 3)
# test construction with posterior_transform
qKG_s = qKnowledgeGradient(
model=mm,
num_fantasies=16,
sampler=sampler,
posterior_transform=ScalarizedPosteriorTransform(weights=torch.rand(2)),
)
self.assertIsNone(qKG_s.inner_sampler)
self.assertIsInstance(
qKG_s.posterior_transform, ScalarizedPosteriorTransform
)
# test error if multi-output model and no objective or posterior transform
mean2 = torch.zeros(1, 2, device=self.device, dtype=dtype)
mm2 = MockModel(MockPosterior(mean=mean2))
with self.assertRaises(UnsupportedError):
qKnowledgeGradient(model=mm2)
            # test error if multi-output model, no objective, and a posterior
            # transform that does not scalarize
with self.assertRaises(UnsupportedError):
qKnowledgeGradient(
model=mm2,
posterior_transform=DummyNonScalarizingPosteriorTransform(),
)
with self.assertRaisesRegex(
UnsupportedError,
"Objectives that are not an `MCAcquisitionObjective` are not "
"supported.",
):
qKnowledgeGradient(model=mm, objective="car")
def test_evaluate_q_knowledge_gradient(self):
# Stop gap measure to avoid test failures on Ampere devices
# TODO: Find an elegant way of disallowing tf32 for botorch/gpytorch
# without blanket-disallowing it for all of torch.
torch.backends.cuda.matmul.allow_tf32 = False
for dtype in (torch.float, torch.double):
# basic test
n_f = 4
mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qKG = qKnowledgeGradient(model=mm, num_fantasies=n_f)
X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
val = qKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
self.assertAllClose(val, mean.mean(), atol=1e-4)
self.assertTrue(torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :]))
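            # extract_candidates drops the trailing n_f rows of X, which hold the
            # fantasy points of the augmented q-batch, leaving the actual candidates.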
# batched evaluation
b = 2
mean = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
variance = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
X = torch.rand(b, n_f + 1, 1, device=self.device, dtype=dtype)
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qKG = qKnowledgeGradient(model=mm, num_fantasies=n_f)
val = qKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1]))
self.assertTrue(
torch.allclose(val, mean.mean(dim=0).squeeze(-1), atol=1e-4)
)
self.assertTrue(torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :]))
# pending points and current value
X_pending = torch.rand(2, 1, device=self.device, dtype=dtype)
mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
current_value = torch.rand(1, device=self.device, dtype=dtype)
X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qKG = qKnowledgeGradient(
model=mm,
num_fantasies=n_f,
X_pending=X_pending,
current_value=current_value,
)
val = qKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1]))
expected = (mean.mean() - current_value).reshape([])
self.assertAllClose(val, expected, atol=1e-4)
self.assertTrue(torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :]))
# test objective (inner MC sampling)
objective = GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1))
samples = torch.randn(3, 1, 1, device=self.device, dtype=dtype)
mfm = MockModel(MockPosterior(samples=samples))
X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qKG = qKnowledgeGradient(
model=mm, num_fantasies=n_f, objective=objective
)
val = qKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
self.assertAllClose(val, objective(samples).mean(), atol=1e-4)
self.assertTrue(torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :]))
# test scalarized posterior transform
weights = torch.rand(2, device=self.device, dtype=dtype)
post_tf = ScalarizedPosteriorTransform(weights=weights)
mean = torch.tensor([1.0, 0.5], device=self.device, dtype=dtype).expand(
n_f, 1, 2
)
cov = torch.tensor(
[[1.0, 0.1], [0.1, 0.5]], device=self.device, dtype=dtype
).expand(n_f, 2, 2)
posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
mfm = MockModel(posterior)
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
mm = MockModel(None)
qKG = qKnowledgeGradient(
model=mm, num_fantasies=n_f, posterior_transform=post_tf
)
val = qKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
val_expected = (mean * weights).sum(-1).mean(0)[0]
self.assertAllClose(val, val_expected)
def test_evaluate_kg(self):
# a thorough test using real model and dtype double
d = 2
dtype = torch.double
bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype).repeat(1, d)
train_X = torch.rand(3, d, device=self.device, dtype=dtype)
train_Y = torch.rand(3, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X, train_Y)
qKG = qKnowledgeGradient(
model=model,
num_fantasies=2,
objective=None,
X_pending=torch.rand(2, d, device=self.device, dtype=dtype),
current_value=torch.rand(1, device=self.device, dtype=dtype),
)
X = torch.rand(4, 3, d, device=self.device, dtype=dtype)
options = {"num_inner_restarts": 2, "raw_inner_samples": 3}
val = qKG.evaluate(
X, bounds=bounds, num_restarts=2, raw_samples=3, options=options
)
# verify output shape
self.assertEqual(val.size(), torch.Size([4]))
# verify dtype
self.assertEqual(val.dtype, dtype)
# test i) no dimension is squeezed out, ii) dtype float, iii) MC objective,
# and iv) t_batch_mode_transform
dtype = torch.float
bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype)
train_X = torch.rand(1, 1, device=self.device, dtype=dtype)
train_Y = torch.rand(1, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X, train_Y)
qKG = qKnowledgeGradient(
model=model,
num_fantasies=1,
objective=GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1)),
)
X = torch.rand(1, 1, device=self.device, dtype=dtype)
options = {"num_inner_restarts": 1, "raw_inner_samples": 1}
val = qKG.evaluate(
X, bounds=bounds, num_restarts=1, raw_samples=1, options=options
)
# verify output shape
self.assertEqual(val.size(), torch.Size([1]))
# verify dtype
self.assertEqual(val.dtype, dtype)
class TestQMultiFidelityKnowledgeGradient(BotorchTestCase):
def test_initialize_qMFKG(self):
for dtype in (torch.float, torch.double):
mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean))
# test error when not specifying current_value
with self.assertRaises(UnsupportedError):
qMultiFidelityKnowledgeGradient(
model=mm, num_fantasies=None, cost_aware_utility=mock.Mock()
)
# test default construction
mock_cau = mock.Mock()
current_value = torch.zeros(1, device=self.device, dtype=dtype)
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=32,
current_value=current_value,
cost_aware_utility=mock_cau,
)
self.assertEqual(qMFKG.num_fantasies, 32)
self.assertIsInstance(qMFKG.sampler, SobolQMCNormalSampler)
self.assertEqual(qMFKG.sampler.sample_shape, torch.Size([32]))
self.assertIsNone(qMFKG.objective)
self.assertIsNone(qMFKG.inner_sampler)
self.assertIsNone(qMFKG.X_pending)
self.assertEqual(qMFKG.get_augmented_q_batch_size(q=3), 32 + 3)
self.assertEqual(qMFKG.cost_aware_utility, mock_cau)
self.assertTrue(torch.equal(qMFKG.current_value, current_value))
self.assertIsNone(qMFKG._cost_sampler)
X = torch.rand(2, 3, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(qMFKG.project(X), X))
self.assertTrue(torch.equal(qMFKG.expand(X), X))
self.assertIsNone(qMFKG.valfunc_cls)
self.assertIsNone(qMFKG.valfunc_argfac)
# make sure cost sampling logic works
self.assertIsInstance(qMFKG.cost_sampler, SobolQMCNormalSampler)
self.assertEqual(qMFKG.cost_sampler.sample_shape, torch.Size([32]))
def test_evaluate_qMFKG(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
# basic test
n_f = 4
current_value = torch.rand(1, **tkwargs)
cau = GenericCostAwareUtility(mock_util)
mean = torch.rand(n_f, 1, 1, **tkwargs)
variance = torch.rand(n_f, 1, 1, **tkwargs)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=n_f,
current_value=current_value,
cost_aware_utility=cau,
)
X = torch.rand(n_f + 1, 1, **tkwargs)
val = qMFKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
self.assertAllClose(val, val_exp, atol=1e-4)
self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
# batched evaluation
b = 2
current_value = torch.rand(b, **tkwargs)
cau = GenericCostAwareUtility(mock_util)
mean = torch.rand(n_f, b, 1, **tkwargs)
variance = torch.rand(n_f, b, 1, **tkwargs)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
X = torch.rand(b, n_f + 1, 1, **tkwargs)
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=n_f,
current_value=current_value,
cost_aware_utility=cau,
)
val = qMFKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1]))
val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
self.assertAllClose(val, val_exp, atol=1e-4)
self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
# pending points and current value
mean = torch.rand(n_f, 1, 1, **tkwargs)
variance = torch.rand(n_f, 1, 1, **tkwargs)
X_pending = torch.rand(2, 1, **tkwargs)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
current_value = torch.rand(1, **tkwargs)
X = torch.rand(n_f + 1, 1, **tkwargs)
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=n_f,
X_pending=X_pending,
current_value=current_value,
cost_aware_utility=cau,
)
val = qMFKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1]))
val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
self.assertAllClose(val, val_exp, atol=1e-4)
self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
# test objective (inner MC sampling)
objective = GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1))
samples = torch.randn(3, 1, 1, **tkwargs)
mfm = MockModel(MockPosterior(samples=samples))
X = torch.rand(n_f + 1, 1, **tkwargs)
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=n_f,
objective=objective,
current_value=current_value,
cost_aware_utility=cau,
)
val = qMFKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
val_exp = mock_util(X, objective(samples) - current_value).mean(dim=0)
self.assertAllClose(val, val_exp, atol=1e-4)
self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
# test valfunc_cls and valfunc_argfac
d, p, d_prime = 4, 3, 2
samples = torch.ones(3, 1, 1, **tkwargs)
mean = torch.tensor([[0.25], [0.5], [0.75]], **tkwargs).expand(
n_f, 1, -1, -1
)
weights = torch.tensor([0.5, 1.0, 1.0], **tkwargs)
mfm = MockModel(MockPosterior(mean=mean, samples=samples))
X = torch.rand(n_f * d + d, d, **tkwargs)
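# X holds n_f * d + d = 20 rows: 16 actual points plus n_f = 4 fantasy points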
sample_points = torch.rand(p, d_prime, **tkwargs)
with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=n_f,
project=lambda X: project_to_sample_points(X, sample_points),
valfunc_cls=ScalarizedPosteriorMean,
valfunc_argfac=lambda model: {"weights": weights},
)
val = qMFKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4]))
val_exp = torch.tensor([1.375], **tkwargs)
self.assertAllClose(val, val_exp, atol=1e-4)
patch_f.reset_mock()
# Make posterior sample shape agree with X
mfm._posterior._samples = torch.ones(1, 3, 1, **tkwargs)
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=n_f,
project=lambda X: project_to_sample_points(X, sample_points),
valfunc_cls=qExpectedImprovement,
valfunc_argfac=lambda model: {"best_f": 0.0},
)
val = qMFKG(X)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4]))
val_exp = torch.tensor(1.0, device=self.device, dtype=dtype)
self.assertAllClose(val, val_exp, atol=1e-4)
def test_fixed_evaluation_qMFKG(self):
# mock test qMFKG.evaluate() with expand, project & cost aware utility
for dtype in (torch.float, torch.double):
mean = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean))
cau = GenericCostAwareUtility(mock_util)
n_f = 4
mean = torch.rand(n_f, 2, 1, 1, device=self.device, dtype=dtype)
variance = torch.rand(n_f, 2, 1, 1, device=self.device, dtype=dtype)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
with ExitStack() as es:
patch_f = es.enter_context(
mock.patch.object(MockModel, "fantasize", return_value=mfm)
)
mock_num_outputs = es.enter_context(
mock.patch(NO, new_callable=mock.PropertyMock)
)
es.enter_context(
mock.patch(
"botorch.optim.optimize.optimize_acqf",
return_value=(
torch.ones(1, 1, 1, device=self.device, dtype=dtype),
torch.ones(1, device=self.device, dtype=dtype),
),
),
)
es.enter_context(
mock.patch(
"botorch.generation.gen.gen_candidates_scipy",
return_value=(
torch.ones(1, 1, 1, device=self.device, dtype=dtype),
torch.ones(1, device=self.device, dtype=dtype),
),
),
)
mock_num_outputs.return_value = 1
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=n_f,
X_pending=torch.rand(1, 1, 1, device=self.device, dtype=dtype),
current_value=torch.zeros(1, device=self.device, dtype=dtype),
cost_aware_utility=cau,
project=lambda X: torch.zeros_like(X),
expand=lambda X: torch.ones_like(X),
)
val = qMFKG.evaluate(
X=torch.zeros(1, 1, 1, device=self.device, dtype=dtype),
bounds=torch.tensor(
[[0.0], [1.0]], device=self.device, dtype=dtype
),
num_restarts=1,
raw_samples=1,
)
patch_f.assert_called_once()
cargs, ckwargs = patch_f.call_args
self.assertTrue(
torch.equal(
ckwargs["X"],
torch.ones(1, 2, 1, device=self.device, dtype=dtype),
)
)
self.assertEqual(
val, cau(None, torch.ones(1, device=self.device, dtype=dtype))
)
# test with defaults - should see no errors
qMFKG = qMultiFidelityKnowledgeGradient(
model=mm,
num_fantasies=n_f,
)
qMFKG.evaluate(
X=torch.zeros(1, 1, 1, device=self.device, dtype=dtype),
bounds=torch.tensor(
[[0.0], [1.0]], device=self.device, dtype=dtype
),
num_restarts=1,
raw_samples=1,
)
def test_optimize_w_posterior_transform(self):
# This is mainly testing that we can optimize without errors.
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
mean = torch.tensor([1.0, 0.5], **tkwargs).expand(2, 1, 2)
cov = torch.tensor([[1.0, 0.1], [0.1, 0.5]], **tkwargs).expand(2, 2, 2)
posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
model = MockModel(posterior)
n_f = 4
mean = torch.tensor([1.0, 0.5], **tkwargs).expand(n_f, 2, 1, 2)
cov = torch.tensor([[1.0, 0.1], [0.1, 0.5]], **tkwargs).expand(n_f, 2, 2, 2)
posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
mfm = MockModel(posterior)
bounds = torch.zeros(2, 2, **tkwargs)
bounds[1] = 1
options = {"num_inner_restarts": 2, "raw_inner_samples": 2}
with mock.patch.object(MockModel, "fantasize", return_value=mfm):
kg = qMultiFidelityKnowledgeGradient(
model=model,
num_fantasies=n_f,
posterior_transform=ScalarizedPosteriorTransform(
weights=torch.rand(2, **tkwargs)
),
)
# Mocking this to get around grad issues.
with mock.patch(
f"{optimize_acqf.__module__}.gen_candidates_scipy",
return_value=(
torch.zeros(2, n_f + 1, 2, **tkwargs),
torch.zeros(2, **tkwargs),
),
), mock.patch(
f"{optimize_acqf.__module__}._filter_kwargs",
wraps=lambda f, **kwargs: _filter_kwargs(
function=gen_candidates_scipy, **kwargs
),
):
candidate, value = optimize_acqf(
acq_function=kg,
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=2,
options=options,
)
self.assertTrue(torch.equal(candidate, torch.zeros(1, 2, **tkwargs)))
class TestKGUtils(BotorchTestCase):
def test_get_value_function(self):
with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(None)
# test PosteriorMean
vf = _get_value_function(mm)
# test initialization
self.assertIn("model", vf._modules)
self.assertEqual(vf._modules["model"], mm)
self.assertIsInstance(vf, PosteriorMean)
self.assertIsNone(vf.posterior_transform)
# test SimpleRegret
obj = GenericMCObjective(lambda Y, X: Y.sum(dim=-1))
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
vf = _get_value_function(model=mm, objective=obj, sampler=sampler)
self.assertIsInstance(vf, qSimpleRegret)
self.assertEqual(vf.objective, obj)
self.assertEqual(vf.sampler, sampler)
# test with project
mock_project = mock.Mock(
return_value=torch.ones(1, 1, 1, device=self.device)
)
vf = _get_value_function(
model=mm,
objective=obj,
sampler=sampler,
project=mock_project,
)
self.assertIsInstance(vf, ProjectedAcquisitionFunction)
self.assertEqual(vf.objective, obj)
self.assertEqual(vf.sampler, sampler)
self.assertEqual(vf.project, mock_project)
test_X = torch.rand(1, 1, 1, device=self.device)
with mock.patch.object(
vf, "base_value_function", __class__=torch.nn.Module, return_value=None
) as patch_bvf:
vf(test_X)
mock_project.assert_called_once_with(test_X)
patch_bvf.assert_called_once_with(
torch.ones(1, 1, 1, device=self.device)
)
def test_split_fantasy_points(self):
for dtype in (torch.float, torch.double):
X = torch.randn(5, 3, device=self.device, dtype=dtype)
# test error when passing inconsistent n_f
with self.assertRaises(ValueError):
_split_fantasy_points(X, n_f=6)
# basic test
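# 5 points should split into 5 - n_f = 3 actual points and n_f = 2 fantasies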
X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=2)
self.assertEqual(X_actual.shape, torch.Size([3, 3]))
self.assertEqual(X_fantasies.shape, torch.Size([2, 1, 3]))
self.assertTrue(torch.equal(X_actual, X[:3, :]))
self.assertTrue(torch.equal(X_fantasies, X[3:, :].unsqueeze(-2)))
# batched test
X = torch.randn(2, 5, 3, device=self.device, dtype=dtype)
X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=2)
self.assertEqual(X_actual.shape, torch.Size([2, 3, 3]))
self.assertEqual(X_fantasies.shape, torch.Size([2, 2, 1, 3]))
self.assertTrue(torch.equal(X_actual, X[..., :3, :]))
X_fantasies_exp = X[..., 3:, :].unsqueeze(-2).permute(1, 0, 2, 3)
self.assertTrue(torch.equal(X_fantasies, X_fantasies_exp))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.fixed_feature import (
FixedFeatureAcquisitionFunction,
get_device_of_sequence,
get_dtype_of_sequence,
)
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.models import SingleTaskGP
from botorch.utils.testing import BotorchTestCase, MockAcquisitionFunction
class TestFixedFeatureAcquisitionFunction(BotorchTestCase):
def test_fixed_features(self) -> None:
train_X = torch.rand(5, 3, device=self.device)
train_Y = train_X.norm(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y).to(device=self.device).eval()
for q in [1, 2]:
qEI = qExpectedImprovement(model, best_f=0.0)
# test single point
test_X = torch.rand(q, 3, device=self.device)
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[2], values=test_X[..., -1:]
)
qei = qEI(test_X)
qei_ff = qEI_ff(test_X[..., :-1])
self.assertAllClose(qei, qei_ff)
# test list input with float and scalar tensor
for value in [0.5, torch.tensor(0.5)]:
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[2], values=[value]
)
qei_ff = qEI_ff(test_X[..., :-1])
test_X_clone = test_X.clone()
test_X_clone[..., 2] = value
qei = qEI(test_X_clone)
self.assertAllClose(qei, qei_ff)
# test list input with Tensor and float
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[0, 2], values=[test_X[..., [0]], value]
)
qei_ff = qEI_ff(test_X[..., [1]])
self.assertAllClose(qei, qei_ff)
# test t-batch with broadcasting and list of floats
test_X = torch.rand(q, 3, device=self.device).expand(4, q, 3)
qei = qEI(test_X)
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[2], values=test_X[0, :, -1:]
)
qei_ff = qEI_ff(test_X[..., :-1])
self.assertAllClose(qei, qei_ff)
# test t-batch with broadcasting and a list mixing a Tensor with a
# float or scalar tensor value
for value in [0.5, torch.tensor(0.5)]:
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[0, 2], values=[test_X[0, :, [0]], value]
)
qei_ff = qEI_ff(test_X[..., [1]])
test_X_clone = test_X.clone()
test_X_clone[..., 2] = value
qei = qEI(test_X_clone)
self.assertAllClose(qei, qei_ff)
# test X_pending
X_pending = torch.rand(2, 3, device=self.device)
qEI.set_X_pending(X_pending)
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[2], values=test_X[..., -1:]
)
self.assertAllClose(qEI.X_pending, qEI_ff.X_pending)
# test setting X_pending from qEI_ff
# (set target value to be last dim of X_pending and check if the
# constructed X_pending on qEI is the full X_pending)
X_pending = torch.rand(2, 3, device=self.device)
qEI.X_pending = None
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[2], values=X_pending[..., -1:]
)
qEI_ff.set_X_pending(X_pending[..., :-1])
self.assertAllClose(qEI.X_pending, X_pending)
# test setting to None
qEI_ff.X_pending = None
self.assertIsNone(qEI_ff.X_pending)
# test gradient
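# (fixed values are detached below so gradients flow only through the free columns)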
test_X = torch.rand(1, 3, device=self.device, requires_grad=True)
qei = qEI(test_X)
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[2], values=test_X[..., [2]].detach()
)
test_X_ff = test_X[..., :-1].detach().clone().requires_grad_(True)
qei_ff = qEI_ff(test_X_ff)
self.assertAllClose(qei, qei_ff)
qei.backward()
qei_ff.backward()
self.assertAllClose(test_X.grad[..., :-1], test_X_ff.grad)
# test list input with float and scalar tensor
for value in [0.5, torch.tensor(0.5)]:
# computing with fixed features
test_X_ff = test_X[..., [1]].detach().clone().requires_grad_(True)
qEI_ff = FixedFeatureAcquisitionFunction(
qEI, d=3, columns=[0, 2], values=[test_X[..., [0]].detach(), value]
)
qei_ff = qEI_ff(test_X_ff)
qei_ff.backward()
# computing ground truth
test_X_clone = test_X.detach().clone()
test_X_clone[..., 2] = value
test_X_clone.requires_grad_(True)
qei = qEI(test_X_clone)
qei.backward()
self.assertAllClose(test_X_clone.grad[..., [1]], test_X_ff.grad)
# test error b/c of incompatible input shapes
with self.assertRaises(ValueError):
qEI_ff(test_X)
# test error when there is no X_pending (analytic EI)
test_X = torch.rand(q, 3, device=self.device)
analytic_EI = ExpectedImprovement(model, best_f=0.0)
EI_ff = FixedFeatureAcquisitionFunction(
analytic_EI, d=3, columns=[2], values=test_X[..., -1:]
)
with self.assertRaises(ValueError):
EI_ff.X_pending
def test_values_dtypes(self) -> None:
acqf = MockAcquisitionFunction()
for input, d, expected_dtype in [
(torch.tensor([0.0], dtype=torch.float32), 1, torch.float32),
(torch.tensor([0.0], dtype=torch.float64), 1, torch.float64),
(
[
torch.tensor([0.0], dtype=torch.float32),
torch.tensor([0.0], dtype=torch.float64),
],
2,
torch.float64,
),
([0.0], 1, torch.float64),
([torch.tensor(0.0, dtype=torch.float32), 0.0], 2, torch.float64),
]:
with self.subTest(input=input, d=d, expected_dtype=expected_dtype):
self.assertEqual(get_dtype_of_sequence(input), expected_dtype)
ff = FixedFeatureAcquisitionFunction(
acqf, d=d, columns=[2], values=input
)
self.assertEqual(ff.values.dtype, expected_dtype)
def test_values_devices(self) -> None:
acqf = MockAcquisitionFunction()
cpu = torch.device("cpu")
cuda = torch.device("cuda")
test_cases = [
(torch.tensor([0.0], device=cpu), 1, cpu),
([0.0], 1, cpu),
([0.0, torch.tensor([0.0], device=cpu)], 2, cpu),
]
# Can only properly test this when running CUDA tests
if self.device.type == "cuda":
test_cases = test_cases + [
(torch.tensor([0.0], device=cuda), 1, cuda),
(
[
torch.tensor([0.0], device=cpu),
torch.tensor([0.0], device=cuda),
],
2,
cuda,
),
([0.0], 1, cpu),
([torch.tensor(0.0, device=cuda), 0.0], 2, cuda),
]
for input, d, expected_device in test_cases:
with self.subTest(input=input, d=d, expected_device=expected_device):
self.assertEqual(get_device_of_sequence(input), expected_device)
ff = FixedFeatureAcquisitionFunction(
acqf, d=d, columns=[2], values=input
)
self.assertEqual(ff.values.device, expected_device)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition.cached_cholesky import CachedCholeskyMCAcquisitionFunction
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.acquisition.objective import GenericMCObjective
from botorch.exceptions.warnings import BotorchWarning
from botorch.models import SingleTaskGP
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.higher_order_gp import HigherOrderGP
from botorch.models.model import ModelList
from botorch.models.transforms.outcome import Log
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.low_rank import extract_batch_covar
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from linear_operator.utils.errors import NanError, NotPSDError
CHOLESKY_PATH = "linear_operator.operators._linear_operator.psd_safe_cholesky"
EXTRACT_BATCH_COVAR_PATH = "botorch.acquisition.cached_cholesky.extract_batch_covar"
class DummyCachedCholeskyAcqf(
MCAcquisitionFunction, CachedCholeskyMCAcquisitionFunction
):
def forward(self, X):
return X
class TestCachedCholeskyMCAcquisitionFunction(BotorchTestCase):
def test_setup(self):
mean = torch.zeros(1, 1)
variance = torch.ones(1, 1)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
# basic test w/ invalid model.
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
acqf._setup(model=mm)
self.assertFalse(acqf._cache_root)
with self.assertWarnsRegex(RuntimeWarning, "cache_root"):
acqf._setup(model=mm, cache_root=True)
self.assertFalse(acqf._cache_root)
# Unsupported outcome transform.
stgp = SingleTaskGP(
torch.zeros(1, 1), torch.zeros(1, 1), outcome_transform=Log()
)
with self.assertWarnsRegex(RuntimeWarning, "cache_root"):
acqf._setup(model=stgp, cache_root=True)
self.assertFalse(acqf._cache_root)
# ModelList is not supported.
model_list = ModelList(SingleTaskGP(torch.zeros(1, 1), torch.zeros(1, 1)))
with self.assertWarnsRegex(RuntimeWarning, "cache_root"):
acqf._setup(model=model_list, cache_root=True)
self.assertFalse(acqf._cache_root)
# basic test w/ supported model.
stgp = SingleTaskGP(torch.zeros(1, 1), torch.zeros(1, 1))
acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
acqf._setup(model=stgp, cache_root=True)
self.assertTrue(acqf._cache_root)
# test that base_samples are set to None
self.assertIsNone(acqf.sampler.base_samples)
# test model that uses Matheron's rule and sampler.batch_range != (0, -1)
hogp = HigherOrderGP(torch.zeros(1, 1), torch.zeros(1, 1, 1)).eval()
acqf = DummyCachedCholeskyAcqf(model=hogp, sampler=sampler)
with self.assertWarnsRegex(RuntimeWarning, "cache_root"):
acqf._setup(model=hogp, cache_root=True)
# test deterministic model
model = GenericDeterministicModel(f=lambda X: X)
acqf = DummyCachedCholeskyAcqf(model=model, sampler=sampler)
acqf._setup(model=model, cache_root=True)
self.assertFalse(acqf._cache_root)
def test_cache_root_decomposition(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
# test mt-mvn
train_x = torch.rand(2, 1, **tkwargs)
train_y = torch.rand(2, 2, **tkwargs)
test_x = torch.rand(2, 1, **tkwargs)
model = SingleTaskGP(train_x, train_y)
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
with torch.no_grad():
posterior = model.posterior(test_x)
acqf = DummyCachedCholeskyAcqf(
model=model,
sampler=sampler,
objective=GenericMCObjective(lambda Y: Y[..., 0]),
)
baseline_L = torch.eye(2, **tkwargs)
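# mock the Cholesky call to return this fixed factor so we can verify
# it is passed through unchanged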
with mock.patch(
EXTRACT_BATCH_COVAR_PATH, wraps=extract_batch_covar
) as mock_extract_batch_covar:
with mock.patch(
CHOLESKY_PATH, return_value=baseline_L
) as mock_cholesky:
baseline_L_acqf = acqf._compute_root_decomposition(
posterior=posterior
)
mock_extract_batch_covar.assert_called_once_with(
posterior.distribution
)
mock_cholesky.assert_called_once()
# test mvn
model = SingleTaskGP(train_x, train_y[:, :1])
with torch.no_grad():
posterior = model.posterior(test_x)
with mock.patch(EXTRACT_BATCH_COVAR_PATH) as mock_extract_batch_covar:
with mock.patch(
CHOLESKY_PATH, return_value=baseline_L
) as mock_cholesky:
baseline_L_acqf = acqf._compute_root_decomposition(
posterior=posterior
)
mock_extract_batch_covar.assert_not_called()
mock_cholesky.assert_called_once()
self.assertTrue(torch.equal(baseline_L_acqf, baseline_L))
def test_get_f_X_samples(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
mean = torch.zeros(5, 1, **tkwargs)
variance = torch.ones(5, 1, **tkwargs)
mm = MockModel(
MockPosterior(
mean=mean, variance=variance, samples=torch.rand(5, 1, **tkwargs)
)
)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
with self.assertWarnsRegex(RuntimeWarning, "cache_root"):
acqf._setup(model=mm, cache_root=True)
self.assertFalse(acqf._cache_root)
acqf._cache_root = True
q = 3
baseline_L = torch.eye(5 - q, **tkwargs)
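# with q = 3 new points, the cached baseline factor covers the remaining 5 - q = 2 points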
acqf._baseline_L = baseline_L
posterior = mm.posterior(torch.rand(5, 1, **tkwargs))
# basic test
rv = torch.rand(1, 5, 1, **tkwargs)
with mock.patch(
"botorch.acquisition.cached_cholesky.sample_cached_cholesky",
return_value=rv,
) as mock_sample_cached_cholesky:
samples = acqf._get_f_X_samples(posterior=posterior, q_in=q)
mock_sample_cached_cholesky.assert_called_once_with(
posterior=posterior,
baseline_L=acqf._baseline_L,
q=q,
base_samples=acqf.sampler.base_samples,
sample_shape=acqf.sampler.sample_shape,
)
self.assertTrue(torch.equal(rv, samples))
# test fall back when sampling from cached cholesky fails
for error_cls in (NanError, NotPSDError):
base_samples = torch.rand(1, 5, 1, **tkwargs)
acqf.sampler.base_samples = base_samples
acqf._baseline_L = baseline_L
with mock.patch(
"botorch.acquisition.cached_cholesky.sample_cached_cholesky",
side_effect=error_cls,
) as mock_sample_cached_cholesky:
with warnings.catch_warnings(record=True) as ws, settings.debug(
True
):
samples = acqf._get_f_X_samples(posterior=posterior, q_in=q)
mock_sample_cached_cholesky.assert_called_once_with(
posterior=posterior,
baseline_L=acqf._baseline_L,
q=q,
base_samples=base_samples,
sample_shape=acqf.sampler.sample_shape,
)
self.assertTrue(issubclass(ws[0].category, BotorchWarning))
self.assertEqual(samples.shape, torch.Size([1, q, 1]))
# test HOGP
hogp = HigherOrderGP(torch.zeros(2, 1), torch.zeros(2, 1, 1)).eval()
acqf = DummyCachedCholeskyAcqf(model=hogp, sampler=sampler)
acqf._setup(model=hogp, cache_root=True)
mock_samples = torch.rand(5, 1, 1, **tkwargs)
posterior = MockPosterior(
mean=mean, variance=variance, samples=mock_samples
)
samples = acqf._get_f_X_samples(posterior=posterior, q_in=q)
self.assertTrue(torch.equal(samples, mock_samples[2:].unsqueeze(0)))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.preference import (
AnalyticExpectedUtilityOfBestOption,
PairwiseBayesianActiveLearningByDisagreement,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models import SingleTaskGP
from botorch.models.deterministic import FixedSingleSampleModel
from botorch.models.pairwise_gp import PairwiseGP
from botorch.utils.testing import BotorchTestCase
class TestPreferenceAcquisitionFunctions(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.twargs = {"dtype": torch.double}
self.X_dim = 3
self.Y_dim = 2
X = torch.rand(2, self.X_dim, **self.twargs)
Y = torch.rand(2, self.Y_dim, **self.twargs)
comps = torch.tensor([[1, 0]], dtype=torch.long)
self.model = SingleTaskGP(X, Y)
self.pref_model_on_X = PairwiseGP(X, comps)
self.pref_model_on_Y = PairwiseGP(Y, comps)
self.deterministic_model = FixedSingleSampleModel(model=self.model)
def pairwise_preference_acqf_test(
self, acqf_class: AcquisitionFunction, test_previous_winner: bool
):
for outcome_model in [self.deterministic_model, None]:
pref_model = (
self.pref_model_on_X if outcome_model is None else self.pref_model_on_Y
)
# Test with an outcome model and a preference model
acqf = acqf_class(pref_model=pref_model, outcome_model=outcome_model)
# test forward with different number of points
X1 = torch.rand(1, self.X_dim, **self.twargs)
X2 = torch.rand(2, self.X_dim, **self.twargs)
X3 = torch.rand(3, self.X_dim, **self.twargs)
# q = 1
with self.assertRaises((UnsupportedError, AssertionError)):
acqf(X1)
# q = 2
acqf(X2)
# q > 2
with self.assertRaises((UnsupportedError, AssertionError)):
acqf(X3)
if test_previous_winner:
previous_winner = (
torch.rand(1, self.X_dim, **self.twargs)
if outcome_model is None
else torch.rand(1, self.Y_dim, **self.twargs)
)
acqf = acqf_class(
pref_model=pref_model,
outcome_model=outcome_model,
previous_winner=previous_winner,
)
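# a previous winner pairs with one new point, so q = 1 is now valid
# while q = 2 is not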
# q = 1
acqf(X1)
# q = 2
with self.assertRaises((UnsupportedError, AssertionError)):
acqf(X2)
# q > 2
with self.assertRaises((UnsupportedError, AssertionError)):
acqf(X3)
def test_analytic_eubo(self):
self.pairwise_preference_acqf_test(
acqf_class=AnalyticExpectedUtilityOfBestOption,
test_previous_winner=True,
)
def test_analytic_bald(self):
self.pairwise_preference_acqf_test(
acqf_class=PairwiseBayesianActiveLearningByDisagreement,
test_previous_winner=False,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import torch
from botorch.acquisition.joint_entropy_search import qJointEntropySearch
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase
def get_model(train_X, train_Y, use_model_list, standardize_model):
num_objectives = train_Y.shape[-1]
if standardize_model:
if use_model_list:
outcome_transform = Standardize(m=1)
else:
outcome_transform = Standardize(m=num_objectives)
else:
outcome_transform = None
if use_model_list:
model = ModelListGP(
*[
SingleTaskGP(
train_X=train_X,
train_Y=train_Y[:, i : i + 1],
outcome_transform=outcome_transform,
)
for i in range(num_objectives)
]
)
else:
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
outcome_transform=outcome_transform,
)
return model
class TestQJointEntropySearch(BotorchTestCase):
def test_joint_entropy_search(self):
torch.manual_seed(1)
tkwargs = {"device": self.device}
estimation_types = ("LB", "MC")
num_objectives = 1
for (
dtype,
estimation_type,
use_model_list,
standardize_model,
maximize,
condition_noiseless,
) in product(
(torch.float, torch.double),
estimation_types,
(False, True),
(False, True),
(False, True),
(False, True),
):
tkwargs["dtype"] = dtype
input_dim = 2
train_X = torch.rand(4, input_dim, **tkwargs)
train_Y = torch.rand(4, num_objectives, **tkwargs)
model = get_model(train_X, train_Y, use_model_list, standardize_model)
num_samples = 20
optimal_inputs = torch.rand(num_samples, input_dim, **tkwargs)
optimal_outputs = torch.rand(num_samples, num_objectives, **tkwargs)
# test acquisition
X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)]
for i in range(len(X_pending_list)):
X_pending = X_pending_list[i]
acq = qJointEntropySearch(
model=model,
optimal_inputs=optimal_inputs,
optimal_outputs=optimal_outputs,
estimation_type=estimation_type,
num_samples=64,
X_pending=X_pending,
condition_noiseless=condition_noiseless,
maximize=maximize,
)
self.assertIsInstance(acq.sampler, SobolQMCNormalSampler)
test_Xs = [
torch.rand(4, 1, input_dim, **tkwargs),
torch.rand(4, 3, input_dim, **tkwargs),
torch.rand(4, 5, 1, input_dim, **tkwargs),
torch.rand(4, 5, 3, input_dim, **tkwargs),
]
for j in range(len(test_Xs)):
acq_X = acq(test_Xs[j])
# assess shape
self.assertTrue(acq_X.shape == test_Xs[j].shape[:-2])
with self.assertRaises(ValueError):
acq = qJointEntropySearch(
model=model,
optimal_inputs=optimal_inputs,
optimal_outputs=optimal_outputs,
estimation_type="NO_EST",
num_samples=64,
X_pending=X_pending,
condition_noiseless=condition_noiseless,
maximize=maximize,
)
acq_X = acq(test_Xs[j])
# Support for fully Bayesian models is not yet implemented, so we
# raise an error for now.
fully_bayesian_model = SaasFullyBayesianSingleTaskGP(train_X, train_Y)
with self.assertRaises(NotImplementedError):
acq = qJointEntropySearch(
model=fully_bayesian_model,
optimal_inputs=optimal_inputs,
optimal_outputs=optimal_outputs,
estimation_type="LB",
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest import mock
import torch
from botorch.acquisition.active_learning import (
PairwiseMCPosteriorVariance,
qNegIntegratedPosteriorVariance,
)
from botorch.acquisition.objective import (
GenericMCObjective,
ScalarizedPosteriorTransform,
)
from botorch.models.pairwise_gp import PairwiseGP
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultitaskMultivariateNormal
class TestQNegIntegratedPosteriorVariance(BotorchTestCase):
def test_init(self):
mm = MockModel(MockPosterior(mean=torch.rand(2, 1)))
mc_points = torch.rand(2, 2)
qNIPV = qNegIntegratedPosteriorVariance(model=mm, mc_points=mc_points)
sampler = qNIPV.sampler
self.assertIsInstance(sampler, SobolQMCNormalSampler)
self.assertEqual(sampler.sample_shape, torch.Size([1]))
self.assertTrue(torch.equal(mc_points, qNIPV.mc_points))
self.assertIsNone(qNIPV.X_pending)
self.assertIsNone(qNIPV.posterior_transform)
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
qNIPV = qNegIntegratedPosteriorVariance(
model=mm, mc_points=mc_points, sampler=sampler
)
self.assertIsInstance(qNIPV.sampler, IIDNormalSampler)
self.assertEqual(qNIPV.sampler.sample_shape, torch.Size([2]))
def test_q_neg_int_post_variance(self):
no = "botorch.utils.testing.MockModel.num_outputs"
for dtype in (torch.float, torch.double):
# basic test
mean = torch.zeros(4, 1, device=self.device, dtype=dtype)
variance = torch.rand(4, 1, device=self.device, dtype=dtype)
mc_points = torch.rand(10, 1, device=self.device, dtype=dtype)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
with mock.patch.object(MockModel, "fantasize", return_value=mfm):
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
# TODO: Make this work with arbitrary models
mm = MockModel(None)
qNIPV = qNegIntegratedPosteriorVariance(
model=mm, mc_points=mc_points
)
X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy
val = qNIPV(X)
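# qNIPV should equal the negative posterior variance averaged over mc_points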
self.assertAllClose(val, -(variance.mean()), atol=1e-4)
# batched model
mean = torch.zeros(2, 4, 1, device=self.device, dtype=dtype)
variance = torch.rand(2, 4, 1, device=self.device, dtype=dtype)
mc_points = torch.rand(2, 10, 1, device=self.device, dtype=dtype)
mfm = MockModel(MockPosterior(mean=mean, variance=variance))
with mock.patch.object(MockModel, "fantasize", return_value=mfm):
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
# TODO: Make this work with arbitrary models
mm = MockModel(None)
qNIPV = qNegIntegratedPosteriorVariance(
model=mm, mc_points=mc_points
)
# TODO: Allow broadcasting for batch evaluation
X = torch.empty(2, 1, 1, device=self.device, dtype=dtype) # dummy
val = qNIPV(X)
val_exp = -variance.mean(dim=-2).squeeze(-1)
self.assertAllClose(val, val_exp, atol=1e-4)
# multi-output model
mean = torch.zeros(4, 2, device=self.device, dtype=dtype)
variance = torch.rand(4, 2, device=self.device, dtype=dtype)
cov = torch.diag_embed(variance.view(-1))
f_posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
mc_points = torch.rand(10, 1, device=self.device, dtype=dtype)
mfm = MockModel(f_posterior)
with mock.patch.object(MockModel, "fantasize", return_value=mfm):
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
mm = MockModel(None)
weights = torch.tensor([0.5, 0.5], device=self.device, dtype=dtype)
qNIPV = qNegIntegratedPosteriorVariance(
model=mm,
mc_points=mc_points,
posterior_transform=ScalarizedPosteriorTransform(
weights=weights
),
)
X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy
val = qNIPV(X)
self.assertTrue(
torch.allclose(val, -0.5 * variance.mean(), atol=1e-4)
)
# batched multi-output model
mean = torch.zeros(4, 3, 1, 2, device=self.device, dtype=dtype)
variance = torch.rand(4, 3, 1, 2, device=self.device, dtype=dtype)
cov = torch.diag_embed(variance.view(4, 3, -1))
f_posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
mc_points = torch.rand(4, 1, device=self.device, dtype=dtype)
mfm = MockModel(f_posterior)
with mock.patch.object(MockModel, "fantasize", return_value=mfm):
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
mm = MockModel(None)
weights = torch.tensor([0.5, 0.5], device=self.device, dtype=dtype)
qNIPV = qNegIntegratedPosteriorVariance(
model=mm,
mc_points=mc_points,
posterior_transform=ScalarizedPosteriorTransform(
weights=weights
),
)
X = torch.empty(3, 1, 1, device=self.device, dtype=dtype) # dummy
val = qNIPV(X)
val_exp = -0.5 * variance.mean(dim=0).view(3, -1).mean(dim=-1)
self.assertAllClose(val, val_exp, atol=1e-4)
class TestPairwiseMCPosteriorVariance(BotorchTestCase):
def test_pairwise_mc_post_var(self):
train_X = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0]])
train_comp = torch.tensor([[0, 1]], dtype=torch.long)
model = PairwiseGP(train_X, train_comp)
# example link function
probit = torch.distributions.normal.Normal(0, 1).cdf
probit_obj = GenericMCObjective(objective=lambda Y, X: probit(Y.squeeze(-1)))
pv = PairwiseMCPosteriorVariance(model=model, objective=probit_obj)
n_test_pair = 8
good_X_2 = torch.rand((n_test_pair, 2, 3))
good_X_4 = torch.rand((n_test_pair, 4, 3))
bad_X = torch.rand((n_test_pair, 3, 3))
# ensure q is a multiple of 2
with self.assertRaises(RuntimeError):
pv(bad_X)
self.assertEqual(pv(good_X_2).shape, torch.Size([n_test_pair]))
self.assertEqual(pv(good_X_4).shape, torch.Size([n_test_pair]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import torch
from botorch.acquisition.multi_objective.predictive_entropy_search import (
_safe_update_omega,
_update_damping,
qMultiObjectivePredictiveEntropySearch,
)
from botorch.exceptions import UnsupportedError
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.utils.testing import BotorchTestCase
def dummy_sample_pareto_sets(model, num_pareto_samples, num_pareto_points):
m = model.models[0] if isinstance(model, ModelListGP) else model
input_dim = m.train_inputs[0].shape[-1]
tkwargs = {"dtype": m.train_inputs[0].dtype, "device": m.train_inputs[0].device}
return torch.rand(
num_pareto_samples,
num_pareto_points,
input_dim,
**tkwargs,
)
def get_model(train_X, train_Y, use_model_list, standardize_model):
num_objectives = train_Y.shape[-1]
if standardize_model:
if use_model_list:
outcome_transform = Standardize(m=1)
else:
outcome_transform = Standardize(m=num_objectives)
else:
outcome_transform = None
if use_model_list:
model = ModelListGP(
*[
SingleTaskGP(
train_X=train_X,
train_Y=train_Y[:, i : i + 1],
outcome_transform=outcome_transform,
)
for i in range(num_objectives)
]
)
else:
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
outcome_transform=outcome_transform,
)
return model
class TestQMultiObjectivePredictiveEntropySearch(BotorchTestCase):
def test_initialization_errors(self):
torch.manual_seed(1)
tkwargs = {"device": self.device}
standardize_model = False
for (dtype, num_objectives, use_model_list,) in product(
(torch.float, torch.double),
(1, 2, 3),
(False, True),
):
tkwargs["dtype"] = dtype
# test batched model
train_X = torch.rand(4, 3, 2, **tkwargs)
train_Y = torch.rand(4, 3, num_objectives, **tkwargs)
model = get_model(train_X, train_Y, use_model_list, standardize_model)
num_pareto_samples = 3
if num_objectives > 1:
num_pareto_points = 4
else:
num_pareto_points = 1
pareto_sets = dummy_sample_pareto_sets(
model, num_pareto_samples, num_pareto_points
)
# test batch model error
with self.assertRaises(NotImplementedError):
qMultiObjectivePredictiveEntropySearch(
model=model,
pareto_sets=pareto_sets,
)
# test wrong Pareto set shape
train_X = torch.rand(1, 2, **tkwargs)
train_Y = torch.rand(1, num_objectives, **tkwargs)
model = get_model(train_X, train_Y, use_model_list, standardize_model)
pareto_sets = dummy_sample_pareto_sets(
model, num_pareto_samples, num_pareto_points
)
with self.assertRaises(UnsupportedError):
qMultiObjectivePredictiveEntropySearch(
model=model,
pareto_sets=pareto_sets.unsqueeze(0),
)
with self.assertRaises(UnsupportedError):
qMultiObjectivePredictiveEntropySearch(
model=model,
pareto_sets=pareto_sets.unsqueeze(-1),
)
def test_moo_predictive_entropy_search(self, use_model_list=False, maximize=False):
torch.manual_seed(1)
tkwargs = {"device": self.device}
for (dtype, num_objectives, standardize_model,) in product(
(torch.float, torch.double),
(1, 2, 3),
(False, True),
):
tkwargs["dtype"] = dtype
input_dim = 2
train_X = torch.rand(4, input_dim, **tkwargs)
train_Y = torch.rand(4, num_objectives, **tkwargs)
model = get_model(train_X, train_Y, use_model_list, standardize_model)
num_pareto_samples = 3
num_pareto_points = 1 if num_objectives == 1 else 4
pareto_sets = dummy_sample_pareto_sets(
model, num_pareto_samples, num_pareto_points
)
# test acquisition
X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)]
for i in range(len(X_pending_list)):
X_pending = X_pending_list[i]
acq = qMultiObjectivePredictiveEntropySearch(
model=model,
pareto_sets=pareto_sets,
maximize=maximize,
X_pending=X_pending,
)
test_Xs = [
torch.rand(4, 1, input_dim, **tkwargs),
torch.rand(4, 3, input_dim, **tkwargs),
torch.rand(4, 5, 1, input_dim, **tkwargs),
torch.rand(4, 5, 3, input_dim, **tkwargs),
]
for test_X in test_Xs:
acq_X = acq(test_X)
# assess shape
self.assertTrue(acq_X.shape == test_X.shape[:-2])
def test_moo_predictive_entropy_search_maximize(self):
self.test_moo_predictive_entropy_search(maximize=True)
def test_moo_predictive_entropy_search_model_list(self):
self.test_moo_predictive_entropy_search(use_model_list=True)
def test_moo_predictive_entropy_search_model_list_maximize(self):
self.test_moo_predictive_entropy_search(use_model_list=True, maximize=True)
def test_update_damping(self):
# test error when old and new covariance are not positive semi-definite
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
cov_old = torch.ones(1, 2, 2, **tkwargs)
cov_new = torch.ones(1, 2, 2, **tkwargs)
damping_factor = torch.ones(1, **tkwargs)
jitter = 0.0
with self.assertRaises(ValueError):
_update_damping(
nat_cov=cov_old,
nat_cov_new=cov_new,
damping_factor=damping_factor,
jitter=jitter,
)
def test_safe_omega_update(self):
tkwargs = {"device": self.device}
# test exception when EP fails because the jitter is too small and omega
# update skips. This naturally depends on the precision.
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
N = 1
P = 3
M = 2
mean_f = torch.zeros(2, M, N + P, **tkwargs)
cov_f = torch.ones(2, M, N + P, N + P, **tkwargs)
omega_f_nat_mean = torch.zeros(2, M, N + P, P, 2, **tkwargs)
omega_f_nat_cov = torch.zeros(2, M, N + P, P, 2, 2, **tkwargs)
maximize = True
jitter = 0.0
# The inversion of a factor of `cov_f` will fail and raise a
# `torch._C._LinAlgError`.
omega_f_nat_mean_new, omega_f_nat_cov_new = _safe_update_omega(
mean_f=mean_f,
cov_f=cov_f,
omega_f_nat_mean=omega_f_nat_mean,
omega_f_nat_cov=omega_f_nat_cov,
N=N,
P=P,
M=M,
maximize=maximize,
jitter=jitter,
)
self.assertTrue(torch.equal(omega_f_nat_mean, omega_f_nat_mean_new))
self.assertTrue(torch.equal(omega_f_nat_cov, omega_f_nat_cov_new))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from itertools import product
from unittest import mock
import torch
from botorch.acquisition.multi_objective.objective import (
MCMultiOutputObjective,
UnstandardizeMCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import (
compute_sample_box_decomposition,
get_default_partitioning_alpha,
prune_inferior_points_multi_objective,
random_search_optimizer,
sample_optimal_points,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.utils.gp_sampling import get_gp_samples
from botorch.utils.multi_objective import is_non_dominated
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from torch import Tensor
class TestUtils(BotorchTestCase):
def test_get_default_partitioning_alpha(self):
for m in range(2, 7):
expected_val = 0.0 if m < 5 else 10 ** (-8 + m)
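# i.e. alpha = 0 for m < 5, then 1e-3 for m = 5 and 1e-2 for m = 6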
self.assertEqual(
expected_val, get_default_partitioning_alpha(num_objectives=m)
)
# In `BotorchTestCase.setUp` warnings are filtered, so here we
# remove the filter to ensure a warning is issued as expected.
warnings.resetwarnings()
with warnings.catch_warnings(record=True) as ws:
self.assertEqual(0.1, get_default_partitioning_alpha(num_objectives=7))
self.assertEqual(len(ws), 1)
class DummyMCMultiOutputObjective(MCMultiOutputObjective):
def forward(self, samples: Tensor) -> Tensor:
return samples
class TestMultiObjectiveUtils(BotorchTestCase):
def setUp(self):
super().setUp()
self.model = mock.MagicMock()
self.objective = DummyMCMultiOutputObjective()
self.X_observed = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
self.X_pending = torch.tensor([[1.0, 3.0, 4.0]])
self.mc_samples = 250
self.qmc = True
self.ref_point = [0.0, 0.0]
self.Y = torch.tensor([[1.0, 2.0]])
self.seed = 1
def test_prune_inferior_points_multi_objective(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
X = torch.rand(3, 2, **tkwargs)
ref_point = torch.tensor([0.25, 0.25], **tkwargs)
# the event shape is `q x m` = 3 x 2
samples = torch.tensor([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]], **tkwargs)
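# only [3.0, 4.0] is non-dominated here, so pruning should keep just the last row of X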
mm = MockModel(MockPosterior(samples=samples))
# test that a batched X raises errors
with self.assertRaises(UnsupportedError):
prune_inferior_points_multi_objective(
model=mm, X=X.expand(2, 3, 2), ref_point=ref_point
)
# test that a batched model raises errors (event shape is `q x m` = 3 x m)
mm2 = MockModel(MockPosterior(samples=samples.expand(2, 3, 2)))
with self.assertRaises(UnsupportedError):
prune_inferior_points_multi_objective(
model=mm2, X=X, ref_point=ref_point
)
# test that invalid max_frac is checked properly
with self.assertRaises(ValueError):
prune_inferior_points_multi_objective(
model=mm, X=X, max_frac=1.1, ref_point=ref_point
)
# test basic behaviour
X_pruned = prune_inferior_points_multi_objective(
model=mm, X=X, ref_point=ref_point
)
self.assertTrue(torch.equal(X_pruned, X[[-1]]))
# test unstd objective
unstd_obj = UnstandardizeMCMultiOutputObjective(
Y_mean=samples.mean(dim=0), Y_std=samples.std(dim=0), outcomes=[0, 1]
)
X_pruned = prune_inferior_points_multi_objective(
model=mm, X=X, ref_point=ref_point, objective=unstd_obj
)
self.assertTrue(torch.equal(X_pruned, X[[-1]]))
# test constraints
samples_constrained = torch.tensor(
[[1.0, 2.0, -1.0], [2.0, 1.0, -1.0], [3.0, 4.0, 1.0]], **tkwargs
)
mm_constrained = MockModel(MockPosterior(samples=samples_constrained))
X_pruned = prune_inferior_points_multi_objective(
model=mm_constrained,
X=X,
ref_point=ref_point,
objective=unstd_obj,
constraints=[lambda Y: Y[..., -1]],
)
self.assertTrue(torch.equal(X_pruned, X[:2]))
# test non-repeated samples (requires mocking out MockPosterior's rsample)
samples = torch.tensor(
[[[3.0], [0.0], [0.0]], [[0.0], [2.0], [0.0]], [[0.0], [0.0], [1.0]]],
device=self.device,
dtype=dtype,
)
with mock.patch.object(MockPosterior, "rsample", return_value=samples):
mm = MockModel(MockPosterior(samples=samples))
X_pruned = prune_inferior_points_multi_objective(
model=mm, X=X, ref_point=ref_point
)
self.assertTrue(torch.equal(X_pruned, X))
# test max_frac limiting
with mock.patch.object(MockPosterior, "rsample", return_value=samples):
mm = MockModel(MockPosterior(samples=samples))
X_pruned = prune_inferior_points_multi_objective(
model=mm, X=X, ref_point=ref_point, max_frac=2 / 3
)
if self.device.type == "cuda":
# sorting has different order on cuda
self.assertTrue(
torch.equal(X_pruned, X[[2, 1]]) or torch.equal(X_pruned, X[[1, 2]])
)
else:
self.assertTrue(torch.equal(X_pruned, X[:2]))
# test that zero-probability is in fact pruned
samples[2, 0, 0] = 10
with mock.patch.object(MockPosterior, "rsample", return_value=samples):
mm = MockModel(MockPosterior(samples=samples))
X_pruned = prune_inferior_points_multi_objective(
model=mm, X=X, ref_point=ref_point
)
self.assertTrue(torch.equal(X_pruned, X[:2]))
# test marginalize_dim and constraints
samples = torch.tensor([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]], **tkwargs)
samples = samples.unsqueeze(-3).expand(
*samples.shape[:-2],
2,
*samples.shape[-2:],
)
mm = MockModel(MockPosterior(samples=samples))
X_pruned = prune_inferior_points_multi_objective(
model=mm,
X=X,
ref_point=ref_point,
objective=unstd_obj,
constraints=[lambda Y: Y[..., -1] - 3.0],
marginalize_dim=-3,
)
self.assertTrue(torch.equal(X_pruned, X[:2]))
def test_compute_sample_box_decomposition(self):
tkwargs = {"device": self.device}
for dtype, maximize in product((torch.float, torch.double), (True, False)):
tkwargs["dtype"] = dtype
# test error when inputting incorrect Pareto front
X = torch.rand(4, 3, 2, 1, **tkwargs)
with self.assertRaises(UnsupportedError):
compute_sample_box_decomposition(pareto_fronts=X, maximize=maximize)
# test single and multi-objective setting
for num_objectives in (1, 5):
X = torch.rand(4, 3, num_objectives, **tkwargs)
bd1 = compute_sample_box_decomposition(
pareto_fronts=X, maximize=maximize
)
# assess shape
self.assertTrue(bd1.ndim == 4)
self.assertTrue(bd1.shape[-1] == num_objectives)
self.assertTrue(bd1.shape[-3] == 2)
if num_objectives == 1:
self.assertTrue(bd1.shape[-2] == 1)
# assess whether upper bound is greater than lower bound
self.assertTrue(torch.all(bd1[:, 1, ...] - bd1[:, 0, ...] >= 0))
# test constrained setting
num_constraints = 7
bd2 = compute_sample_box_decomposition(
pareto_fronts=X,
maximize=maximize,
num_constraints=num_constraints,
)
# assess shape
self.assertTrue(bd2.ndim == 4)
self.assertTrue(bd2.shape[-1] == num_objectives + num_constraints)
self.assertTrue(bd2.shape[-2] == bd1.shape[-2] + 1)
self.assertTrue(bd2.shape[-3] == 2)
# assess whether upper bound is greater than lower bound
self.assertTrue(torch.all(bd2[:, 1, ...] - bd2[:, 0, ...] >= 0))
# the constraint padding should not change the box-decomposition
# if the box-decomposition procedure is not random
self.assertTrue(torch.equal(bd1, bd2[..., 0:-1, 0:num_objectives]))
# test with a specified optimum
opt_X = 2.0 if maximize else -3.0
X[:, 0, :] = opt_X
bd3 = compute_sample_box_decomposition(
pareto_fronts=X, maximize=maximize
)
# check optimum
if maximize:
self.assertTrue(torch.all(bd3[:, 1, ...] == opt_X))
else:
self.assertTrue(torch.all(bd3[:, 0, ...] == opt_X))
def get_model(
dtype,
device,
num_points,
input_dim,
num_objectives,
use_model_list,
standardize_model,
):
torch.manual_seed(123)
tkwargs = {"dtype": dtype, "device": device}
train_X = torch.rand(num_points, input_dim, **tkwargs)
train_Y = torch.rand(num_points, num_objectives, **tkwargs)
if standardize_model:
if use_model_list:
outcome_transform = Standardize(m=1)
else:
outcome_transform = Standardize(m=num_objectives)
else:
outcome_transform = None
if use_model_list and num_objectives > 1:
model = ModelListGP(
*[
SingleTaskGP(
train_X=train_X,
train_Y=train_Y[:, i : i + 1],
outcome_transform=outcome_transform,
)
for i in range(num_objectives)
]
)
else:
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
outcome_transform=outcome_transform,
)
return model.eval(), train_X, train_Y
class TestThompsonSampling(BotorchTestCase):
def test_random_search_optimizer(self):
torch.manual_seed(1)
input_dim = 3
num_initial = 5
tkwargs = {"device": self.device}
optimizer_kwargs = {
"pop_size": 1000,
"max_tries": 5,
}
for (
dtype,
maximize,
num_objectives,
use_model_list,
standardize_model,
) in product(
(torch.float, torch.double),
(True, False),
(1, 2),
(False, True),
(False, True),
):
tkwargs["dtype"] = dtype
num_points = num_objectives
model, X, Y = get_model(
num_points=num_initial,
input_dim=input_dim,
num_objectives=num_objectives,
use_model_list=use_model_list,
standardize_model=standardize_model,
**tkwargs,
)
model_sample = get_gp_samples(
model=model,
num_outputs=num_objectives,
n_samples=1,
)
input_dim = X.shape[-1]
# fake bounds
bounds = torch.zeros((2, input_dim), **tkwargs)
bounds[1] = 1.0
pareto_set, pareto_front = random_search_optimizer(
model=model_sample,
bounds=bounds,
num_points=num_points,
maximize=maximize,
**optimizer_kwargs,
)
# check shape
self.assertTrue(pareto_set.ndim == 2)
self.assertTrue(pareto_front.ndim == 2)
self.assertTrue(pareto_set.shape[-1] == X.shape[-1])
self.assertTrue(pareto_front.shape[-1] == Y.shape[-1])
self.assertTrue(pareto_front.shape[-2] == pareto_set.shape[-2])
num_optimal_points = pareto_front.shape[-2]
# check if samples are non-dominated
weight = 1.0 if maximize else -1.0
count = torch.sum(is_non_dominated(Y=weight * pareto_front))
self.assertTrue(count == num_optimal_points)
# Ask for more optimal points than query evaluations
with self.assertRaises(RuntimeError):
random_search_optimizer(
model=model_sample,
bounds=bounds,
num_points=20,
maximize=maximize,
max_tries=1,
pop_size=10,
)
def test_sample_optimal_points(self):
torch.manual_seed(1)
input_dim = 3
num_initial = 5
tkwargs = {"device": self.device}
optimizer_kwargs = {
"pop_size": 100,
"max_tries": 1,
}
num_samples = 2
num_points = 1
for (
dtype,
maximize,
num_objectives,
opt_kwargs,
use_model_list,
standardize_model,
) in product(
(torch.float, torch.double),
(True, False),
(1, 2),
(optimizer_kwargs, None),
(False, True),
(False, True),
):
tkwargs["dtype"] = dtype
model, X, Y = get_model(
num_points=num_initial,
input_dim=input_dim,
num_objectives=num_objectives,
use_model_list=use_model_list,
standardize_model=standardize_model,
**tkwargs,
)
input_dim = X.shape[-1]
bounds = torch.zeros((2, input_dim), **tkwargs)
bounds[1] = 1.0
# check the error when asking for too many optimal points
if num_objectives == 1:
with self.assertRaises(UnsupportedError):
sample_optimal_points(
model=model,
bounds=bounds,
num_samples=num_samples,
num_points=2,
maximize=maximize,
optimizer=random_search_optimizer,
optimizer_kwargs=opt_kwargs,
)
pareto_sets, pareto_fronts = sample_optimal_points(
model=model,
bounds=bounds,
num_samples=num_samples,
num_points=num_points,
maximize=maximize,
optimizer=random_search_optimizer,
optimizer_kwargs=opt_kwargs,
)
# check shape
ps_desired_shape = torch.Size([num_samples, num_points, input_dim])
pf_desired_shape = torch.Size([num_samples, num_points, num_objectives])
self.assertTrue(pareto_sets.shape == ps_desired_shape)
self.assertTrue(pareto_fronts.shape == pf_desired_shape)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition.multi_objective.multi_fidelity import MOMF
from botorch.acquisition.multi_objective.objective import IdentityMCMultiOutputObjective
from botorch.exceptions.errors import BotorchError
from botorch.exceptions.warnings import BotorchWarning
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestMOMF(BotorchTestCase):
def test_momf(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
ref_point = [0.0, 0.0]
t_ref_point = torch.tensor(ref_point, **tkwargs)
pareto_Y = torch.tensor(
[[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
)
partitioning = NondominatedPartitioning(ref_point=t_ref_point)
# the event shape is `b x q x m` = 1 x 1 x 2
samples = torch.zeros(1, 1, 2, **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
# test error if pareto_Y is not initialized in the partitioning
with self.assertRaises(BotorchError):
MOMF(model=mm, ref_point=ref_point, partitioning=partitioning)
partitioning.update(Y=pareto_Y)
# test error if ref point has wrong shape
with self.assertRaises(ValueError):
MOMF(model=mm, ref_point=ref_point[:1], partitioning=partitioning)
X = torch.zeros(1, 1, **tkwargs)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# check ref point
self.assertTrue(
torch.equal(acqf.ref_point, torch.tensor(ref_point, **tkwargs))
)
# check cached indices
self.assertTrue(hasattr(acqf, "q_subset_indices"))
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0]], device=self.device),
)
)
# test q=2
X2 = torch.zeros(2, 1, **tkwargs)
samples2 = torch.zeros(1, 2, 2, **tkwargs)
mm2 = MockModel(MockPosterior(samples=samples2))
acqf.model = mm2
self.assertEqual(acqf.model, mm2)
res = acqf(X2)
self.assertEqual(res.item(), 0.0)
# check cached indices
self.assertTrue(hasattr(acqf, "q_subset_indices"))
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0], [1]], device=self.device),
)
)
self.assertIn("q_choose_2", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_2"],
torch.tensor([[0, 1]], device=self.device),
)
)
self.assertNotIn("q_choose_3", acqf.q_subset_indices)
# now back to 1 and sure all caches were cleared
acqf.model = mm
res = acqf(X)
self.assertNotIn("q_choose_2", acqf.q_subset_indices)
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0]], device=self.device),
)
)
X = torch.zeros(1, 1, **tkwargs)
samples = torch.zeros(1, 1, 2, **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 2]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 2]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
# get mm sample shape to match shape of X + X_pending
acqf.model._posterior._samples = torch.zeros(1, 2, 2, **tkwargs)
res = acqf(X)
X2 = torch.zeros(1, 1, 1, requires_grad=True, **tkwargs)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
# test objective
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
)
# get mm sample shape to match shape of X
acqf.model._posterior._samples = torch.zeros(1, 1, 2, **tkwargs)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# Test that the hypervolume improvement is correct for given sample
# test q = 1
X = torch.zeros(1, 1, **tkwargs)
# basic test
samples = torch.tensor([[[6.5, 4.5]]], **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 1.5)
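# The sample (6.5, 4.5) adds a hypervolume of 1.5 over the current front; with
# X = 0 the fidelity cost is 0 and the fixed cost is 1, so the value is 1.5 / 1.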
# test q = 1, does not contribute
samples = torch.tensor([0.0, 1.0], **tkwargs).view(1, 1, 2)
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test q = 2, both points contribute
X = torch.zeros(2, 1, **tkwargs)
samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
# since q = 2, the fidelity cost is 0 but the fixed cost is 1 per point,
# hence the total cost is 2. MOMF defaults to an affine cost model.
self.assertEqual(res.item(), 1.75 / 2)
# test q = 2, only 1 point contributes
samples = torch.tensor([[6.5, 4.5], [6.0, 4.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 1.5 / 2)
# test q = 2, neither contributes
samples = torch.tensor([[2.0, 2.0], [0.0, 0.1]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test q = 2, test point better than current best second objective
samples = torch.tensor([[6.5, 4.5], [6.0, 6.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 8.0 / 2)
# test q = 2, test point better than current-best first objective
samples = torch.tensor([[6.5, 4.5], [9.0, 2.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 2.0 / 2)
# test q = 3, all contribute
X = torch.zeros(3, 1, **tkwargs)
samples = torch.tensor(
[[6.5, 4.5], [9.0, 2.0], [7.0, 4.0]], **tkwargs
).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
# since q = 3, the fidelity cost is 0 but the fixed cost is 1 per point,
# hence the total cost is 3.
self.assertEqual(res.item(), 2.25 / 3)
# test q = 3, not all contribute
samples = torch.tensor(
[[6.5, 4.5], [9.0, 2.0], [7.0, 5.0]], **tkwargs
).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertTrue(
torch.allclose(res, torch.tensor(3.5 / 3, **tkwargs), atol=1e-15)
)
# test q = 3, none contribute
samples = torch.tensor(
[[0.0, 4.5], [1.0, 2.0], [3.0, 0.0]], **tkwargs
).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test m = 3, q=1
pareto_Y = torch.tensor(
[[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0], [1.0, 3.0, 4.0]],
**tkwargs,
)
ref_point = [-1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
X = torch.zeros(1, 2, **tkwargs)
res = acqf(X)
self.assertEqual(res.item(), 12.0)
# test m = 3, q=1, X is ones so fidelity + fixed_cost is 2
pareto_Y = torch.tensor(
[[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0], [1.0, 3.0, 4.0]],
**tkwargs,
)
ref_point = [-1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
X = torch.ones(1, 2, **tkwargs)
res = acqf(X)
self.assertEqual(res.item(), 12.0 / 2)
# test m = 3, q=1, with a custom cost callable
pareto_Y = torch.tensor(
[[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0], [1.0, 3.0, 4.0]],
**tkwargs,
)
ref_point = [-1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
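# custom cost callable: the cost scales linearly with the fidelity (the last
# input dimension), so X = ones yields a cost of 6 and res = 12 / 6 below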
def cost(x):
return (6 * x[..., -1]).unsqueeze(-1)
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
cost_call=cost,
)
X = torch.ones(1, 2, **tkwargs)
res = acqf(X)
self.assertEqual(res.item(), 12.0 / 6)
# change reference point
ref_point = [0.0] * 3
X = torch.zeros(1, 2, **tkwargs)
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 4.0)
# test m = 3, no contribution
ref_point = [1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test m = 3, q = 2
pareto_Y = torch.tensor(
[[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs
)
samples = torch.tensor(
[[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]], **tkwargs
).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
ref_point = [-1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
X = torch.zeros(2, 2, **tkwargs)
res = acqf(X)
self.assertEqual(res.item(), 22.0 / 2)
# test batched model
pareto_Y = torch.tensor(
[[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs
)
samples = torch.tensor(
[[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]], **tkwargs
).unsqueeze(0)
samples = torch.stack([samples, samples + 1], dim=1)
mm = MockModel(MockPosterior(samples=samples))
ref_point = [-1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
X = torch.zeros(2, 2, **tkwargs)
res = acqf(X)
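# the second model batch (samples + 1) dominates more volume (60 vs. 22);
# both values are divided by the total cost of 2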
self.assertTrue(
torch.equal(
res,
# batch_shape x model_batch_shape
torch.tensor([[22.0, 60.0]], **tkwargs) / 2,
)
)
# test batched model with a batched partitioning that has multiple batch dims
pareto_Y = torch.tensor(
[[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
)
pareto_Y = torch.stack(
[
pareto_Y,
pareto_Y + 0.5,
],
dim=0,
)
samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]], **tkwargs).unsqueeze(0)
samples = torch.stack([samples, samples + 1], dim=1)
mm = MockModel(MockPosterior(samples=samples))
ref_point = [-1.0] * 2
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = FastNondominatedPartitioning(
ref_point=t_ref_point, Y=pareto_Y
)
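# emulate a partitioning with multiple batch dims by patching in cell bounds
# that carry an extra batch dimension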
cell_bounds = partitioning.get_hypercell_bounds().unsqueeze(1)
with mock.patch.object(
partitioning, "get_hypercell_bounds", return_value=cell_bounds
):
acqf = MOMF(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
# test multiple batch dims
self.assertEqual(acqf.cell_lower_bounds.shape, torch.Size([1, 2, 4, 2]))
self.assertEqual(acqf.cell_upper_bounds.shape, torch.Size([1, 2, 4, 2]))
X = torch.zeros(2, 2, **tkwargs)
res = acqf(X)
self.assertTrue(
torch.equal(
res,
# batch_shape x model_batch_shape
torch.tensor(
[[1.75, 3.5]], dtype=samples.dtype, device=samples.device
)
/ 2,
)
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from copy import deepcopy
from itertools import product
from math import pi
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition.cached_cholesky import _get_cache_root_not_supported_message
from botorch.acquisition.multi_objective.monte_carlo import (
MultiObjectiveMCAcquisitionFunction,
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.acquisition.multi_objective.multi_output_risk_measures import (
MultiOutputRiskMeasureMCObjective,
)
from botorch.acquisition.multi_objective.objective import (
GenericMCMultiOutputObjective,
IdentityMCMultiOutputObjective,
MCMultiOutputObjective,
)
from botorch.acquisition.objective import IdentityMCObjective
from botorch.exceptions.errors import BotorchError, UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models import (
GenericDeterministicModel,
HigherOrderGP,
KroneckerMultiTaskGP,
MultiTaskGP,
)
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.transforms.input import InputPerturbation
from botorch.models.transforms.outcome import Standardize
from botorch.posteriors.posterior_list import PosteriorList
from botorch.posteriors.transformed import TransformedPosterior
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.low_rank import sample_cached_cholesky
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from botorch.utils.transforms import match_batch_shape, standardize
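# A minimal concrete subclass used to exercise the abstract base class's
# initialization logic.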
class DummyMultiObjectiveMCAcquisitionFunction(MultiObjectiveMCAcquisitionFunction):
def forward(self, X):
pass
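# A pass-through objective that truncates the samples to X's q-batch size
# when X is provided.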
class DummyMCMultiOutputObjective(MCMultiOutputObjective):
def forward(self, samples, X=None):
if X is not None:
return samples[..., : X.shape[-2], :]
else:
return samples
class TestMultiObjectiveMCAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
MultiObjectiveMCAcquisitionFunction()
def test_init(self):
mm = MockModel(MockPosterior(mean=torch.rand(2, 1), samples=torch.rand(2, 1)))
# test default init
acqf = DummyMultiObjectiveMCAcquisitionFunction(model=mm)
self.assertIsInstance(acqf.objective, IdentityMCMultiOutputObjective)
self.assertIsNone(acqf.sampler)
# Initialize the sampler.
acqf.get_posterior_samples(mm.posterior(torch.ones(1, 1)))
self.assertEqual(acqf.sampler.sample_shape, torch.Size([128]))
self.assertIsNone(acqf.X_pending)
# test custom init
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([64]))
objective = DummyMCMultiOutputObjective()
X_pending = torch.rand(2, 1)
acqf = DummyMultiObjectiveMCAcquisitionFunction(
model=mm, sampler=sampler, objective=objective, X_pending=X_pending
)
self.assertEqual(acqf.objective, objective)
self.assertEqual(acqf.sampler, sampler)
self.assertTrue(torch.equal(acqf.X_pending, X_pending))
# test unsupported objective
with self.assertRaises(UnsupportedError):
DummyMultiObjectiveMCAcquisitionFunction(
model=mm, objective=IdentityMCObjective()
)
# test constraints with input perturbation.
mm.input_transform = InputPerturbation(perturbation_set=torch.rand(2, 1))
with self.assertRaises(UnsupportedError):
DummyMultiObjectiveMCAcquisitionFunction(
model=mm, constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])]
)
class TestQExpectedHypervolumeImprovement(BotorchTestCase):
def test_q_expected_hypervolume_improvement(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
ref_point = [0.0, 0.0]
t_ref_point = torch.tensor(ref_point, **tkwargs)
pareto_Y = torch.tensor(
[[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
)
partitioning = NondominatedPartitioning(ref_point=t_ref_point)
# the event shape is `b x q x m` = 1 x 1 x 2
samples = torch.zeros(1, 1, 2, **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
# test error if pareto_Y is not initialized in the partitioning
with self.assertRaises(BotorchError):
qExpectedHypervolumeImprovement(
model=mm, ref_point=ref_point, partitioning=partitioning
)
partitioning.update(Y=pareto_Y)
# test error if ref point has wrong shape
with self.assertRaises(ValueError):
qExpectedHypervolumeImprovement(
model=mm, ref_point=ref_point[:1], partitioning=partitioning
)
X = torch.zeros(1, 1, **tkwargs)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# check ref point
self.assertTrue(
torch.equal(acqf.ref_point, torch.tensor(ref_point, **tkwargs))
)
# check cached indices
self.assertTrue(hasattr(acqf, "q_subset_indices"))
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0]], device=self.device),
)
)
# test q=2
X2 = torch.zeros(2, 1, **tkwargs)
samples2 = torch.zeros(1, 2, 2, **tkwargs)
mm2 = MockModel(MockPosterior(samples=samples2))
acqf.model = mm2
self.assertEqual(acqf.model, mm2)
self.assertIn("model", acqf._modules)
self.assertEqual(acqf._modules["model"], mm2)
res = acqf(X2)
self.assertEqual(res.item(), 0.0)
# check cached indices
self.assertTrue(hasattr(acqf, "q_subset_indices"))
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0], [1]], device=self.device),
)
)
self.assertIn("q_choose_2", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_2"],
torch.tensor([[0, 1]], device=self.device),
)
)
self.assertNotIn("q_choose_3", acqf.q_subset_indices)
# now back to q=1 and ensure all caches were cleared
acqf.model = mm
res = acqf(X)
self.assertNotIn("q_choose_2", acqf.q_subset_indices)
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0]], device=self.device),
)
)
X = torch.zeros(1, 1, **tkwargs)
samples = torch.zeros(1, 1, 2, **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 2]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 2]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
# update the mm sample shape to match the shape of X + X_pending
acqf.model._posterior._samples = torch.zeros(1, 2, 2, **tkwargs)
res = acqf(X)
X2 = torch.zeros(1, 1, 1, requires_grad=True, **tkwargs)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
# test objective
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
)
# update the mm sample shape to match the shape of X
acqf.model._posterior._samples = torch.zeros(1, 1, 2, **tkwargs)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# Test that the hypervolume improvement is correct for a given sample
# test q = 1
X = torch.zeros(1, 1, **tkwargs)
# basic test
samples = torch.tensor([[[6.5, 4.5]]], **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 1.5)
# test q = 1, does not contribute
samples = torch.tensor([0.0, 1.0], **tkwargs).view(1, 1, 2)
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test q = 2, both points contribute
X = torch.zeros(2, 1, **tkwargs)
samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 1.75)
# test q = 2, only 1 point contributes
samples = torch.tensor([[6.5, 4.5], [6.0, 4.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 1.5)
# test q = 2, neither contributes
samples = torch.tensor([[2.0, 2.0], [0.0, 0.1]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test q = 2, test point better than current best second objective
samples = torch.tensor([[6.5, 4.5], [6.0, 6.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf.model = mm
res = acqf(X)
self.assertEqual(res.item(), 8.0)
# test q = 2, test point better than current-best first objective
samples = torch.tensor([[6.5, 4.5], [9.0, 2.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 2.0)
# test q = 3, all contribute
X = torch.zeros(3, 1, **tkwargs)
samples = torch.tensor(
[[6.5, 4.5], [9.0, 2.0], [7.0, 4.0]], **tkwargs
).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 2.25)
# test q = 3, not all contribute
samples = torch.tensor(
[[6.5, 4.5], [9.0, 2.0], [7.0, 5.0]], **tkwargs
).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 3.5)
# test q = 3, none contribute
samples = torch.tensor(
[[0.0, 4.5], [1.0, 2.0], [3.0, 0.0]], **tkwargs
).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test m = 3, q=1
pareto_Y = torch.tensor(
[[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0], [1.0, 3.0, 4.0]],
**tkwargs,
)
ref_point = [-1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
X = torch.zeros(1, 2, **tkwargs)
res = acqf(X)
self.assertEqual(res.item(), 12.0)
# change reference point
ref_point = [0.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 4.0)
# test m = 3, no contribution
ref_point = [1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test m = 3, q = 2
pareto_Y = torch.tensor(
[[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs
)
samples = torch.tensor(
[[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]], **tkwargs
).unsqueeze(0)
mm = MockModel(MockPosterior(samples=samples))
ref_point = [-1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
X = torch.zeros(2, 2, **tkwargs)
res = acqf(X)
self.assertEqual(res.item(), 22.0)
# test batched model
pareto_Y = torch.tensor(
[[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs
)
samples = torch.tensor(
[[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]], **tkwargs
).unsqueeze(0)
samples = torch.stack([samples, samples + 1], dim=1)
mm = MockModel(MockPosterior(samples=samples))
ref_point = [-1.0] * 3
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
X = torch.zeros(2, 2, **tkwargs)
res = acqf(X)
self.assertTrue(
torch.equal(
res,
# batch_shape x model_batch_shape
torch.tensor([[22.0, 60.0]], **tkwargs),
)
)
# test batched model with a batched partitioning that has multiple batch dims
pareto_Y = torch.tensor(
[[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
)
pareto_Y = torch.stack(
[
pareto_Y,
pareto_Y + 0.5,
],
dim=0,
)
samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]], **tkwargs).unsqueeze(0)
samples = torch.stack([samples, samples + 1], dim=1)
mm = MockModel(MockPosterior(samples=samples))
ref_point = [-1.0] * 2
t_ref_point = torch.tensor(ref_point, **tkwargs)
partitioning = FastNondominatedPartitioning(
ref_point=t_ref_point, Y=pareto_Y
)
cell_bounds = partitioning.get_hypercell_bounds().unsqueeze(1)
with mock.patch.object(
partitioning, "get_hypercell_bounds", return_value=cell_bounds
):
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
)
# test multiple batch dims
self.assertEqual(acqf.cell_lower_bounds.shape, torch.Size([1, 2, 4, 2]))
self.assertEqual(acqf.cell_upper_bounds.shape, torch.Size([1, 2, 4, 2]))
X = torch.zeros(2, 2, **tkwargs)
res = acqf(X)
self.assertTrue(
torch.equal(
res,
# batch_shape x model_batch_shape
torch.tensor(
[[1.75, 3.5]], dtype=samples.dtype, device=samples.device
),
)
)
def test_constrained_q_expected_hypervolume_improvement(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
ref_point = [0.0, 0.0]
t_ref_point = torch.tensor(ref_point, **tkwargs)
pareto_Y = torch.tensor(
[[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
)
partitioning = NondominatedPartitioning(ref_point=t_ref_point)
partitioning.update(Y=pareto_Y)
# test q=1
# the event shape is `b x q x m` = 1 x 1 x 2
samples = torch.tensor([[[6.5, 4.5]]], **tkwargs)
mm = MockModel(MockPosterior(samples=samples))
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
X = torch.zeros(1, 1, **tkwargs)
# test zero slack
for eta in (1e-1, 1e-2):
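# with zero constraint slack, the sigmoid relaxation yields a feasibility
# weight of sigmoid(0) = 0.5 per constraint (independent of eta), scaling
# the raw improvement of 1.5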
expected_values = [0.5 * 1.5, 0.5 * 0.5 * 1.5]
for i, constraints in enumerate(
[
[lambda Z: torch.zeros_like(Z[..., -1])],
[
lambda Z: torch.zeros_like(Z[..., -1]),
lambda Z: torch.zeros_like(Z[..., -1]),
],
]
):
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
constraints=constraints,
eta=eta,
)
res = acqf(X)
self.assertAlmostEqual(res.item(), expected_values[i], places=4)
# test multiple constraints with a scalar eta and with per-constraint etas
constraints = [
lambda Z: torch.ones_like(Z[..., -1]),
lambda Z: torch.ones_like(Z[..., -1]),
]
etas = [1, torch.tensor([1, 10])]
expected_values = [
(
torch.sigmoid(torch.as_tensor(-1.0))
* torch.sigmoid(torch.as_tensor(-1.0))
* 1.5
).item(),
(
torch.sigmoid(torch.as_tensor(-1.0))
* torch.sigmoid(torch.as_tensor(-1.0 / 10.0))
* 1.5
).item(),
]
for eta, expected_value in zip(etas, expected_values):
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
constraints=constraints,
eta=eta,
)
res = acqf(X)
self.assertAlmostEqual(
res.item(),
expected_value,
places=4,
)
# test feasible
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])],
eta=1e-3,
)
res = acqf(X)
self.assertAlmostEqual(res.item(), 1.5, places=4)
# test infeasible
acqf = qExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
constraints=[lambda Z: 100.0 * torch.ones_like(Z[..., -1])],
eta=1e-3,
)
res = acqf(X)
self.assertAlmostEqual(res.item(), 0.0, places=4)
# TODO: Test non-trivial constraint values, multiple constraints, and q > 1
class TestQNoisyExpectedHypervolumeImprovement(BotorchTestCase):
def setUp(self):
self.ref_point = [0.0, 0.0, 0.0]
self.Y_raw = torch.tensor(
[
[2.0, 0.5, 1.0],
[1.0, 2.0, 1.0],
[1.0, 1.0, 1.0],
],
device=self.device,
)
self.pareto_Y_raw = torch.tensor(
[
[2.0, 0.5, 1.0],
[1.0, 2.0, 1.0],
],
device=self.device,
)
super().setUp()
def test_q_noisy_expected_hypervolume_improvement(self):
tkwargs = {"device": self.device}
for dtype, m in product(
(torch.float, torch.double),
(1, 2, 3),
):
tkwargs["dtype"] = dtype
ref_point = self.ref_point[:m]
Y = self.Y_raw[:, :m].to(**tkwargs)
pareto_Y = self.pareto_Y_raw[:, :m].to(**tkwargs)
X_baseline = torch.rand(Y.shape[0], 1, **tkwargs)
# the event shape is `b x (q + r) x m`
baseline_samples = Y
samples = torch.cat(
[baseline_samples.unsqueeze(0), torch.zeros(1, 1, m, **tkwargs)],
dim=1,
)
mm = MockModel(MockPosterior(samples=baseline_samples))
X = torch.zeros(1, 1, **tkwargs)
# basic test
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
# test error is raised if m == 1
if m == 1:
with self.assertRaisesRegex(
ValueError,
"qNoisyExpectedHypervolumeImprovement supports m>=2 outcomes ",
):
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
cache_root=False,
)
continue
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
cache_root=False,
)
# set the MockPosterior to use samples over baseline points and new
# candidates
acqf.model._posterior._samples = samples
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# check ref point
self.assertTrue(
torch.equal(acqf.ref_point, torch.tensor(ref_point, **tkwargs))
)
# check cached indices
self.assertTrue(hasattr(acqf, "q_subset_indices"))
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0]], device=self.device),
)
)
# test q=2
X2 = torch.zeros(2, 1, **tkwargs)
samples2 = torch.cat(
[baseline_samples.unsqueeze(0), torch.zeros(1, 2, m, **tkwargs)],
dim=1,
)
mm2 = MockModel(MockPosterior(samples=baseline_samples))
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm2,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
cache_root=False,
)
# set the MockPosterior to use samples over baseline points and new
# candidates
acqf.model._posterior._samples = samples2
res = acqf(X2)
self.assertEqual(res.item(), 0.0)
# check cached indices
self.assertTrue(hasattr(acqf, "q_subset_indices"))
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0], [1]], device=self.device),
)
)
self.assertIn("q_choose_2", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_2"],
torch.tensor([[0, 1]], device=self.device),
)
)
self.assertNotIn("q_choose_3", acqf.q_subset_indices)
# now back to q=1 and ensure all caches were cleared
acqf.model = mm
res = acqf(X)
self.assertNotIn("q_choose_2", acqf.q_subset_indices)
self.assertIn("q_choose_1", acqf.q_subset_indices)
self.assertTrue(
torch.equal(
acqf.q_subset_indices["q_choose_1"],
torch.tensor([[0]], device=self.device),
)
)
# test error is raised if X_baseline is batched
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
with self.assertRaises(UnsupportedError):
qNoisyExpectedHypervolumeImprovement(
model=mm2,
ref_point=ref_point,
X_baseline=X_baseline.unsqueeze(0),
sampler=sampler,
cache_root=False,
)
# test objective
# set the MockPosterior to use samples over baseline points
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
cache_root=False,
)
# sample_shape x n x m
original_base_samples = sampler.base_samples.detach().clone()
# set the MockPosterior to use samples over baseline points and new
# candidates
mm._posterior._samples = samples
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test that original base samples were retained
self.assertTrue(
torch.equal(
# sample_shape x batch_shape x n x m
sampler.base_samples[0, 0, : original_base_samples.shape[1], :],
original_base_samples[0],
)
)
# test that base_samples for X_baseline are fixed
# set the MockPosterior to use samples over baseline points
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
cache_root=False,
)
orig_base_sampler = deepcopy(acqf.base_sampler)
# set the MockPosterior to use samples over baseline points and new
# candidates
mm._posterior._samples = samples
with torch.no_grad():
acqf(X)
self.assertTrue(
torch.equal(
orig_base_sampler.base_samples, acqf.base_sampler.base_samples
)
)
self.assertTrue(
torch.allclose(
acqf.base_sampler.base_samples,
acqf.sampler.base_samples[..., : X_baseline.shape[0], :],
)
)
mm._posterior._samples = baseline_samples
# test empty pareto set
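# with a reference point that dominates all baseline samples, the Pareto set
# is empty and the decomposition is a single cell spanning [ref_point, inf)
# in each outcome dimension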
ref_point2 = [15.0, 14.0, 16.0][:m]
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point2,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
cache_root=False,
)
self.assertTrue((acqf.cell_lower_bounds[..., 0] == 15).all())
self.assertTrue((acqf.cell_lower_bounds[..., 1] == 14).all())
if m == 3:
self.assertTrue((acqf.cell_lower_bounds[..., 2] == 16).all())
self.assertTrue(torch.isinf(acqf.cell_upper_bounds).all())
for b in (acqf.cell_lower_bounds, acqf.cell_upper_bounds):
self.assertEqual(list(b.shape), [1, 1, m])
# test no baseline points
ref_point2 = [15.0, 14.0, 16.0][:m]
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point2,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
prune_baseline=True,
cache_root=False,
)
self.assertTrue((acqf.cell_lower_bounds[..., 0] == 15).all())
self.assertTrue((acqf.cell_lower_bounds[..., 1] == 14).all())
if m == 3:
self.assertTrue((acqf.cell_lower_bounds[..., 2] == 16).all())
self.assertTrue(torch.isinf(acqf.cell_upper_bounds).all())
for b in (acqf.cell_lower_bounds, acqf.cell_upper_bounds):
self.assertEqual(list(b.shape), [1, 1, m])
# test X_pending with CBD
for incremental_nehvi in (False, True):
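# incremental_nehvi=True measures each new point's improvement over the
# hypervolume including previously added pending points; False always
# measures against the initial hypervolume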
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
incremental_nehvi=incremental_nehvi,
cache_root=False,
)
original_base_samples = sampler.base_samples.detach().clone()
# the box decomposition algorithm is faster on the CPU for m>2,
# so NEHVI runs it on the CPU
expected_pareto_Y = pareto_Y if m == 2 else pareto_Y.cpu()
self.assertTrue(
torch.equal(acqf.partitioning.pareto_Y[0], expected_pareto_Y)
)
self.assertIsNone(acqf.X_pending)
new_Y = torch.tensor(
[[0.5, 3.0, 0.5][:m]], dtype=dtype, device=self.device
)
mm._posterior._samples = torch.cat(
[
baseline_samples,
new_Y,
]
).unsqueeze(0)
bd = DominatedPartitioning(
ref_point=torch.tensor(ref_point).to(**tkwargs), Y=pareto_Y
)
initial_hv = bd.compute_hypervolume()
# test _initial_hvs
if not incremental_nehvi:
self.assertTrue(hasattr(acqf, "_initial_hvs"))
self.assertTrue(torch.equal(acqf._initial_hvs, initial_hv.view(-1)))
# test forward
X_test = torch.rand(1, 1, dtype=dtype, device=self.device)
with torch.no_grad():
val = acqf(X_test)
bd.update(mm._posterior._samples[0, -1:])
expected_val = bd.compute_hypervolume() - initial_hv
self.assertTrue(torch.equal(val, expected_val.view(-1)))
# test that original base_samples were retained
self.assertTrue(
torch.equal(
# sample_shape x batch_shape x n x m
sampler.base_samples[0, 0, : original_base_samples.shape[1], :],
original_base_samples[0],
)
)
# test X_pending
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
incremental_nehvi=incremental_nehvi,
cache_root=False,
)
# sample_shape x n x m
original_base_samples = sampler.base_samples.detach().clone()
mm._posterior._samples = torch.cat(
[
baseline_samples,
new_Y,
],
dim=0,
)
X_pending = torch.rand(1, 1, dtype=dtype, device=self.device)
acqf.set_X_pending(X_pending)
if not incremental_nehvi:
self.assertTrue(torch.equal(expected_val, acqf._prev_nehvi))
self.assertIsNone(acqf.X_pending)
# check that X_baseline has been updated
self.assertTrue(torch.equal(acqf.X_baseline[:-1], acqf._X_baseline))
self.assertTrue(torch.equal(acqf.X_baseline[-1:], X_pending))
# check that partitioning has been updated
acqf_pareto_Y = acqf.partitioning.pareto_Y[0]
# the box decomposition algorithm is faster on the CPU for m>2,
# so NEHVI runs it on the CPU
self.assertTrue(torch.equal(acqf_pareto_Y[:-1], expected_pareto_Y))
expected_new_Y = new_Y if m == 2 else new_Y.cpu()
self.assertTrue(torch.equal(acqf_pareto_Y[-1:], expected_new_Y))
# test that base samples were retained
self.assertTrue(
torch.equal(
# sample_shape x n x m
sampler.base_samples[0, : original_base_samples.shape[1], :],
original_base_samples[0],
)
)
self.assertTrue(
torch.equal(
acqf.sampler.base_samples,
acqf.base_sampler.base_samples,
)
)
# test incremental nehvi in forward
new_Y2 = torch.cat(
[
new_Y,
torch.tensor(
[[0.25, 9.5, 1.5][:m]], dtype=dtype, device=self.device
),
],
dim=0,
)
mm._posterior._samples = torch.cat(
[
baseline_samples,
new_Y2,
]
).unsqueeze(0)
X_test = torch.rand(1, 1, dtype=dtype, device=self.device)
with torch.no_grad():
val = acqf(X_test)
if incremental_nehvi:
# set initial hv to include X_pending
initial_hv = bd.compute_hypervolume()
bd.update(mm._posterior._samples[0, -1:])
expected_val = bd.compute_hypervolume() - initial_hv
self.assertTrue(torch.equal(val, expected_val.view(-1)))
# add another point
X_pending2 = torch.cat(
[X_pending, torch.rand(1, 1, dtype=dtype, device=self.device)], dim=0
)
mm._posterior._samples = mm._posterior._samples.squeeze(0)
acqf.set_X_pending(X_pending2)
self.assertIsNone(acqf.X_pending)
# check that X_baseline has been updated
self.assertTrue(torch.equal(acqf.X_baseline[:-2], acqf._X_baseline))
self.assertTrue(torch.equal(acqf.X_baseline[-2:], X_pending2))
# check that partitioning has been updated
acqf_pareto_Y = acqf.partitioning.pareto_Y[0]
self.assertTrue(torch.equal(acqf_pareto_Y[:-2], expected_pareto_Y))
expected_new_Y2 = new_Y2 if m == 2 else new_Y2.cpu()
self.assertTrue(torch.equal(acqf_pareto_Y[-2:], expected_new_Y2))
# test set X_pending with grad
# Get posterior samples to agree with X_pending
mm._posterior._samples = torch.zeros(1, 7, m, **tkwargs)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(
torch.cat([X_pending2, X_pending2], dim=0).requires_grad_(True)
)
self.assertIsNone(acqf.X_pending)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
# test max iep
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
incremental_nehvi=False,
max_iep=1,
cache_root=False,
)
mm._posterior._samples = torch.cat(
[
baseline_samples,
new_Y,
]
)
acqf.set_X_pending(X_pending)
self.assertTrue(torch.equal(acqf.X_pending, X_pending))
acqf_pareto_Y = acqf.partitioning.pareto_Y[0]
self.assertTrue(torch.equal(acqf_pareto_Y, expected_pareto_Y))
mm._posterior._samples = torch.cat(
[
baseline_samples,
new_Y2,
]
)
# check that after second pending point is added, X_pending is set to None
# and the pending points are included in the box decompositions
acqf.set_X_pending(X_pending2)
self.assertIsNone(acqf.X_pending)
acqf_pareto_Y = acqf.partitioning.pareto_Y[0]
self.assertTrue(torch.equal(acqf_pareto_Y[:-2], expected_pareto_Y))
self.assertTrue(torch.equal(acqf_pareto_Y[-2:], expected_new_Y2))
# test qNEHVI without CBD
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
cache_pending=False,
cache_root=False,
)
mm._posterior._samples = torch.cat(
[
baseline_samples,
new_Y,
]
).unsqueeze(0)
X_pending10 = X_pending.expand(10, 1)
acqf.set_X_pending(X_pending10)
self.assertTrue(torch.equal(acqf.X_pending, X_pending10))
acqf_pareto_Y = acqf.partitioning.pareto_Y[0]
self.assertTrue(torch.equal(acqf_pareto_Y, expected_pareto_Y))
acqf.set_X_pending(X_pending)
mm._posterior._samples = torch.cat(
[
baseline_samples,
new_Y2,
]
).unsqueeze(0)
with torch.no_grad():
val = acqf(X_test)
bd = DominatedPartitioning(
ref_point=torch.tensor(ref_point).to(**tkwargs), Y=pareto_Y
)
initial_hv = bd.compute_hypervolume()
bd.update(mm._posterior._samples.squeeze(0))
expected_val = bd.compute_hypervolume() - initial_hv
self.assertTrue(torch.equal(expected_val.view(1), val))
# test alpha > 0
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
cache_pending=False,
alpha=1e-3,
cache_root=False,
)
if len(ref_point) == 2:
partitioning = acqf.partitioning
else:
partitioning = acqf.partitioning.box_decompositions[0]
self.assertIsInstance(partitioning, NondominatedPartitioning)
self.assertEqual(partitioning.alpha, 1e-3)
# test set_X_pending when X_pending = None
acqf.set_X_pending(X_pending10)
self.assertTrue(torch.equal(acqf.X_pending, X_pending10))
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
# test X_pending is not None on __init__
mm._posterior._samples = torch.zeros(1, 5, m, **tkwargs)
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
objective=IdentityMCMultiOutputObjective(),
alpha=1e-3,
X_pending=X_pending2,
cache_root=False,
)
self.assertTrue(torch.equal(X_baseline, acqf._X_baseline))
self.assertTrue(torch.equal(acqf.X_baseline[:-2], acqf._X_baseline))
self.assertTrue(torch.equal(acqf.X_baseline[-2:], X_pending2))
def test_constrained_q_noisy_expected_hypervolume_improvement(self):
# TODO: improve tests with constraints
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
ref_point = [0.0, 0.0]
pareto_Y = torch.tensor(
[[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
)
X_baseline = torch.zeros(pareto_Y.shape[0], 1, **tkwargs)
baseline_samples = pareto_Y
# test q=1
# the event shape is `b x q x m` = 1 x 1 x 2
samples = torch.cat(
[
baseline_samples.unsqueeze(0),
torch.tensor([[[6.5, 4.5]]], **tkwargs),
],
dim=1,
)
mm = MockModel(MockPosterior(samples=baseline_samples))
X = torch.zeros(1, 1, **tkwargs)
# test zero slack multiple constraints, multiple etas
for eta in [1e-1, 1e-2, torch.tensor([1.0, 10.0])]:
# set the MockPosterior to use samples over baseline points
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
constraints=[
lambda Z: torch.zeros_like(Z[..., -1]),
lambda Z: torch.zeros_like(Z[..., -1]),
],
eta=eta,
cache_root=False,
)
# set the MockPosterior to use samples over baseline points and new
# candidates
mm._posterior._samples = samples
res = acqf(X)
self.assertAlmostEqual(res.item(), 0.5 * 0.5 * 1.5, places=4)
# test zero slack single constraint
for eta in (1e-1, 1e-2):
# set the MockPosterior to use samples over baseline points
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
constraints=[lambda Z: torch.zeros_like(Z[..., -1])],
eta=eta,
cache_root=False,
)
# set the MockPosterior to use samples over baseline points and new
# candidates
mm._posterior._samples = samples
res = acqf(X)
self.assertAlmostEqual(res.item(), 0.5 * 1.5, places=4)
# set X_pending
X_pending = torch.rand(1, 1, **tkwargs)
acqf.set_X_pending(X_pending)
samples = torch.cat(
[
samples,
torch.tensor([[[10.0, 0.5]]], **tkwargs),
],
dim=1,
)
mm._posterior._samples = samples
res = acqf(X)
self.assertAlmostEqual(res.item(), 0.5 * 0.5, places=4)
# test incremental nehvi=False
mm._posterior._samples = baseline_samples
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
constraints=[lambda Z: torch.zeros_like(Z[..., -1])],
eta=1e-3,
incremental_nehvi=False,
cache_root=False,
)
samples = torch.cat(
[
baseline_samples.unsqueeze(0),
torch.tensor([[[6.5, 4.5]]], **tkwargs),
],
dim=1,
)
mm._posterior._samples = samples
res = acqf(X)
self.assertAlmostEqual(res.item(), 0.5 * 1.5, places=4)
acqf.set_X_pending(X_pending)
samples = torch.cat(
[
samples,
torch.tensor([[[10.0, 0.5]]], **tkwargs),
],
dim=1,
)
mm._posterior._samples = samples
res = acqf(X)
# test that HVI is not incremental
# Note that the cached pending point uses strict constraint evaluation
# so the HVI from the cached pending point is 1.5.
# The new X contributes an HVI of 0.5, but with a constraint slack of 0,
# the sigmoid soft-evaluation yields a constrained HVI of 0.25
self.assertAlmostEqual(res.item(), 1.75, places=4)
# test feasible
# set the MockPosterior to use samples over baseline points
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])],
eta=1e-3,
cache_root=False,
)
samples = torch.cat(
[
baseline_samples.unsqueeze(0),
torch.tensor([[[6.5, 4.5]]], **tkwargs),
],
dim=1,
)
mm._posterior._samples = samples
res = acqf(X)
self.assertAlmostEqual(res.item(), 1.5, places=4)
# test one and two constraints with matching etas
# Note: this crashes for large etas; the cause is unclear.
etas = [torch.tensor([1.0]), torch.tensor([1.0, 10.0])]
constraints = [
[lambda Z: torch.ones_like(Z[..., -1])],
[
lambda Z: torch.ones_like(Z[..., -1]),
lambda Z: torch.ones_like(Z[..., -1]),
],
]
expected_values = [
(torch.sigmoid(torch.as_tensor(-1.0 / 1)) * 1.5).item(),
(
torch.sigmoid(torch.as_tensor(-1.0 / 1))
* torch.sigmoid(torch.as_tensor(-1.0 / 10))
* 1.5
).item(),
]
for eta, constraint, expected_value in zip(
etas, constraints, expected_values
):
acqf.constraints = constraint
acqf.eta = eta
res = acqf(X)
self.assertAlmostEqual(
res.item(),
expected_value,
places=4,
)
# test infeasible
# set the MockPosterior to use samples over baseline points
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
constraints=[lambda Z: 100.0 * torch.ones_like(Z[..., -1])],
eta=1e-3,
cache_root=False,
)
# set the MockPosterior to use samples over baseline points and new
# candidates
mm._posterior._samples = samples
res = acqf(X)
self.assertAlmostEqual(res.item(), 0.0, places=4)
# test >2 objectives
ref_point = [0.0, 0.0, 0.0]
baseline_samples = torch.tensor(
[
[4.0, 5.0, 1.0],
[5.0, 5.0, 1.0],
[8.5, 3.5, 1.0],
[8.5, 3.0, 1.0],
[9.0, 1.0, 1.0],
],
**tkwargs,
)
mm._posterior._samples = baseline_samples
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])],
eta=1e-3,
cache_root=False,
)
# set the MockPosterior to use samples over baseline points and new
# candidates
samples = torch.cat(
[
baseline_samples.unsqueeze(0),
torch.tensor([[[6.5, 4.5, 1.0]]], **tkwargs),
],
dim=1,
)
mm._posterior._samples = samples
res = acqf(X)
self.assertAlmostEqual(res.item(), 1.5, places=4)
def test_prune_baseline(self):
# test prune_baseline
no = "botorch.utils.testing.MockModel.num_outputs"
prune = (
"botorch.acquisition.multi_objective.monte_carlo."
"prune_inferior_points_multi_objective"
)
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
ref_point = [0.0, 0.0]
pareto_Y = torch.tensor(
[[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
)
X_baseline = torch.zeros(pareto_Y.shape[0], 1, **tkwargs)
baseline_samples = pareto_Y
X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype)
sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
# Reduce samples to same shape as X_pruned.
mm = MockModel(MockPosterior(samples=baseline_samples[:1]))
with mock.patch(prune, return_value=X_pruned) as mock_prune:
acqf = qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=True,
cache_root=False,
)
mock_prune.assert_called_once()
self.assertTrue(torch.equal(acqf.X_baseline, X_pruned))
def test_cache_root(self):
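# Compare the cached-root (cache_root=True) evaluation path against the
# standard path: values (and, in double precision, gradients) should agree.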
sample_cached_path = (
"botorch.acquisition.cached_cholesky.sample_cached_cholesky"
)
state_dict = {
"likelihood.noise_covar.raw_noise": torch.tensor(
[[0.0895], [0.2594]], dtype=torch.float64
),
"mean_module.raw_constant": torch.tensor(
[-0.4545, -0.1285], dtype=torch.float64
),
"covar_module.raw_outputscale": torch.tensor(
[1.4876, 1.4897], dtype=torch.float64
),
"covar_module.base_kernel.raw_lengthscale": torch.tensor(
[[[-0.7202, -0.2868]], [[-0.8794, -1.2877]]], dtype=torch.float64
),
}
# test batched models (e.g. for MCMC)
for train_batch_shape in (torch.Size([]), torch.Size([3])):
if len(train_batch_shape) > 0:
for k, v in state_dict.items():
state_dict[k] = v.unsqueeze(0).expand(*train_batch_shape, *v.shape)
for dtype, ref_point in product(
(torch.float, torch.double),
([-5.0, -5.0], [10.0, 10.0]),
):
tkwargs = {"device": self.device, "dtype": dtype}
for k, v in state_dict.items():
state_dict[k] = v.to(**tkwargs)
all_close_kwargs = (
{"atol": 1e-1, "rtol": 1e-2}
if dtype == torch.float
else {"atol": 1e-4, "rtol": 1e-6}
)
torch.manual_seed(1234)
train_X = torch.rand(*train_batch_shape, 3, 2, **tkwargs)
train_Y = torch.sin(train_X * 2 * pi) + torch.randn(
*train_batch_shape, 3, 2, **tkwargs
)
train_Y = standardize(train_Y)
model = SingleTaskGP(train_X, train_Y)
if len(train_batch_shape) > 0:
X_baseline = train_X[0]
else:
X_baseline = train_X
model.load_state_dict(state_dict, strict=False)
sampler = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0)
torch.manual_seed(0)
acqf = qNoisyExpectedHypervolumeImprovement(
model=model,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler,
prune_baseline=False,
cache_root=True,
)
sampler2 = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0)
torch.manual_seed(0)
acqf_no_cache = qNoisyExpectedHypervolumeImprovement(
model=model,
ref_point=ref_point,
X_baseline=X_baseline,
sampler=sampler2,
prune_baseline=False,
cache_root=False,
)
# load CBD
acqf_no_cache.cell_lower_bounds = acqf.cell_lower_bounds.clone()
acqf_no_cache.cell_upper_bounds = acqf.cell_upper_bounds.clone()
for q, batch_shape in product(
(1, 3), (torch.Size([]), torch.Size([3]), torch.Size([4, 3]))
):
torch.manual_seed(0)
acqf.q_in = -1
test_X = (
0.3 + 0.05 * torch.randn(*batch_shape, q, 2, **tkwargs)
).requires_grad_(True)
with mock.patch(
sample_cached_path, wraps=sample_cached_cholesky
) as mock_sample_cached:
torch.manual_seed(0)
val = acqf(test_X)
mock_sample_cached.assert_called_once()
val.sum().backward()
base_samples = acqf.sampler.base_samples.detach().clone()
X_grad = test_X.grad.clone()
test_X2 = test_X.detach().clone().requires_grad_(True)
acqf_no_cache.sampler.base_samples = base_samples
with mock.patch(
sample_cached_path, wraps=sample_cached_cholesky
) as mock_sample_cached:
torch.manual_seed(0)
val2 = acqf_no_cache(test_X2)
mock_sample_cached.assert_not_called()
self.assertAllClose(val, val2, **all_close_kwargs)
val2.sum().backward()
if dtype == torch.double:
# The gradient computation is very unstable in single precision
# so we only check the gradient when using torch.double.
self.assertTrue(
torch.allclose(X_grad, test_X2.grad, **all_close_kwargs)
)
if ref_point == [-5.0, -5.0]:
self.assertTrue((X_grad != 0).any())
# test we fall back to standard sampling for
# ill-conditioned covariances
acqf._baseline_L = torch.zeros_like(acqf._baseline_L)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
with torch.no_grad():
acqf(test_X)
self.assertEqual(
sum(issubclass(w.category, BotorchWarning) for w in ws), 1
)
def test_cache_root_w_standardize(self):
# Test caching with standardize transform.
train_x = torch.rand(3, 2, dtype=torch.float64)
train_y = torch.randn(3, 2, dtype=torch.float64)
model = SingleTaskGP(train_x, train_y, outcome_transform=Standardize(m=2))
acqf = qNoisyExpectedHypervolumeImprovement(
model=model,
X_baseline=train_x,
ref_point=torch.ones(2),
sampler=IIDNormalSampler(sample_shape=torch.Size([1])),
cache_root=True,
)
self.assertIsNotNone(acqf._baseline_L)
self.assertEqual(acqf(train_x[:1]).shape, torch.Size([1]))
self.assertEqual(acqf(train_x.unsqueeze(-2)).shape, torch.Size([3]))
def test_with_set_valued_objectives(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
tx = torch.rand(5, 2, **tkwargs)
ty = torch.randn(5, 2, **tkwargs)
perturbation = InputPerturbation(
perturbation_set=torch.randn(3, 2, **tkwargs)
).eval()
baseline_samples = perturbation(ty)
class DummyObjective(MultiOutputRiskMeasureMCObjective):
r"""A dummy set valued objective."""
_verify_output_shape = False
def forward(self, samples, X=None):
samples = self._prepare_samples(samples)
return samples[..., :2, :].reshape(
*samples.shape[:-3], -1, samples.shape[-1]
)
model = MockModel(MockPosterior(samples=baseline_samples))
acqf = qNoisyExpectedHypervolumeImprovement(
model=model,
ref_point=torch.tensor([0.0, 0.0], **tkwargs),
X_baseline=tx,
sampler=SobolQMCNormalSampler(sample_shape=torch.Size([2])),
objective=DummyObjective(n_w=3),
prune_baseline=False,
cache_root=False,
)
test_x = torch.rand(3, 2, 2, **tkwargs)
samples = torch.cat(
[baseline_samples.expand(3, -1, -1), torch.zeros(3, 6, 2, **tkwargs)],
dim=1,
)
acqf.model._posterior._samples = samples
res = acqf(test_x)
self.assertTrue(torch.equal(res, torch.zeros(3, **tkwargs)))
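# each of the q=2 candidates is expanded into 3 perturbed copies (q_in = 6);
# the set-valued objective keeps 2 outputs per candidate, so q_out = 4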
self.assertEqual(acqf.q_in, 6)
self.assertEqual(acqf.q_out, 4)
self.assertEqual(len(acqf.q_subset_indices.keys()), 4)
def test_deterministic(self):
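# cache_root is not supported for deterministic models; construction should
# warn and fall back to cache_root=False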
for dtype, prune in ((torch.float, False), (torch.double, True)):
tkwargs = {"device": self.device, "dtype": dtype}
model = GenericDeterministicModel(f=lambda x: x, num_outputs=2)
with self.assertWarnsRegex(
RuntimeWarning,
_get_cache_root_not_supported_message(GenericDeterministicModel),
):
acqf = qNoisyExpectedHypervolumeImprovement(
model=model,
ref_point=torch.tensor([0.0, 0.0], **tkwargs),
X_baseline=torch.rand(5, 2, **tkwargs),
prune_baseline=prune,
cache_root=True,
)
self.assertFalse(acqf._cache_root)
self.assertEqual(
acqf(torch.rand(3, 2, 2, **tkwargs)).shape, torch.Size([3])
)
def test_with_multitask(self):
# Verify that _set_sampler works with MTGP, KroneckerMTGP and HOGP.
torch.manual_seed(1234)
tkwargs = {"device": self.device, "dtype": torch.double}
train_x = torch.rand(6, 2, **tkwargs)
train_y = torch.randn(6, 2, **tkwargs)
mtgp_task = torch.cat(
[torch.zeros(6, 1, **tkwargs), torch.ones(6, 1, **tkwargs)], dim=0
)
mtgp_x = torch.cat([train_x.repeat(2, 1), mtgp_task], dim=-1)
mtgp = MultiTaskGP(mtgp_x, train_y.view(-1, 1), task_feature=2).eval()
kmtgp = KroneckerMultiTaskGP(train_x, train_y).eval()
hogp = HigherOrderGP(train_x, train_y.repeat(6, 1, 1)).eval()
hogp_obj = GenericMCMultiOutputObjective(lambda Y, X: Y.mean(dim=-2))
test_x = torch.rand(2, 3, 2, **tkwargs)
def get_acqf(model):
return qNoisyExpectedHypervolumeImprovement(
model=model,
ref_point=torch.tensor([0.0, 0.0], **tkwargs),
X_baseline=train_x,
sampler=IIDNormalSampler(sample_shape=torch.Size([2])),
objective=hogp_obj if isinstance(model, HigherOrderGP) else None,
prune_baseline=True,
cache_root=False,
)
for model in [mtgp, kmtgp, hogp]:
acqf = get_acqf(model)
posterior = model.posterior(acqf.X_baseline)
base_evals = acqf.base_sampler(posterior)
base_samples = acqf.base_sampler.base_samples
with mock.patch.object(
qNoisyExpectedHypervolumeImprovement,
"_compute_qehvi",
wraps=acqf._compute_qehvi,
) as wrapped_compute:
acqf(test_x)
wrapped_compute.assert_called_once()
expected_shape = (
torch.Size([2, 2, 3, 6, 2])
if isinstance(model, HigherOrderGP)
else torch.Size([2, 2, 3, 2])
)
self.assertEqual(
wrapped_compute.call_args[-1]["samples"].shape, expected_shape
)
new_base_samples = acqf.sampler.base_samples
# Check that the base samples are the same.
if model is mtgp:
expected = new_base_samples[..., :-3, :].squeeze(-3)
else:
n_train = base_samples.shape[-1] // 2
expected = torch.cat(
[new_base_samples[..., :n_train], new_base_samples[..., -n_train:]],
dim=-1,
).squeeze(-2)
self.assertTrue(torch.equal(base_samples, expected))
# Check that they produce the same f_X for baseline points.
X_full = torch.cat(
[match_batch_shape(acqf.X_baseline, test_x), test_x], dim=-2
)
posterior = acqf.model.posterior(X_full)
samples = acqf.sampler(posterior)
expected = samples[:, :, :-3]
repeat_shape = [1, 2, 1, 1]
if model is hogp:
repeat_shape.append(1)
self.assertTrue(
torch.allclose(
base_evals.unsqueeze(1).repeat(*repeat_shape),
expected,
atol=1e-2,
rtol=1e-4,
)
)
def test_with_transformed(self):
# Verify that _set_sampler works with transformed posteriors.
mm = MockModel(
posterior=PosteriorList(
TransformedPosterior(
MockPosterior(samples=torch.rand(2, 3, 1)), lambda X: X
),
TransformedPosterior(
MockPosterior(samples=torch.rand(2, 3, 1)), lambda X: X
),
)
)
sampler = ListSampler(
IIDNormalSampler(sample_shape=torch.Size([2])),
IIDNormalSampler(sample_shape=torch.Size([2])),
)
# This calls _set_sampler which used to error out in
# NormalMCSampler._update_base_samples with TransformedPosterior
# due to the missing batch_shape (fixed in #1625).
qNoisyExpectedHypervolumeImprovement(
model=mm,
ref_point=torch.tensor([0.0, 0.0]),
X_baseline=torch.rand(3, 2),
sampler=sampler,
cache_root=False,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
from unittest import mock
import torch
from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy
from botorch.acquisition.multi_objective.max_value_entropy_search import (
qLowerBoundMultiObjectiveMaxValueEntropySearch,
qMultiObjectiveMaxValueEntropy,
)
from botorch.acquisition.multi_objective.utils import compute_sample_box_decomposition
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase
def get_model(train_X, train_Y, use_model_list, standardize_model):
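    # Helper: builds either a ModelListGP of single-output SingleTaskGPs or a
    # single multi-output SingleTaskGP, optionally standardizing the outcomes.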
num_objectives = train_Y.shape[-1]
if standardize_model:
if use_model_list:
outcome_transform = Standardize(m=1)
else:
outcome_transform = Standardize(m=num_objectives)
else:
outcome_transform = None
if use_model_list:
model = ModelListGP(
*[
SingleTaskGP(
train_X=train_X,
train_Y=train_Y[:, i : i + 1],
outcome_transform=outcome_transform,
)
for i in range(num_objectives)
]
)
else:
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
outcome_transform=outcome_transform,
)
return model
def dummy_sample_pareto_frontiers(model):
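    # Stand-in for a Pareto-frontier sampler: returns 3 random fronts with 4
    # points each, matching the model's number of outputs, dtype, and device.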
m = model.models[0] if isinstance(model, ModelListGP) else model
return torch.rand(
3,
4,
model.num_outputs,
dtype=m.train_inputs[0].dtype,
device=m.train_inputs[0].device,
)
class TestMultiObjectiveMaxValueEntropy(BotorchTestCase):
def test_multi_objective_max_value_entropy(self):
for dtype, m in product((torch.float, torch.double), (2, 3)):
torch.manual_seed(7)
# test batched model
train_X = torch.rand(1, 1, 2, dtype=dtype, device=self.device)
train_Y = torch.rand(1, 1, m, dtype=dtype, device=self.device)
model = SingleTaskGP(train_X, train_Y)
with self.assertRaises(NotImplementedError):
qMultiObjectiveMaxValueEntropy(model, dummy_sample_pareto_frontiers)
# test initialization
train_X = torch.rand(4, 2, dtype=dtype, device=self.device)
train_Y = torch.rand(4, m, dtype=dtype, device=self.device)
# test batched MO model
model = SingleTaskGP(train_X, train_Y)
mesmo = qMultiObjectiveMaxValueEntropy(model, dummy_sample_pareto_frontiers)
self.assertEqual(mesmo.num_fantasies, 16)
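            # 16 is presumably the default value of `num_fantasies`.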
# Initialize the sampler.
dummy_post = model.posterior(train_X[:1])
mesmo.get_posterior_samples(dummy_post)
self.assertIsInstance(mesmo.sampler, SobolQMCNormalSampler)
self.assertEqual(mesmo.sampler.sample_shape, torch.Size([128]))
self.assertIsInstance(mesmo.fantasies_sampler, SobolQMCNormalSampler)
self.assertEqual(mesmo.posterior_max_values.shape, torch.Size([3, 1, m]))
# test conversion to single-output model
self.assertIs(mesmo.mo_model, model)
self.assertEqual(mesmo.mo_model.num_outputs, m)
self.assertIsInstance(mesmo.model, SingleTaskGP)
self.assertEqual(mesmo.model.num_outputs, 1)
self.assertEqual(
mesmo.model._aug_batch_shape, mesmo.model._input_batch_shape
)
# test ModelListGP
model = ModelListGP(
*[SingleTaskGP(train_X, train_Y[:, i : i + 1]) for i in range(m)]
)
mock_sample_pfs = mock.Mock()
mock_sample_pfs.return_value = dummy_sample_pareto_frontiers(model=model)
mesmo = qMultiObjectiveMaxValueEntropy(model, mock_sample_pfs)
self.assertEqual(mesmo.num_fantasies, 16)
# Initialize the sampler.
dummy_post = model.posterior(train_X[:1])
mesmo.get_posterior_samples(dummy_post)
self.assertIsInstance(mesmo.sampler, SobolQMCNormalSampler)
self.assertEqual(mesmo.sampler.sample_shape, torch.Size([128]))
self.assertIsInstance(mesmo.fantasies_sampler, SobolQMCNormalSampler)
self.assertEqual(mesmo.posterior_max_values.shape, torch.Size([3, 1, m]))
# test conversion to batched MO model
self.assertIsInstance(mesmo.mo_model, SingleTaskGP)
self.assertEqual(mesmo.mo_model.num_outputs, m)
self.assertIs(mesmo.mo_model, mesmo._init_model)
# test conversion to single-output model
self.assertIsInstance(mesmo.model, SingleTaskGP)
self.assertEqual(mesmo.model.num_outputs, 1)
self.assertEqual(
mesmo.model._aug_batch_shape, mesmo.model._input_batch_shape
)
# test that we call sample_pareto_frontiers with the multi-output model
mock_sample_pfs.assert_called_once_with(mesmo.mo_model)
# test basic evaluation
X = torch.rand(1, 2, device=self.device, dtype=dtype)
with torch.no_grad():
vals = mesmo(X)
igs = qMaxValueEntropy.forward(mesmo, X=X.view(1, 1, 1, 2))
self.assertEqual(vals.shape, torch.Size([1]))
self.assertTrue(torch.equal(vals, igs.sum(dim=-1)))
# test batched evaluation
X = torch.rand(4, 1, 2, device=self.device, dtype=dtype)
with torch.no_grad():
vals = mesmo(X)
igs = qMaxValueEntropy.forward(mesmo, X=X.view(4, 1, 1, 2))
self.assertEqual(vals.shape, torch.Size([4]))
self.assertTrue(torch.equal(vals, igs.sum(dim=-1)))
# test set X pending to None
mesmo.set_X_pending(None)
self.assertIs(mesmo.mo_model, mesmo._init_model)
fant_X = torch.cat(
[
train_X.expand(16, 4, 2),
torch.rand(16, 1, 2, device=self.device, dtype=dtype),
],
dim=1,
)
fant_Y = torch.cat(
[
train_Y.expand(16, 4, m),
torch.rand(16, 1, m, device=self.device, dtype=dtype),
],
dim=1,
)
fantasy_model = SingleTaskGP(fant_X, fant_Y)
# test with X_pending is not None
with mock.patch.object(
SingleTaskGP, "fantasize", return_value=fantasy_model
) as mock_fantasize:
qMultiObjectiveMaxValueEntropy(
model,
dummy_sample_pareto_frontiers,
X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
)
mock_fantasize.assert_called_once()
class TestQLowerBoundMultiObjectiveMaxValueEntropySearch(BotorchTestCase):
def _base_test_lb_moo_max_value_entropy_search(self, estimation_type):
torch.manual_seed(1)
tkwargs = {"device": self.device}
for (dtype, num_objectives, use_model_list, standardize_model) in product(
(torch.float, torch.double),
(1, 2, 3),
(False, True),
(False, True),
):
tkwargs["dtype"] = dtype
input_dim = 2
train_X = torch.rand(4, input_dim, **tkwargs)
train_Y = torch.rand(4, num_objectives, **tkwargs)
model = get_model(train_X, train_Y, use_model_list, standardize_model)
pareto_fronts = dummy_sample_pareto_frontiers(model)
hypercell_bounds = compute_sample_box_decomposition(pareto_fronts)
# test acquisition
X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)]
for X_pending in X_pending_list:
acq = qLowerBoundMultiObjectiveMaxValueEntropySearch(
model=model,
hypercell_bounds=hypercell_bounds,
estimation_type=estimation_type,
num_samples=64,
X_pending=X_pending,
)
self.assertIsInstance(acq.sampler, SobolQMCNormalSampler)
test_Xs = [
torch.rand(4, 1, input_dim, **tkwargs),
torch.rand(4, 3, input_dim, **tkwargs),
torch.rand(4, 5, 1, input_dim, **tkwargs),
torch.rand(4, 5, 3, input_dim, **tkwargs),
]
for test_X in test_Xs:
acq_X = acq(test_X)
# assess shape
                    self.assertEqual(acq_X.shape, test_X.shape[:-2])
def test_lb_moo_max_value_entropy_search_0(self):
self._base_test_lb_moo_max_value_entropy_search(estimation_type="0")
def test_lb_moo_max_value_entropy_search_LB(self):
self._base_test_lb_moo_max_value_entropy_search(estimation_type="LB")
def test_lb_moo_max_value_entropy_search_LB2(self):
self._base_test_lb_moo_max_value_entropy_search(estimation_type="LB2")
def test_lb_moo_max_value_entropy_search_MC(self):
self._base_test_lb_moo_max_value_entropy_search(estimation_type="MC")
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Optional
import torch
from botorch import settings
from botorch.acquisition.multi_objective.multi_output_risk_measures import (
IndependentCVaR,
IndependentVaR,
MARS,
MultiOutputExpectation,
MultiOutputRiskMeasureMCObjective,
MultiOutputWorstCase,
MVaR,
)
from botorch.acquisition.multi_objective.objective import (
IdentityMCMultiOutputObjective,
WeightedMCMultiOutputObjective,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.transforms.input import InputPerturbation
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.testing import BotorchTestCase
from torch import Tensor
class NotSoAbstractMORiskMeasure(MultiOutputRiskMeasureMCObjective):
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
prepared_samples = self._prepare_samples(samples)
return prepared_samples.sum(dim=-2)
class TestMultiOutputRiskMeasureMCObjective(BotorchTestCase):
def test_multi_output_risk_measure_mc_objective(self):
# abstract raises
with self.assertRaises(TypeError):
MultiOutputRiskMeasureMCObjective(n_w=3)
for dtype in (torch.float, torch.double):
samples = torch.tensor(
[
[
[1.0, 1.2],
[0.5, 0.7],
[2.0, 2.2],
[3.0, 3.4],
[1.0, 1.2],
[5.0, 5.6],
]
],
device=self.device,
dtype=dtype,
)
obj = NotSoAbstractMORiskMeasure(n_w=3)
# test _prepare_samples
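            # samples enter with shape `... x (q * n_w) x m`; _prepare_samples
            # reshapes them to `... x q x n_w x m` (here q=2, n_w=3, m=2).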
expected_samples = samples.view(1, 2, 3, 2)
prepared_samples = obj._prepare_samples(samples)
self.assertTrue(torch.equal(prepared_samples, expected_samples))
# test batches
samples = torch.rand(5, 3, 6, 3, device=self.device, dtype=dtype)
expected_samples = samples.view(5, 3, 2, 3, 3)
prepared_samples = obj._prepare_samples(samples)
self.assertTrue(torch.equal(prepared_samples, expected_samples))
# negating with preprocessing function
obj = NotSoAbstractMORiskMeasure(
n_w=3,
preprocessing_function=WeightedMCMultiOutputObjective(
weights=torch.tensor(
[-1.0, -1.0, -1.0], device=self.device, dtype=dtype
)
),
)
prepared_samples = obj._prepare_samples(samples)
self.assertTrue(torch.equal(prepared_samples, -expected_samples))
class TestMultiOutputExpectation(BotorchTestCase):
def test_mo_expectation(self):
for dtype in (torch.float, torch.double):
obj = MultiOutputExpectation(n_w=3)
samples = torch.tensor(
[
[
[1.0, 1.2],
[0.5, 0.5],
[1.5, 2.2],
[3.0, 1.2],
[1.0, 7.1],
[5.0, 5.8],
]
],
device=self.device,
dtype=dtype,
)
rm_samples = obj(samples)
self.assertTrue(
torch.allclose(
rm_samples,
torch.tensor(
[[[1.0, 1.3], [3.0, 4.7]]], device=self.device, dtype=dtype
),
)
)
# w/ first output negated
obj.preprocessing_function = WeightedMCMultiOutputObjective(
torch.tensor([-1.0, 1.0], device=self.device, dtype=dtype)
)
rm_samples = obj(samples)
self.assertTrue(
torch.allclose(
rm_samples,
torch.tensor(
[[[-1.0, 1.3], [-3.0, 4.7]]], device=self.device, dtype=dtype
),
)
)
class TestIndependentCVaR(BotorchTestCase):
def test_independent_cvar(self):
obj = IndependentCVaR(alpha=0.5, n_w=3)
self.assertEqual(obj.alpha_idx, 1)
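        # alpha_idx marks the VaR cutoff, presumably computed as
        # ceil(alpha * n_w) - 1 = ceil(1.5) - 1 = 1 here.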
with self.assertRaises(ValueError):
IndependentCVaR(alpha=3, n_w=3)
for dtype in (torch.float, torch.double):
obj = IndependentCVaR(alpha=0.5, n_w=3)
samples = torch.tensor(
[
[
[1.0, 1.2],
[0.5, 0.7],
[2.0, 2.2],
[3.0, 1.2],
[1.0, 7.2],
[5.0, 5.8],
]
],
device=self.device,
dtype=dtype,
)
rm_samples = obj(samples)
self.assertTrue(
torch.allclose(
rm_samples,
torch.tensor(
[[[0.75, 0.95], [2.0, 3.5]]], device=self.device, dtype=dtype
),
)
)
# w/ first output negated
obj.preprocessing_function = WeightedMCMultiOutputObjective(
torch.tensor([-1.0, 1.0], device=self.device, dtype=dtype)
)
rm_samples = obj(samples)
self.assertTrue(
torch.allclose(
rm_samples,
torch.tensor(
[[[-1.5, 0.95], [-4.0, 3.5]]], device=self.device, dtype=dtype
),
)
)
class TestIndependentVaR(BotorchTestCase):
def test_independent_var(self):
for dtype in (torch.float, torch.double):
obj = IndependentVaR(alpha=0.5, n_w=3)
samples = torch.tensor(
[
[
[1.0, 3.2],
[0.5, 0.7],
[2.0, 2.2],
[3.0, 1.2],
[1.0, 7.2],
[5.0, 5.8],
]
],
device=self.device,
dtype=dtype,
)
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor(
[[[1.0, 2.2], [3.0, 5.8]]], device=self.device, dtype=dtype
),
)
)
# w/ weights
obj.preprocessing_function = WeightedMCMultiOutputObjective(
torch.tensor([0.5, -1.0], device=self.device, dtype=dtype)
)
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor(
[[[0.5, -2.2], [1.5, -5.8]]], device=self.device, dtype=dtype
),
)
)
class TestMultiOutputWorstCase(BotorchTestCase):
def test_multi_output_worst_case(self):
for dtype in (torch.float, torch.double):
obj = MultiOutputWorstCase(n_w=3)
samples = torch.tensor(
[
[
[1.0, 3.2],
[5.5, 0.7],
[2.0, 2.2],
[3.0, 1.2],
[5.0, 7.2],
[5.0, 5.8],
]
],
device=self.device,
dtype=dtype,
)
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor(
[[[1.0, 0.7], [3.0, 1.2]]], device=self.device, dtype=dtype
),
)
)
# w/ weights
obj.preprocessing_function = WeightedMCMultiOutputObjective(
torch.tensor([-1.0, 2.0], device=self.device, dtype=dtype)
)
rm_samples = obj(samples)
self.assertTrue(
torch.equal(
rm_samples,
torch.tensor(
[[[-5.5, 1.4], [-5.0, 2.4]]], device=self.device, dtype=dtype
),
)
)
class TestMVaR(BotorchTestCase):
def test_mvar(self):
with self.assertRaises(ValueError):
MVaR(n_w=5, alpha=3.0)
def set_equals(t1: Tensor, t2: Tensor) -> bool:
r"""Check if two `k x m`-dim tensors are equivalent after possibly
reordering the `k` dimension. Ignores duplicate entries.
"""
t1 = t1.unique(dim=0)
t2 = t2.unique(dim=0)
if t1.shape != t2.shape:
return False
equals_sum = (t1.unsqueeze(-2) == t2).all(dim=-1).sum(dim=-1)
return torch.equal(equals_sum, torch.ones_like(equals_sum))
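        # e.g. set_equals succeeds for [[1., 2.], [3., 4.]] vs
        # [[3., 4.], [1., 2.]], since the rows match after reordering.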
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
mvar = MVaR(n_w=5, alpha=0.6)
# a simple negatively correlated example
Y = torch.stack(
[torch.linspace(1, 5, 5), torch.linspace(5, 1, 5)],
dim=-1,
).to(**tkwargs)
expected_set = torch.stack(
[torch.linspace(1, 3, 3), torch.linspace(3, 1, 3)],
dim=-1,
).to(Y)
# check that both versions produce the correct set
cpu_mvar = mvar.get_mvar_set_cpu(Y) # For 2d input, returns k x m
gpu_mvar = mvar.get_mvar_set_gpu(Y)[0] # returns a batch list of k x m
self.assertTrue(set_equals(cpu_mvar, gpu_mvar))
self.assertTrue(set_equals(cpu_mvar, expected_set))
# check that the `filter_dominated` works correctly
mvar = MVaR(
n_w=5,
alpha=0.4,
filter_dominated=False,
)
# negating the input to treat large values as undesirable
Y = -torch.tensor(
[
[1, 4],
[2, 3],
[3, 2],
[4, 1],
[3.5, 3.5],
],
**tkwargs,
)
cpu_mvar = mvar.get_mvar_set_cpu(Y)
gpu_mvar = mvar.get_mvar_set_gpu(Y)[0]
self.assertTrue(set_equals(cpu_mvar, gpu_mvar))
# negating here as well
expected_w_dominated = -torch.tensor(
[
[2, 4],
[3, 3],
[3.5, 3],
[3, 3.5],
[4, 2],
],
**tkwargs,
)
self.assertTrue(set_equals(cpu_mvar, expected_w_dominated))
expected_non_dominated = expected_w_dominated[
is_non_dominated(expected_w_dominated)
]
mvar.filter_dominated = True
cpu_mvar = mvar.get_mvar_set_cpu(Y)
gpu_mvar = mvar.get_mvar_set_gpu(Y)[0]
self.assertTrue(set_equals(cpu_mvar, gpu_mvar))
self.assertTrue(set_equals(cpu_mvar, expected_non_dominated))
# test batched w/ random input
mvar = MVaR(
n_w=10,
alpha=0.5,
filter_dominated=False,
)
Y = torch.rand(4, 10, 2, **tkwargs)
cpu_mvar = mvar.get_mvar_set_cpu(Y)
gpu_mvar = mvar.get_mvar_set_gpu(Y)
# check that the two agree
self.assertTrue(
all([set_equals(cpu_mvar[i], gpu_mvar[i]) for i in range(4)])
)
# check that the MVaR is dominated by `alpha` fraction (maximization).
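            # With n_w=10 and alpha=0.5, each MVaR point should be weakly
            # dominated by exactly 5 of the 10 samples.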
dominated_count = (Y[0].unsqueeze(-2) >= cpu_mvar[0]).all(dim=-1).sum(dim=0)
expected_count = (
torch.ones(cpu_mvar[0].shape[0], device=self.device, dtype=torch.long)
* 5
)
self.assertTrue(torch.equal(dominated_count, expected_count))
# test forward pass
# with `expectation=True`
mvar = MVaR(
n_w=10,
alpha=0.5,
expectation=True,
)
samples = torch.rand(2, 20, 2, **tkwargs)
mvar_exp = mvar(samples)
expected = [
mvar.get_mvar_set_cpu(Y).mean(dim=0) for Y in samples.view(4, 10, 2)
]
self.assertTrue(
torch.allclose(mvar_exp, torch.stack(expected).view(2, 2, 2))
)
# m > 2
samples = torch.rand(2, 20, 3, **tkwargs)
mvar_exp = mvar(samples)
expected = [
mvar.get_mvar_set_gpu(Y)[0].mean(dim=0) for Y in samples.view(4, 10, 3)
]
self.assertTrue(torch.equal(mvar_exp, torch.stack(expected).view(2, 2, 3)))
# with `expectation=False`
mvar = MVaR(
n_w=10,
alpha=0.5,
expectation=False,
pad_to_n_w=True,
)
samples = torch.rand(2, 20, 2, **tkwargs)
mvar_vals = mvar(samples)
            self.assertEqual(mvar_vals.shape, samples.shape)
expected = [mvar.get_mvar_set_cpu(Y) for Y in samples.view(4, 10, 2)]
for i in range(4):
batch_idx = i // 2
q_idx_start = 10 * (i % 2)
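                # samples.view(4, 10, 2) flattens the 2 x 20 batch into four
                # blocks of 10; batch_idx / q_idx_start map the flat index i
                # back to the original batch and q-offset.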
expected_ = expected[i]
# check that the actual values are there
self.assertTrue(
set_equals(
mvar_vals[
batch_idx, q_idx_start : q_idx_start + expected_.shape[0]
],
expected_,
)
)
# check for correct padding
self.assertTrue(
torch.equal(
mvar_vals[
batch_idx,
q_idx_start + expected_.shape[0] : q_idx_start + 10,
],
mvar_vals[
batch_idx, q_idx_start + expected_.shape[0] - 1
].expand(10 - expected_.shape[0], -1),
)
)
            # Test the case where there are no exact alpha-level points.
            # This happens when there are duplicates in the input.
Y = torch.ones(10, 2, **tkwargs)
cpu_mvar = mvar.get_mvar_set_cpu(Y)
gpu_mvar = mvar.get_mvar_set_gpu(Y)[0]
self.assertTrue(torch.equal(cpu_mvar, Y[:1]))
self.assertTrue(torch.equal(gpu_mvar, Y[:1]))
# Test grad warning
with self.assertWarnsRegex(RuntimeWarning, "requires grad"):
mvar(Y.requires_grad_())
# TODO: Test grad support once properly implemented.
class TestMARS(BotorchTestCase):
def test_init(self):
# Init w/ defaults.
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
)
self.assertEqual(mars.alpha, 0.5)
self.assertEqual(mars.n_w, 3)
self.assertTrue(torch.equal(mars.chebyshev_weights, torch.tensor([0.5, 0.5])))
self.assertIsNone(mars.baseline_Y)
self.assertIsNone(mars.ref_point)
self.assertIsInstance(
mars.preprocessing_function, IdentityMCMultiOutputObjective
)
self.assertIsInstance(mars.mvar, MVaR)
self.assertEqual(mars.mvar.alpha, 0.5)
self.assertEqual(mars.mvar.n_w, 3)
# Errors with Chebyshev weights.
with self.assertRaisesRegex(UnsupportedError, "Negative"):
MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[-0.5, 0.5],
)
with self.assertRaisesRegex(UnsupportedError, "Batched"):
MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[[0.5], [0.5]],
)
# With optional arguments.
baseline_Y = torch.rand(3, 2)
ref_point = [3.0, 5.0]
def dummy_func(Y):
return Y
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
baseline_Y=baseline_Y,
ref_point=ref_point,
preprocessing_function=dummy_func,
)
self.assertTrue(torch.equal(mars.baseline_Y, baseline_Y))
self.assertTrue(torch.equal(mars.ref_point, torch.tensor(ref_point)))
self.assertIs(mars.preprocessing_function, dummy_func)
def test_set_baseline_Y(self):
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
)
perturbation = InputPerturbation(
perturbation_set=torch.tensor([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]])
)
model = GenericDeterministicModel(f=lambda X: X, num_outputs=2)
model.input_transform = perturbation
X_baseline = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
mars.set_baseline_Y(model=model, X_baseline=X_baseline)
self.assertTrue(torch.equal(mars.baseline_Y, torch.tensor([[1.5, 1.5]])))
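        # Sanity check on the expected value: the identity model maps the
        # baseline (1, 1) through the perturbations to (1, 1), (1.5, 1.5),
        # (2, 2); the alpha=0.5 MVaR of these is (presumably) (1.5, 1.5),
        # which dominates the MVaR of the (0, 0) baseline.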
# With Y_samples.
mars._baseline_Y = None
Y_samples = model.posterior(X_baseline).mean
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
mars.set_baseline_Y(model=model, X_baseline=X_baseline, Y_samples=Y_samples)
self.assertTrue(torch.equal(mars.baseline_Y, torch.tensor([[1.5, 1.5]])))
self.assertTrue(any(w.category == BotorchWarning for w in ws))
# With pre-processing function.
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
preprocessing_function=lambda Y: -Y,
)
mars.set_baseline_Y(model=model, X_baseline=X_baseline)
self.assertTrue(torch.equal(mars.baseline_Y, torch.tensor([[-0.5, -0.5]])))
def test_get_Y_normalization_bounds(self):
# Error if batched.
with self.assertRaisesRegex(UnsupportedError, "Batched"):
MARS._get_Y_normalization_bounds(Y=torch.rand(3, 5, 2))
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
# Empty Y.
bounds = MARS._get_Y_normalization_bounds(Y=torch.empty(0, 3, **tkwargs))
expected = torch.zeros(2, 3, **tkwargs)
expected[1] = 1.0
self.assertAllClose(bounds, expected)
# Single point in pareto_Y.
bounds = MARS._get_Y_normalization_bounds(Y=torch.zeros(1, 3, **tkwargs))
self.assertAllClose(bounds, expected)
# With reference point.
bounds = MARS._get_Y_normalization_bounds(
Y=torch.zeros(1, 3, **tkwargs), ref_point=-torch.ones(3)
)
self.assertAllClose(bounds, expected - 1)
# Check that dominated points are ignored.
Y = torch.tensor([[0.0, 0.0], [0.5, 1.0], [1.0, 0.5]], **tkwargs)
expected = expected[:, :2]
expected[0] = 0.5
bounds = MARS._get_Y_normalization_bounds(Y=Y)
self.assertAllClose(bounds, expected)
# Multiple pareto with ref point.
# Nothing better than ref.
bounds = MARS._get_Y_normalization_bounds(
Y=Y, ref_point=torch.ones(2) * 0.75
)
self.assertAllClose(bounds, expected)
# W/ points better than ref.
Y = torch.tensor(
[[0.5, 1.0], [1.0, 0.5], [0.8, 0.8], [0.9, 0.7]], **tkwargs
)
bounds = MARS._get_Y_normalization_bounds(
Y=Y, ref_point=torch.ones(2) * 0.6
)
expected = torch.tensor([[0.6, 0.6], [0.9, 0.8]], **tkwargs)
self.assertAllClose(bounds, expected)
def test_chebyshev_objective(self):
# Check that the objective is destroyed on setters.
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
baseline_Y=torch.empty(0, 2),
)
self.assertIsNone(mars._chebyshev_objective)
# Gets constructed on property access.
self.assertIsNotNone(mars.chebyshev_objective)
self.assertIsNotNone(mars._chebyshev_objective)
        # Destroyed on updating the weights.
mars.chebyshev_weights = [0.5, 0.3]
self.assertIsNone(mars._chebyshev_objective)
        # Destroyed on setting baseline_Y (access the property first so that
        # it gets rebuilt).
        mars.chebyshev_objective
mars.baseline_Y = None
self.assertIsNone(mars._chebyshev_objective)
# Error if baseline_Y is not set.
with self.assertRaisesRegex(RuntimeError, "baseline_Y"):
MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
).chebyshev_objective
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
# Without ref point or pre-processing.
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
baseline_Y=torch.tensor([[0.0, 0.5], [0.5, 0.0]], **tkwargs),
)
obj = mars.chebyshev_objective
Y = torch.ones(2, 2, **tkwargs)
self.assertAllClose(obj(Y), torch.ones(2, **tkwargs))
# With pre-processing.
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
baseline_Y=torch.tensor([[0.0, 0.5], [0.5, 0.0]], **tkwargs),
preprocessing_function=lambda Y: -Y,
)
obj = mars.chebyshev_objective
Y = -torch.ones(2, 2, **tkwargs)
self.assertAllClose(obj(Y), torch.ones(2, **tkwargs))
# With ref point.
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
baseline_Y=torch.tensor([[0.0, 0.5], [0.5, 0.0]], **tkwargs),
ref_point=[1.0, 1.0],
)
obj = mars.chebyshev_objective
Y = torch.ones(2, 2, **tkwargs)
self.assertAllClose(obj(Y), torch.zeros(2, **tkwargs))
def test_end_to_end(self):
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
mars = MARS(
alpha=0.5,
n_w=3,
chebyshev_weights=[0.5, 0.5],
ref_point=[1.0, 1.0],
baseline_Y=torch.randn(5, 2, **tkwargs),
)
samples = torch.randn(5, 9, 2, **tkwargs)
mars_vals = mars(samples)
self.assertEqual(mars_vals.shape, torch.Size([5, 3]))
self.assertEqual(mars_vals.dtype, dtype)
self.assertEqual(mars_vals.device.type, self.device.type)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.acquisition.multi_objective.analytic import (
ExpectedHypervolumeImprovement,
MultiObjectiveAnalyticAcquisitionFunction,
)
from botorch.acquisition.multi_objective.objective import IdentityMCMultiOutputObjective
from botorch.acquisition.objective import PosteriorTransform
from botorch.exceptions.errors import BotorchError, UnsupportedError
from botorch.posteriors import GPyTorchPosterior
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from torch import Tensor
class DummyMultiObjectiveAnalyticAcquisitionFunction(
MultiObjectiveAnalyticAcquisitionFunction
):
def forward(self, X):
pass
class DummyPosteriorTransform(PosteriorTransform):
def evaluate(self, Y: Tensor) -> Tensor:
pass
def forward(self, posterior: GPyTorchPosterior) -> GPyTorchPosterior:
pass
class TestMultiObjectiveAnalyticAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
MultiObjectiveAnalyticAcquisitionFunction()
def test_init(self):
mm = MockModel(MockPosterior(mean=torch.rand(2, 1)))
# test default init
acqf = DummyMultiObjectiveAnalyticAcquisitionFunction(model=mm)
        self.assertIsNone(acqf.posterior_transform)  # None by default
# test custom init
posterior_transform = DummyPosteriorTransform()
acqf = DummyMultiObjectiveAnalyticAcquisitionFunction(
model=mm, posterior_transform=posterior_transform
)
self.assertEqual(acqf.posterior_transform, posterior_transform)
# test unsupported objective
with self.assertRaises(UnsupportedError):
DummyMultiObjectiveAnalyticAcquisitionFunction(
model=mm, posterior_transform=IdentityMCMultiOutputObjective()
)
acqf = DummyMultiObjectiveAnalyticAcquisitionFunction(model=mm)
# test set_X_pending
with self.assertRaises(UnsupportedError):
acqf.set_X_pending()
class TestExpectedHypervolumeImprovement(BotorchTestCase):
def test_expected_hypervolume_improvement(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
ref_point = [0.0, 0.0]
tkwargs["dtype"] = dtype
pareto_Y = torch.tensor(
[[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
)
partitioning = NondominatedPartitioning(
ref_point=torch.tensor(ref_point, **tkwargs)
)
# the event shape is `b x q x m` = 1 x 1 x 1
mean = torch.zeros(1, 1, 2, **tkwargs)
variance = torch.zeros(1, 1, 2, **tkwargs)
mm = MockModel(MockPosterior(mean=mean, variance=variance))
# test error if there is not pareto_Y initialized in partitioning
with self.assertRaises(BotorchError):
ExpectedHypervolumeImprovement(
model=mm, ref_point=ref_point, partitioning=partitioning
)
partitioning.update(Y=pareto_Y)
# test error if ref point has wrong shape
with self.assertRaises(ValueError):
ExpectedHypervolumeImprovement(
model=mm, ref_point=ref_point[:1], partitioning=partitioning
)
with self.assertRaises(ValueError):
# test error if no pareto_Y point is better than ref_point
ExpectedHypervolumeImprovement(
model=mm, ref_point=[10.0, 10.0], partitioning=partitioning
)
X = torch.zeros(1, 1, **tkwargs)
# basic test
acqf = ExpectedHypervolumeImprovement(
model=mm, ref_point=ref_point, partitioning=partitioning
)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# check ref point
self.assertTrue(
torch.equal(acqf.ref_point, torch.tensor(ref_point, **tkwargs))
)
# check bounds
self.assertTrue(hasattr(acqf, "cell_lower_bounds"))
self.assertTrue(hasattr(acqf, "cell_upper_bounds"))
# check cached indices
expected_indices = torch.tensor(
[[0, 0], [0, 1], [1, 0], [1, 1]], dtype=torch.long, device=self.device
)
self.assertTrue(torch.equal(acqf._cross_product_indices, expected_indices))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import torch
from botorch.acquisition.multi_objective.multi_output_risk_measures import (
MultiOutputExpectation,
)
from botorch.acquisition.multi_objective.objective import (
FeasibilityWeightedMCMultiOutputObjective,
IdentityMCMultiOutputObjective,
MCMultiOutputObjective,
UnstandardizeMCMultiOutputObjective,
WeightedMCMultiOutputObjective,
)
from botorch.acquisition.objective import (
IdentityMCObjective,
UnstandardizePosteriorTransform,
)
from botorch.exceptions.errors import BotorchError, BotorchTensorDimensionError
from botorch.models.transforms.outcome import Standardize
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestMCMultiOutputObjective(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
MCMultiOutputObjective()
class TestIdentityMCMultiOutputObjective(BotorchTestCase):
def test_identity_mc_multi_output_objective(self):
objective = IdentityMCMultiOutputObjective()
with self.assertRaises(BotorchTensorDimensionError):
IdentityMCMultiOutputObjective(outcomes=[0])
# test negative outcome without specifying num_outcomes
with self.assertRaises(BotorchError):
IdentityMCMultiOutputObjective(outcomes=[0, -1])
for batch_shape, m, dtype in itertools.product(
([], [3]), (2, 3), (torch.float, torch.double)
):
samples = torch.rand(*batch_shape, 2, m, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(objective(samples), samples))
class TestWeightedMCMultiOutputObjective(BotorchTestCase):
def test_weighted_mc_multi_output_objective(self):
with self.assertRaises(BotorchTensorDimensionError):
WeightedMCMultiOutputObjective(weights=torch.rand(3, 1))
with self.assertRaises(BotorchTensorDimensionError):
WeightedMCMultiOutputObjective(
weights=torch.rand(3), outcomes=[0, 1], num_outcomes=3
)
for batch_shape, m, dtype in itertools.product(
([], [3]), (2, 3), (torch.float, torch.double)
):
weights = torch.rand(m, device=self.device, dtype=dtype)
objective = WeightedMCMultiOutputObjective(weights=weights)
samples = torch.rand(*batch_shape, 2, m, device=self.device, dtype=dtype)
self.assertTrue(torch.equal(objective(samples), samples * weights))
class TestFeasibilityWeightedMCMultiOutputObjective(BotorchTestCase):
def test_feasibility_weighted_mc_multi_output_objective(self):
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
X = torch.zeros(5, 1, **tkwargs)
# The infeasible cost will be 0.0.
means = torch.tensor(
[
[1.0, 0.5],
[2.0, -1.0],
[3.0, -0.5],
[4.0, 1.0],
[5.0, 1.0],
],
**tkwargs,
)
variances = torch.zeros(5, 2, **tkwargs)
mm = MockModel(MockPosterior(mean=means, variance=variances))
feas_obj = FeasibilityWeightedMCMultiOutputObjective(
model=mm,
X_baseline=X,
constraint_idcs=[-1],
objective=None,
)
feas_samples = feas_obj(means)
expected = torch.tensor([[1.0], [0.0], [0.0], [4.0], [5.0]], **tkwargs)
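            # Rows whose constraint value (the second output) is negative are
            # mapped to the infeasible cost of 0.0.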
self.assertTrue(torch.allclose(feas_samples, expected))
self.assertTrue(feas_obj._verify_output_shape)
# With an objective.
preprocessing_function = WeightedMCMultiOutputObjective(
weights=torch.tensor([2.0])
)
dummy_obj = MultiOutputExpectation(
n_w=1, preprocessing_function=preprocessing_function
)
dummy_obj._verify_output_shape = False # for testing
feas_obj = FeasibilityWeightedMCMultiOutputObjective(
model=mm,
X_baseline=X,
constraint_idcs=[1],
objective=dummy_obj,
)
feas_samples = feas_obj(means)
self.assertTrue(torch.allclose(feas_samples, expected * 2.0))
self.assertFalse(feas_obj._verify_output_shape)
# No constraints.
feas_obj = FeasibilityWeightedMCMultiOutputObjective(
model=mm,
X_baseline=X,
constraint_idcs=[],
objective=None,
)
feas_samples = feas_obj(means)
self.assertIs(feas_samples, means)
# With a single-output objective.
feas_obj = FeasibilityWeightedMCMultiOutputObjective(
model=mm,
X_baseline=X,
constraint_idcs=[1],
objective=IdentityMCObjective(),
)
feas_samples = feas_obj(means)
self.assertTrue(torch.allclose(feas_samples, expected.squeeze(-1)))
# Error with duplicate idcs.
with self.assertRaisesRegex(ValueError, "duplicate"):
FeasibilityWeightedMCMultiOutputObjective(
model=mm,
X_baseline=X,
constraint_idcs=[1, -1],
)
class TestUnstandardizeMultiOutputObjective(BotorchTestCase):
def test_unstandardize_mo_objective(self):
warnings.filterwarnings(
"ignore",
message=(
"UnstandardizeAnalyticMultiOutputObjective is deprecated. "
"Use UnstandardizePosteriorTransform instead."
),
)
Y_mean = torch.ones(2)
Y_std = torch.ones(2)
with self.assertRaises(BotorchTensorDimensionError):
UnstandardizeMCMultiOutputObjective(
Y_mean=Y_mean, Y_std=Y_std, outcomes=[0, 1, 2]
)
for objective_class in (
UnstandardizeMCMultiOutputObjective,
UnstandardizePosteriorTransform,
):
with self.assertRaises(BotorchTensorDimensionError):
objective_class(Y_mean=Y_mean.unsqueeze(0), Y_std=Y_std)
with self.assertRaises(BotorchTensorDimensionError):
objective_class(Y_mean=Y_mean, Y_std=Y_std.unsqueeze(0))
objective = objective_class(Y_mean=Y_mean, Y_std=Y_std)
for batch_shape, m, outcomes, dtype in itertools.product(
([], [3]), (2, 3), (None, [-2, -1]), (torch.float, torch.double)
):
Y_mean = torch.rand(m, dtype=dtype, device=self.device)
Y_std = torch.rand(m, dtype=dtype, device=self.device).clamp_min(1e-3)
                kwargs = {}
                if objective_class == UnstandardizeMCMultiOutputObjective:
                    kwargs["outcomes"] = outcomes
                objective = objective_class(Y_mean=Y_mean, Y_std=Y_std, **kwargs)
if outcomes is None:
# passing outcomes is not currently supported
mean = torch.rand(2, m, dtype=dtype, device=self.device)
                    variance = torch.rand(2, m, dtype=dtype, device=self.device)
mock_posterior = MockPosterior(mean=mean, variance=variance)
tf_posterior = objective(mock_posterior)
tf = Standardize(m=m)
tf.means = Y_mean
tf.stdvs = Y_std
tf._stdvs_sq = Y_std.pow(2)
tf._is_trained = torch.tensor(True)
tf.eval()
expected_posterior = tf.untransform_posterior(mock_posterior)
self.assertTrue(
torch.equal(tf_posterior.mean, expected_posterior.mean)
)
self.assertTrue(
torch.equal(
tf_posterior.variance, expected_posterior.variance
)
)
# testing evaluate specifically
if objective_class == UnstandardizePosteriorTransform:
Y = torch.randn_like(Y_mean) + Y_mean
val = objective.evaluate(Y)
val_expected = Y_mean + Y * Y_std
self.assertTrue(torch.allclose(val, val_expected))
else:
samples = torch.rand(
*batch_shape, 2, m, dtype=dtype, device=self.device
)
obj_expected = samples * Y_std.to(dtype=dtype) + Y_mean.to(
dtype=dtype
)
if outcomes is not None:
obj_expected = obj_expected[..., outcomes]
self.assertTrue(torch.equal(objective(samples), obj_expected))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import torch
from botorch.acquisition.multi_objective.joint_entropy_search import (
LowerBoundMultiObjectiveEntropySearch,
qLowerBoundMultiObjectiveJointEntropySearch,
)
from botorch.acquisition.multi_objective.utils import compute_sample_box_decomposition
from botorch.exceptions import UnsupportedError
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase
def get_model(train_X, train_Y, use_model_list, standardize_model):
num_objectives = train_Y.shape[-1]
if standardize_model:
if use_model_list:
outcome_transform = Standardize(m=1)
else:
outcome_transform = Standardize(m=num_objectives)
else:
outcome_transform = None
if use_model_list:
model = ModelListGP(
*[
SingleTaskGP(
train_X=train_X,
train_Y=train_Y[:, i : i + 1],
outcome_transform=outcome_transform,
)
for i in range(num_objectives)
]
)
else:
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
outcome_transform=outcome_transform,
)
return model
def dummy_sample_pareto_sets(model, num_pareto_samples, num_pareto_points):
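    # Returns random dummy Pareto sets of shape
    # `num_pareto_samples x num_pareto_points x input_dim` on the model's
    # dtype and device.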
m = model.models[0] if isinstance(model, ModelListGP) else model
input_dim = m.train_inputs[0].shape[-1]
tkwargs = {"dtype": m.train_inputs[0].dtype, "device": m.train_inputs[0].device}
return torch.rand(
num_pareto_samples,
num_pareto_points,
input_dim,
**tkwargs,
)
def dummy_sample_pareto_fronts(model, num_pareto_samples, num_pareto_points):
m = model.models[0] if isinstance(model, ModelListGP) else model
num_objectives = model.num_outputs
tkwargs = {"dtype": m.train_inputs[0].dtype, "device": m.train_inputs[0].device}
return torch.rand(
num_pareto_samples,
num_pareto_points,
num_objectives,
**tkwargs,
)
class DummyLowerBoundMultiObjectiveEntropySearch(LowerBoundMultiObjectiveEntropySearch):
def _compute_posterior_statistics(self, X):
pass
def _compute_monte_carlo_variables(self, posterior):
pass
def forward(self, X):
pass
class TestLowerBoundMultiObjectiveEntropySearch(BotorchTestCase):
def test_abstract_raises(self):
torch.manual_seed(1)
tkwargs = {"device": self.device}
estimation_types = ("0", "LB", "LB2", "MC", "Dummy")
for (
dtype,
num_objectives,
estimation_type,
use_model_list,
standardize_model,
) in product(
(torch.float, torch.double),
(1, 2, 3),
estimation_types,
(False, True),
(False, True),
):
tkwargs["dtype"] = dtype
# test batched model
train_X = torch.rand(4, 3, 2, **tkwargs)
train_Y = torch.rand(4, 3, num_objectives, **tkwargs)
model = SingleTaskGP(train_X, train_Y)
num_pareto_samples = 3
num_pareto_points = 1 if num_objectives == 1 else 4
pareto_sets = dummy_sample_pareto_sets(
model, num_pareto_samples, num_pareto_points
)
pareto_fronts = dummy_sample_pareto_fronts(
model, num_pareto_samples, num_pareto_points
)
hypercell_bounds = torch.rand(
num_pareto_samples, 2, 4, num_objectives, **tkwargs
)
with self.assertRaises(NotImplementedError):
DummyLowerBoundMultiObjectiveEntropySearch(
model=model,
pareto_sets=pareto_sets,
pareto_fronts=pareto_fronts,
hypercell_bounds=hypercell_bounds,
estimation_type=estimation_type,
num_samples=64,
)
# test wrong Pareto shape and hypercell bounds
train_X = torch.rand(1, 2, **tkwargs)
train_Y = torch.rand(1, num_objectives, **tkwargs)
model = get_model(train_X, train_Y, use_model_list, standardize_model)
num_pareto_samples = 3
num_pareto_points = 4
pareto_sets = dummy_sample_pareto_sets(
model, num_pareto_samples, num_pareto_points
)
pareto_fronts = dummy_sample_pareto_fronts(
model, num_pareto_samples, num_pareto_points
)
hypercell_bounds = torch.rand(
num_pareto_samples, 2, 4, num_objectives, **tkwargs
)
with self.assertRaises(UnsupportedError):
DummyLowerBoundMultiObjectiveEntropySearch(
model=model,
pareto_sets=pareto_sets.unsqueeze(0),
pareto_fronts=pareto_fronts,
hypercell_bounds=hypercell_bounds,
estimation_type=estimation_type,
num_samples=64,
)
with self.assertRaises(UnsupportedError):
DummyLowerBoundMultiObjectiveEntropySearch(
model=model,
pareto_sets=pareto_sets,
pareto_fronts=pareto_fronts.unsqueeze(0),
hypercell_bounds=hypercell_bounds,
estimation_type=estimation_type,
num_samples=64,
)
with self.assertRaises(UnsupportedError):
DummyLowerBoundMultiObjectiveEntropySearch(
model=model,
pareto_sets=pareto_sets,
pareto_fronts=pareto_fronts,
hypercell_bounds=hypercell_bounds.unsqueeze(0),
estimation_type=estimation_type,
num_samples=64,
)
if estimation_type == "Dummy":
with self.assertRaises(NotImplementedError):
DummyLowerBoundMultiObjectiveEntropySearch(
model=model,
pareto_sets=pareto_sets,
pareto_fronts=pareto_fronts,
hypercell_bounds=hypercell_bounds,
estimation_type=estimation_type,
num_samples=64,
)
else:
DummyLowerBoundMultiObjectiveEntropySearch(
model=model,
pareto_sets=pareto_sets,
pareto_fronts=pareto_fronts,
hypercell_bounds=hypercell_bounds,
estimation_type=estimation_type,
num_samples=64,
)
class TestQLowerBoundMultiObjectiveJointEntropySearch(BotorchTestCase):
def _base_test_lb_moo_joint_entropy_search(self, estimation_type):
torch.manual_seed(1)
tkwargs = {"device": self.device}
        for dtype, num_objectives, use_model_list, standardize_model in product(
(torch.float, torch.double),
(1, 2, 3),
(False, True),
(False, True),
):
tkwargs["dtype"] = dtype
input_dim = 2
train_X = torch.rand(4, input_dim, **tkwargs)
train_Y = torch.rand(4, num_objectives, **tkwargs)
model = get_model(train_X, train_Y, use_model_list, standardize_model)
num_pareto_samples = 3
num_pareto_points = 4
pareto_sets = dummy_sample_pareto_sets(
model, num_pareto_samples, num_pareto_points
)
pareto_fronts = dummy_sample_pareto_fronts(
model, num_pareto_samples, num_pareto_points
)
hypercell_bounds = compute_sample_box_decomposition(pareto_fronts)
# test acquisition
X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)]
for X_pending in X_pending_list:
acq = qLowerBoundMultiObjectiveJointEntropySearch(
model=model,
pareto_sets=pareto_sets,
pareto_fronts=pareto_fronts,
hypercell_bounds=hypercell_bounds,
estimation_type=estimation_type,
num_samples=64,
X_pending=X_pending,
)
self.assertIsInstance(acq.sampler, SobolQMCNormalSampler)
test_Xs = [
torch.rand(4, 1, input_dim, **tkwargs),
torch.rand(4, 3, input_dim, **tkwargs),
torch.rand(4, 5, 1, input_dim, **tkwargs),
torch.rand(4, 5, 3, input_dim, **tkwargs),
]
for test_X in test_Xs:
acq_X = acq(test_X)
# assess shape
                    self.assertEqual(acq_X.shape, test_X.shape[:-2])
def test_lb_moo_joint_entropy_search_0(self):
self._base_test_lb_moo_joint_entropy_search(estimation_type="0")
def test_lb_moo_joint_entropy_search_LB(self):
self._base_test_lb_moo_joint_entropy_search(estimation_type="LB")
def test_lb_moo_joint_entropy_search_LB2(self):
self._base_test_lb_moo_joint_entropy_search(estimation_type="LB2")
def test_lb_moo_joint_entropy_search_MC(self):
self._base_test_lb_moo_joint_entropy_search(estimation_type="MC")
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.sampling.deterministic import DeterministicSampler
from botorch.utils.testing import BotorchTestCase, MockPosterior
class TestDeterministicSampler(BotorchTestCase):
def test_deterministic_sampler(self):
# Basic usage.
samples = torch.rand(1, 2)
posterior = MockPosterior(samples=samples)
sampler = DeterministicSampler(sample_shape=torch.Size([2]))
self.assertTrue(torch.equal(samples.repeat(2, 1, 1), sampler(posterior)))
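        # The deterministic sampler just tiles the posterior's (mock) samples
        # across the sample dimension; no base-sample randomness is involved.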
# Test _update_base_samples.
sampler._update_base_samples(
posterior=posterior,
base_sampler=sampler,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest import mock
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.posteriors.posterior_list import PosteriorList
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.sampling.stochastic_samplers import StochasticSampler
from botorch.utils.testing import BotorchTestCase, MockPosterior
class TestListSampler(BotorchTestCase):
def test_list_sampler(self):
# Test initialization.
sampler = ListSampler(
IIDNormalSampler(sample_shape=torch.Size([2])),
StochasticSampler(sample_shape=torch.Size([2])),
)
self.assertIsInstance(sampler.samplers[0], IIDNormalSampler)
self.assertIsInstance(sampler.samplers[1], StochasticSampler)
self.assertEqual(sampler.sample_shape, torch.Size([2]))
# Test validation.
with self.assertRaisesRegex(UnsupportedError, "all samplers to have the "):
ListSampler(
StochasticSampler(sample_shape=torch.Size([2])),
StochasticSampler(sample_shape=torch.Size([3])),
)
# Test basic usage.
org_samples = torch.rand(1, 5)
p1 = MockPosterior(samples=org_samples[:, :2])
p2 = MockPosterior(samples=org_samples[:, 2:])
p_list = PosteriorList(p1, p2)
samples = sampler(p_list)
self.assertAllClose(samples, org_samples.repeat(2, 1, 1))
# Test _update_base_samples.
sampler = ListSampler(
IIDNormalSampler(sample_shape=torch.Size([2])),
SobolQMCNormalSampler(sample_shape=torch.Size([2])),
)
sampler2 = ListSampler(
IIDNormalSampler(sample_shape=torch.Size([2])),
SobolQMCNormalSampler(sample_shape=torch.Size([2])),
)
with mock.patch.object(
sampler.samplers[0], "_update_base_samples"
) as update_0, mock.patch.object(
sampler.samplers[1], "_update_base_samples"
) as update_1:
sampler._update_base_samples(posterior=p_list, base_sampler=sampler2)
update_0.assert_called_once_with(
posterior=p1, base_sampler=sampler2.samplers[0]
)
update_1.assert_called_once_with(
posterior=p2, base_sampler=sampler2.samplers[1]
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.posteriors.deterministic import DeterministicPosterior
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.posteriors.posterior_list import PosteriorList
from botorch.posteriors.torch import TorchPosterior
from botorch.posteriors.transformed import TransformedPosterior
from botorch.sampling.get_sampler import get_sampler
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.sampling.stochastic_samplers import StochasticSampler
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultivariateNormal
from torch.distributions.gamma import Gamma
class TestGetSampler(BotorchTestCase):
def test_get_sampler(self):
# Basic usage w/ gpytorch posterior.
posterior = GPyTorchPosterior(
distribution=MultivariateNormal(torch.rand(2), torch.eye(2))
)
sampler = get_sampler(
posterior=posterior, sample_shape=torch.Size([10]), seed=2
)
self.assertIsInstance(sampler, SobolQMCNormalSampler)
self.assertEqual(sampler.seed, 2)
self.assertEqual(sampler.sample_shape, torch.Size([10]))
# Fallback to IID sampler.
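        # A 22000-dim MVN exceeds SobolEngine.MAXDIM (21201 at the time of
        # writing), so get_sampler should fall back to IIDNormalSampler.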
posterior = GPyTorchPosterior(
distribution=MultivariateNormal(torch.rand(22000), torch.eye(22000))
)
sampler = get_sampler(posterior=posterior, sample_shape=torch.Size([10]))
self.assertIsInstance(sampler, IIDNormalSampler)
self.assertEqual(sampler.sample_shape, torch.Size([10]))
# Transformed posterior.
tf_post = TransformedPosterior(
posterior=posterior, sample_transform=lambda X: X
)
sampler = get_sampler(posterior=tf_post, sample_shape=torch.Size([10]))
self.assertIsInstance(sampler, IIDNormalSampler)
self.assertEqual(sampler.sample_shape, torch.Size([10]))
# PosteriorList with transformed & deterministic.
post_list = PosteriorList(
tf_post, DeterministicPosterior(values=torch.rand(1, 2))
)
sampler = get_sampler(posterior=post_list, sample_shape=torch.Size([5]))
self.assertIsInstance(sampler, ListSampler)
self.assertIsInstance(sampler.samplers[0], IIDNormalSampler)
self.assertIsInstance(sampler.samplers[1], StochasticSampler)
for s in sampler.samplers:
self.assertEqual(s.sample_shape, torch.Size([5]))
# Unknown torch posterior.
posterior = TorchPosterior(distribution=Gamma(torch.rand(2), torch.rand(2)))
with self.assertRaisesRegex(NotImplementedError, "A registered `MCSampler`"):
get_sampler(posterior=posterior, sample_shape=torch.Size([5]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest import mock
import torch
from botorch.posteriors.torch import TorchPosterior
from botorch.sampling.stochastic_samplers import ForkedRNGSampler, StochasticSampler
from botorch.utils.testing import BotorchTestCase, MockPosterior
from torch.distributions.exponential import Exponential
class TestForkedRNGSampler(BotorchTestCase):
def test_forked_rng_sampler(self):
posterior = TorchPosterior(Exponential(rate=torch.rand(1, 2)))
sampler = ForkedRNGSampler(sample_shape=torch.Size([2]), seed=0)
with mock.patch.object(
posterior.distribution, "rsample", wraps=posterior.distribution.rsample
) as mock_rsample:
samples = sampler(posterior)
mock_rsample.assert_called_once_with(sample_shape=torch.Size([2]))
with torch.random.fork_rng():
torch.manual_seed(0)
expected = posterior.rsample(sample_shape=torch.Size([2]))
self.assertAllClose(samples, expected)
class TestStochasticSampler(BotorchTestCase):
def test_stochastic_sampler(self):
# Basic usage.
samples = torch.rand(1, 2)
posterior = MockPosterior(samples=samples)
sampler = StochasticSampler(sample_shape=torch.Size([2]))
self.assertTrue(torch.equal(samples.repeat(2, 1, 1), sampler(posterior)))
# Test _update_base_samples.
with self.assertRaisesRegex(NotImplementedError, "_update_base_samples"):
sampler._update_base_samples(posterior=posterior, base_sampler=sampler)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling.pairwise_samplers import (
PairwiseIIDNormalSampler,
PairwiseSobolQMCNormalSampler,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultivariateNormal
def _get_test_posterior(device, n=3, dtype=torch.float, batched=False):
mean = torch.zeros(n, device=device, dtype=dtype)
cov = torch.eye(n, device=device, dtype=dtype)
if batched:
cov = cov.repeat(3, 1, 1)
mvn = MultivariateNormal(mean, cov)
return GPyTorchPosterior(mvn)
class TestPairwiseIIDNormalSampler(BotorchTestCase):
def test_forward(self):
for dtype in (torch.float, torch.double):
sampler = PairwiseIIDNormalSampler(sample_shape=torch.Size([4]), seed=1234)
self.assertEqual(sampler.seed, 1234)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 3, 2]))
# ensure samples are the same
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# ensure this works with a differently shaped posterior
posterior_batched = _get_test_posterior(
device=self.device, dtype=dtype, batched=True
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 3, 2]))
# ensure this works when changing the dtype
new_dtype = torch.float if dtype == torch.double else torch.double
posterior_batched = _get_test_posterior(
device=self.device, dtype=new_dtype, batched=True
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 3, 2]))
            # ensure an error is raised when the number of points is < 2
posterior = _get_test_posterior(device=self.device, n=1, dtype=dtype)
with self.assertRaises(RuntimeError):
sampler(posterior)
# check max_num_comparisons
sampler = PairwiseIIDNormalSampler(
sample_shape=torch.Size([4]), max_num_comparisons=2
)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 2, 2]))
class TestPairwiseSobolQMCNormalSampler(BotorchTestCase):
def test_forward(self):
for dtype in (torch.float, torch.double):
sampler = PairwiseSobolQMCNormalSampler(
sample_shape=torch.Size([4]), seed=1234
)
self.assertEqual(sampler.seed, 1234)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 3, 2]))
# ensure samples are the same
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# ensure this works with a differently shaped posterior
posterior_batched = _get_test_posterior(
device=self.device, dtype=dtype, batched=True
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 3, 2]))
# ensure this works when changing the dtype
new_dtype = torch.float if dtype == torch.double else torch.double
posterior_batched = _get_test_posterior(
device=self.device, dtype=new_dtype, batched=True
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 3, 2]))
            # ensure an error is raised when the number of points is < 2
posterior = _get_test_posterior(device=self.device, n=1, dtype=dtype)
with self.assertRaises(RuntimeError):
sampler(posterior)
# check max_num_comparisons
sampler = PairwiseSobolQMCNormalSampler(
sample_shape=torch.Size([4]), max_num_comparisons=2
)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 2, 2]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.posteriors.ensemble import EnsemblePosterior
from botorch.sampling.index_sampler import IndexSampler
from botorch.utils.testing import BotorchTestCase
class TestIndexSampler(BotorchTestCase):
def test_index_sampler(self):
# Basic usage.
posterior = EnsemblePosterior(
values=torch.randn(torch.Size((50, 16, 1, 1))).to(self.device)
)
sampler = IndexSampler(sample_shape=torch.Size((128,)))
samples = sampler(posterior)
        self.assertEqual(samples.shape, torch.Size((128, 50, 1, 1)))
self.assertTrue(sampler.base_samples.max() < 16)
self.assertTrue(sampler.base_samples.min() >= 0)
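        # The base samples are integer indices into the ensemble dimension
        # (of size 16 here), one per draw.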
# check deterministic nature
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# test construct base samples
sampler = IndexSampler(sample_shape=torch.Size((4, 128)), seed=42)
        self.assertIsNone(sampler.base_samples)
        sampler._construct_base_samples(posterior=posterior)
        self.assertEqual(sampler.base_samples.shape, torch.Size((4, 128)))
self.assertTrue(
sampler.base_samples.device.type
== posterior.device.type
== self.device.type
)
base_samples = sampler.base_samples
sampler = IndexSampler(sample_shape=torch.Size((4, 128)), seed=42)
sampler._construct_base_samples(posterior=posterior)
self.assertAllClose(base_samples, sampler.base_samples)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling.normal import (
IIDNormalSampler,
NormalMCSampler,
SobolQMCNormalSampler,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultivariateNormal
from linear_operator.operators import DiagLinearOperator
def _get_test_posterior(device, dtype=torch.float):
mean = torch.zeros(2, device=device, dtype=dtype)
cov = torch.eye(2, device=device, dtype=dtype)
mvn = MultivariateNormal(mean, cov)
return GPyTorchPosterior(mvn)
def _get_test_posterior_batched(device, dtype=torch.float):
mean = torch.zeros(3, 2, device=device, dtype=dtype)
cov = torch.eye(2, device=device, dtype=dtype).repeat(3, 1, 1)
mvn = MultivariateNormal(mean, cov)
return GPyTorchPosterior(mvn)
class TestNormalMCSampler(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
NormalMCSampler(sample_shape=torch.Size([4]))
class TestIIDNormalSampler(BotorchTestCase):
def test_forward(self):
for dtype in (torch.float, torch.double):
sampler = IIDNormalSampler(sample_shape=torch.Size([4]), seed=1234)
self.assertEqual(sampler.seed, 1234)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
# ensure samples are the same
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# ensure this works with a differently shaped posterior
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
# ensure this works when changing the dtype
new_dtype = torch.float if dtype == torch.double else torch.double
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=new_dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
# ensure this works with a different batch_range
sampler.batch_range_override = (-3, -1)
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
class TestSobolQMCNormalSampler(BotorchTestCase):
def test_forward(self):
for dtype in (torch.float, torch.double):
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([4]), seed=1234)
self.assertEqual(sampler.seed, 1234)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
# ensure samples are the same
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# ensure this works with a differently shaped posterior
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
# ensure this works when changing the dtype
new_dtype = torch.float if dtype == torch.double else torch.double
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=new_dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
# ensure this works with a different batch_range
sampler.batch_range_override = (-3, -1)
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
def test_unsupported_dimension(self):
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
maxdim = torch.quasirandom.SobolEngine.MAXDIM + 1
mean = torch.zeros(maxdim)
cov = DiagLinearOperator(torch.ones(maxdim))
mvn = MultivariateNormal(mean, cov)
posterior = GPyTorchPosterior(mvn)
with self.assertRaises(UnsupportedError) as e:
sampler(posterior)
self.assertIn(f"Requested: {maxdim}", str(e.exception))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions.errors import InputDataError
from botorch.sampling.base import MCSampler
from botorch.utils.testing import BotorchTestCase, MockPosterior
class NonAbstractSampler(MCSampler):
def forward(self, posterior):
raise NotImplementedError
class OtherSampler(MCSampler):
def forward(self, posterior):
raise NotImplementedError
class TestBaseMCSampler(BotorchTestCase):
def test_MCSampler_abstract_raises(self):
with self.assertRaises(TypeError):
MCSampler()
def test_init(self):
with self.assertRaises(TypeError):
NonAbstractSampler()
# Current args.
sampler = NonAbstractSampler(sample_shape=torch.Size([4]), seed=1234)
self.assertEqual(sampler.sample_shape, torch.Size([4]))
self.assertEqual(sampler.seed, 1234)
self.assertIsNone(sampler.base_samples)
# Default seed.
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
self.assertIsInstance(sampler.seed, int)
# Deprecated args & error handling.
with self.assertWarnsRegex(DeprecationWarning, "positional argument"):
NonAbstractSampler(4)
with self.assertRaisesRegex(InputDataError, "sample_shape"):
NonAbstractSampler(4.5)
with self.assertWarnsRegex(DeprecationWarning, "resample"):
NonAbstractSampler(sample_shape=torch.Size([4]), resample=False)
with self.assertRaisesRegex(RuntimeError, "StochasticSampler"):
NonAbstractSampler(sample_shape=torch.Size([4]), resample=True)
with self.assertWarnsRegex(DeprecationWarning, "collapse_batch"):
NonAbstractSampler(sample_shape=torch.Size([4]), collapse_batch_dims=True)
with self.assertRaisesRegex(RuntimeError, "ForkedRNGSampler"):
NonAbstractSampler(sample_shape=torch.Size([4]), collapse_batch_dims=False)
with self.assertRaisesRegex(RuntimeError, "unknown argument"):
NonAbstractSampler(sample_shape=torch.Size([4]), dummy_arg=True)
def test_batch_range(self):
posterior = MockPosterior()
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
# Default: read from the posterior.
self.assertEqual(
sampler._get_batch_range(posterior=posterior), posterior.batch_range
)
# Overwrite.
sampler.batch_range_override = (0, -5)
self.assertEqual(sampler._get_batch_range(posterior=posterior), (0, -5))
def test_get_collapsed_shape(self):
posterior = MockPosterior(base_shape=torch.Size([4, 3, 2]))
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
self.assertEqual(
sampler._get_collapsed_shape(posterior=posterior), torch.Size([4, 1, 3, 2])
)
posterior = MockPosterior(
base_shape=torch.Size([3, 4, 3, 2]), batch_range=(0, 0)
)
self.assertEqual(
sampler._get_collapsed_shape(posterior=posterior),
torch.Size([4, 3, 4, 3, 2]),
)
posterior = MockPosterior(
base_shape=torch.Size([3, 4, 3, 2]), batch_range=(0, -1)
)
self.assertEqual(
sampler._get_collapsed_shape(posterior=posterior),
torch.Size([4, 1, 1, 1, 2]),
)
def test_get_extended_base_sample_shape(self):
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
posterior = MockPosterior(base_shape=torch.Size([3, 2]))
self.assertEqual(
sampler._get_extended_base_sample_shape(posterior=posterior),
torch.Size([4, 3, 2]),
)
posterior = MockPosterior(base_shape=torch.Size([3, 5, 3, 2]))
bss = sampler._get_extended_base_sample_shape(posterior=posterior)
self.assertEqual(bss, torch.Size([4, 3, 5, 3, 2]))
def test_update_base_samples(self):
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
with self.assertRaisesRegex(NotImplementedError, "update_base"):
sampler._update_base_samples(
posterior=MockPosterior(), base_sampler=sampler
)
def test_instance_check(self):
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
# Same type:
sampler._instance_check(sampler)
# Different type:
other = OtherSampler(sample_shape=torch.Size([4]))
with self.assertRaisesRegex(RuntimeError, "an instance of"):
sampler._instance_check(base_sampler=other)
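# Hedged sketch of the collapsing rule exercised above (illustrative only):
# dimensions inside the posterior's batch range are collapsed to 1 so that a
# single set of base samples can be broadcast across all posterior batches.
def _example_collapsed_shape():  # hypothetical helper
    sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
    posterior = MockPosterior(base_shape=torch.Size([3, 4, 3, 2]), batch_range=(0, -1))
    # batch dims 0..2 collapse to 1; the trailing event dim is kept
    assert sampler._get_collapsed_shape(posterior=posterior) == torch.Size([4, 1, 1, 1, 2])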
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import math
import numpy as np
import torch
from botorch.sampling.qmc import MultivariateNormalQMCEngine, NormalQMCEngine
from botorch.utils.testing import BotorchTestCase
from scipy.stats import shapiro
class NormalQMCTests(BotorchTestCase):
def test_NormalQMCEngine(self):
for d in (1, 2):
engine = NormalQMCEngine(d=d)
samples = engine.draw()
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([1, d]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, d]))
# test double dtype
samples = engine.draw(dtype=torch.double)
self.assertEqual(samples.dtype, torch.double)
self.assertEqual(samples.shape, torch.Size([1, d]))
def test_NormalQMCEngineInvTransform(self):
for d in (1, 2):
engine = NormalQMCEngine(d=d, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([1, d]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, d]))
# test double dtype
samples = engine.draw(dtype=torch.double)
self.assertEqual(samples.dtype, torch.double)
self.assertEqual(samples.shape, torch.Size([1, d]))
def test_NormalQMCEngineSeeded(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([2, 2]))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.shape, torch.Size([2, 3]))
def test_NormalQMCEngineSeededOut(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345)
out = torch.zeros(2, 2)
self.assertIsNone(engine.draw(n=2, out=out))
self.assertTrue(torch.all(out != 0))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345)
out = torch.empty(2, 3)
self.assertIsNone(engine.draw(n=2, out=out))
self.assertTrue(torch.all(out != 0))
def test_NormalQMCEngineSeededInvTransform(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345, inv_transform=True)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([2, 2]))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345, inv_transform=True)
samples = engine.draw(n=2)
self.assertEqual(samples.shape, torch.Size([2, 3]))
def test_NormalQMCEngineShapiro(self):
engine = NormalQMCEngine(d=2, seed=12345)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, torch.float)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
def test_NormalQMCEngineShapiroInvTransform(self):
engine = NormalQMCEngine(d=2, seed=12345, inv_transform=True)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, torch.float)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
class MultivariateNormalQMCTests(BotorchTestCase):
def test_MultivariateNormalQMCEngineShapeErrors(self):
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=torch.zeros(2), cov=torch.zeros(2, 1))
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=torch.zeros(1), cov=torch.eye(2))
def test_MultivariateNormalQMCEngineNonPSD(self):
for dtype in (torch.float, torch.double):
            # try with a cov that is not PSD (and hence not PD); expect a ValueError
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.tensor([[1, 2], [2, 1]], device=self.device, dtype=dtype)
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=mean, cov=cov)
def test_MultivariateNormalQMCEngineNonPD(self):
for dtype in (torch.float, torch.double):
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = torch.tensor(
[[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=self.device, dtype=dtype
)
            # try with a PSD but non-PD cov; this should work
            engine = MultivariateNormalQMCEngine(mean=mean, cov=cov)
            self.assertIsNotNone(engine._corr_matrix)
def test_MultivariateNormalQMCEngineSymmetric(self):
for dtype in (torch.float, torch.double):
            # try with a non-symmetric cov and expect a ValueError
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.tensor([[1, 0], [2, 1]], device=self.device, dtype=dtype)
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=mean, cov=cov)
def test_MultivariateNormalQMCEngine(self):
for d, dtype in itertools.product((1, 2, 3), (torch.float, torch.double)):
mean = torch.rand(d, device=self.device, dtype=dtype)
cov = torch.eye(d, device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertEqual(samples.shape, torch.Size([1, d]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, d]))
def test_MultivariateNormalQMCEngineInvTransform(self):
for d, dtype in itertools.product((1, 2, 3), (torch.float, torch.double)):
mean = torch.rand(d, device=self.device, dtype=dtype)
cov = torch.eye(d, device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertEqual(samples.shape, torch.Size([1, d]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, d]))
def test_MultivariateNormalQMCEngineSeeded(self):
for dtype in (torch.float, torch.double):
# test even dimension
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
# test odd dimension
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean, cov, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
def test_MultivariateNormalQMCEngineSeededOut(self):
for dtype in (torch.float, torch.double):
# test even dimension
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
out = torch.zeros(2, 2, device=self.device, dtype=dtype)
self.assertIsNone(engine.draw(n=2, out=out))
self.assertTrue(torch.all(out != 0))
# test odd dimension
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean, cov, seed=12345)
out = torch.zeros(2, 3, device=self.device, dtype=dtype)
self.assertIsNone(engine.draw(n=2, out=out))
self.assertTrue(torch.all(out != 0))
def test_MultivariateNormalQMCEngineSeededInvTransform(self):
for dtype in (torch.float, torch.double):
# test even dimension
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
# test odd dimension
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
def test_MultivariateNormalQMCEngineShapiro(self):
for dtype in (torch.float, torch.double):
# test the standard case
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.eye(2, device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
# test the correlated, non-zero mean case
mean = torch.tensor([1.0, 2.0], device=self.device, dtype=dtype)
cov = torch.tensor(
[[1.5, 0.5], [0.5, 1.5]], device=self.device, dtype=dtype
)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0) - mean) < 1e-2))
self.assertTrue(
torch.all(torch.abs(samples.std(dim=0) - math.sqrt(1.5)) < 1e-2)
)
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# check covariance
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1] - 0.5), 1e-2)
def test_MultivariateNormalQMCEngineShapiroInvTransform(self):
for dtype in (torch.float, torch.double):
# test the standard case
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.eye(2, device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
# test the correlated, non-zero mean case
mean = torch.tensor([1.0, 2.0], device=self.device, dtype=dtype)
cov = torch.tensor(
[[1.5, 0.5], [0.5, 1.5]], device=self.device, dtype=dtype
)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0) - mean) < 1e-2))
self.assertTrue(
torch.all(torch.abs(samples.std(dim=0) - math.sqrt(1.5)) < 1e-2)
)
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# check covariance
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1] - 0.5), 1e-2)
def test_MultivariateNormalQMCEngineDegenerate(self):
for dtype in (torch.float, torch.double):
            # X, Y i.i.d. standard Normal, Z = X + Y; consider the vector (X, Y, Z)
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = torch.tensor(
[[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=self.device, dtype=dtype
)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=4096)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.abs(torch.std(samples[:, 0]) - 1) < 1e-2)
self.assertTrue(torch.abs(torch.std(samples[:, 1]) - 1) < 1e-2)
self.assertTrue(torch.abs(torch.std(samples[:, 2]) - math.sqrt(2)) < 1e-2)
for i in (0, 1, 2):
_, pval = shapiro(samples[:, i].cpu().numpy())
self.assertGreater(pval, 0.9)
cov = np.cov(samples.cpu().numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
self.assertLess(np.abs(cov[0, 2] - 1), 1e-2)
# check to see if X + Y = Z almost exactly
self.assertTrue(
torch.all(
torch.abs(samples[:, 0] + samples[:, 1] - samples[:, 2]) < 1e-5
)
)
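# Hedged usage sketch (illustrative only): the engine produces low-discrepancy
# draws whose sample moments converge to the requested mean and covariance,
# which is what the Shapiro-Wilk tests above check statistically.
def _example_mvn_qmc_usage():  # hypothetical helper
    mean = torch.tensor([1.0, 2.0])
    cov = torch.tensor([[1.5, 0.5], [0.5, 1.5]])
    engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=0)
    samples = engine.draw(n=256)  # shape: 256 x 2
    return samples  # sample mean/cov approach `mean`/`cov` as n grows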
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from unittest.mock import patch
import torch
from botorch.models import SingleTaskGP, SingleTaskVariationalGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.pathwise.utils import (
get_input_transform,
get_output_transform,
get_train_inputs,
get_train_targets,
InverseLengthscaleTransform,
OutcomeUntransformer,
)
from botorch.utils.context_managers import delattr_ctx
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, ScaleKernel
class TestTransforms(BotorchTestCase):
def test_inverse_lengthscale_transform(self):
tkwargs = {"device": self.device, "dtype": torch.float64}
kernel = MaternKernel(nu=2.5, ard_num_dims=3).to(**tkwargs)
with self.assertRaisesRegex(RuntimeError, "does not implement `lengthscale`"):
InverseLengthscaleTransform(ScaleKernel(kernel))
x = torch.rand(3, 3, **tkwargs)
transform = InverseLengthscaleTransform(kernel)
self.assertTrue(transform(x).equal(kernel.lengthscale.reciprocal() * x))
def test_outcome_untransformer(self):
for untransformer in (
OutcomeUntransformer(transform=Standardize(m=1), num_outputs=1),
OutcomeUntransformer(transform=Standardize(m=2), num_outputs=2),
):
with torch.random.fork_rng():
torch.random.manual_seed(0)
y = torch.rand(untransformer.num_outputs, 4, device=self.device)
x = untransformer.transform(y.T)[0].T
self.assertTrue(y.allclose(untransformer(x)))
class TestGetters(BotorchTestCase):
def setUp(self):
super().setUp()
with torch.random.fork_rng():
torch.random.manual_seed(0)
train_X = torch.rand(5, 2)
train_Y = torch.randn(5, 2)
self.models = []
for num_outputs in (1, 2):
self.models.append(
SingleTaskGP(
train_X=train_X,
train_Y=train_Y[:, :num_outputs],
input_transform=Normalize(d=2),
outcome_transform=Standardize(m=num_outputs),
)
)
self.models.append(
SingleTaskVariationalGP(
train_X=train_X,
train_Y=train_Y[:, :num_outputs],
input_transform=Normalize(d=2),
outcome_transform=Standardize(m=num_outputs),
)
)
def test_get_input_transform(self):
for model in self.models:
self.assertIs(get_input_transform(model), model.input_transform)
def test_get_output_transform(self):
for model in self.models:
transform = get_output_transform(model)
self.assertIsInstance(transform, OutcomeUntransformer)
self.assertIs(transform.transform, model.outcome_transform)
def test_get_train_inputs(self):
for model in self.models:
model.train()
X = (
model.model.train_inputs[0]
if isinstance(model, SingleTaskVariationalGP)
else model.train_inputs[0]
)
Z = model.input_transform(X)
train_inputs = get_train_inputs(model, transformed=False)
self.assertIsInstance(train_inputs, tuple)
self.assertEqual(len(train_inputs), 1)
self.assertTrue(X.equal(get_train_inputs(model, transformed=False)[0]))
self.assertTrue(Z.equal(get_train_inputs(model, transformed=True)[0]))
model.eval()
self.assertTrue(X.equal(get_train_inputs(model, transformed=False)[0]))
self.assertTrue(Z.equal(get_train_inputs(model, transformed=True)[0]))
with delattr_ctx(model, "input_transform"), patch.object(
model, "_original_train_inputs", new=None
):
self.assertTrue(Z.equal(get_train_inputs(model, transformed=False)[0]))
self.assertTrue(Z.equal(get_train_inputs(model, transformed=True)[0]))
with self.subTest("test_model_list"):
model_list = ModelListGP(*self.models)
input_list = get_train_inputs(model_list)
self.assertIsInstance(input_list, list)
self.assertEqual(len(input_list), len(self.models))
for model, train_inputs in zip(model_list.models, input_list):
for a, b in zip(train_inputs, get_train_inputs(model)):
self.assertTrue(a.equal(b))
def test_get_train_targets(self):
for model in self.models:
model.train()
if isinstance(model, SingleTaskVariationalGP):
F = model.model.train_targets
Y = model.outcome_transform.untransform(F)[0].squeeze(dim=0)
else:
F = model.train_targets
Y = OutcomeUntransformer(model.outcome_transform, model.num_outputs)(F)
self.assertTrue(F.equal(get_train_targets(model, transformed=True)))
self.assertTrue(Y.equal(get_train_targets(model, transformed=False)))
model.eval()
self.assertTrue(F.equal(get_train_targets(model, transformed=True)))
self.assertTrue(Y.equal(get_train_targets(model, transformed=False)))
with delattr_ctx(model, "outcome_transform"):
self.assertTrue(F.equal(get_train_targets(model, transformed=True)))
self.assertTrue(F.equal(get_train_targets(model, transformed=False)))
with self.subTest("test_model_list"):
model_list = ModelListGP(*self.models)
target_list = get_train_targets(model_list)
self.assertIsInstance(target_list, list)
self.assertEqual(len(target_list), len(self.models))
for model, Y in zip(self.models, target_list):
self.assertTrue(Y.equal(get_train_targets(model)))
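# Hedged usage sketch (illustrative only): the getters expose training data in
# either the original or the transformed space, independent of train/eval mode.
def _example_getters_usage():  # hypothetical helper
    model = SingleTaskGP(
        train_X=torch.rand(5, 2),
        train_Y=torch.randn(5, 1),
        input_transform=Normalize(d=2),
        outcome_transform=Standardize(m=1),
    )
    (X,) = get_train_inputs(model, transformed=False)  # original inputs
    F = get_train_targets(model, transformed=True)  # standardized targets
    return X, F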
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
from itertools import product
import torch
from botorch.models import (
FixedNoiseGP,
ModelListGP,
SingleTaskGP,
SingleTaskVariationalGP,
)
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.pathwise import draw_matheron_paths, MatheronPath, PathList
from botorch.sampling.pathwise.utils import get_train_inputs
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, ScaleKernel
from torch import Size
from torch.nn.functional import pad
from .helpers import get_sample_moments, standardize_moments
class TestPosteriorSamplers(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.models = defaultdict(list)
seed = 0
for kernel in (
ScaleKernel(MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([]))),
):
with torch.random.fork_rng():
torch.manual_seed(seed)
tkwargs = {"device": self.device, "dtype": torch.float64}
base = kernel.base_kernel if isinstance(kernel, ScaleKernel) else kernel
base.lengthscale = 0.1 + 0.3 * torch.rand_like(base.lengthscale)
kernel.to(**tkwargs)
uppers = 1 + 9 * torch.rand(base.lengthscale.shape[-1], **tkwargs)
bounds = pad(uppers.unsqueeze(0), (0, 0, 1, 0))
X = uppers * torch.rand(4, base.lengthscale.shape[-1], **tkwargs)
Y = 10 * kernel(X).cholesky() @ torch.randn(4, 1, **tkwargs)
if kernel.batch_shape:
Y = Y.squeeze(-1).transpose(0, 1) # n x m
input_transform = Normalize(d=X.shape[-1], bounds=bounds)
outcome_transform = Standardize(m=Y.shape[-1])
# SingleTaskGP in eval mode
self.models[SingleTaskGP].append(
SingleTaskGP(
train_X=X,
train_Y=Y,
covar_module=deepcopy(kernel),
input_transform=deepcopy(input_transform),
outcome_transform=deepcopy(outcome_transform),
)
.to(**tkwargs)
.eval()
)
# FixedNoiseGP in train mode
self.models[FixedNoiseGP].append(
FixedNoiseGP(
train_X=X,
train_Y=Y,
train_Yvar=0.01 * torch.rand_like(Y),
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
# SingleTaskVariationalGP in train mode
self.models[SingleTaskVariationalGP].append(
SingleTaskVariationalGP(
train_X=X,
train_Y=Y,
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
seed += 1
def test_draw_matheron_paths(self):
for seed, models in enumerate(self.models.values()):
for model, sample_shape in product(models, [Size([1024]), Size([32, 32])]):
with torch.random.fork_rng():
torch.random.manual_seed(seed)
paths = draw_matheron_paths(model=model, sample_shape=sample_shape)
self.assertIsInstance(paths, MatheronPath)
self._test_draw_matheron_paths(model, paths, sample_shape)
with self.subTest("test_model_list"):
model_list = ModelListGP(
self.models[SingleTaskGP][0], self.models[FixedNoiseGP][0]
)
path_list = draw_matheron_paths(model_list, sample_shape=sample_shape)
(train_X,) = get_train_inputs(model_list.models[0], transformed=False)
X = torch.zeros(
4, train_X.shape[-1], dtype=train_X.dtype, device=self.device
)
sample_list = path_list(X)
self.assertIsInstance(path_list, PathList)
self.assertIsInstance(sample_list, list)
self.assertEqual(len(sample_list), len(path_list.paths))
def _test_draw_matheron_paths(self, model, paths, sample_shape, atol=3):
(train_X,) = get_train_inputs(model, transformed=False)
X = torch.rand(16, train_X.shape[-1], dtype=train_X.dtype, device=self.device)
# Evaluate sample paths and compute sample statistics
samples = paths(X)
batch_shape = (
model.model.covar_module.batch_shape
if isinstance(model, SingleTaskVariationalGP)
else model.covar_module.batch_shape
)
self.assertEqual(samples.shape, sample_shape + batch_shape + X.shape[-2:-1])
sample_moments = get_sample_moments(samples, sample_shape)
if hasattr(model, "outcome_transform"):
# Do this instead of untransforming exact moments
sample_moments = standardize_moments(
model.outcome_transform, *sample_moments
)
if model.training:
model.eval()
mvn = model(model.transform_inputs(X))
model.train()
else:
mvn = model(model.transform_inputs(X))
exact_moments = (mvn.loc, mvn.covariance_matrix)
# Compare moments
num_features = paths["prior_paths"].weight.shape[-1]
tol = atol * (num_features**-0.5 + sample_shape.numel() ** -0.5)
for exact, estimate in zip(exact_moments, sample_moments):
self.assertTrue(exact.allclose(estimate, atol=tol, rtol=0))
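# Hedged usage sketch (illustrative only): a MatheronPath behaves as an
# ordinary deterministic function per sample, so each posterior sample path
# can be evaluated (or optimized) repeatedly at arbitrary inputs.
def _example_matheron_usage(model):  # hypothetical helper; `model` is a fitted GP
    paths = draw_matheron_paths(model=model, sample_shape=Size([16]))
    X = torch.rand(8, 2, dtype=torch.float64)
    return paths(X)  # shape: 16 x 8 for a non-batched single-output model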
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.sampling.pathwise.paths import PathDict, PathList, SamplePath
from botorch.utils.testing import BotorchTestCase
from torch.nn import ModuleDict, ModuleList
class IdentityPath(SamplePath):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x
class TestGenericPaths(BotorchTestCase):
def test_path_dict(self):
with self.assertRaisesRegex(UnsupportedError, "must be preceded by a join"):
PathDict(output_transform="foo")
A = IdentityPath()
B = IdentityPath()
# Test __init__
module_dict = ModuleDict({"0": A, "1": B})
path_dict = PathDict(paths={"0": A, "1": B})
        self.assertIsNot(path_dict.paths, module_dict)
path_dict = PathDict(paths=module_dict)
self.assertIs(path_dict.paths, module_dict)
# Test __call__
x = torch.rand(3, device=self.device)
output = path_dict(x)
self.assertIsInstance(output, dict)
self.assertTrue(x.equal(output.pop("0")))
self.assertTrue(x.equal(output.pop("1")))
self.assertTrue(not output)
path_dict.join = torch.stack
output = path_dict(x)
self.assertIsInstance(output, torch.Tensor)
self.assertEqual(output.shape, (2,) + x.shape)
self.assertTrue(output.eq(x).all())
        # Test `dict` methods
self.assertEqual(len(path_dict), 2)
for key, val, (key_0, val_0), (key_1, val_1), key_2 in zip(
path_dict,
path_dict.values(),
path_dict.items(),
path_dict.paths.items(),
path_dict.keys(),
):
self.assertEqual(1, len({key, key_0, key_1, key_2}))
self.assertEqual(1, len({val, val_0, val_1, path_dict[key]}))
path_dict["1"] = A # test __setitem__
self.assertIs(path_dict.paths["1"], A)
del path_dict["1"] # test __delitem__
self.assertEqual(("0",), tuple(path_dict))
def test_path_list(self):
with self.assertRaisesRegex(UnsupportedError, "must be preceded by a join"):
PathList(output_transform="foo")
# Test __init__
A = IdentityPath()
B = IdentityPath()
module_list = ModuleList((A, B))
path_list = PathList(paths=list(module_list))
        self.assertIsNot(path_list.paths, module_list)
path_list = PathList(paths=module_list)
self.assertIs(path_list.paths, module_list)
# Test __call__
x = torch.rand(3, device=self.device)
output = path_list(x)
self.assertIsInstance(output, list)
self.assertTrue(x.equal(output.pop()))
self.assertTrue(x.equal(output.pop()))
self.assertTrue(not output)
path_list.join = torch.stack
output = path_list(x)
self.assertIsInstance(output, torch.Tensor)
self.assertEqual(output.shape, (2,) + x.shape)
self.assertTrue(output.eq(x).all())
# Test `list` methods
self.assertEqual(len(path_list), 2)
for key, (path, path_0) in enumerate(zip(path_list, path_list.paths)):
self.assertEqual(1, len({path, path_0, path_list[key]}))
path_list[1] = A # test __setitem__
self.assertIs(path_list.paths[1], A)
del path_list[1] # test __delitem__
self.assertEqual((A,), tuple(path_list))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Tuple
from botorch.models.transforms.outcome import Standardize
from torch import Size, Tensor
def get_sample_moments(samples: Tensor, sample_shape: Size) -> Tuple[Tensor, Tensor]:
sample_dim = len(sample_shape)
samples = samples.view(-1, *samples.shape[sample_dim:])
loc = samples.mean(dim=0)
residuals = (samples - loc).permute(*range(1, samples.ndim), 0)
return loc, (residuals @ residuals.transpose(-2, -1)) / sample_shape.numel()
def standardize_moments(
transform: Standardize,
loc: Tensor,
covariance_matrix: Tensor,
) -> Tuple[Tensor, Tensor]:
m = transform.means.squeeze().unsqueeze(-1)
s = transform.stdvs.squeeze().reciprocal().unsqueeze(-1)
loc = s * (loc - m)
correlation_matrix = s.unsqueeze(-1) * covariance_matrix * s.unsqueeze(-2)
return loc, correlation_matrix
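# Hedged usage sketch (illustrative only): estimate moments from sample paths,
# then map them into the standardized space of an outcome transform so they
# can be compared directly against a model's exact moments.
def _example_standardized_moments(samples: Tensor, transform: Standardize) -> Tuple[Tensor, Tensor]:
    sample_shape = Size([samples.shape[0]])  # assumes one leading sample dim
    loc, cov = get_sample_moments(samples, sample_shape)
    return standardize_moments(transform, loc, cov)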
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
from itertools import product
from unittest.mock import MagicMock
import torch
from botorch.models import (
FixedNoiseGP,
ModelListGP,
SingleTaskGP,
SingleTaskVariationalGP,
)
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.pathwise import (
draw_kernel_feature_paths,
GeneralizedLinearPath,
PathList,
)
from botorch.sampling.pathwise.utils import get_train_inputs
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel
from torch import Size
from torch.nn.functional import pad
from .helpers import get_sample_moments, standardize_moments
class TestPriorSamplers(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.models = defaultdict(list)
self.num_features = 1024
seed = 0
for kernel in (
MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([])),
ScaleKernel(RBFKernel(ard_num_dims=2, batch_shape=Size([2]))),
):
with torch.random.fork_rng():
torch.manual_seed(seed)
tkwargs = {"device": self.device, "dtype": torch.float64}
base = kernel.base_kernel if isinstance(kernel, ScaleKernel) else kernel
base.lengthscale = 0.1 + 0.3 * torch.rand_like(base.lengthscale)
kernel.to(**tkwargs)
uppers = 1 + 9 * torch.rand(base.lengthscale.shape[-1], **tkwargs)
bounds = pad(uppers.unsqueeze(0), (0, 0, 1, 0))
X = uppers * torch.rand(4, base.lengthscale.shape[-1], **tkwargs)
Y = 10 * kernel(X).cholesky() @ torch.randn(4, 1, **tkwargs)
if kernel.batch_shape:
Y = Y.squeeze(-1).transpose(0, 1) # n x m
input_transform = Normalize(d=X.shape[-1], bounds=bounds)
outcome_transform = Standardize(m=Y.shape[-1])
# SingleTaskGP in eval mode
self.models[SingleTaskGP].append(
SingleTaskGP(
train_X=X,
train_Y=Y,
covar_module=deepcopy(kernel),
input_transform=deepcopy(input_transform),
outcome_transform=deepcopy(outcome_transform),
)
.to(**tkwargs)
.eval()
)
# FixedNoiseGP in train mode
self.models[FixedNoiseGP].append(
FixedNoiseGP(
train_X=X,
train_Y=Y,
train_Yvar=0.01 * torch.rand_like(Y),
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
# SingleTaskVariationalGP in train mode
                # When batched, uses a multitask format which breaks the tests below
if not kernel.batch_shape:
self.models[SingleTaskVariationalGP].append(
SingleTaskVariationalGP(
train_X=X,
train_Y=Y,
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
seed += 1
def test_draw_kernel_feature_paths(self):
for seed, models in enumerate(self.models.values()):
for model, sample_shape in product(models, [Size([1024]), Size([2, 512])]):
with torch.random.fork_rng():
torch.random.manual_seed(seed)
paths = draw_kernel_feature_paths(
model=model,
sample_shape=sample_shape,
num_features=self.num_features,
)
self.assertIsInstance(paths, GeneralizedLinearPath)
self._test_draw_kernel_feature_paths(model, paths, sample_shape)
with self.subTest("test_model_list"):
model_list = ModelListGP(
self.models[SingleTaskGP][0], self.models[FixedNoiseGP][0]
)
path_list = draw_kernel_feature_paths(
model=model_list,
sample_shape=sample_shape,
num_features=self.num_features,
)
(train_X,) = get_train_inputs(model_list.models[0], transformed=False)
X = torch.zeros(
4, train_X.shape[-1], dtype=train_X.dtype, device=self.device
)
sample_list = path_list(X)
self.assertIsInstance(path_list, PathList)
self.assertIsInstance(sample_list, list)
self.assertEqual(len(sample_list), len(path_list.paths))
with self.subTest("test_initialization"):
model = self.models[SingleTaskGP][0]
sample_shape = torch.Size([16])
expected_weight_shape = (
sample_shape + model.covar_module.batch_shape + (self.num_features,)
)
weight_generator = MagicMock(
side_effect=lambda _: torch.rand(expected_weight_shape)
)
draw_kernel_feature_paths(
model=model,
sample_shape=sample_shape,
num_features=self.num_features,
weight_generator=weight_generator,
)
weight_generator.assert_called_once_with(expected_weight_shape)
def _test_draw_kernel_feature_paths(self, model, paths, sample_shape, atol=3):
(train_X,) = get_train_inputs(model, transformed=False)
X = torch.rand(16, train_X.shape[-1], dtype=train_X.dtype, device=self.device)
# Evaluate sample paths
samples = paths(X)
batch_shape = (
model.model.covar_module.batch_shape
if isinstance(model, SingleTaskVariationalGP)
else model.covar_module.batch_shape
)
self.assertEqual(samples.shape, sample_shape + batch_shape + X.shape[-2:-1])
# Calculate sample statistics
sample_moments = get_sample_moments(samples, sample_shape)
if hasattr(model, "outcome_transform"):
# Do this instead of untransforming exact moments
sample_moments = standardize_moments(
model.outcome_transform, *sample_moments
)
# Compute prior distribution
prior = model.forward(X if model.training else model.input_transform(X))
exact_moments = (prior.loc, prior.covariance_matrix)
# Compare moments
tol = atol * (paths.weight.shape[-1] ** -0.5 + sample_shape.numel() ** -0.5)
for exact, estimate in zip(exact_moments, sample_moments):
self.assertTrue(exact.allclose(estimate, atol=tol, rtol=0))
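# Hedged usage sketch (illustrative only): prior paths are finite-dimensional
# generalized linear models, phi(X) @ weight, so drawing once and evaluating
# at many inputs is cheap.
def _example_prior_path_usage(model):  # hypothetical helper; `model` is a GP
    paths = draw_kernel_feature_paths(
        model=model, sample_shape=Size([32]), num_features=1024
    )
    X = torch.rand(8, 2, dtype=torch.float64)
    return paths(X)  # shape: 32 x 8 for a non-batched single-output model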
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
from itertools import chain
from unittest.mock import patch
import torch
from botorch.models import FixedNoiseGP, SingleTaskGP, SingleTaskVariationalGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.pathwise import (
draw_kernel_feature_paths,
gaussian_update,
GeneralizedLinearPath,
KernelEvaluationMap,
)
from botorch.sampling.pathwise.utils import get_train_inputs, get_train_targets
from botorch.utils.context_managers import delattr_ctx
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel
from gpytorch.likelihoods import BernoulliLikelihood
from linear_operator.operators import ZeroLinearOperator
from linear_operator.utils.cholesky import psd_safe_cholesky
from torch import Size
from torch.nn.functional import pad
class TestPathwiseUpdates(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.models = defaultdict(list)
seed = 0
for kernel in (
RBFKernel(ard_num_dims=2),
ScaleKernel(MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([2]))),
):
with torch.random.fork_rng():
torch.manual_seed(seed)
tkwargs = {"device": self.device, "dtype": torch.float64}
base = kernel.base_kernel if isinstance(kernel, ScaleKernel) else kernel
base.lengthscale = 0.1 + 0.3 * torch.rand_like(base.lengthscale)
kernel.to(**tkwargs)
uppers = 1 + 9 * torch.rand(base.lengthscale.shape[-1], **tkwargs)
bounds = pad(uppers.unsqueeze(0), (0, 0, 1, 0))
X = uppers * torch.rand(4, base.lengthscale.shape[-1], **tkwargs)
Y = 10 * kernel(X).cholesky() @ torch.randn(4, 1, **tkwargs)
if kernel.batch_shape:
Y = Y.squeeze(-1).transpose(0, 1) # n x m
input_transform = Normalize(d=X.shape[-1], bounds=bounds)
outcome_transform = Standardize(m=Y.shape[-1])
# SingleTaskGP in eval mode
self.models[SingleTaskGP].append(
SingleTaskGP(
train_X=X,
train_Y=Y,
covar_module=deepcopy(kernel),
input_transform=deepcopy(input_transform),
outcome_transform=deepcopy(outcome_transform),
)
.to(**tkwargs)
.eval()
)
# FixedNoiseGP in train mode
self.models[FixedNoiseGP].append(
FixedNoiseGP(
train_X=X,
train_Y=Y,
train_Yvar=0.01 * torch.rand_like(Y),
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
# SingleTaskVariationalGP in train mode
                # When batched, uses a multitask format which breaks the tests below
if not kernel.batch_shape:
self.models[SingleTaskVariationalGP].append(
SingleTaskVariationalGP(
train_X=X,
train_Y=Y,
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
seed += 1
def test_gaussian_updates(self):
for seed, model in enumerate(chain.from_iterable(self.models.values())):
with torch.random.fork_rng():
torch.manual_seed(seed)
self._test_gaussian_updates(model)
def _test_gaussian_updates(self, model):
sample_shape = torch.Size([3])
# Extract exact conditions and precompute covariances
if isinstance(model, SingleTaskVariationalGP):
Z = model.model.variational_strategy.inducing_points
X = (
Z
if model.input_transform is None
else model.input_transform.untransform(Z)
)
U = torch.randn(len(Z), device=Z.device, dtype=Z.dtype)
Kuu = Kmm = model.model.covar_module(Z)
noise_values = None
else:
(X,) = get_train_inputs(model, transformed=False)
(Z,) = get_train_inputs(model, transformed=True)
U = get_train_targets(model, transformed=True)
Kmm = model.forward(X if model.training else Z).lazy_covariance_matrix
Kuu = Kmm + model.likelihood.noise_covar(shape=Z.shape[:-1])
noise_values = torch.randn(
*sample_shape, *U.shape, device=U.device, dtype=U.dtype
)
# Disable sampling of noise variables `e` used to obtain `y = f + e`
with delattr_ctx(model, "outcome_transform"), patch.object(
torch,
"randn_like",
return_value=noise_values,
):
prior_paths = draw_kernel_feature_paths(model, sample_shape=sample_shape)
sample_values = prior_paths(X)
update_paths = gaussian_update(
model=model,
sample_values=sample_values,
target_values=U,
)
# Test initialization
self.assertIsInstance(update_paths, GeneralizedLinearPath)
self.assertIsInstance(update_paths.feature_map, KernelEvaluationMap)
self.assertTrue(update_paths.feature_map.points.equal(Z))
self.assertIs(
update_paths.feature_map.input_transform,
getattr(model, "input_transform", None),
)
# Compare with manually computed update weights `Cov(y, y)^{-1} (y - f - e)`
Luu = psd_safe_cholesky(Kuu.to_dense())
errors = U - sample_values
if noise_values is not None:
errors -= (
model.likelihood.noise_covar(shape=Z.shape[:-1]).cholesky()
@ noise_values.unsqueeze(-1)
).squeeze(-1)
weight = torch.cholesky_solve(errors.unsqueeze(-1), Luu).squeeze(-1)
self.assertTrue(weight.allclose(update_paths.weight))
# Compare with manually computed update values at test locations
Z2 = torch.rand(16, Z.shape[-1], device=self.device, dtype=Z.dtype)
X2 = (
model.input_transform.untransform(Z2)
if hasattr(model, "input_transform")
else Z2
)
features = update_paths.feature_map(X2)
expected_updates = (features @ update_paths.weight.unsqueeze(-1)).squeeze(-1)
actual_updates = update_paths(X2)
self.assertTrue(actual_updates.allclose(expected_updates))
# Test passing `noise_covariance`
m = Z.shape[-2]
update_paths = gaussian_update(
model=model,
sample_values=sample_values,
target_values=U,
noise_covariance=ZeroLinearOperator(m, m, dtype=X.dtype),
)
Lmm = psd_safe_cholesky(Kmm.to_dense())
errors = U - sample_values
weight = torch.cholesky_solve(errors.unsqueeze(-1), Lmm).squeeze(-1)
self.assertTrue(weight.allclose(update_paths.weight))
if isinstance(model, SingleTaskVariationalGP):
            # Test passing a non-zero `noise_covariance`
with patch.object(model, "likelihood", new=BernoulliLikelihood()):
with self.assertRaisesRegex(NotImplementedError, "not yet supported"):
gaussian_update(
model=model,
sample_values=sample_values,
noise_covariance="foo",
)
else:
# Test exact models with non-Gaussian likelihoods
with patch.object(model, "likelihood", new=BernoulliLikelihood()):
with self.assertRaises(NotImplementedError):
gaussian_update(model=model, sample_values=sample_values)
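# Hedged sketch of the update rule verified above (illustrative only):
# pathwise conditioning adds a data-driven correction to each prior path,
#   f_post(x) = f_prior(x) + k(x, Z) @ Kuu^{-1} @ (y - f_prior(Z) - e),
# where Kuu = k(Z, Z) + noise covariance and e is a draw of observation noise.
def _example_pathwise_conditioning(model):  # hypothetical helper; exact GP assumed
    prior_paths = draw_kernel_feature_paths(model, sample_shape=torch.Size([3]))
    (X,) = get_train_inputs(model, transformed=False)
    update_paths = gaussian_update(
        model=model,
        sample_values=prior_paths(X),
        target_values=get_train_targets(model, transformed=True),
    )
    return lambda test_X: prior_paths(test_X) + update_paths(test_X)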
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from unittest.mock import MagicMock, patch
import torch
from botorch.sampling.pathwise.features import KernelEvaluationMap, KernelFeatureMap
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel
from torch import Size
class TestFeatureMaps(BotorchTestCase):
def test_kernel_evaluation_map(self):
kernel = MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([2]))
kernel.to(device=self.device)
with torch.random.fork_rng():
torch.manual_seed(0)
kernel.lengthscale = 0.1 + 0.3 * torch.rand_like(kernel.lengthscale)
with self.assertRaisesRegex(RuntimeError, "Shape mismatch"):
KernelEvaluationMap(kernel=kernel, points=torch.rand(4, 3, 2))
for dtype in (torch.float32, torch.float64):
kernel.to(dtype=dtype)
X0, X1 = torch.rand(5, 2, dtype=dtype, device=self.device).split([2, 3])
kernel_map = KernelEvaluationMap(kernel=kernel, points=X1)
self.assertEqual(kernel_map.batch_shape, kernel.batch_shape)
self.assertEqual(kernel_map.num_outputs, X1.shape[-1])
self.assertTrue(kernel_map(X0).to_dense().equal(kernel(X0, X1).to_dense()))
with patch.object(
kernel_map, "output_transform", new=lambda z: torch.concat([z, z], dim=-1)
):
self.assertEqual(kernel_map.num_outputs, 2 * X1.shape[-1])
def test_kernel_feature_map(self):
d = 2
m = 3
weight = torch.rand(m, d, device=self.device)
bias = torch.rand(m, device=self.device)
kernel = MaternKernel(nu=2.5, batch_shape=Size([3])).to(self.device)
feature_map = KernelFeatureMap(
kernel=kernel,
weight=weight,
bias=bias,
input_transform=MagicMock(side_effect=lambda x: x),
output_transform=MagicMock(side_effect=lambda z: z.exp()),
)
X = torch.rand(2, d, device=self.device)
features = feature_map(X)
feature_map.input_transform.assert_called_once_with(X)
feature_map.output_transform.assert_called_once()
self.assertTrue((X @ weight.transpose(-2, -1) + bias).exp().equal(features))
# Test batch_shape and num_outputs
self.assertIs(feature_map.batch_shape, kernel.batch_shape)
self.assertEqual(feature_map.num_outputs, weight.shape[-2])
with patch.object(feature_map, "output_transform", new=None):
self.assertEqual(feature_map.num_outputs, weight.shape[-2])
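# Hedged usage sketch (illustrative only): a KernelEvaluationMap is the
# canonical feature map phi(x) = k(x, Z) for a fixed point set Z, so its
# number of outputs equals the number of points.
def _example_kernel_evaluation_map():  # hypothetical helper
    kernel = MaternKernel(nu=2.5, ard_num_dims=2)
    Z = torch.rand(3, 2)
    feature_map = KernelEvaluationMap(kernel=kernel, points=Z)
    X = torch.rand(5, 2)
    return feature_map(X).to_dense()  # equals k(X, Z); shape: 5 x 3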
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from math import ceil
from unittest.mock import patch
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.sampling.pathwise.features import generators
from botorch.sampling.pathwise.features.generators import gen_kernel_features
from botorch.sampling.pathwise.features.maps import FeatureMap
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel
from gpytorch.kernels.kernel import Kernel
from torch import Size, Tensor
class TestFeatureGenerators(BotorchTestCase):
def setUp(self, seed: int = 0) -> None:
super().setUp()
self.kernels = []
self.num_inputs = d = 2
self.num_features = 4096
for kernel in (
MaternKernel(nu=0.5, batch_shape=Size([])),
MaternKernel(nu=1.5, ard_num_dims=1, active_dims=[0]),
ScaleKernel(MaternKernel(nu=2.5, ard_num_dims=d, batch_shape=Size([2]))),
ScaleKernel(
RBFKernel(ard_num_dims=1, batch_shape=Size([2, 2])), active_dims=[1]
),
):
kernel.to(
dtype=torch.float32 if (seed % 2) else torch.float64, device=self.device
)
with torch.random.fork_rng():
torch.manual_seed(seed)
kern = kernel.base_kernel if isinstance(kernel, ScaleKernel) else kernel
kern.lengthscale = 0.1 + 0.2 * torch.rand_like(kern.lengthscale)
seed += 1
self.kernels.append(kernel)
def test_gen_kernel_features(self):
for seed, kernel in enumerate(self.kernels):
with torch.random.fork_rng():
torch.random.manual_seed(seed)
feature_map = gen_kernel_features(
kernel=kernel,
num_inputs=self.num_inputs,
num_outputs=self.num_features,
)
n = 4
m = ceil(n * kernel.batch_shape.numel() ** -0.5)
for input_batch_shape in ((n**2,), (m, *kernel.batch_shape, m)):
X = torch.rand(
(*input_batch_shape, self.num_inputs),
device=kernel.device,
dtype=kernel.dtype,
)
self._test_gen_kernel_features(kernel, feature_map, X)
def _test_gen_kernel_features(
self, kernel: Kernel, feature_map: FeatureMap, X: Tensor, atol: float = 3.0
):
with self.subTest("test_initialization"):
self.assertEqual(feature_map.weight.dtype, kernel.dtype)
self.assertEqual(feature_map.weight.device, kernel.device)
self.assertEqual(
feature_map.weight.shape[-1],
self.num_inputs
if kernel.active_dims is None
else len(kernel.active_dims),
)
with self.subTest("test_covariance"):
features = feature_map(X)
test_shape = torch.broadcast_shapes(
(*X.shape[:-1], self.num_features), kernel.batch_shape + (1, 1)
)
self.assertEqual(features.shape, test_shape)
K0 = features @ features.transpose(-2, -1)
K1 = kernel(X).to_dense()
self.assertTrue(
K0.allclose(K1, atol=atol * self.num_features**-0.5, rtol=0)
)
        # Test passing a shape of the wrong dimensionality to `weight_generator`
with self.assertRaisesRegex(UnsupportedError, "2-dim"), patch.object(
generators,
"_gen_fourier_features",
side_effect=lambda **kwargs: kwargs["weight_generator"](Size([])),
):
gen_kernel_features(
kernel=kernel,
num_inputs=self.num_inputs,
num_outputs=self.num_features,
)
# Test requesting an odd number of features
with self.assertRaisesRegex(UnsupportedError, "Expected an even number"):
gen_kernel_features(
kernel=kernel, num_inputs=self.num_inputs, num_outputs=3
)
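# Hedged sketch (illustrative only): gen_kernel_features returns a random
# Fourier feature map with num_outputs features whose inner products
# approximate the kernel, k(X, X) ~= phi(X) @ phi(X).T, with error shrinking
# at roughly O(num_outputs ** -0.5), as the covariance test above checks.
def _example_kernel_feature_approximation():  # hypothetical helper
    kernel = MaternKernel(nu=2.5, ard_num_dims=2).to(torch.float64)
    feature_map = gen_kernel_features(kernel=kernel, num_inputs=2, num_outputs=1024)
    X = torch.rand(4, 2, dtype=torch.float64)
    K_approx = feature_map(X) @ feature_map(X).transpose(-2, -1)
    K_exact = kernel(X).to_dense()
    return K_approx, K_exact  # close up to Monte Carlo error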
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import numpy as np
import torch
from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError
from botorch.optim.parameter_constraints import (
_arrayify,
_generate_unfixed_lin_constraints,
_generate_unfixed_nonlin_constraints,
_make_linear_constraints,
eval_lin_constraint,
lin_constraint_jac,
make_scipy_bounds,
make_scipy_linear_constraints,
)
from botorch.utils.testing import BotorchTestCase
from scipy.optimize import Bounds
class TestParameterConstraints(BotorchTestCase):
def test_arrayify(self):
for dtype in (torch.float, torch.double, torch.int, torch.long):
t = torch.tensor([[1, 2], [3, 4]], device=self.device).type(dtype)
t_np = _arrayify(t)
self.assertIsInstance(t_np, np.ndarray)
self.assertTrue(t_np.dtype == np.float64)
def test_eval_lin_constraint(self):
res = eval_lin_constraint(
flat_idxr=[0, 2],
coeffs=np.array([1.0, -2.0]),
rhs=0.5,
x=np.array([1.0, 2.0, 3.0]),
)
self.assertEqual(res, -5.5)
def test_lin_constraint_jac(self):
dummy_array = np.array([1.0])
res = lin_constraint_jac(
dummy_array, flat_idxr=[0, 2], coeffs=np.array([1.0, -2.0]), n=3
)
self.assertTrue(all(np.equal(res, np.array([1.0, 0.0, -2.0]))))
def test_make_linear_constraints(self):
# equality constraints, 1d indices
indices = torch.tensor([1, 2], dtype=torch.long, device=self.device)
for dtype, shapeX in product(
(torch.float, torch.double), (torch.Size([3, 2, 4]), torch.Size([2, 4]))
):
coefficients = torch.tensor([1.0, 2.0], dtype=dtype, device=self.device)
constraints = _make_linear_constraints(
indices=indices,
coefficients=coefficients,
rhs=1.0,
shapeX=shapeX,
eq=True,
)
self.assertTrue(
all(set(c.keys()) == {"fun", "jac", "type"} for c in constraints)
)
self.assertTrue(all(c["type"] == "eq" for c in constraints))
self.assertEqual(len(constraints), shapeX[:-1].numel())
x = np.random.rand(shapeX.numel())
self.assertEqual(constraints[0]["fun"](x), x[1] + 2 * x[2] - 1.0)
jac_exp = np.zeros(shapeX.numel())
jac_exp[[1, 2]] = [1, 2]
self.assertTrue(np.allclose(constraints[0]["jac"](x), jac_exp))
self.assertEqual(constraints[-1]["fun"](x), x[-3] + 2 * x[-2] - 1.0)
jac_exp = np.zeros(shapeX.numel())
jac_exp[[-3, -2]] = [1, 2]
self.assertTrue(np.allclose(constraints[-1]["jac"](x), jac_exp))
# inequality constraints, 1d indices
for shapeX in [torch.Size([1, 1, 2]), torch.Size([1, 2])]:
lcs = _make_linear_constraints(
indices=torch.tensor([1]),
coefficients=torch.tensor([1.0]),
rhs=1.0,
shapeX=shapeX,
eq=False,
)
self.assertEqual(len(lcs), 1)
self.assertEqual(lcs[0]["type"], "ineq")
        # constraint across q-batch (2d indices), equality constraint
indices = torch.tensor([[0, 3], [1, 2]], dtype=torch.long, device=self.device)
for dtype, shapeX in product(
(torch.float, torch.double), (torch.Size([3, 2, 4]), torch.Size([2, 4]))
):
q, d = shapeX[-2:]
b = 1 if len(shapeX) == 2 else shapeX[0]
coefficients = torch.tensor([1.0, 2.0], dtype=dtype, device=self.device)
constraints = _make_linear_constraints(
indices=indices,
coefficients=coefficients,
rhs=1.0,
shapeX=shapeX,
eq=True,
)
self.assertTrue(
all(set(c.keys()) == {"fun", "jac", "type"} for c in constraints)
)
self.assertTrue(all(c["type"] == "eq" for c in constraints))
self.assertEqual(len(constraints), b)
x = np.random.rand(shapeX.numel())
offsets = [q * d, d]
            # the flat index of element [i, j, k] is i * offsets[0] + j * offsets[1] + k
for i in range(b):
pos1 = i * offsets[0] + 3
pos2 = i * offsets[0] + 1 * offsets[1] + 2
self.assertEqual(constraints[i]["fun"](x), x[pos1] + 2 * x[pos2] - 1.0)
jac_exp = np.zeros(shapeX.numel())
jac_exp[[pos1, pos2]] = [1, 2]
self.assertTrue(np.allclose(constraints[i]["jac"](x), jac_exp))
# make sure error is raised for scalar tensors
with self.assertRaises(ValueError):
constraints = _make_linear_constraints(
indices=torch.tensor(0),
coefficients=torch.tensor([1.0]),
rhs=1.0,
shapeX=torch.Size([1, 1, 2]),
eq=False,
)
# test that len(shapeX) < 2 raises an error
with self.assertRaises(UnsupportedError):
_make_linear_constraints(
shapeX=torch.Size([2]),
indices=indices,
coefficients=coefficients,
rhs=0.0,
)
def test_make_scipy_linear_constraints(self):
for shapeX in [torch.Size([2, 1, 4]), torch.Size([1, 4])]:
b = shapeX[0] if len(shapeX) == 3 else 1
res = make_scipy_linear_constraints(
shapeX=shapeX, inequality_constraints=None, equality_constraints=None
)
self.assertEqual(res, [])
indices = torch.tensor([0, 1], dtype=torch.long, device=self.device)
coefficients = torch.tensor([1.5, -1.0], device=self.device)
# both inequality and equality constraints
cs = make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
self.assertEqual(len(cs), 2 * b)
self.assertTrue({c["type"] for c in cs} == {"ineq", "eq"})
# inequality only
cs = make_scipy_linear_constraints(
shapeX=shapeX, inequality_constraints=[(indices, coefficients, 1.0)]
)
self.assertEqual(len(cs), b)
self.assertTrue(all(c["type"] == "ineq" for c in cs))
# equality only
cs = make_scipy_linear_constraints(
shapeX=shapeX, equality_constraints=[(indices, coefficients, 1.0)]
)
self.assertEqual(len(cs), b)
self.assertTrue(all(c["type"] == "eq" for c in cs))
# test that 2-dim indices work properly
indices = indices.unsqueeze(0)
cs = make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
self.assertEqual(len(cs), 2 * b)
self.assertTrue({c["type"] for c in cs} == {"ineq", "eq"})
def test_make_scipy_linear_constraints_unsupported(self):
shapeX = torch.Size([2, 1, 4])
coefficients = torch.tensor([1.5, -1.0], device=self.device)
# test that >2-dim indices raises an UnsupportedError
indices = torch.tensor([0, 1], dtype=torch.long, device=self.device)
indices = indices.unsqueeze(0).unsqueeze(0)
with self.assertRaises(UnsupportedError):
make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
# test that out of bounds index raises an error
indices = torch.tensor([0, 4], dtype=torch.long, device=self.device)
with self.assertRaises(RuntimeError):
make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
# test that two-d index out-of-bounds raises an error
# q out of bounds
indices = torch.tensor([[0, 0], [1, 0]], dtype=torch.long, device=self.device)
with self.assertRaises(RuntimeError):
make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
# d out of bounds
indices = torch.tensor([[0, 0], [0, 4]], dtype=torch.long, device=self.device)
with self.assertRaises(RuntimeError):
make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
def test_generate_unfixed_nonlin_constraints(self):
def nlc1(x):
return 4 - x.sum(dim=-1)
def nlc2(x):
return x[..., 0] - 1
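# the returned callables should accept inputs with the fixed dimensions
# removed and evaluate the original constraints with the fixed values
# re-inserted (here feature 1 is pinned to 2.0)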
# first test with one constraint
(new_nlc1,) = _generate_unfixed_nonlin_constraints(
constraints=[nlc1], fixed_features={1: 2.0}, dimension=3
)
self.assertAllClose(
nlc1(torch.tensor([[4.0, 2.0, 2.0]], device=self.device)),
new_nlc1(torch.tensor([[4.0, 2.0]], device=self.device)),
)
# test with several constraints
constraints = [nlc1, nlc2]
new_constraints = _generate_unfixed_nonlin_constraints(
constraints=constraints, fixed_features={1: 2.0}, dimension=3
)
for nlc, new_nlc in zip(constraints, new_constraints):
self.assertAllClose(
nlc(torch.tensor([[4.0, 2.0, 2.0]], device=self.device)),
new_nlc(torch.tensor([[4.0, 2.0]], device=self.device)),
)
# test with several constraints and two fixes
constraints = [nlc1, nlc2]
new_constraints = _generate_unfixed_nonlin_constraints(
constraints=constraints, fixed_features={1: 2.0, 2: 1.0}, dimension=3
)
for nlc, new_nlc in zip(constraints, new_constraints):
self.assertAllClose(
nlc(torch.tensor([[4.0, 2.0, 1.0]], device=self.device)),
new_nlc(torch.tensor([[4.0]], device=self.device)),
)
def test_generate_unfixed_lin_constraints(self):
# Case 1: some fixed features are in the indices
indices = [
torch.arange(4, device=self.device),
torch.arange(2, -1, -1, device=self.device),
]
coefficients = [
torch.tensor([-0.1, 0.2, -0.3, 0.4], device=self.device),
torch.tensor([-0.1, 0.3, -0.5], device=self.device),
]
rhs = [0.5, 0.5]
dimension = 4
fixed_features = {1: 1, 3: 2}
new_constraints = _generate_unfixed_lin_constraints(
constraints=list(zip(indices, coefficients, rhs)),
fixed_features=fixed_features,
dimension=dimension,
eq=False,
)
for i, (new_indices, new_coefficients, new_rhs) in enumerate(new_constraints):
if i % 2 == 0: # first list of indices is [0, 1, 2, 3]
self.assertTrue(
torch.equal(new_indices, torch.arange(2, device=self.device))
)
else: # second list of indices is [2, 1, 0]
self.assertTrue(
torch.equal(
new_indices, torch.arange(1, -1, -1, device=self.device)
)
)
mask = [True] * indices[i].shape[0]
subtract = 0
for j, old_idx in enumerate(indices[i]):
if old_idx.item() in fixed_features:
mask[j] = False
subtract += fixed_features[old_idx.item()] * coefficients[i][j]
self.assertTrue(torch.equal(new_coefficients, coefficients[i][mask]))
self.assertEqual(new_rhs, rhs[i] - subtract)
# Case 2: none of the fixed features appear in the indices, but the indices still have to be renumbered
indices = [
torch.arange(2, 6, device=self.device),
torch.arange(5, 2, -1, device=self.device),
]
fixed_features = {0: -10, 1: 10}
dimension = 6
new_constraints = _generate_unfixed_lin_constraints(
constraints=list(zip(indices, coefficients, rhs)),
fixed_features=fixed_features,
dimension=dimension,
eq=False,
)
for i, (new_indices, new_coefficients, new_rhs) in enumerate(new_constraints):
if i % 2 == 0: # first list of indices is [2, 3, 4, 5]
self.assertTrue(
torch.equal(new_indices, torch.arange(4, device=self.device))
)
else: # second list of indices is [5, 4, 3]
self.assertTrue(
torch.equal(new_indices, torch.arange(3, 0, -1, device=self.device))
)
self.assertTrue(torch.equal(new_coefficients, coefficients[i]))
self.assertEqual(new_rhs, rhs[i])
# Case 3: all fixed features are in the indices
indices = [
torch.arange(4, device=self.device),
torch.arange(2, -1, -1, device=self.device),
]
# Case 3a: problem is feasible
dimension = 4
fixed_features = {0: 2, 1: 1, 2: 1, 3: 2}
for eq in [False, True]:
new_constraints = _generate_unfixed_lin_constraints(
constraints=[(indices[0], coefficients[0], rhs[0])],
fixed_features=fixed_features,
dimension=dimension,
eq=eq,
)
self.assertEqual(new_constraints, [])
# Case 3b: problem is infeasible
for eq in [False, True]:
prefix = "Ineq" if not eq else "Eq"
with self.assertRaisesRegex(CandidateGenerationError, prefix):
new_constraints = _generate_unfixed_lin_constraints(
constraints=[(indices[1], coefficients[1], rhs[1])],
fixed_features=fixed_features,
dimension=dimension,
eq=eq,
)
class TestMakeScipyBounds(BotorchTestCase):
def test_make_scipy_bounds(self):
X = torch.zeros(3, 1, 2)
# both None
self.assertIsNone(make_scipy_bounds(X=X, lower_bounds=None, upper_bounds=None))
# lower None
upper_bounds = torch.ones(2)
bounds = make_scipy_bounds(X=X, lower_bounds=None, upper_bounds=upper_bounds)
self.assertIsInstance(bounds, Bounds)
self.assertTrue(
np.all(np.equal(bounds.lb, np.full((3, 1, 2), float("-inf")).flatten()))
)
self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
# upper None
lower_bounds = torch.zeros(2)
bounds = make_scipy_bounds(X=X, lower_bounds=lower_bounds, upper_bounds=None)
self.assertIsInstance(bounds, Bounds)
self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
self.assertTrue(
np.all(np.equal(bounds.ub, np.full((3, 1, 2), float("inf")).flatten()))
)
# floats
bounds = make_scipy_bounds(X=X, lower_bounds=0.0, upper_bounds=1.0)
self.assertIsInstance(bounds, Bounds)
self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
# 1-d tensors
bounds = make_scipy_bounds(
X=X, lower_bounds=lower_bounds, upper_bounds=upper_bounds
)
self.assertIsInstance(bounds, Bounds)
self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest.mock as mock
import torch
from botorch.acquisition import PosteriorMean
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.models import GenericDeterministicModel
from botorch.optim.homotopy import (
FixedHomotopySchedule,
Homotopy,
HomotopyParameter,
LinearHomotopySchedule,
LogLinearHomotopySchedule,
)
from botorch.optim.optimize_homotopy import optimize_acqf_homotopy, prune_candidates
from botorch.utils.testing import BotorchTestCase
from torch.nn import Parameter
PRUNE_CANDIDATES_PATH = prune_candidates.__module__
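# patch `prune_candidates` at its defining module so that the wrapped mock
# below sees the calls made from within `optimize_acqf_homotopy`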
class TestHomotopy(BotorchTestCase):
def _test_schedule(self, schedule, values):
self.assertEqual(schedule.num_steps, len(values))
self.assertEqual(schedule.value, values[0])
self.assertFalse(schedule.should_stop)
for i in range(len(values) - 1):
schedule.step()
self.assertEqual(schedule.value, values[i + 1])
self.assertFalse(schedule.should_stop)
schedule.step()
self.assertTrue(schedule.should_stop)
schedule.restart()
self.assertEqual(schedule.value, values[0])
self.assertFalse(schedule.should_stop)
def test_fixed_schedule(self):
values = [1, 3, 7]
fixed = FixedHomotopySchedule(values=values)
self.assertEqual(fixed._values, values)
self._test_schedule(schedule=fixed, values=values)
def test_linear_schedule(self):
values = [1, 2, 3, 4, 5]
linear = LinearHomotopySchedule(start=1, end=5, num_steps=5)
self.assertEqual(linear._values, values)
self._test_schedule(schedule=linear, values=values)
def test_log_linear_schedule(self):
values = [0.01, 0.1, 1, 10, 100]
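# log-linear values are evenly spaced in log10 (exponents -2 through 2)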
linear = LogLinearHomotopySchedule(start=0.01, end=100, num_steps=5)
self.assertEqual(linear._values, values)
self._test_schedule(schedule=linear, values=values)
def test_homotopy(self):
tkwargs = {"device": self.device, "dtype": torch.double}
p1 = Parameter(-2 * torch.ones(1, **tkwargs))
v1 = [1, 2, 3, 4, 5]
p2 = -3 * torch.ones(1, **tkwargs)
v2 = [0.01, 0.1, 1, 10, 100]
callback = mock.Mock()
homotopy_parameters = [
HomotopyParameter(
parameter=p1,
schedule=LinearHomotopySchedule(start=1, end=5, num_steps=5),
),
HomotopyParameter(
parameter=p2,
schedule=LogLinearHomotopySchedule(start=0.01, end=100, num_steps=5),
),
]
homotopy = Homotopy(
homotopy_parameters=homotopy_parameters, callbacks=[callback]
)
self.assertEqual(homotopy._original_values, [-2, -3])
self.assertEqual(homotopy._homotopy_parameters, homotopy_parameters)
self.assertEqual(homotopy._callbacks, [callback])
self.assertEqual(
[h.parameter.item() for h in homotopy._homotopy_parameters], [v1[0], v2[0]]
)
for i in range(4):
homotopy.step()
self.assertEqual(
[h.parameter.item() for h in homotopy._homotopy_parameters],
[v1[i + 1], v2[i + 1]],
)
self.assertFalse(homotopy.should_stop)
homotopy.step()
self.assertTrue(homotopy.should_stop)
# Restart the schedules
homotopy.restart()
self.assertEqual(
[h.parameter.item() for h in homotopy._homotopy_parameters], [v1[0], v2[0]]
)
# Reset the parameters to their original values
homotopy.reset()
self.assertEqual(
[h.parameter.item() for h in homotopy._homotopy_parameters], [-2, -3]
)
# Expect the call count to be 8: init (1), step (5), restart (1), reset (1).
self.assertEqual(callback.call_count, 8)
def test_optimize_acqf_homotopy(self):
tkwargs = {"device": self.device, "dtype": torch.double}
p = Parameter(-2 * torch.ones(1, **tkwargs))
hp = HomotopyParameter(
parameter=p,
schedule=LinearHomotopySchedule(start=4, end=0, num_steps=5),
)
model = GenericDeterministicModel(f=lambda x: 5 - (x - p) ** 2)
acqf = PosteriorMean(model=model)
candidate, acqf_val = optimize_acqf_homotopy(
q=1,
acq_function=acqf,
bounds=torch.tensor([[-10], [5]]).to(**tkwargs),
homotopy=Homotopy(homotopy_parameters=[hp]),
num_restarts=2,
raw_samples=16,
post_processing_func=lambda x: x.round(),
)
self.assertEqual(candidate, torch.zeros(1, **tkwargs))
self.assertEqual(acqf_val, 5 * torch.ones(1, **tkwargs))
# test fixed feature
fixed_features = {0: 1.0}
model = GenericDeterministicModel(
f=lambda x: 5 - (x - p).sum(dim=-1, keepdims=True) ** 2
)
acqf = PosteriorMean(model=model)
candidate, acqf_val = optimize_acqf_homotopy(
q=1,
acq_function=acqf,
bounds=torch.tensor([[-10, -10], [5, 5]]).to(**tkwargs),
homotopy=Homotopy(homotopy_parameters=[hp]),
num_restarts=2,
raw_samples=16,
fixed_features=fixed_features,
)
self.assertEqual(candidate[0, 0], torch.tensor(1, **tkwargs))
# With q > 1.
acqf = qExpectedImprovement(model=model, best_f=0.0)
candidate, acqf_val = optimize_acqf_homotopy(
q=3,
acq_function=acqf,
bounds=torch.tensor([[-10, -10], [5, 5]]).to(**tkwargs),
homotopy=Homotopy(homotopy_parameters=[hp]),
num_restarts=2,
raw_samples=16,
fixed_features=fixed_features,
)
self.assertEqual(candidate.shape, torch.Size([3, 2]))
self.assertEqual(acqf_val.shape, torch.Size([3]))
def test_prune_candidates(self):
tkwargs = {"device": self.device, "dtype": torch.double}
# no pruning
X = torch.rand(6, 3, **tkwargs)
vals = X.sum(dim=-1)
X_pruned = prune_candidates(candidates=X, acq_values=vals, prune_tolerance=1e-6)
self.assertTrue((X[vals.argsort(descending=True), :] == X_pruned).all())
# pruning
X[1, :] = X[0, :] + 1e-10
X[4, :] = X[2, :] - 1e-10
vals = torch.tensor([1, 6, 3, 4, 2, 5], **tkwargs)
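# sorted by descending value the order is [1, 5, 3, 2, 4, 0]; rows 0 and 4
# duplicate rows 1 and 2 within tolerance but have lower acquisition values,
# so both should be pruned, leaving [1, 5, 3, 2]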
X_pruned = prune_candidates(candidates=X, acq_values=vals, prune_tolerance=1e-6)
self.assertTrue((X[[1, 5, 3, 2]] == X_pruned).all())
# invalid shapes
with self.assertRaisesRegex(
ValueError, "`candidates` must be of size `n x d`."
):
prune_candidates(
candidates=torch.zeros(3, 2, 1),
acq_values=torch.zeros(2, 1),
prune_tolerance=1e-6,
)
with self.assertRaisesRegex(ValueError, "`acq_values` must be of size `n`."):
prune_candidates(
candidates=torch.zeros(3, 2),
acq_values=torch.zeros(3, 1),
prune_tolerance=1e-6,
)
with self.assertRaisesRegex(ValueError, "`prune_tolerance` must be >= 0."):
prune_candidates(
candidates=torch.zeros(3, 2),
acq_values=torch.zeros(3),
prune_tolerance=-1.2345,
)
@mock.patch(f"{PRUNE_CANDIDATES_PATH}.prune_candidates", wraps=prune_candidates)
def test_optimize_acqf_homotopy_pruning(self, prune_candidates_mock):
tkwargs = {"device": self.device, "dtype": torch.double}
p = Parameter(torch.zeros(1, **tkwargs))
hp = HomotopyParameter(
parameter=p,
schedule=LinearHomotopySchedule(start=4, end=0, num_steps=5),
)
model = GenericDeterministicModel(f=lambda x: 5 - (x - p) ** 2)
acqf = PosteriorMean(model=model)
candidate, acqf_val = optimize_acqf_homotopy(
q=1,
acq_function=acqf,
bounds=torch.tensor([[-10], [5]]).to(**tkwargs),
homotopy=Homotopy(homotopy_parameters=[hp]),
num_restarts=4,
raw_samples=16,
post_processing_func=lambda x: x.round(),
)
# First time we expect to call `prune_candidates` with 4 candidates
self.assertEqual(
prune_candidates_mock.call_args_list[0][1]["candidates"].shape,
torch.Size([4, 1]),
)
for i in range(1, 5): # The paths should have been pruned to just one path
self.assertEqual(
prune_candidates_mock.call_args_list[i][1]["candidates"].shape,
torch.Size([1, 1]),
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
from inspect import signature
from itertools import product
from unittest import mock
import numpy as np
import torch
from botorch.acquisition.acquisition import (
AcquisitionFunction,
OneShotAcquisitionFunction,
)
from botorch.exceptions import InputDataError, UnsupportedError
from botorch.generation.gen import gen_candidates_scipy, gen_candidates_torch
from botorch.optim.optimize import (
_filter_infeasible,
_filter_invalid,
_gen_batch_initial_conditions_local_search,
_generate_neighbors,
optimize_acqf,
optimize_acqf_cyclic,
optimize_acqf_discrete,
optimize_acqf_discrete_local_search,
optimize_acqf_list,
optimize_acqf_mixed,
)
from botorch.optim.parameter_constraints import (
_arrayify,
_make_f_and_grad_nonlinear_inequality_constraints,
)
from botorch.optim.utils.timeout import minimize_with_timeout
from botorch.utils.testing import BotorchTestCase, MockAcquisitionFunction
from scipy.optimize import OptimizeResult
from torch import Tensor
class MockOneShotAcquisitionFunction(
MockAcquisitionFunction, OneShotAcquisitionFunction
):
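# a one-shot acquisition function jointly optimizes over q + num_fantasies
# points; `extract_candidates` strips the trailing fantasy points from the
# full candidate set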
def __init__(self, num_fantasies=2):
r"""
Args:
num_fantasies: The number of fantasies.
"""
super().__init__()
self.num_fantasies = num_fantasies
def get_augmented_q_batch_size(self, q: int) -> int:
return q + self.num_fantasies
def extract_candidates(self, X_full: Tensor) -> Tensor:
return X_full[..., : -self.num_fantasies, :]
def forward(self, X):
pass
class SquaredAcquisitionFunction(AcquisitionFunction):
def __init__(self, model=None): # noqa: D107
super().__init__(model=model)
def forward(self, X):
return torch.linalg.norm(X, dim=-1).squeeze(-1)
class MockOneShotEvaluateAcquisitionFunction(MockOneShotAcquisitionFunction):
def evaluate(self, X: Tensor, bounds: Tensor):
return X.sum()
class SinOneOverXAcqusitionFunction(MockAcquisitionFunction):
"""
Acquisition function for sin(1/x).
This is useful for testing because it behaves pathologically only near
zero, so optimization is likely to fail when initializing near zero but not
elsewhere.
"""
def __call__(self, X):
return torch.sin(1 / X[..., 0].max(dim=-1).values)
def rounding_func(X: Tensor) -> Tensor:
batch_shape, d = X.shape[:-1], X.shape[-1]
X_round = torch.stack([x.round() for x in X.view(-1, d)])
return X_round.view(*batch_shape, d)
class TestOptimizeAcqf(BotorchTestCase):
@mock.patch("botorch.generation.gen.gen_candidates_torch")
@mock.patch("botorch.optim.optimize.gen_batch_initial_conditions")
@mock.patch("botorch.optim.optimize.gen_candidates_scipy")
@mock.patch("botorch.optim.utils.common.signature")
def test_optimize_acqf_joint(
self,
mock_signature,
mock_gen_candidates_scipy,
mock_gen_batch_initial_conditions,
mock_gen_candidates_torch,
):
q = 3
num_restarts = 2
raw_samples = 10
options = {}
mock_acq_function = MockAcquisitionFunction()
cnt = 0
for dtype in (torch.float, torch.double):
for mock_gen_candidates in (
mock_gen_candidates_scipy,
mock_gen_candidates_torch,
):
if mock_gen_candidates == mock_gen_candidates_torch:
mock_signature.return_value = signature(gen_candidates_torch)
else:
mock_signature.return_value = signature(gen_candidates_scipy)
mock_gen_batch_initial_conditions.return_value = torch.zeros(
num_restarts, q, 3, device=self.device, dtype=dtype
)
base_cand = torch.arange(3, device=self.device, dtype=dtype).expand(
1, q, 3
)
mock_candidates = torch.cat(
[i * base_cand for i in range(num_restarts)], dim=0
)
mock_acq_values = num_restarts - torch.arange(
num_restarts, device=self.device, dtype=dtype
)
mock_gen_candidates.return_value = (mock_candidates, mock_acq_values)
bounds = torch.stack(
[
torch.zeros(3, device=self.device, dtype=dtype),
4 * torch.ones(3, device=self.device, dtype=dtype),
]
)
mock_gen_candidates.reset_mock()
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
gen_candidates=mock_gen_candidates,
)
mock_gen_candidates.assert_called_once()
self.assertTrue(torch.equal(candidates, mock_candidates[0]))
self.assertTrue(torch.equal(acq_vals, mock_acq_values[0]))
cnt += 1
self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
# test generation with provided initial conditions
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
return_best_only=False,
batch_initial_conditions=torch.zeros(
num_restarts, q, 3, device=self.device, dtype=dtype
),
gen_candidates=mock_gen_candidates,
)
self.assertTrue(torch.equal(candidates, mock_candidates))
self.assertTrue(torch.equal(acq_vals, mock_acq_values))
self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
# test fixed features
fixed_features = {0: 0.1}
mock_candidates[:, 0] = 0.1
mock_gen_candidates.return_value = (mock_candidates, mock_acq_values)
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
fixed_features=fixed_features,
gen_candidates=mock_gen_candidates,
)
self.assertEqual(
mock_gen_candidates.call_args[1]["fixed_features"], fixed_features
)
self.assertTrue(torch.equal(candidates, mock_candidates[0]))
cnt += 1
self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
# test trivial case when all features are fixed
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
fixed_features={0: 0.1, 1: 0.2, 2: 0.3},
gen_candidates=mock_gen_candidates,
)
self.assertTrue(
torch.equal(
candidates,
torch.tensor(
[0.1, 0.2, 0.3], device=self.device, dtype=dtype
).expand(3, 3),
)
)
self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
# test OneShotAcquisitionFunction
mock_acq_function = MockOneShotAcquisitionFunction()
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
gen_candidates=mock_gen_candidates,
)
self.assertTrue(
torch.equal(
candidates, mock_acq_function.extract_candidates(mock_candidates[0])
)
)
self.assertTrue(torch.equal(acq_vals, mock_acq_values[0]))
# verify ValueError
with self.assertRaisesRegex(ValueError, "Must specify"):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=bounds,
q=q,
num_restarts=num_restarts,
options=options,
gen_candidates=mock_gen_candidates,
)
@mock.patch("botorch.optim.optimize.gen_batch_initial_conditions")
@mock.patch("botorch.optim.optimize.gen_candidates_scipy")
@mock.patch("botorch.generation.gen.gen_candidates_torch")
@mock.patch("botorch.optim.utils.common.signature")
def test_optimize_acqf_sequential(
self,
mock_signature,
mock_gen_candidates_torch,
mock_gen_candidates_scipy,
mock_gen_batch_initial_conditions,
):
for mock_gen_candidates, timeout_sec in product(
[mock_gen_candidates_scipy, mock_gen_candidates_torch], [None, 1e-4]
):
if mock_gen_candidates == mock_gen_candidates_torch:
mock_signature.return_value = signature(gen_candidates_torch)
else:
mock_signature.return_value = signature(gen_candidates_scipy)
mock_gen_candidates.__name__ = "gen_candidates"
q = 3
num_restarts = 2
raw_samples = 10
options = {}
for dtype, use_rounding in ((torch.float, True), (torch.double, False)):
mock_acq_function = MockAcquisitionFunction()
mock_gen_batch_initial_conditions.side_effect = [
torch.zeros(num_restarts, 1, 3, device=self.device, dtype=dtype)
for _ in range(q)
]
gcs_return_vals = [
(
torch.tensor(
[[[1.1, 2.1, 3.1]]], device=self.device, dtype=dtype
),
torch.tensor([i], device=self.device, dtype=dtype),
)
for i in range(q)
]
mock_gen_candidates.side_effect = gcs_return_vals
bounds = torch.stack(
[
torch.zeros(3, device=self.device, dtype=dtype),
4 * torch.ones(3, device=self.device, dtype=dtype),
]
)
if mock_gen_candidates is mock_gen_candidates_scipy:
# x[2] * 4 >= 5
inequality_constraints = [
(torch.tensor([2]), torch.tensor([4]), torch.tensor(5))
]
equality_constraints = [
(torch.tensor([0, 1]), torch.ones(2), torch.tensor(4.0))
]
# gen_candidates_torch does not support constraints
else:
inequality_constraints = None
equality_constraints = None
mock_gen_candidates.reset_mock()
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
post_processing_func=rounding_func if use_rounding else None,
sequential=True,
timeout_sec=timeout_sec,
gen_candidates=mock_gen_candidates,
)
self.assertEqual(mock_gen_candidates.call_count, q)
base_candidates = torch.cat(
[cands[0] for cands, _ in gcs_return_vals], dim=-2
)
if use_rounding:
expected_candidates = base_candidates.round()
expected_val = mock_acq_function(expected_candidates.unsqueeze(-2))
else:
expected_candidates = base_candidates
expected_val = torch.cat([acqval for _, acqval in gcs_return_vals])
self.assertTrue(torch.equal(candidates, expected_candidates))
self.assertTrue(torch.equal(acq_value, expected_val))
# verify error when using a OneShotAcquisitionFunction
with self.assertRaises(NotImplementedError):
optimize_acqf(
acq_function=mock.Mock(spec=OneShotAcquisitionFunction),
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
sequential=True,
)
# Verify error for passing in incorrect bounds
with self.assertRaisesRegex(
ValueError,
"bounds should be a `2 x d` tensor",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds.T,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
sequential=True,
)
# Verify error when using sequential=True in
# conjunction with user-supplied batch_initial_conditions
with self.assertRaisesRegex(
UnsupportedError,
"`batch_initial_conditions` is not supported for sequential "
"optimization. Either avoid specifying `batch_initial_conditions` "
"to use the custom initializer or use the `ic_generator` kwarg to "
"generate initial conditions for the case of "
"nonlinear inequality constraints.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
batch_initial_conditions=torch.zeros((1, 1, 3)),
sequential=True,
)
@mock.patch(
"botorch.generation.gen.minimize_with_timeout",
wraps=minimize_with_timeout,
)
@mock.patch("botorch.optim.utils.timeout.optimize.minimize")
def test_optimize_acqf_timeout(
self, mock_minimize, mock_minimize_with_timeout
) -> None:
"""
Check that the right value of `timeout_sec` is passed to `minimize_with_timeout`.
"""
num_restarts = 2
q = 3
dim = 4
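# the total timeout budget should be split evenly across the underlying
# scipy calls: num_restarts * q calls when sequential, num_restarts otherwise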
for timeout_sec, sequential, expected_call_count, expected_timeout_arg in [
(1.0, True, num_restarts * q, 1.0 / (num_restarts * q)),
(0.0, True, num_restarts * q, 0.0),
(1.0, False, num_restarts, 1.0 / num_restarts),
(0.0, False, num_restarts, 0.0),
]:
with self.subTest(
timeout_sec=timeout_sec,
sequential=sequential,
expected_call_count=expected_call_count,
expected_timeout_arg=expected_timeout_arg,
):
mock_minimize.return_value = OptimizeResult(
{
"x": np.zeros(dim if sequential else dim * q),
"success": True,
"status": 0,
},
)
optimize_acqf(
timeout_sec=timeout_sec,
q=q,
sequential=sequential,
num_restarts=num_restarts,
acq_function=SinOneOverXAcqusitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
raw_samples=7,
options={"batch_limit": 1},
)
self.assertEqual(
mock_minimize_with_timeout.call_count, expected_call_count
)
timeout_times = torch.tensor(
[
elt.kwargs["timeout_sec"]
for elt in mock_minimize_with_timeout.mock_calls
]
)
self.assertGreaterEqual(timeout_times.min(), 0)
self.assertAllClose(
timeout_times,
torch.full_like(timeout_times, expected_timeout_arg),
rtol=float("inf"),
atol=1e-8,
)
mock_minimize_with_timeout.reset_mock()
def test_optimize_acqf_sequential_notimplemented(self):
# Sequential acquisition function optimization only supported
# when return_best_only=True
with self.assertRaises(NotImplementedError):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
q=3,
num_restarts=2,
raw_samples=10,
return_best_only=False,
sequential=True,
)
def test_optimize_acqf_sequential_q_constraint_notimplemented(self):
# Sequential acquisition function not supported with q-constraints
with self.assertRaises(UnsupportedError):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
equality_constraints=[
(
torch.tensor(
[[0, 0], [1, 0]], device=self.device, dtype=torch.int64
),
torch.tensor(
[1.0, -1.0], device=self.device, dtype=torch.float64
),
0,
),
],
q=3,
num_restarts=2,
raw_samples=10,
return_best_only=True,
sequential=True,
)
with self.assertRaises(UnsupportedError):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
inequality_constraints=[
(
torch.tensor(
[[0, 0], [1, 0]], device=self.device, dtype=torch.int64
),
torch.tensor(
[1.0, -1.0], device=self.device, dtype=torch.float64
),
0,
),
],
q=3,
num_restarts=2,
raw_samples=10,
return_best_only=True,
sequential=True,
)
def test_optimize_acqf_batch_limit(self) -> None:
num_restarts = 3
raw_samples = 5
dim = 4
q = 4
batch_limit = 2
options = {"batch_limit": batch_limit}
initial_conditions = [
torch.ones(shape) for shape in [(1, 2, dim), (2, 1, dim), (1, dim)]
] + [None]
for gen_candidates, ics in zip(
[gen_candidates_scipy, gen_candidates_torch], initial_conditions
):
with self.subTest(gen_candidates=gen_candidates, initial_conditions=ics):
_, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
return_best_only=False,
gen_candidates=gen_candidates,
batch_initial_conditions=ics,
)
expected_shape = (num_restarts,) if ics is None else (ics.shape[0],)
self.assertEqual(acq_value_list.shape, expected_shape)
def test_optimize_acqf_runs_given_batch_initial_conditions(self):
num_restarts, raw_samples, dim = 1, 2, 3
opt_x = 2 / np.pi
# -x[i] * 1 >= -opt_x * 1.01 => x[i] <= opt_x * 1.01
inequality_constraints = [
(torch.tensor([i]), -torch.tensor([1]), -opt_x * 1.01) for i in range(dim)
] + [
# x[i] * 1 >= opt_x * .99
(torch.tensor([i]), torch.tensor([1]), opt_x * 0.99)
for i in range(dim)
]
q = 1
ic_shapes = [(1, 2, dim), (2, 1, dim), (1, dim)]
torch.manual_seed(0)
for shape in ic_shapes:
with self.subTest(shape=shape):
# start near one (of many) optima
initial_conditions = (opt_x * 1.01) * torch.ones(shape)
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
batch_initial_conditions=initial_conditions,
inequality_constraints=inequality_constraints,
)
self.assertAllClose(
batch_candidates,
opt_x * torch.ones_like(batch_candidates),
# must be at least 50% closer to the optimum than it started
atol=0.004,
rtol=0.005,
)
self.assertAlmostEqual(acq_value_list.item(), 1, places=3)
def test_optimize_acqf_wrong_ic_shape_inequality_constraints(self) -> None:
dim = 3
ic_shapes = [(1, 2, dim + 1), (1, 2, dim, 1), (1, dim + 1), (1, 1), (dim,)]
for shape in ic_shapes:
with self.subTest(shape=shape):
initial_conditions = torch.ones(shape)
expected_error = (
rf"batch_initial_conditions.shape\[-1\] must be {dim}\."
if len(shape) in (2, 3)
else r"batch_initial_conditions must be 2\-dimensional or "
)
with self.assertRaisesRegex(ValueError, expected_error):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
q=4,
batch_initial_conditions=initial_conditions,
num_restarts=1,
)
def test_optimize_acqf_warns_on_opt_failure(self):
"""
Test error handling in `scipy.optimize.minimize`.
Expected behavior is that a warning is raised when optimization fails
in `scipy.optimize.minimize`, and then it restarts and tries again.
This is a test case cooked up to fail. It is trying to optimize
sin(1/x), which is pathological near zero, given a starting point near
zero.
"""
num_restarts, raw_samples, dim = 1, 1, 1
initial_conditions = 1e-8 * torch.ones((num_restarts, raw_samples, dim))
torch.manual_seed(0)
with warnings.catch_warnings(record=True) as ws:
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
batch_initial_conditions=initial_conditions,
)
message = (
"Optimization failed in `gen_candidates_scipy` with the following "
"warning(s):\n[OptimizationWarning('Optimization failed within "
"`scipy.optimize.minimize` with status 2 and message ABNORMAL_TERMINATION"
"_IN_LNSRCH.')]\nBecause you specified `batch_initial_conditions`, "
"optimization will not be retried with new initial conditions and will "
"proceed with the current solution. Suggested remediation: Try again with "
"different `batch_initial_conditions`, or don't provide "
"`batch_initial_conditions.`"
)
expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message in str(w.message)
for w in ws
)
)
self.assertTrue(expected_warning_raised)
def test_optimize_acqf_successfully_restarts_on_opt_failure(self):
"""
Test that `optimize_acqf` can succeed after restarting on opt failure.
With the given seed (5), `optimize_acqf` will choose an initial
condition that causes failure in the first run of
`gen_candidates_scipy`, then re-tries with a new starting point and
succeed.
Also tests that this can be turned off by setting
`retry_on_optimization_warning = False`.
"""
num_restarts, raw_samples, dim = 1, 1, 1
bounds = torch.stack(
[
-1 * torch.ones(dim, dtype=torch.double),
torch.ones(dim, dtype=torch.double),
]
)
torch.manual_seed(5)
with warnings.catch_warnings(record=True) as ws:
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
# shorten the line search to make it faster and make failure
# more likely
options={"maxls": 2},
)
message = (
"Optimization failed in `gen_candidates_scipy` with the following "
"warning(s):\n[OptimizationWarning('Optimization failed within "
"`scipy.optimize.minimize` with status 2 and message ABNORMAL_TERMINATION"
"_IN_LNSRCH.')]\nTrying again with a new set of initial conditions."
)
expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message in str(w.message)
for w in ws
)
)
self.assertTrue(expected_warning_raised)
# check if it succeeded on restart -- the maximum value of sin(1/x) is 1
self.assertAlmostEqual(acq_value_list.item(), 1.0)
# Test with retry_on_optimization_warning = False.
torch.manual_seed(5)
with warnings.catch_warnings(record=True) as ws:
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
# shorten the line search to make it faster and make failure
# more likely
options={"maxls": 2},
retry_on_optimization_warning=False,
)
expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message in str(w.message)
for w in ws
)
)
self.assertFalse(expected_warning_raised)
def test_optimize_acqf_warns_on_second_opt_failure(self):
"""
Test that `optimize_acqf` warns if it fails on a second optimization try.
With the given seed (230), `optimize_acqf` will choose an initial
condition that causes failure in the first run of
`gen_candidates_scipy`, then re-tries and still does not succeed. Since
this doesn't happen with seeds 0 - 229, this test might be broken by
future refactorings affecting calls to `torch`.
"""
num_restarts, raw_samples, dim = 1, 1, 1
bounds = torch.stack(
[
-1 * torch.ones(dim, dtype=torch.double),
torch.ones(dim, dtype=torch.double),
]
)
with warnings.catch_warnings(record=True) as ws:
torch.manual_seed(230)
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
# shorten the line search to make it faster and make failure
# more likely
options={"maxls": 2},
)
message_1 = (
"Optimization failed in `gen_candidates_scipy` with the following "
"warning(s):\n[OptimizationWarning('Optimization failed within "
"`scipy.optimize.minimize` with status 2 and message ABNORMAL_TERMINATION"
"_IN_LNSRCH.')]\nTrying again with a new set of initial conditions."
)
message_2 = (
"Optimization failed on the second try, after generating a new set "
"of initial conditions."
)
first_expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message_1 in str(w.message)
for w in ws
)
)
second_expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message_2 in str(w.message)
for w in ws
)
)
self.assertTrue(first_expected_warning_raised)
self.assertTrue(second_expected_warning_raised)
def test_optimize_acqf_nonlinear_constraints(self):
num_restarts = 2
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
mock_acq_function = SquaredAcquisitionFunction()
bounds = torch.stack(
[torch.zeros(3, **tkwargs), 4 * torch.ones(3, **tkwargs)]
)
# Make sure we find the global optimum [4, 4, 4] without constraints
with torch.random.fork_rng():
torch.manual_seed(0)
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
sequential=True,
raw_samples=16,
)
self.assertAllClose(candidates, 4 * torch.ones(1, 3, **tkwargs))
# Constrain the sum to be <= 4 in which case the solution is a
# permutation of [4, 0, 0]
def nlc1(x):
return 4 - x.sum(dim=-1)
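# nonlinear inequality constraints follow the convention callable(x) >= 0,
# so nlc1 enforces sum(x) <= 4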
batch_initial_conditions = torch.tensor([[[0.5, 0.5, 3]]], **tkwargs)
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
batch_initial_conditions=batch_initial_conditions,
num_restarts=1,
)
self.assertTrue(
torch.allclose(
torch.sort(candidates).values,
torch.tensor([[0, 0, 4]], **tkwargs),
)
)
self.assertTrue(
torch.allclose(acq_value, torch.tensor([4], **tkwargs), atol=1e-3)
)
# Make sure we return the initial solution if SLSQP fails to return
# a feasible point.
with mock.patch(
"botorch.generation.gen.minimize_with_timeout"
) as mock_minimize:
# By setting "success" to True and "status" to 0, we prevent a
# warning that `minimize` failed, which isn't the behavior
# we're looking to test here.
mock_minimize.return_value = OptimizeResult(
x=np.array([4, 4, 4]), success=True, status=0
)
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
batch_initial_conditions=batch_initial_conditions,
num_restarts=1,
)
self.assertAllClose(candidates, batch_initial_conditions[0, ...])
# Constrain all variables to be >= 1. The global optimum is 2.45 and
# is attained by some permutation of [1, 1, 2]
def nlc2(x):
return x[..., 0] - 1
def nlc3(x):
return x[..., 1] - 1
def nlc4(x):
return x[..., 2] - 1
with torch.random.fork_rng():
torch.manual_seed(0)
batch_initial_conditions = 1 + 0.33 * torch.rand(
num_restarts, 1, 3, **tkwargs
)
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1, nlc2, nlc3, nlc4],
batch_initial_conditions=batch_initial_conditions,
num_restarts=num_restarts,
)
self.assertTrue(
torch.allclose(
torch.sort(candidates).values,
torch.tensor([[1, 1, 2]], **tkwargs),
)
)
self.assertTrue(
torch.allclose(acq_value, torch.tensor(2.45, **tkwargs), atol=1e-3)
)
with torch.random.fork_rng():
torch.manual_seed(0)
batch_initial_conditions = torch.rand(num_restarts, 1, 3, **tkwargs)
batch_initial_conditions[..., 0] = 2
# test with fixed features
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1, nlc2],
batch_initial_conditions=batch_initial_conditions,
num_restarts=num_restarts,
fixed_features={0: 2},
)
self.assertEqual(candidates[0, 0], 2.0)
self.assertTrue(
torch.allclose(
torch.sort(candidates).values,
torch.tensor([[0, 2, 2]], **tkwargs),
)
)
self.assertTrue(
torch.allclose(acq_value, torch.tensor(2.8284, **tkwargs), atol=1e-3)
)
# Test that an ic_generator object with the same API as
# gen_batch_initial_conditions returns candidates of the
# required shape.
with mock.patch(
"botorch.optim.optimize.gen_batch_initial_conditions"
) as ic_generator:
ic_generator.return_value = batch_initial_conditions
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=3,
nonlinear_inequality_constraints=[nlc1],
num_restarts=1,
ic_generator=ic_generator,
)
self.assertEqual(candidates.size(), torch.Size([1, 3]))
# Constraints must be passed in as lists
with self.assertRaisesRegex(
ValueError,
"`nonlinear_inequality_constraints` must be a list of callables, "
"got <class 'function'>.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=nlc1,
num_restarts=num_restarts,
batch_initial_conditions=batch_initial_conditions,
)
# batch_initial_conditions must be feasible
with self.assertRaisesRegex(
ValueError,
"`batch_initial_conditions` must satisfy the non-linear "
"inequality constraints.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
num_restarts=num_restarts,
batch_initial_conditions=4 * torch.ones(1, 1, 3, **tkwargs),
)
# Explicitly setting batch_limit to be >1 should raise
with self.assertRaisesRegex(
ValueError,
"`batch_limit` must be 1 when non-linear inequality constraints "
"are given.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
batch_initial_conditions=torch.rand(5, 1, 3, **tkwargs),
num_restarts=5,
options={"batch_limit": 5},
)
# If there are non-linear inequality constraints an initial condition
# generator object `ic_generator` must be supplied.
with self.assertRaisesRegex(
RuntimeError,
"`ic_generator` must be given if "
"there are non-linear inequality constraints.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
num_restarts=1,
raw_samples=16,
)
@mock.patch("botorch.generation.gen.gen_candidates_torch")
@mock.patch("botorch.optim.optimize.gen_batch_initial_conditions")
@mock.patch("botorch.optim.optimize.gen_candidates_scipy")
@mock.patch("botorch.optim.utils.common.signature")
def test_optimize_acqf_non_linear_constraints_sequential(
self,
mock_signature,
mock_gen_candidates_scipy,
mock_gen_batch_initial_conditions,
mock_gen_candidates_torch,
):
def nlc(x):
return 4 * x[..., 2] - 5
q = 3
num_restarts = 2
raw_samples = 10
options = {}
for mock_gen_candidates in (
mock_gen_candidates_torch,
mock_gen_candidates_scipy,
):
if mock_gen_candidates == mock_gen_candidates_torch:
mock_signature.return_value = signature(gen_candidates_torch)
else:
mock_signature.return_value = signature(gen_candidates_scipy)
for dtype in (torch.float, torch.double):
mock_acq_function = MockAcquisitionFunction()
mock_gen_batch_initial_conditions.side_effect = [
torch.zeros(num_restarts, 1, 3, device=self.device, dtype=dtype)
for _ in range(q)
]
gcs_return_vals = [
(
torch.tensor(
[[[1.0, 2.0, 3.0]]], device=self.device, dtype=dtype
),
torch.tensor([i], device=self.device, dtype=dtype),
)
# for nonlinear inequality constraints the batch_limit variable is
# currently set to 1 by default and hence gen_candidates_scipy is
# called num_restarts*q times
for i in range(num_restarts * q)
]
mock_gen_candidates.side_effect = gcs_return_vals
expected_candidates = torch.cat(
[cands[0] for cands, _ in gcs_return_vals[::num_restarts]], dim=-2
)
bounds = torch.stack(
[
torch.zeros(3, device=self.device, dtype=dtype),
4 * torch.ones(3, device=self.device, dtype=dtype),
]
)
with warnings.catch_warnings(record=True) as ws:
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
nonlinear_inequality_constraints=[nlc],
sequential=True,
ic_generator=mock_gen_batch_initial_conditions,
gen_candidates=mock_gen_candidates,
)
if mock_gen_candidates == mock_gen_candidates_torch:
self.assertEqual(len(ws), 3)
message = (
"Keyword arguments ['nonlinear_inequality_constraints']"
" will be ignored because they are not allowed parameters for"
" function gen_candidates. Allowed parameters are "
" ['initial_conditions', 'acquisition_function', "
"'lower_bounds', 'upper_bounds', 'optimizer', 'options',"
" 'callback', 'fixed_features', 'timeout_sec']."
)
expected_warning_raised = any(
issubclass(w.category, UserWarning)
and message == str(w.message)
for w in ws
)
self.assertTrue(expected_warning_raised)
else:
self.assertEqual(len(ws), 0)
self.assertTrue(torch.equal(candidates, expected_candidates))
# Extract the relevant entries from gcs_return_vals to
# perform comparison with.
self.assertTrue(
torch.equal(
acq_value,
torch.cat(
[
expected_acq_value
for _, expected_acq_value in gcs_return_vals[
num_restarts - 1 :: num_restarts
]
]
),
),
)
def test_constraint_caching(self):
def nlc(x):
return 4 - x.sum(dim=-1)
class FunWrapperWithCallCount:
def __init__(self):
self.call_count = 0
def __call__(self, x, f):
self.call_count += 1
X = torch.from_numpy(x).view(-1).contiguous().requires_grad_(True)
loss = f(X).sum()
gradf = _arrayify(torch.autograd.grad(loss, X)[0].contiguous().view(-1))
return loss.item(), gradf
f_np_wrapper = FunWrapperWithCallCount()
f_obj, f_grad = _make_f_and_grad_nonlinear_inequality_constraints(
f_np_wrapper=f_np_wrapper, nlc=nlc
)
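# the constraint value and gradient are computed in one pass and cached for
# the most recent input, so repeated calls with the same x should not hit
# `f_np_wrapper` again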
x1, x2 = np.array([1.0, 0.5, 0.25]), np.array([1.0, 0.5, 0.5])
# Call f_obj once, this requires calling f_np_wrapper
self.assertEqual(f_obj(x1), 2.25)
self.assertEqual(f_np_wrapper.call_count, 1)
# Call f_obj again, we should use the cached value this time
self.assertEqual(f_obj(x1), 2.25)
self.assertEqual(f_np_wrapper.call_count, 1)
# Call f_grad, we should use the cached value here as well
self.assertTrue(np.array_equal(f_grad(x1), -np.ones(3)))
self.assertEqual(f_np_wrapper.call_count, 1)
# Call f_grad with a new input
self.assertTrue(np.array_equal(f_grad(x2), -np.ones(3)))
self.assertEqual(f_np_wrapper.call_count, 2)
# Call f_obj on the new input, should use the cache
self.assertEqual(f_obj(x2), 2.0)
self.assertEqual(f_np_wrapper.call_count, 2)
class TestOptimizeAcqfCyclic(BotorchTestCase):
@mock.patch("botorch.optim.optimize._optimize_acqf") # noqa: C901
# TODO: make sure this runs without mock
def test_optimize_acqf_cyclic(self, mock_optimize_acqf):
num_restarts = 2
raw_samples = 10
num_cycles = 2
options = {}
tkwargs = {"device": self.device}
bounds = torch.stack([torch.zeros(3), 4 * torch.ones(3)])
inequality_constraints = [
[torch.tensor([2], dtype=int), torch.tensor([4.0]), torch.tensor(5.0)]
]
mock_acq_function = MockAcquisitionFunction()
for q, dtype in itertools.product([1, 3], (torch.float, torch.double)):
tkwargs["dtype"] = dtype
inequality_constraints = [
(
# indices can't be floats or doubles
inequality_constraints[0][0],
inequality_constraints[0][1].to(**tkwargs),
inequality_constraints[0][2].to(**tkwargs),
)
]
mock_optimize_acqf.reset_mock()
bounds = bounds.to(**tkwargs)
candidate_rvs = []
acq_val_rvs = []
for cycle_j in range(num_cycles):
gcs_return_vals = [
(torch.rand(1, 3, **tkwargs), torch.rand(1, **tkwargs))
for _ in range(q)
]
if cycle_j == 0:
# return `q` candidates for first call
candidate_rvs.append(
torch.cat([rv[0] for rv in gcs_return_vals], dim=-2)
)
acq_val_rvs.append(torch.cat([rv[1] for rv in gcs_return_vals]))
else:
# return 1 candidate for subsequent calls
for rv in gcs_return_vals:
candidate_rvs.append(rv[0])
acq_val_rvs.append(rv[1])
mock_optimize_acqf.side_effect = list(zip(candidate_rvs, acq_val_rvs))
orig_candidates = candidate_rvs[0].clone()
# wrap the set_X_pending method for checking that call arguments
with mock.patch.object(
MockAcquisitionFunction,
"set_X_pending",
wraps=mock_acq_function.set_X_pending,
) as mock_set_X_pending:
candidates, acq_value = optimize_acqf_cyclic(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
post_processing_func=rounding_func,
cyclic_options={"maxiter": num_cycles},
)
# check that X_pending is set correctly in cyclic optimization
if q > 1:
x_pending_call_args_list = mock_set_X_pending.call_args_list
idxr = torch.ones(q, dtype=torch.bool, device=self.device)
for i in range(len(x_pending_call_args_list) - 1):
idxr[i] = 0
self.assertTrue(
torch.equal(
x_pending_call_args_list[i][0][0], orig_candidates[idxr]
)
)
idxr[i] = 1
orig_candidates[i] = candidate_rvs[i + 1]
# check reset to base_X_pending
self.assertIsNone(x_pending_call_args_list[-1][0][0])
else:
mock_set_X_pending.assert_not_called()
# check final candidates
expected_candidates = (
torch.cat(candidate_rvs[-q:], dim=0) if q > 1 else candidate_rvs[0]
)
self.assertTrue(torch.equal(candidates, expected_candidates))
# check call arguments for optimize_acqf
call_args_list = mock_optimize_acqf.call_args_list
expected_call_args = {
"acq_function": mock_acq_function,
"bounds": bounds,
"num_restarts": num_restarts,
"raw_samples": raw_samples,
"options": options,
"inequality_constraints": inequality_constraints,
"equality_constraints": None,
"fixed_features": None,
"post_processing_func": rounding_func,
"return_best_only": True,
"sequential": True,
}
orig_candidates = candidate_rvs[0].clone()
for i in range(len(call_args_list)):
if i == 0:
# first cycle
expected_call_args.update(
{"batch_initial_conditions": None, "q": q}
)
else:
expected_call_args.update(
{"batch_initial_conditions": orig_candidates[i - 1 : i], "q": 1}
)
orig_candidates[i - 1] = candidate_rvs[i]
for k, v in call_args_list[i][1].items():
if torch.is_tensor(v):
self.assertTrue(torch.equal(expected_call_args[k], v))
elif k == "acq_function":
self.assertIsInstance(
mock_acq_function, MockAcquisitionFunction
)
else:
self.assertEqual(expected_call_args[k], v)
class TestOptimizeAcqfList(BotorchTestCase):
@mock.patch("botorch.optim.optimize.optimize_acqf") # noqa: C901
@mock.patch("botorch.optim.optimize.optimize_acqf_mixed")
def test_optimize_acqf_list(self, mock_optimize_acqf_mixed, mock_optimize_acqf):
num_restarts = 2
raw_samples = 10
options = {}
tkwargs = {"device": self.device}
bounds = torch.stack([torch.zeros(3), 4 * torch.ones(3)])
inequality_constraints = [
[torch.tensor([3]), torch.tensor([4]), torch.tensor(5)]
]
# the acquisition functions are created once; X_pending is cleared for each dtype iteration below
mock_acq_function_1 = MockAcquisitionFunction()
mock_acq_function_2 = MockAcquisitionFunction()
mock_acq_function_list = [mock_acq_function_1, mock_acq_function_2]
fixed_features_list = [None, [{0: 0.5}]]
for ffl in fixed_features_list:
for num_acqf, dtype in itertools.product(
[1, 2], (torch.float, torch.double)
):
for m in mock_acq_function_list:
# clear previous X_pending
m.set_X_pending(None)
tkwargs["dtype"] = dtype
inequality_constraints[0] = [
t.to(**tkwargs) for t in inequality_constraints[0]
]
mock_optimize_acqf.reset_mock()
mock_optimize_acqf_mixed.reset_mock()
bounds = bounds.to(**tkwargs)
candidate_rvs = []
acq_val_rvs = []
gcs_return_vals = [
(torch.rand(1, 3, **tkwargs), torch.rand(1, **tkwargs))
for _ in range(num_acqf)
]
for rv in gcs_return_vals:
candidate_rvs.append(rv[0])
acq_val_rvs.append(rv[1])
side_effect = list(zip(candidate_rvs, acq_val_rvs))
mock_optimize_acqf.side_effect = side_effect
mock_optimize_acqf_mixed.side_effect = side_effect
orig_candidates = candidate_rvs[0].clone()
# Wrap the set_X_pending method for checking that call arguments
with mock.patch.object(
MockAcquisitionFunction,
"set_X_pending",
wraps=mock_acq_function_1.set_X_pending,
) as mock_set_X_pending_1, mock.patch.object(
MockAcquisitionFunction,
"set_X_pending",
wraps=mock_acq_function_2.set_X_pending,
) as mock_set_X_pending_2:
candidates, _ = optimize_acqf_list(
acq_function_list=mock_acq_function_list[:num_acqf],
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
post_processing_func=rounding_func,
fixed_features_list=ffl,
)
# check that X_pending is set correctly in sequential optimization
if num_acqf > 1:
x_pending_call_args_list = mock_set_X_pending_2.call_args_list
idxr = torch.ones(
num_acqf, dtype=torch.bool, device=self.device
)
for i in range(len(x_pending_call_args_list) - 1):
idxr[i] = 0
self.assertTrue(
torch.equal(
x_pending_call_args_list[i][0][0],
orig_candidates[idxr],
)
)
idxr[i] = 1
orig_candidates[i] = candidate_rvs[i + 1]
else:
mock_set_X_pending_1.assert_not_called()
# check final candidates
expected_candidates = (
torch.cat(candidate_rvs[-num_acqf:], dim=0)
if num_acqf > 1
else candidate_rvs[0]
)
self.assertTrue(torch.equal(candidates, expected_candidates))
# check call arguments for optimize_acqf
if ffl is None:
call_args_list = mock_optimize_acqf.call_args_list
expected_call_args = {
"acq_function": None,
"bounds": bounds,
"q": 1,
"num_restarts": num_restarts,
"raw_samples": raw_samples,
"options": options,
"inequality_constraints": inequality_constraints,
"equality_constraints": None,
"fixed_features": None,
"post_processing_func": rounding_func,
"batch_initial_conditions": None,
"return_best_only": True,
"sequential": False,
}
else:
call_args_list = mock_optimize_acqf_mixed.call_args_list
expected_call_args = {
"acq_function": None,
"bounds": bounds,
"q": 1,
"num_restarts": num_restarts,
"raw_samples": raw_samples,
"options": options,
"inequality_constraints": inequality_constraints,
"equality_constraints": None,
"post_processing_func": rounding_func,
"batch_initial_conditions": None,
"fixed_features_list": ffl,
}
for i in range(len(call_args_list)):
expected_call_args["acq_function"] = mock_acq_function_list[i]
for k, v in call_args_list[i][1].items():
if torch.is_tensor(v):
self.assertTrue(torch.equal(expected_call_args[k], v))
elif k == "acq_function":
self.assertIsInstance(
mock_acq_function_list[i], MockAcquisitionFunction
)
else:
self.assertEqual(expected_call_args[k], v)
def test_optimize_acqf_list_empty_list(self):
with self.assertRaises(ValueError):
optimize_acqf_list(
acq_function_list=[],
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
num_restarts=2,
raw_samples=10,
)
def test_optimize_acqf_list_fixed_features(self):
with self.assertRaises(ValueError):
optimize_acqf_list(
acq_function_list=[
MockAcquisitionFunction(),
MockAcquisitionFunction(),
],
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
num_restarts=2,
raw_samples=10,
fixed_features_list=[{0: 0.5}],
fixed_features={0: 0.5},
)
class TestOptimizeAcqfMixed(BotorchTestCase):
@mock.patch("botorch.optim.optimize.optimize_acqf") # noqa: C901
def test_optimize_acqf_mixed_q1(self, mock_optimize_acqf):
num_restarts = 2
raw_samples = 10
q = 1
options = {}
tkwargs = {"device": self.device}
bounds = torch.stack([torch.zeros(3), 4 * torch.ones(3)])
mock_acq_function = MockAcquisitionFunction()
for num_ff, dtype in itertools.product([1, 3], (torch.float, torch.double)):
tkwargs["dtype"] = dtype
mock_optimize_acqf.reset_mock()
bounds = bounds.to(**tkwargs)
candidate_rvs = []
acq_val_rvs = []
for _ in range(num_ff):
candidate_rvs.append(torch.rand(1, 3, **tkwargs))
acq_val_rvs.append(torch.rand(1, **tkwargs))
fixed_features_list = [{i: i * 0.1} for i in range(num_ff)]
side_effect = list(zip(candidate_rvs, acq_val_rvs))
mock_optimize_acqf.side_effect = side_effect
candidates, acq_value = optimize_acqf_mixed(
acq_function=mock_acq_function,
q=q,
fixed_features_list=fixed_features_list,
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
post_processing_func=rounding_func,
)
# compute expected output
ff_acq_values = torch.stack(acq_val_rvs)
best = torch.argmax(ff_acq_values)
expected_candidates = candidate_rvs[best]
expected_acq_value = ff_acq_values[best]
self.assertTrue(torch.equal(candidates, expected_candidates))
self.assertTrue(torch.equal(acq_value, expected_acq_value))
# check call arguments for optimize_acqf
call_args_list = mock_optimize_acqf.call_args_list
expected_call_args = {
"acq_function": None,
"bounds": bounds,
"q": q,
"num_restarts": num_restarts,
"raw_samples": raw_samples,
"options": options,
"inequality_constraints": None,
"equality_constraints": None,
"fixed_features": None,
"post_processing_func": rounding_func,
"batch_initial_conditions": None,
"return_best_only": True,
"sequential": False,
"ic_generator": None,
"nonlinear_inequality_constraints": None,
}
for i in range(len(call_args_list)):
expected_call_args["fixed_features"] = fixed_features_list[i]
for k, v in call_args_list[i][1].items():
if torch.is_tensor(v):
self.assertTrue(torch.equal(expected_call_args[k], v))
elif k == "acq_function":
self.assertIsInstance(v, MockAcquisitionFunction)
else:
self.assertEqual(expected_call_args[k], v)
@mock.patch("botorch.optim.optimize.optimize_acqf") # noqa: C901
def test_optimize_acqf_mixed_q2(self, mock_optimize_acqf):
num_restarts = 2
raw_samples = 10
q = 2
options = {}
tkwargs = {"device": self.device}
bounds = torch.stack([torch.zeros(3), 4 * torch.ones(3)])
mock_acq_functions = [
MockAcquisitionFunction(),
MockOneShotEvaluateAcquisitionFunction(),
]
for num_ff, dtype, mock_acq_function in itertools.product(
[1, 3], (torch.float, torch.double), mock_acq_functions
):
tkwargs["dtype"] = dtype
mock_optimize_acqf.reset_mock()
bounds = bounds.to(**tkwargs)
fixed_features_list = [{i: i * 0.1} for i in range(num_ff)]
candidate_rvs, exp_candidates, acq_val_rvs = [], [], []
# generate mock side effects and compute expected outputs
for _ in range(q):
candidate_rvs_q = [torch.rand(1, 3, **tkwargs) for _ in range(num_ff)]
acq_val_rvs_q = [torch.rand(1, **tkwargs) for _ in range(num_ff)]
best = torch.argmax(torch.stack(acq_val_rvs_q))
exp_candidates.append(candidate_rvs_q[best])
candidate_rvs += candidate_rvs_q
acq_val_rvs += acq_val_rvs_q
side_effect = list(zip(candidate_rvs, acq_val_rvs))
mock_optimize_acqf.side_effect = side_effect
candidates, acq_value = optimize_acqf_mixed(
acq_function=mock_acq_function,
q=q,
fixed_features_list=fixed_features_list,
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
post_processing_func=rounding_func,
)
expected_candidates = torch.cat(exp_candidates, dim=-2)
if isinstance(mock_acq_function, MockOneShotEvaluateAcquisitionFunction):
expected_acq_value = mock_acq_function.evaluate(
expected_candidates, bounds=bounds
)
else:
expected_acq_value = mock_acq_function(expected_candidates)
self.assertTrue(torch.equal(candidates, expected_candidates))
self.assertTrue(torch.equal(acq_value, expected_acq_value))
def test_optimize_acqf_mixed_empty_ff(self):
with self.assertRaises(ValueError):
mock_acq_function = MockAcquisitionFunction()
optimize_acqf_mixed(
acq_function=mock_acq_function,
q=1,
fixed_features_list=[],
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
num_restarts=2,
raw_samples=10,
)
def test_optimize_acqf_one_shot_large_q(self):
with self.assertRaises(ValueError):
mock_acq_function = MockOneShotAcquisitionFunction()
fixed_features_list = [{i: i * 0.1} for i in range(2)]
optimize_acqf_mixed(
acq_function=mock_acq_function,
q=2,
fixed_features_list=fixed_features_list,
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
num_restarts=2,
raw_samples=10,
)
class TestOptimizeAcqfDiscrete(BotorchTestCase):
def test_optimize_acqf_discrete(self):
for q, dtype in itertools.product((1, 2), (torch.float, torch.double)):
tkwargs = {"device": self.device, "dtype": dtype}
mock_acq_function = SquaredAcquisitionFunction()
mock_acq_function.set_X_pending(None)
            # ensure proper raising of errors if no choices; the regex matches
            # the library's error message verbatim, including its "non-emtpy" typo
            with self.assertRaisesRegex(InputDataError, "`choices` must be non-emtpy."):
optimize_acqf_discrete(
acq_function=mock_acq_function,
q=q,
choices=torch.empty(0, 2),
)
choices = torch.rand(5, 2, **tkwargs)
# warning for unsupported keyword arguments
with self.assertWarnsRegex(
DeprecationWarning,
r"`optimize_acqf_discrete` does not support arguments "
r"\['num_restarts'\]. In the future, this will become an error.",
):
optimize_acqf_discrete(
acq_function=mock_acq_function, q=q, choices=choices, num_restarts=8
)
exp_acq_vals = mock_acq_function(choices)
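            # optimize_acqf_discrete should evaluate the acquisition on all
            # choices and return the top-q scorers (distinct ones when unique=True)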
# test unique
candidates, acq_value = optimize_acqf_discrete(
acq_function=mock_acq_function,
q=q,
choices=choices,
)
best_idcs = torch.topk(exp_acq_vals, q).indices
expected_candidates = choices[best_idcs]
expected_acq_value = exp_acq_vals[best_idcs].reshape_as(acq_value)
self.assertAllClose(acq_value, expected_acq_value)
self.assertAllClose(candidates, expected_candidates)
            # test unique=False (this test does not exercise pending points)
candidates, acq_value = optimize_acqf_discrete(
acq_function=mock_acq_function, q=q, choices=choices, unique=False
)
best_idx = torch.argmax(exp_acq_vals)
expected_candidates = choices[best_idx].repeat(q, 1)
expected_acq_value = exp_acq_vals[best_idx].repeat(q).reshape_as(acq_value)
self.assertAllClose(acq_value, expected_acq_value)
self.assertAllClose(candidates, expected_candidates)
            # test max_batch_size
candidates, acq_value = optimize_acqf_discrete(
acq_function=mock_acq_function, q=q, choices=choices, max_batch_size=3
)
best_idcs = torch.topk(exp_acq_vals, q).indices
expected_candidates = choices[best_idcs]
expected_acq_value = exp_acq_vals[best_idcs].reshape_as(acq_value)
self.assertAllClose(acq_value, expected_acq_value)
self.assertAllClose(candidates, expected_candidates)
            # test max_batch_size with unique=False
candidates, acq_value = optimize_acqf_discrete(
acq_function=mock_acq_function,
q=q,
choices=choices,
unique=False,
max_batch_size=3,
)
best_idx = torch.argmax(exp_acq_vals)
expected_candidates = choices[best_idx].repeat(q, 1)
expected_acq_value = exp_acq_vals[best_idx].repeat(q).reshape_as(acq_value)
self.assertAllClose(acq_value, expected_acq_value)
self.assertAllClose(candidates, expected_candidates)
with self.assertRaises(UnsupportedError):
acqf = MockOneShotAcquisitionFunction()
optimize_acqf_discrete(
acq_function=acqf,
q=1,
choices=torch.tensor([[0.5], [0.2]]),
)
def test_optimize_acqf_discrete_local_search(self):
for q, dtype in itertools.product((1, 2), (torch.float, torch.double)):
tkwargs = {"device": self.device, "dtype": dtype}
mock_acq_function = SquaredAcquisitionFunction()
mock_acq_function.set_X_pending(None)
discrete_choices = [
torch.tensor([0, 1, 6], **tkwargs),
torch.tensor([2, 3, 4], **tkwargs),
torch.tensor([5, 6, 9], **tkwargs),
]
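            # assuming SquaredAcquisitionFunction scores a point by its norm,
            # the elementwise maxima [6, 4, 9] form the global optimum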
# make sure we can find the global optimum
candidates, acq_value = optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=discrete_choices,
raw_samples=1,
num_restarts=1,
)
self.assertTrue(
torch.allclose(candidates[0], torch.tensor([6, 4, 9], **tkwargs))
)
            if q > 1: # there are three local maxima
self.assertTrue(
torch.allclose(candidates[1], torch.tensor([6, 3, 9], **tkwargs))
or torch.allclose(candidates[1], torch.tensor([1, 4, 9], **tkwargs))
or torch.allclose(candidates[1], torch.tensor([6, 4, 6], **tkwargs))
)
# same but with unique=False
candidates, acq_value = optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=discrete_choices,
raw_samples=1,
num_restarts=1,
unique=False,
)
expected_candidates = torch.tensor([[6, 4, 9], [6, 4, 9]], **tkwargs)
self.assertAllClose(candidates, expected_candidates[:q])
# test X_avoid and batch_initial_conditions
candidates, acq_value = optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=discrete_choices,
X_avoid=torch.tensor([[6, 4, 9]], **tkwargs),
batch_initial_conditions=torch.tensor([[0, 2, 5]], **tkwargs).unsqueeze(
1
),
)
self.assertTrue(
torch.allclose(candidates[0], torch.tensor([6, 3, 9], **tkwargs))
)
            if q > 1: # there are two local maxima
self.assertTrue(
torch.allclose(candidates[1], torch.tensor([6, 2, 9], **tkwargs))
)
# test inequality constraints
inequality_constraints = [
(
torch.tensor([2], device=self.device),
-1 * torch.ones(1, **tkwargs),
-6 * torch.ones(1, **tkwargs),
)
]
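            # (indices, coefficients, rhs) encodes sum(coef * x[idx]) >= rhs;
            # here -1 * x[2] >= -6, i.e. x[2] <= 6, which rules out x[2] = 9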
candidates, acq_value = optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=discrete_choices,
raw_samples=1,
num_restarts=1,
inequality_constraints=inequality_constraints,
)
self.assertTrue(
torch.allclose(candidates[0], torch.tensor([6, 4, 6], **tkwargs))
)
            if q > 1: # there are three local maxima
self.assertTrue(
torch.allclose(candidates[1], torch.tensor([6, 4, 5], **tkwargs))
or torch.allclose(candidates[1], torch.tensor([6, 3, 6], **tkwargs))
or torch.allclose(candidates[1], torch.tensor([1, 4, 6], **tkwargs))
)
# make sure we break if there are no neighbors
optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=[
torch.tensor([0, 1], **tkwargs),
torch.tensor([1], **tkwargs),
],
raw_samples=1,
num_restarts=1,
)
# test _filter_infeasible
X = torch.tensor([[0, 2, 5], [0, 2, 6], [0, 2, 9]], **tkwargs)
X_filtered = _filter_infeasible(
X=X, inequality_constraints=inequality_constraints
)
self.assertAllClose(X[:2], X_filtered)
# test _filter_invalid
X_filtered = _filter_invalid(X=X, X_avoid=X[1].unsqueeze(0))
self.assertAllClose(X[[0, 2]], X_filtered)
X_filtered = _filter_invalid(X=X, X_avoid=X[[0, 2]])
self.assertAllClose(X[1].unsqueeze(0), X_filtered)
# test _generate_neighbors
X_loc = _generate_neighbors(
x=torch.tensor([0, 2, 6], **tkwargs).unsqueeze(0),
discrete_choices=discrete_choices,
X_avoid=torch.tensor([[0, 3, 6], [0, 2, 5]], **tkwargs),
inequality_constraints=inequality_constraints,
)
self.assertTrue(
torch.allclose(
X_loc, torch.tensor([[1, 2, 6], [6, 2, 6], [0, 4, 6]], **tkwargs)
)
)
# test _gen_batch_initial_conditions_local_search
with self.assertRaisesRegex(RuntimeError, "Failed to generate"):
_gen_batch_initial_conditions_local_search(
discrete_choices=discrete_choices,
raw_samples=1,
X_avoid=torch.zeros(0, 3, **tkwargs),
inequality_constraints=[],
min_points=30,
)
X = _gen_batch_initial_conditions_local_search(
discrete_choices=discrete_choices,
raw_samples=1,
X_avoid=torch.zeros(0, 3, **tkwargs),
inequality_constraints=[],
min_points=20,
)
self.assertEqual(len(X), 20)
self.assertAllClose(torch.unique(X, dim=0), X)
def test_no_precision_loss_with_fixed_features(self) -> None:
acqf = SquaredAcquisitionFunction()
val = 1e-1
fixed_features_list = [{0: val}]
bounds = torch.stack(
[torch.zeros(2, dtype=torch.float64), torch.ones(2, dtype=torch.float64)]
)
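        # exact equality (not allclose): the fixed feature must survive the
        # mixed optimization without any floating-point round-off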
candidate, _ = optimize_acqf_mixed(
acqf,
bounds=bounds,
q=1,
num_restarts=1,
raw_samples=1,
fixed_features_list=fixed_features_list,
)
self.assertEqual(candidate[0, 0].item(), val)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from contextlib import ExitStack
from itertools import product
from random import random
from typing import Optional
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition.analytic import PosteriorMean
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
)
from botorch.acquisition.multi_objective.monte_carlo import (
qNoisyExpectedHypervolumeImprovement,
)
from botorch.exceptions import BadInitialCandidatesWarning, SamplingWarning
from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models import SingleTaskGP
from botorch.optim import initialize_q_batch, initialize_q_batch_nonneg
from botorch.optim.initializers import (
gen_batch_initial_conditions,
gen_one_shot_kg_initial_conditions,
gen_value_function_initial_conditions,
sample_perturbed_subset_dims,
sample_points_around_best,
sample_q_batches_from_polytope,
sample_truncated_normal_perturbations,
transform_constraints,
transform_inter_point_constraint,
transform_intra_point_constraint,
)
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.sampling import draw_sobol_samples, manual_seed
from botorch.utils.testing import (
_get_max_violation_of_bounds,
_get_max_violation_of_constraints,
BotorchTestCase,
MockAcquisitionFunction,
MockModel,
MockPosterior,
)
class TestBoundsAndConstraintCheckers(BotorchTestCase):
def test_bounds_check(self) -> None:
bounds = torch.tensor([[1, 2], [3, 4]], device=self.device)
samples = torch.tensor([[2, 3], [2, 3.1]], device=self.device)[None, :, :]
result = _get_max_violation_of_bounds(samples, bounds)
self.assertAlmostEqual(result, -0.9, delta=1e-6)
samples = torch.tensor([[2, 3], [2, 4.1]], device=self.device)[None, :, :]
result = _get_max_violation_of_bounds(samples, bounds)
self.assertAlmostEqual(result, 0.1, delta=1e-6)
def test_constraint_check(self) -> None:
constraints = [
(
torch.tensor([1], device=self.device),
torch.tensor([1.0], device=self.device),
3,
)
]
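        # the tuple (indices, coefficients, rhs) reads 1.0 * x[..., 1] (==|>=) 3;
        # x1 values of 3 and 3.1 give a 0.1 equality violation and none as inequality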
samples = torch.tensor([[2, 3], [2, 3.1]], device=self.device)[None, :, :]
result = _get_max_violation_of_constraints(samples, constraints, equality=True)
self.assertAlmostEqual(result, 0.1, delta=1e-6)
result = _get_max_violation_of_constraints(samples, constraints, equality=False)
self.assertAlmostEqual(result, 0.0, delta=1e-6)
class TestInitializeQBatch(BotorchTestCase):
def test_initialize_q_batch_nonneg(self):
for dtype in (torch.float, torch.double):
# basic test
X = torch.rand(5, 3, 4, device=self.device, dtype=dtype)
Y = torch.rand(5, device=self.device, dtype=dtype)
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2)
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
# ensure nothing happens if we want all samples
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=5)
self.assertTrue(torch.equal(X, ics))
# make sure things work with constant inputs
Y = torch.ones(5, device=self.device, dtype=dtype)
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2)
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
# ensure raises correct warning
Y = torch.zeros(5, device=self.device, dtype=dtype)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BadInitialCandidatesWarning))
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
with self.assertRaises(RuntimeError):
initialize_q_batch_nonneg(X=X, Y=Y, n=10)
# test less than `n` positive acquisition values
Y = torch.arange(5, device=self.device, dtype=dtype) - 3
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2)
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
# check that we chose the point with the positive acquisition value
self.assertTrue(torch.equal(ics[0], X[-1]) or torch.equal(ics[1], X[-1]))
# test less than `n` alpha_pos values
Y = torch.arange(5, device=self.device, dtype=dtype)
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2, alpha=1.0)
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
def test_initialize_q_batch(self):
for dtype in (torch.float, torch.double):
for batch_shape in (torch.Size(), [3, 2], (2,), torch.Size([2, 3, 4]), []):
# basic test
X = torch.rand(5, *batch_shape, 3, 4, device=self.device, dtype=dtype)
Y = torch.rand(5, *batch_shape, device=self.device, dtype=dtype)
ics = initialize_q_batch(X=X, Y=Y, n=2)
self.assertEqual(ics.shape, torch.Size([2, *batch_shape, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
# ensure nothing happens if we want all samples
ics = initialize_q_batch(X=X, Y=Y, n=5)
self.assertTrue(torch.equal(X, ics))
# ensure raises correct warning
Y = torch.zeros(5, device=self.device, dtype=dtype)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
ics = initialize_q_batch(X=X, Y=Y, n=2)
self.assertEqual(len(w), 1)
self.assertTrue(
issubclass(w[-1].category, BadInitialCandidatesWarning)
)
self.assertEqual(ics.shape, torch.Size([2, *batch_shape, 3, 4]))
with self.assertRaises(RuntimeError):
initialize_q_batch(X=X, Y=Y, n=10)
def test_initialize_q_batch_largeZ(self):
for dtype in (torch.float, torch.double):
# testing large eta*Z
X = torch.rand(5, 3, 4, device=self.device, dtype=dtype)
Y = torch.tensor([-1e12, 0, 0, 0, 1e12], device=self.device, dtype=dtype)
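            # after standardizing Y, exp(eta * Z) with eta=100 can overflow in
            # single precision; the initializer should still return n candidates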
ics = initialize_q_batch(X=X, Y=Y, n=2, eta=100)
self.assertEqual(ics.shape[0], 2)
class TestGenBatchInitialCandidates(BotorchTestCase):
def test_gen_batch_initial_inf_bounds(self):
bounds = torch.rand(2, 2)
bounds[0, 1] = float("inf")
with self.assertRaisesRegex(
NotImplementedError,
r"Currently only finite values in `bounds` are supported for "
r"generating initial conditions for optimization.",
):
gen_batch_initial_conditions(
acq_function=mock.Mock(),
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=2,
)
def test_gen_batch_initial_conditions(self):
bounds = torch.stack([torch.zeros(2), torch.ones(2)])
mock_acqf = MockAcquisitionFunction()
mock_acqf.objective = lambda y: y.squeeze(-1)
for dtype in (torch.float, torch.double):
bounds = bounds.to(device=self.device, dtype=dtype)
mock_acqf.X_baseline = bounds # for testing sample_around_best
mock_acqf.model = MockModel(MockPosterior(mean=bounds[:, :1]))
for nonnegative, seed, init_batch_limit, ffs, sample_around_best in product(
[True, False], [None, 1234], [None, 1], [None, {0: 0.5}], [True, False]
):
with mock.patch.object(
MockAcquisitionFunction,
"__call__",
wraps=mock_acqf.__call__,
) as mock_acqf_call, warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=BadInitialCandidatesWarning
)
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=mock_acqf,
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=10,
fixed_features=ffs,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"init_batch_limit": init_batch_limit,
"sample_around_best": sample_around_best,
},
)
expected_shape = torch.Size([2, 1, 2])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertLess(
_get_max_violation_of_bounds(batch_initial_conditions, bounds),
1e-6,
)
raw_samps = mock_acqf_call.call_args[0][0]
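                    # sample_around_best appends another raw_samples candidate
                    # points around the incumbent, doubling 10 raw samples to 20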
batch_shape = (
torch.Size([20 if sample_around_best else 10])
if init_batch_limit is None
else torch.Size([init_batch_limit])
)
expected_raw_samps_shape = batch_shape + torch.Size([1, 2])
self.assertEqual(raw_samps.shape, expected_raw_samps_shape)
if ffs is not None:
for idx, val in ffs.items():
self.assertTrue(
torch.all(batch_initial_conditions[..., idx] == val)
)
def test_gen_batch_initial_conditions_highdim(self):
d = 2200 # 2200 * 10 (q) > 21201 (sobol max dim)
bounds = torch.stack([torch.zeros(d), torch.ones(d)])
ffs_map = {i: random() for i in range(0, d, 2)}
mock_acqf = MockAcquisitionFunction()
mock_acqf.objective = lambda y: y.squeeze(-1)
for dtype in (torch.float, torch.double):
bounds = bounds.to(device=self.device, dtype=dtype)
mock_acqf.X_baseline = bounds # for testing sample_around_best
mock_acqf.model = MockModel(MockPosterior(mean=bounds[:, :1]))
for nonnegative, seed, ffs, sample_around_best in product(
[True, False], [None, 1234], [None, ffs_map], [True, False]
):
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
warnings.simplefilter(
"ignore", category=BadInitialCandidatesWarning
)
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=MockAcquisitionFunction(),
bounds=bounds,
q=10,
num_restarts=1,
raw_samples=2,
fixed_features=ffs,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"sample_around_best": sample_around_best,
},
)
self.assertTrue(
any(issubclass(w.category, SamplingWarning) for w in ws)
)
expected_shape = torch.Size([1, 10, d])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertLess(
_get_max_violation_of_bounds(batch_initial_conditions, bounds), 1e-6
)
if ffs is not None:
for idx, val in ffs.items():
self.assertTrue(
torch.all(batch_initial_conditions[..., idx] == val)
)
def test_gen_batch_initial_conditions_warning(self) -> None:
for dtype in (torch.float, torch.double):
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
samples = torch.zeros(10, 1, 2, device=self.device, dtype=dtype)
with self.assertWarnsRegex(
expected_warning=BadInitialCandidatesWarning,
expected_regex="Unable to find non-zero acquisition",
), mock.patch(
"botorch.optim.initializers.draw_sobol_samples",
return_value=samples,
):
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=MockAcquisitionFunction(),
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=10,
options={"seed": 1234},
)
self.assertTrue(
torch.equal(
batch_initial_conditions,
torch.zeros(2, 1, 2, device=self.device, dtype=dtype),
)
)
def test_gen_batch_initial_conditions_transform_intra_point_constraint(self):
for dtype in (torch.float, torch.double):
constraint = (
torch.tensor([0, 1], dtype=torch.int64, device=self.device),
torch.tensor([-1, -1]).to(dtype=dtype, device=self.device),
-1.0,
)
constraints = transform_intra_point_constraint(
constraint=constraint, d=3, q=3
)
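            # an intra-point constraint on d-dim points is replicated for each of
            # the q points, with indices offset by j * d in the flattened q * d vector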
self.assertEqual(len(constraints), 3)
self.assertAllClose(
constraints[0][0],
torch.tensor([0, 1], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
constraints[1][0],
torch.tensor([3, 4], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
constraints[2][0],
torch.tensor([6, 7], dtype=torch.int64, device=self.device),
)
for constraint in constraints:
self.assertAllClose(
torch.tensor([-1, -1], dtype=dtype, device=self.device),
constraint[1],
)
self.assertEqual(constraint[2], -1.0)
# test failure on invalid d
constraint = (
torch.tensor([[0, 3]], dtype=torch.int64, device=self.device),
torch.tensor([-1.0, -1.0], dtype=dtype, device=self.device),
0,
)
with self.assertRaisesRegex(
ValueError,
"Constraint indices cannot exceed the problem dimension d=3.",
):
transform_intra_point_constraint(constraint=constraint, d=3, q=2)
    def test_gen_batch_initial_conditions_transform_inter_point_constraint(self):
for dtype in (torch.float, torch.double):
constraint = (
torch.tensor([[0, 1], [1, 1]], dtype=torch.int64, device=self.device),
torch.tensor([1.0, -1.0], dtype=dtype, device=self.device),
0,
)
transformed = transform_inter_point_constraint(constraint=constraint, d=3)
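            # 2-d [point, dim] indices map to flattened index point * d + dim:
            # [0, 1] -> 1 and [1, 1] -> 4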
self.assertAllClose(
transformed[0],
torch.tensor([1, 4], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
transformed[1],
torch.tensor([1.0, -1.0]).to(dtype=dtype, device=self.device),
)
self.assertEqual(constraint[2], 0.0)
# test failure on invalid d
constraint = (
torch.tensor([[0, 1], [1, 3]], dtype=torch.int64, device=self.device),
torch.tensor([1.0, -1.0], dtype=dtype, device=self.device),
0,
)
with self.assertRaisesRegex(
ValueError,
"Constraint indices cannot exceed the problem dimension d=3.",
):
transform_inter_point_constraint(constraint=constraint, d=3)
def test_gen_batch_initial_conditions_transform_constraints(self):
for dtype in (torch.float, torch.double):
# test with None
self.assertIsNone(transform_constraints(constraints=None, d=3, q=3))
constraints = [
(
torch.tensor([0, 1], dtype=torch.int64, device=self.device),
torch.tensor([-1.0, -1.0], dtype=dtype, device=self.device),
-1.0,
),
(
torch.tensor(
[[0, 1], [1, 1]], device=self.device, dtype=torch.int64
),
torch.tensor([1.0, -1.0], dtype=dtype, device=self.device),
0,
),
]
transformed = transform_constraints(constraints=constraints, d=3, q=3)
self.assertEqual(len(transformed), 4)
self.assertAllClose(
transformed[0][0],
torch.tensor([0, 1], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
transformed[1][0],
torch.tensor([3, 4], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
transformed[2][0],
torch.tensor([6, 7], dtype=torch.int64, device=self.device),
)
for constraint in transformed[:3]:
self.assertAllClose(
torch.tensor([-1, -1], dtype=dtype, device=self.device),
constraint[1],
)
self.assertEqual(constraint[2], -1.0)
self.assertAllClose(
transformed[-1][0],
torch.tensor([1, 4], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
transformed[-1][1],
torch.tensor([1.0, -1.0], dtype=dtype, device=self.device),
)
self.assertEqual(transformed[-1][2], 0.0)
def test_gen_batch_initial_conditions_sample_q_batches_from_polytope(self):
n = 5
q = 2
d = 3
for dtype in (torch.float, torch.double):
bounds = torch.tensor(
[[0, 0, 0], [1, 1, 1]], device=self.device, dtype=dtype
)
inequality_constraints = [
(
torch.tensor([0, 1], device=self.device, dtype=torch.int64),
torch.tensor([-1, 1], device=self.device, dtype=dtype),
torch.tensor(-0.5, device=self.device, dtype=dtype),
)
]
inter_point_inequality_constraints = [
(
torch.tensor([0, 1], device=self.device, dtype=torch.int64),
torch.tensor([-1, 1], device=self.device, dtype=dtype),
torch.tensor(-0.4, device=self.device, dtype=dtype),
),
(
torch.tensor(
[[0, 1], [1, 1]], device=self.device, dtype=torch.int64
),
torch.tensor([1, 1], device=self.device, dtype=dtype),
torch.tensor(0.3, device=self.device, dtype=dtype),
),
]
equality_constraints = [
(
torch.tensor([0, 1, 2], device=self.device, dtype=torch.int64),
torch.tensor([1, 1, 1], device=self.device, dtype=dtype),
torch.tensor(1, device=self.device, dtype=dtype),
)
]
inter_point_equality_constraints = [
(
torch.tensor([0, 1, 2], device=self.device, dtype=torch.int64),
torch.tensor([1, 1, 1], device=self.device, dtype=dtype),
torch.tensor(1, device=self.device, dtype=dtype),
),
(
torch.tensor(
[[0, 0], [1, 0]], device=self.device, dtype=torch.int64
),
torch.tensor([1.0, -1.0], device=self.device, dtype=dtype),
0,
),
]
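            # 1-d index tensors constrain each point separately; 2-d [point, dim]
            # index tensors couple multiple points within the same q-batch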
for equalities, inequalities in product(
[None, equality_constraints, inter_point_equality_constraints],
[None, inequality_constraints, inter_point_inequality_constraints],
):
samples = sample_q_batches_from_polytope(
n=n,
q=q,
bounds=bounds,
n_burnin=10000,
thinning=32,
seed=42,
inequality_constraints=inequalities,
equality_constraints=equalities,
)
self.assertEqual(samples.shape, torch.Size((n, q, d)))
tol = 4e-7
# samples are always on cpu
def _to_self_device(
x: Optional[torch.Tensor],
) -> Optional[torch.Tensor]:
return None if x is None else x.to(device=self.device)
self.assertLess(
_get_max_violation_of_bounds(_to_self_device(samples), bounds), tol
)
self.assertLess(
_get_max_violation_of_constraints(
_to_self_device(samples), constraints=equalities, equality=True
),
tol,
)
self.assertLess(
_get_max_violation_of_constraints(
_to_self_device(samples),
constraints=inequalities,
equality=False,
),
tol,
)
def test_gen_batch_initial_conditions_constraints(self):
for dtype in (torch.float, torch.double):
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
inequality_constraints = [
(
torch.tensor([1], device=self.device, dtype=torch.int64),
torch.tensor([-4], device=self.device, dtype=dtype),
torch.tensor(-3, device=self.device, dtype=dtype),
)
]
equality_constraints = [
(
torch.tensor([0], device=self.device, dtype=torch.int64),
torch.tensor([1], device=self.device, dtype=dtype),
torch.tensor(0.5, device=self.device, dtype=dtype),
)
]
for nonnegative, seed, init_batch_limit, ffs in product(
[True, False], [None, 1234], [None, 1], [None, {0: 0.5}]
):
mock_acqf = MockAcquisitionFunction()
with mock.patch.object(
MockAcquisitionFunction,
"__call__",
wraps=mock_acqf.__call__,
) as mock_acqf_call, warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=BadInitialCandidatesWarning
)
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=mock_acqf,
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=10,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"init_batch_limit": init_batch_limit,
"thinning": 2,
"n_burnin": 3,
},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
expected_shape = torch.Size([2, 1, 2])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertLess(
_get_max_violation_of_bounds(batch_initial_conditions, bounds),
1e-6,
)
self.assertLess(
_get_max_violation_of_constraints(
batch_initial_conditions,
inequality_constraints,
equality=False,
),
1e-6,
)
self.assertLess(
_get_max_violation_of_constraints(
batch_initial_conditions,
equality_constraints,
equality=True,
),
1e-6,
)
raw_samps = mock_acqf_call.call_args[0][0]
batch_shape = (
torch.Size([10])
if init_batch_limit is None
else torch.Size([init_batch_limit])
)
expected_raw_samps_shape = batch_shape + torch.Size([1, 2])
self.assertEqual(raw_samps.shape, expected_raw_samps_shape)
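                    # the equality constraint pins x0 to 0.5; the inequality
                    # -4 * x1 >= -3 bounds x1 <= 0.75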
self.assertTrue((raw_samps[..., 0] == 0.5).all())
self.assertTrue((-4 * raw_samps[..., 1] >= -3).all())
if ffs is not None:
for idx, val in ffs.items():
self.assertTrue(
torch.all(batch_initial_conditions[..., idx] == val)
)
def test_gen_batch_initial_conditions_interpoint_constraints(self):
for dtype in (torch.float, torch.double):
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
inequality_constraints = [
(
torch.tensor([0, 1], device=self.device, dtype=torch.int64),
torch.tensor([-1, -1.0], device=self.device, dtype=dtype),
torch.tensor(-1.0, device=self.device, dtype=dtype),
)
]
equality_constraints = [
(
torch.tensor(
[[0, 0], [1, 0]], device=self.device, dtype=torch.int64
),
torch.tensor([1.0, -1.0], device=self.device, dtype=dtype),
0,
),
(
torch.tensor(
[[0, 0], [2, 0]], device=self.device, dtype=torch.int64
),
torch.tensor([1.0, -1.0], device=self.device, dtype=dtype),
0,
),
]
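            # the two inter-point equalities tie the first coordinate of all three
            # points in a q-batch together: x[0, 0] = x[1, 0] = x[2, 0]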
for nonnegative, seed in product([True, False], [None, 1234]):
mock_acqf = MockAcquisitionFunction()
with mock.patch.object(
MockAcquisitionFunction,
"__call__",
wraps=mock_acqf.__call__,
):
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=mock_acqf,
bounds=bounds,
q=3,
num_restarts=2,
raw_samples=10,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"init_batch_limit": None,
"thinning": 2,
"n_burnin": 3,
},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
expected_shape = torch.Size([2, 3, 2])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertTrue((batch_initial_conditions.sum(dim=-1) <= 1).all())
self.assertAllClose(
batch_initial_conditions[0, 0, 0],
batch_initial_conditions[0, 1, 0],
batch_initial_conditions[0, 2, 0],
atol=1e-7,
)
self.assertAllClose(
batch_initial_conditions[1, 0, 0],
batch_initial_conditions[1, 1, 0],
batch_initial_conditions[1, 2, 0],
)
self.assertLess(
_get_max_violation_of_constraints(
batch_initial_conditions,
inequality_constraints,
equality=False,
),
1e-6,
)
def test_gen_batch_initial_conditions_generator(self):
mock_acqf = MockAcquisitionFunction()
mock_acqf.objective = lambda y: y.squeeze(-1)
for dtype in (torch.float, torch.double):
bounds = torch.tensor(
[[0, 0, 0], [1, 1, 1]], device=self.device, dtype=dtype
)
for nonnegative, seed, init_batch_limit, ffs in product(
[True, False], [None, 1234], [None, 1], [None, {0: 0.5}]
):
def generator(n: int, q: int, seed: int):
with manual_seed(seed):
X_rnd_nlzd = torch.rand(
n,
q,
bounds.shape[-1],
dtype=bounds.dtype,
device=self.device,
)
X_rnd = bounds[0] + (bounds[1] - bounds[0]) * X_rnd_nlzd
X_rnd[..., -1] = 0.42
return X_rnd
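                # the custom generator pins the last dimension to 0.42, which is
                # asserted on the returned initial conditions below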
mock_acqf = MockAcquisitionFunction()
with mock.patch.object(
MockAcquisitionFunction,
"__call__",
wraps=mock_acqf.__call__,
), warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=BadInitialCandidatesWarning
)
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=mock_acqf,
bounds=bounds,
q=2,
num_restarts=4,
raw_samples=10,
generator=generator,
fixed_features=ffs,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"init_batch_limit": init_batch_limit,
},
)
expected_shape = torch.Size([4, 2, 3])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertTrue((batch_initial_conditions[..., -1] == 0.42).all())
self.assertLess(
_get_max_violation_of_bounds(batch_initial_conditions, bounds),
1e-6,
)
if ffs is not None:
for idx, val in ffs.items():
self.assertTrue(
torch.all(batch_initial_conditions[..., idx] == val)
)
def test_error_generator_with_sample_around_best(self):
tkwargs = {"device": self.device, "dtype": torch.double}
def generator(n: int, q: int, seed: int):
return torch.rand(n, q, 3).to(**tkwargs)
        with self.assertRaisesRegex(
            UnsupportedError,
            # the regex matches the library's message verbatim, including the
            # ungrammatical "is be used"
            "Option 'sample_around_best' is not supported when custom "
            "generator is be used.",
        ):
gen_batch_initial_conditions(
MockAcquisitionFunction(),
bounds=torch.tensor([[0, 0], [1, 1]], **tkwargs),
q=1,
num_restarts=1,
raw_samples=1,
generator=generator,
options={"sample_around_best": True},
)
def test_error_equality_constraints_with_sample_around_best(self):
tkwargs = {"device": self.device, "dtype": torch.double}
# this will give something that does not respect the constraints
# TODO: it would be good to have a utils function to check if the
# constraints are obeyed
        with self.assertRaisesRegex(
            UnsupportedError,
            # the regex matches the library's message verbatim, including the
            # missing space between "equality" and "constraints"
            "Option 'sample_around_best' is not supported when equality"
            "constraints are present.",
        ):
gen_batch_initial_conditions(
MockAcquisitionFunction(),
bounds=torch.tensor([[0, 0], [1, 1]], **tkwargs),
q=1,
num_restarts=1,
raw_samples=1,
equality_constraints=[
(
torch.tensor([0], **tkwargs),
torch.tensor([1], **tkwargs),
torch.tensor(0.5, **tkwargs),
)
],
options={"sample_around_best": True},
)
class TestGenOneShotKGInitialConditions(BotorchTestCase):
def test_gen_one_shot_kg_initial_conditions(self):
num_fantasies = 8
num_restarts = 4
raw_samples = 16
for dtype in (torch.float, torch.double):
mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean))
mock_kg = qKnowledgeGradient(model=mm, num_fantasies=num_fantasies)
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
# test option error
with self.assertRaises(ValueError):
gen_one_shot_kg_initial_conditions(
acq_function=mock_kg,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
options={"frac_random": 2.0},
)
# test generation logic
q = 2
mock_random_ics = torch.rand(num_restarts, q + num_fantasies, 2)
mock_fantasy_cands = torch.ones(20, 1, 2)
mock_fantasy_vals = torch.randn(20)
with ExitStack() as es:
mock_gbics = es.enter_context(
mock.patch(
"botorch.optim.initializers.gen_batch_initial_conditions",
return_value=mock_random_ics,
)
)
mock_optacqf = es.enter_context(
mock.patch(
"botorch.optim.optimize.optimize_acqf",
return_value=(mock_fantasy_cands, mock_fantasy_vals),
)
)
ics = gen_one_shot_kg_initial_conditions(
acq_function=mock_kg,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
)
mock_gbics.assert_called_once()
mock_optacqf.assert_called_once()
n_value = int((1 - 0.1) * num_fantasies)
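                # gen_one_shot_kg_initial_conditions replaces a (1 - frac_random)
                # fraction of the fantasy restarts with the optimized value-function
                # maximizers (the all-ones mock candidates), checked below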
self.assertTrue(
torch.equal(
ics[..., :-n_value, :], mock_random_ics[..., :-n_value, :]
)
)
self.assertTrue(torch.all(ics[..., -n_value:, :] == 1))
class TestGenValueFunctionInitialConditions(BotorchTestCase):
def test_gen_value_function_initial_conditions(self):
num_fantasies = 2
num_solutions = 3
num_restarts = 4
raw_samples = 5
n_train = 6
dim = 2
dtype = torch.float
# run a thorough test with dtype float
train_X = torch.rand(n_train, dim, device=self.device, dtype=dtype)
train_Y = torch.rand(n_train, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X, train_Y)
fant_X = torch.rand(num_solutions, 1, dim, device=self.device, dtype=dtype)
fantasy_model = model.fantasize(
fant_X, IIDNormalSampler(sample_shape=torch.Size([num_fantasies]))
)
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
value_function = PosteriorMean(fantasy_model)
# test option error
with self.assertRaises(ValueError):
gen_value_function_initial_conditions(
acq_function=value_function,
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
current_model=model,
options={"frac_random": 2.0},
)
# test output shape
ics = gen_value_function_initial_conditions(
acq_function=value_function,
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
current_model=model,
)
self.assertEqual(
ics.shape, torch.Size([num_restarts, num_fantasies, num_solutions, 1, dim])
)
# test bounds
self.assertTrue(torch.all(ics >= bounds[0]))
self.assertTrue(torch.all(ics <= bounds[1]))
# test dtype
self.assertEqual(dtype, ics.dtype)
# minimal test cases for when all raw samples are random, with dtype double
dtype = torch.double
n_train = 2
dim = 1
num_solutions = 1
train_X = torch.rand(n_train, dim, device=self.device, dtype=dtype)
train_Y = torch.rand(n_train, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X, train_Y)
fant_X = torch.rand(1, 1, dim, device=self.device, dtype=dtype)
fantasy_model = model.fantasize(
fant_X, IIDNormalSampler(sample_shape=torch.Size([num_fantasies]))
)
bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype)
value_function = PosteriorMean(fantasy_model)
ics = gen_value_function_initial_conditions(
acq_function=value_function,
bounds=bounds,
num_restarts=1,
raw_samples=1,
current_model=model,
options={"frac_random": 0.99},
)
self.assertEqual(
ics.shape, torch.Size([1, num_fantasies, num_solutions, 1, dim])
)
# test bounds
self.assertTrue(torch.all(ics >= bounds[0]))
self.assertTrue(torch.all(ics <= bounds[1]))
# test dtype
self.assertEqual(dtype, ics.dtype)
class TestSampleAroundBest(BotorchTestCase):
def test_sample_truncated_normal_perturbations(self):
tkwargs = {"device": self.device}
n_discrete_points = 5
_bounds = torch.ones(2, 4)
_bounds[1] = 2
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
bounds = _bounds.to(**tkwargs)
for n_best in (1, 2):
X = 1 + torch.rand(n_best, 4, **tkwargs)
# basic test
perturbed_X = sample_truncated_normal_perturbations(
X=X,
n_discrete_points=n_discrete_points,
sigma=4,
bounds=bounds,
qmc=False,
)
self.assertEqual(perturbed_X.shape, torch.Size([n_discrete_points, 4]))
self.assertTrue((perturbed_X >= 1).all())
self.assertTrue((perturbed_X <= 2).all())
# test qmc
with mock.patch(
"botorch.optim.initializers.draw_sobol_samples",
wraps=draw_sobol_samples,
) as mock_sobol:
perturbed_X = sample_truncated_normal_perturbations(
X=X,
n_discrete_points=n_discrete_points,
sigma=4,
bounds=bounds,
qmc=True,
)
mock_sobol.assert_called_once()
self.assertEqual(perturbed_X.shape, torch.Size([n_discrete_points, 4]))
self.assertTrue((perturbed_X >= 1).all())
self.assertTrue((perturbed_X <= 2).all())
def test_sample_perturbed_subset_dims(self):
tkwargs = {"device": self.device}
n_discrete_points = 5
# test that errors are raised
with self.assertRaises(BotorchTensorDimensionError):
sample_perturbed_subset_dims(
X=torch.zeros(1, 1),
n_discrete_points=1,
sigma=1e-3,
bounds=torch.zeros(1, 2, 1),
)
with self.assertRaises(BotorchTensorDimensionError):
sample_perturbed_subset_dims(
X=torch.zeros(1, 1, 1),
n_discrete_points=1,
sigma=1e-3,
bounds=torch.zeros(2, 1),
)
for dtype in (torch.float, torch.double):
for n_best in (1, 2):
tkwargs["dtype"] = dtype
bounds = torch.zeros(2, 21, **tkwargs)
bounds[1] = 1
X = torch.rand(n_best, 21, **tkwargs)
# basic test
with mock.patch(
"botorch.optim.initializers.draw_sobol_samples",
) as mock_sobol:
perturbed_X = sample_perturbed_subset_dims(
X=X,
n_discrete_points=n_discrete_points,
qmc=False,
sigma=1e-3,
bounds=bounds,
)
mock_sobol.assert_not_called()
self.assertEqual(perturbed_X.shape, torch.Size([n_discrete_points, 21]))
self.assertTrue((perturbed_X >= 0).all())
self.assertTrue((perturbed_X <= 1).all())
# test qmc
with mock.patch(
"botorch.optim.initializers.draw_sobol_samples",
wraps=draw_sobol_samples,
) as mock_sobol:
perturbed_X = sample_perturbed_subset_dims(
X=X,
n_discrete_points=n_discrete_points,
sigma=1e-3,
bounds=bounds,
)
mock_sobol.assert_called_once()
self.assertEqual(perturbed_X.shape, torch.Size([n_discrete_points, 21]))
self.assertTrue((perturbed_X >= 0).all())
self.assertTrue((perturbed_X <= 1).all())
# for each point in perturbed_X compute the number of
# dimensions it has in common with each point in X
# and take the maximum number
max_equal_dims = (
(perturbed_X.unsqueeze(0) == X.unsqueeze(1))
.sum(dim=-1)
.max(dim=0)
.values
)
# check that at least one dimension is perturbed
self.assertTrue((20 - max_equal_dims >= 1).all())
def test_sample_points_around_best(self):
tkwargs = {"device": self.device}
_bounds = torch.ones(2, 2)
_bounds[1] = 2
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
bounds = _bounds.to(**tkwargs)
X_train = 1 + torch.rand(20, 2, **tkwargs)
model = MockModel(
MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
)
# test NEI with X_baseline
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train, prune_baseline=False, cache_root=False
)
with mock.patch(
"botorch.optim.initializers.sample_perturbed_subset_dims"
) as mock_subset_dims:
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
mock_subset_dims.assert_not_called()
self.assertTrue(X_rnd.shape, torch.Size([4, 2]))
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
# test model that returns a batched mean
model = MockModel(
MockPosterior(
mean=(2 * X_train + 1).sum(dim=-1, keepdim=True).unsqueeze(0)
)
)
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train, prune_baseline=False, cache_root=False
)
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
self.assertTrue(X_rnd.shape, torch.Size([4, 2]))
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
# test EI without X_baseline
acqf = qExpectedImprovement(model, best_f=0.0)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
# set train inputs
model.train_inputs = (X_train,)
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
self.assertTrue(X_rnd.shape, torch.Size([4, 2]))
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
# test an acquisition function that has no posterior_transform
# and maximize=False
pm = PosteriorMean(model, maximize=False)
self.assertIsNone(pm.posterior_transform)
self.assertFalse(pm.maximize)
X_rnd = sample_points_around_best(
acq_function=pm,
n_discrete_points=4,
sigma=0,
bounds=bounds,
best_pct=1e-8, # ensures that we only use best value
)
idx = (-model.posterior(X_train).mean).argmax()
self.assertTrue((X_rnd == X_train[idx : idx + 1]).all(dim=-1).all())
# test acquisition function that has no model
ff = FixedFeatureAcquisitionFunction(pm, d=2, columns=[0], values=[0])
# set X_baseline for testing purposes
ff.X_baseline = X_train
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = sample_points_around_best(
acq_function=ff,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
# test constraints with NEHVI
constraints = [lambda Y: Y[..., 0]]
ref_point = torch.zeros(2, **tkwargs)
# test cases when there are and are not any feasible points
for any_feas in (True, False):
Y_train = torch.stack(
[
torch.linspace(-0.5, 0.5, X_train.shape[0], **tkwargs)
if any_feas
else torch.ones(X_train.shape[0], **tkwargs),
X_train.sum(dim=-1),
],
dim=-1,
)
moo_model = MockModel(MockPosterior(mean=Y_train, samples=Y_train))
acqf = qNoisyExpectedHypervolumeImprovement(
moo_model,
ref_point=ref_point,
X_baseline=X_train,
constraints=constraints,
cache_root=False,
sampler=IIDNormalSampler(sample_shape=torch.Size([2])),
)
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=0.0,
bounds=bounds,
)
self.assertTrue(X_rnd.shape, torch.Size([4, 2]))
# this should be true since sigma=0
# and we should only be returning feasible points
violation = constraints[0](Y_train)
neg_violation = -violation.clamp_min(0.0)
feas = neg_violation == 0
eq_mask = (X_train.unsqueeze(1) == X_rnd.unsqueeze(0)).all(dim=-1)
if feas.any():
                    # eq_mask (computed above) is an n_train x n_rnd tensor of
                    # booleans marking coincidences with training points
# check that all X_rnd correspond to feasible points
self.assertEqual(eq_mask[feas].sum(), 4)
else:
idcs = torch.topk(neg_violation, k=2).indices
self.assertEqual(eq_mask[idcs].sum(), 4)
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
# test that subset_dims is called if d>=20
X_train = 1 + torch.rand(10, 20, **tkwargs)
model = MockModel(
MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
)
bounds = torch.ones(2, 20, **tkwargs)
bounds[1] = 2
# test NEI with X_baseline
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train, prune_baseline=False, cache_root=False
)
with mock.patch(
"botorch.optim.initializers.sample_perturbed_subset_dims",
wraps=sample_perturbed_subset_dims,
) as mock_subset_dims:
X_rnd = sample_points_around_best(
acq_function=acqf, n_discrete_points=5, sigma=1e-3, bounds=bounds
)
self.assertEqual(X_rnd.shape, torch.Size([5, 20]))
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
mock_subset_dims.assert_called_once()
# test tiny prob_perturb to make sure we perturb at least one dimension
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=5,
sigma=1e-3,
bounds=bounds,
prob_perturb=1e-8,
)
self.assertTrue(
((X_rnd.unsqueeze(0) == X_train.unsqueeze(1)).all(dim=-1)).sum() == 0
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from functools import partial
from typing import Dict
from unittest.mock import MagicMock, patch
import torch
from botorch.optim import core
from botorch.optim.closures import ForwardBackwardClosure, NdarrayOptimizationClosure
from botorch.optim.core import (
OptimizationResult,
OptimizationStatus,
scipy_minimize,
torch_minimize,
)
from botorch.utils.testing import BotorchTestCase
from numpy import allclose
from scipy.optimize import OptimizeResult
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim.sgd import SGD
try:
from torch.optim.lr_scheduler import LRScheduler
except ImportError: # pragma: no cover
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # pragma: no cover
class ToyModule(Module):
def __init__(self, b: Parameter, x: Parameter, dummy: Parameter):
r"""Toy module for unit testing."""
super().__init__()
self.x = x
self.b = b
self.dummy = dummy
def forward(self) -> Tensor:
return (self.x - self.b).square().sum()
@property
def free_parameters(self) -> Dict[str, Tensor]:
return {n: p for n, p in self.named_parameters() if p.requires_grad}
def norm_squared(x, delay: float = 0.0):
if x.grad is not None:
x.grad.zero_()
loss = x.square().sum()
loss.backward()
if delay:
time.sleep(delay)
return loss, [x.grad]
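# norm_squared is a closure in the (loss, gradients) form consumed by
# scipy_minimize and torch_minimize; the optional delay supports the timeout tests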
class TestScipyMinimize(BotorchTestCase):
def setUp(self):
super().setUp()
module = ToyModule(
x=Parameter(torch.tensor(0.5, device=self.device)),
b=Parameter(torch.tensor(0.0, device=self.device), requires_grad=False),
dummy=Parameter(torch.tensor(1.0, device=self.device)),
).to(self.device)
self.closures = {}
for dtype in ("float32", "float64"):
m = module.to(dtype=getattr(torch, dtype))
self.closures[dtype] = ForwardBackwardClosure(m, m.free_parameters)
def test_basic(self):
x = Parameter(torch.rand([]))
closure = partial(norm_squared, x)
result = scipy_minimize(closure, {"x": x})
self.assertEqual(result.status, OptimizationStatus.SUCCESS)
self.assertTrue(allclose(result.fval, 0.0))
def test_timeout(self):
x = Parameter(torch.tensor(1.0))
# adding a small delay here to combat some timing issues on windows
closure = partial(norm_squared, x, delay=1e-2)
result = scipy_minimize(closure, {"x": x}, timeout_sec=1e-4)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
self.assertTrue("Optimization timed out after" in result.message)
def test_main(self):
def _callback(parameters, result, out) -> None:
out.append(result)
for closure in self.closures.values():
for with_wrapper in (True, False):
with torch.no_grad():
cache = {} # cache random starting values
for name, param in closure.parameters.items():
init = cache[name] = torch.rand_like(param)
param.data.copy_(init)
closure_arg = (
NdarrayOptimizationClosure(closure, closure.parameters)
if with_wrapper
else closure
)
result = scipy_minimize(
closure=closure_arg,
parameters=closure.parameters,
bounds={"x": (0, 1)},
)
self.assertIsInstance(result, OptimizationResult)
self.assertEqual(result.status, OptimizationStatus.SUCCESS)
self.assertTrue(allclose(result.fval, 0.0))
self.assertTrue(closure.parameters["dummy"].equal(cache["dummy"]))
self.assertFalse(closure.parameters["x"].equal(cache["x"]))
# Test `bounds` and `callback`
with torch.no_grad(): # closure.forward is a ToyModule instance
closure.forward.b.fill_(0.0)
closure.forward.x.fill_(0.5)
step_results = []
result = scipy_minimize(
closure=closure,
parameters=closure.parameters,
bounds={"x": (0.1, 1.0)},
callback=partial(_callback, out=step_results),
)
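        # with b = 0 and x constrained to [0.1, 1.0], the minimizer of
        # (x - b)^2 is x = 0.1 with objective value 0.01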
self.assertTrue(allclose(0.01, result.fval))
self.assertTrue(allclose(0.1, closure.forward.x.detach().cpu().item()))
self.assertEqual(result.step, len(step_results))
self.assertEqual(result.step, step_results[-1].step)
self.assertEqual(result.fval, step_results[-1].fval)
def test_post_processing(self):
closure = next(iter(self.closures.values()))
wrapper = NdarrayOptimizationClosure(closure, closure.parameters)
with patch.object(core, "minimize_with_timeout") as mock_minimize_with_timeout:
for status, msg in (
(OptimizationStatus.FAILURE, b"ABNORMAL_TERMINATION_IN_LNSRCH"),
(OptimizationStatus.STOPPED, "TOTAL NO. of ITERATIONS REACHED LIMIT"),
):
mock_minimize_with_timeout.return_value = OptimizeResult(
x=wrapper.state,
fun=1.0,
nit=3,
success=False,
message=msg,
)
result = core.scipy_minimize(wrapper, closure.parameters)
self.assertEqual(result.status, status)
self.assertEqual(
result.fval, mock_minimize_with_timeout.return_value.fun
)
self.assertEqual(
result.message, msg if isinstance(msg, str) else msg.decode("ascii")
)
class TestTorchMinimize(BotorchTestCase):
def setUp(self):
super().setUp()
module = ToyModule(
x=Parameter(torch.tensor(0.5, device=self.device)),
b=Parameter(torch.tensor(0.0, device=self.device), requires_grad=False),
dummy=Parameter(torch.tensor(1.0, device=self.device)),
).to(self.device)
self.closures = {}
for dtype in ("float32", "float64"):
m = module.to(dtype=getattr(torch, dtype))
self.closures[dtype] = ForwardBackwardClosure(m, m.free_parameters)
def test_basic(self):
x = Parameter(torch.tensor([0.02]))
closure = partial(norm_squared, x)
result = torch_minimize(closure, {"x": x}, step_limit=100)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
self.assertTrue(allclose(result.fval, 0.0))
def test_timeout(self):
x = Parameter(torch.tensor(1.0))
# adding a small delay here to combat some timing issues on windows
closure = partial(norm_squared, x, delay=1e-3)
result = torch_minimize(closure, {"x": x}, timeout_sec=1e-4)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
self.assertTrue("stopped due to timeout after" in result.message)
def test_main(self):
def _callback(parameters, result, out) -> None:
out.append(result)
for closure in self.closures.values():
# Test that we error out if no termination conditions are given
with self.assertRaisesRegex(RuntimeError, "No termination conditions"):
torch_minimize(closure=closure, parameters=closure.parameters)
# Test single step behavior
for optimizer in (
SGD(params=list(closure.parameters.values()), lr=0.1), # instance
partial(SGD, lr=0.1), # factory
):
cache = {n: p.detach().clone() for n, p in closure.parameters.items()}
grads = [g if g is None else g.detach().clone() for g in closure()[1]]
result = torch_minimize(
closure=closure,
parameters=closure.parameters,
optimizer=optimizer,
step_limit=1,
)
self.assertIsInstance(result, OptimizationResult)
self.assertEqual(result.fval, closure()[0])
self.assertEqual(result.step, 1)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
self.assertTrue(closure.parameters["dummy"].equal(cache["dummy"]))
self.assertFalse(closure.parameters["x"].equal(cache["x"]))
for (name, param), g in zip(closure.parameters.items(), grads):
self.assertTrue(
param.allclose(cache[name] - (0 if g is None else 0.1 * g))
)
# Test local convergence
with torch.no_grad(): # closure.forward is a ToyModule instance
closure.forward.b.fill_(0.0)
closure.forward.x.fill_(0.02)
result = torch_minimize(closure, closure.parameters, step_limit=100)
self.assertTrue(allclose(0.0, result.fval))
self.assertEqual(result.step, 100)
# Test `bounds` and `callback`
with torch.no_grad(): # closure.forward is a ToyModule instance
closure.forward.b.fill_(0.0)
closure.forward.x.fill_(0.11)
step_results = []
result = torch_minimize(
closure=closure,
parameters=closure.parameters,
bounds={"x": (0.1, 1.0)},
callback=partial(_callback, out=step_results),
step_limit=100,
)
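            # as in the scipy test, the bounded minimum of (x - b)^2 with b = 0
            # and x in [0.1, 1.0] is 0.01, attained at x = 0.1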
self.assertTrue(allclose(0.01, result.fval))
self.assertEqual(result.step, len(step_results))
# Test `stopping_criterion`
stopping_decisions = iter((False, False, True, False))
result = torch_minimize(
closure=closure,
parameters=closure.parameters,
stopping_criterion=lambda fval: next(stopping_decisions),
)
self.assertEqual(result.step, 3)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
# Test passing `scheduler`
mock_scheduler = MagicMock(spec=LRScheduler)
mock_scheduler.step = MagicMock(side_effect=RuntimeError("foo"))
with self.assertRaisesRegex(RuntimeError, "foo"):
torch_minimize(
closure=closure,
parameters=closure.parameters,
scheduler=mock_scheduler,
step_limit=1,
)
mock_scheduler.step.assert_called_once()
# Test passing `scheduler` as a factory
optimizer = SGD(list(closure.parameters.values()), lr=1e-3)
mock_factory = MagicMock(side_effect=RuntimeError("foo"))
with self.assertRaisesRegex(RuntimeError, "foo"):
torch_minimize(
closure=closure,
parameters=closure.parameters,
optimizer=optimizer,
scheduler=mock_factory,
step_limit=1,
)
mock_factory.assert_called_once_with(optimizer)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import re
from unittest.mock import MagicMock, patch
from warnings import catch_warnings
import torch
from botorch.exceptions.warnings import OptimizationWarning
from botorch.models import SingleTaskGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.optim import core, fit
from botorch.optim.core import OptimizationResult
from botorch.settings import debug
from botorch.utils.context_managers import module_rollback_ctx, TensorCheckpoint
from botorch.utils.testing import BotorchTestCase
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from scipy.optimize import OptimizeResult
class TestFitGPyTorchMLLScipy(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.mlls = {}
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * math.pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mlls[SingleTaskGP, 1] = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_mll_scipy(self):
for mll in self.mlls.values():
for dtype in (torch.float32, torch.float64):
self._test_fit_gpytorch_mll_scipy(mll.to(dtype=dtype))
def _test_fit_gpytorch_mll_scipy(self, mll):
options = {"disp": False, "maxiter": 2}
ckpt = {
k: TensorCheckpoint(v.detach().clone(), v.device, v.dtype)
for k, v in mll.state_dict().items()
}
with self.subTest("main"), module_rollback_ctx(mll, checkpoint=ckpt):
with catch_warnings(record=True) as ws, debug(True):
result = fit.fit_gpytorch_mll_scipy(mll, options=options)
# Test only parameters requiring gradients have changed
self.assertTrue(
all(
param.equal(ckpt[name].values) != param.requires_grad
for name, param in mll.named_parameters()
)
)
# Test maxiter warning message
self.assertTrue(any("TOTAL NO. of" in str(w.message) for w in ws))
self.assertTrue(
any(issubclass(w.category, OptimizationWarning) for w in ws)
)
# Test iteration tracking
self.assertIsInstance(result, OptimizationResult)
self.assertLessEqual(result.step, options["maxiter"])
self.assertEqual(sum(1 for w in ws if "TOTAL NO. of" in str(w.message)), 1)
# Test that user provided bounds are respected
with self.subTest("bounds"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_mll_scipy(
mll,
bounds={"likelihood.noise_covar.raw_noise": (123, 456)},
options=options,
)
self.assertTrue(
mll.likelihood.noise_covar.raw_noise >= 123
and mll.likelihood.noise_covar.raw_noise <= 456
)
for name, param in mll.named_parameters():
self.assertNotEqual(param.requires_grad, param.equal(ckpt[name].values))
# Test handling of scipy optimization failures and parameter assignments
mock_x = []
assignments = {}
for name, param in mll.named_parameters():
if not param.requires_grad:
continue # pragma: no cover
values = assignments[name] = torch.rand_like(param)
mock_x.append(values.view(-1))
with module_rollback_ctx(mll, checkpoint=ckpt), patch.object(
core, "minimize_with_timeout"
) as mock_minimize_with_timeout:
mock_minimize_with_timeout.return_value = OptimizeResult(
x=torch.concat(mock_x).tolist(),
success=False,
status=0,
fun=float("nan"),
jac=None,
nfev=1,
njev=1,
nhev=1,
nit=1,
message="ABNORMAL_TERMINATION_IN_LNSRCH".encode(),
)
with catch_warnings(record=True) as ws, debug(True):
fit.fit_gpytorch_mll_scipy(mll, options=options)
# Test that warning gets raised
self.assertTrue(
any("ABNORMAL_TERMINATION_IN_LNSRCH" in str(w.message) for w in ws)
)
# Test that parameter values get assigned correctly
self.assertTrue(
all(
param.equal(assignments[name])
for name, param in mll.named_parameters()
if param.requires_grad
)
)
# Test `closure_kwargs`
with self.subTest("closure_kwargs"):
mock_closure = MagicMock(side_effect=StopIteration("foo"))
with self.assertRaisesRegex(StopIteration, "foo"):
fit.fit_gpytorch_mll_scipy(
mll, closure=mock_closure, closure_kwargs={"ab": "cd"}
)
mock_closure.assert_called_once_with(ab="cd")
def test_fit_with_nans(self) -> None:
"""Test the branch of NdarrayOptimizationClosure that handles errors."""
from botorch.optim.closures import NdarrayOptimizationClosure
def closure():
raise RuntimeError("singular")
for dtype in [torch.float32, torch.float64]:
parameters = {"x": torch.tensor([0.0], dtype=dtype)}
wrapper = NdarrayOptimizationClosure(closure=closure, parameters=parameters)
            def _assert_np_array_is_float64_type(array) -> None:
                # the ndarray closure operates in float64 regardless of the
                # torch dtype of the underlying parameters
                self.assertEqual(str(array.dtype), "float64")
_assert_np_array_is_float64_type(wrapper()[0])
_assert_np_array_is_float64_type(wrapper()[1])
_assert_np_array_is_float64_type(wrapper.state)
_assert_np_array_is_float64_type(wrapper._get_gradient_ndarray())
# Any mll will do
mll = next(iter(self.mlls.values()))
            # This fit will raise if the ndarray dtypes are handled incorrectly.
fit.fit_gpytorch_mll_scipy(mll, closure=wrapper, parameters=parameters)
class TestFitGPyTorchMLLTorch(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.mlls = {}
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * math.pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mlls[SingleTaskGP, 1] = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_mll_torch(self):
for mll in self.mlls.values():
for dtype in (torch.float32, torch.float64):
self._test_fit_gpytorch_mll_torch(mll.to(dtype=dtype))
def _test_fit_gpytorch_mll_torch(self, mll):
ckpt = {
k: TensorCheckpoint(v.detach().clone(), v.device, v.dtype)
for k, v in mll.state_dict().items()
}
with self.subTest("main"), module_rollback_ctx(mll, checkpoint=ckpt):
with catch_warnings(record=True) as _, debug(True):
result = fit.fit_gpytorch_mll_torch(mll, step_limit=2)
self.assertIsInstance(result, OptimizationResult)
self.assertLessEqual(result.step, 2)
# Test only parameters requiring gradients have changed
self.assertTrue(
all(
param.requires_grad != param.equal(ckpt[name].values)
for name, param in mll.named_parameters()
)
)
# Test that user provided bounds are respected
with self.subTest("bounds"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_mll_torch(
mll,
bounds={"likelihood.noise_covar.raw_noise": (123, 456)},
)
self.assertTrue(
mll.likelihood.noise_covar.raw_noise >= 123
and mll.likelihood.noise_covar.raw_noise <= 456
)
# Test `closure_kwargs`
with self.subTest("closure_kwargs"):
mock_closure = MagicMock(side_effect=StopIteration("foo"))
with self.assertRaisesRegex(StopIteration, "foo"):
fit.fit_gpytorch_mll_torch(
mll, closure=mock_closure, closure_kwargs={"ab": "cd"}
)
mock_closure.assert_called_once_with(ab="cd")
class TestFitGPyTorchScipy(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.mlls = {}
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * math.pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mlls[SingleTaskGP, 1] = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_scipy(self):
for mll in self.mlls.values():
for dtype in (torch.float32, torch.float64):
self._test_fit_gpytorch_scipy(mll.to(dtype=dtype))
def _test_fit_gpytorch_scipy(self, mll):
options = {"disp": False, "maxiter": 3, "maxfun": 2}
ckpt = {
k: TensorCheckpoint(v.detach().clone(), v.device, v.dtype)
for k, v in mll.state_dict().items()
}
with self.subTest("main"), module_rollback_ctx(mll, checkpoint=ckpt):
with catch_warnings(record=True) as ws, debug(True):
_, info_dict = fit.fit_gpytorch_scipy(
mll, track_iterations=True, options=options
)
# Test only parameters requiring gradients have changed
self.assertTrue(
all(
param.equal(ckpt[name][0]) != param.requires_grad
for name, param in mll.named_parameters()
)
)
# Test maxiter warning message
self.assertTrue(any("TOTAL NO. of" in str(w.message) for w in ws))
self.assertTrue(
any(issubclass(w.category, OptimizationWarning) for w in ws)
)
# Test iteration tracking
self.assertLessEqual(len(info_dict["iterations"]), options["maxiter"])
self.assertIsInstance(info_dict["iterations"][0], OptimizationResult)
self.assertTrue("fopt" in info_dict)
self.assertTrue("wall_time" in info_dict)
self.assertEqual(sum(1 for w in ws if "TOTAL NO. of" in str(w.message)), 1)
# Test that user provided bounds and `exclude` argument are respected
exclude = "model.mean_module.constant", re.compile("raw_lengthscale$")
with self.subTest("bounds"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_scipy(
mll,
bounds={"likelihood.noise_covar.raw_noise": (123, 456)},
options={**options, "exclude": exclude},
)
self.assertTrue(
mll.likelihood.noise_covar.raw_noise >= 123
and mll.likelihood.noise_covar.raw_noise <= 456
)
for name, param in mll.named_parameters():
if (
name
in (
"model.mean_module.constant",
"model.covar_module.base_kernel.raw_lengthscale",
)
or not param.requires_grad
):
self.assertTrue(param.equal(ckpt[name][0]))
else:
self.assertFalse(param.equal(ckpt[name][0]))
# Test use of `approx_mll` flag
with self.subTest("approx_mll"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_scipy(mll, approx_mll=True, options=options)
self.assertTrue(
all(
param.equal(ckpt[name][0]) != param.requires_grad
for name, param in mll.named_parameters()
)
)
# Test handling of scipy optimization failures and parameter assignments
mock_x = []
assignments = {}
for name, param in mll.named_parameters():
if not param.requires_grad:
continue # pragma: no cover
values = assignments[name] = torch.rand_like(param)
mock_x.append(values.view(-1))
with module_rollback_ctx(mll, checkpoint=ckpt), patch.object(
fit, "minimize"
) as mock_minimize:
mock_minimize.return_value = OptimizeResult(
x=torch.concat(mock_x).tolist(),
success=False,
status=0,
fun=float("nan"),
jac=None,
nfev=1,
njev=1,
nhev=1,
nit=1,
message="ABNORMAL_TERMINATION_IN_LNSRCH".encode(),
)
with catch_warnings(record=True) as ws, debug(True):
fit.fit_gpytorch_scipy(mll, options=options)
# Test that warning gets raised
self.assertTrue(
any("ABNORMAL_TERMINATION_IN_LNSRCH" in str(w.message) for w in ws)
)
# Test that parameter values get assigned correctly
self.assertTrue(
all(
param.equal(assignments[name])
for name, param in mll.named_parameters()
if param.requires_grad
)
)
class TestFitGPyTorchTorch(BotorchTestCase):
def setUp(self):
super().setUp()
self.mlls = {}
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * math.pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mlls[SingleTaskGP, 1] = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_torch(self):
for mll in self.mlls.values():
for dtype in (torch.float32, torch.float64):
self._test_fit_gpytorch_torch(mll.to(dtype=dtype))
def _test_fit_gpytorch_torch(self, mll):
options = {"maxiter": 3}
ckpt = {
k: TensorCheckpoint(v.detach().clone(), v.device, v.dtype)
for k, v in mll.state_dict().items()
}
with self.subTest("main"), module_rollback_ctx(mll, checkpoint=ckpt):
with catch_warnings(record=True), debug(True):
_, info_dict = fit.fit_gpytorch_torch(
mll, track_iterations=True, options=options
)
# Test only parameters requiring gradients have changed
self.assertTrue(
all(
param.equal(ckpt[name][0]) != param.requires_grad
for name, param in mll.named_parameters()
)
)
# Test iteration tracking
self.assertEqual(len(info_dict["iterations"]), options["maxiter"])
self.assertIsInstance(info_dict["iterations"][0], OptimizationResult)
self.assertTrue("fopt" in info_dict)
self.assertTrue("wall_time" in info_dict)
# Test that user provided bounds and `exclude` argument are respected
exclude = "model.mean_module.constant", re.compile("raw_lengthscale$")
with self.subTest("bounds"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_torch(
mll,
bounds={"likelihood.noise_covar.raw_noise": (123, 456)},
options={**options, "exclude": exclude},
)
self.assertTrue(
mll.likelihood.noise_covar.raw_noise >= 123
and mll.likelihood.noise_covar.raw_noise <= 456
)
for name, param in mll.named_parameters():
if (
name
in (
"model.mean_module.constant",
"model.covar_module.base_kernel.raw_lengthscale",
)
or not param.requires_grad
):
self.assertTrue(param.equal(ckpt[name][0]))
else:
self.assertFalse(param.equal(ckpt[name][0]))
# Test use of `approx_mll` flag
with self.subTest("approx_mll"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_torch(mll, approx_mll=True, options=options)
self.assertTrue(
all(
param.equal(ckpt[name][0]) != param.requires_grad
for name, param in mll.named_parameters()
)
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from math import pi
from unittest.mock import MagicMock, patch
from warnings import catch_warnings, simplefilter
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.optim import numpy_converter
from botorch.optim.numpy_converter import (
_scipy_objective_and_grad,
module_to_array,
set_params_with_array,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.constraints import GreaterThan
from gpytorch.kernels.rbf_kernel import RBFKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means.constant_mean import ConstantMean
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.models.exact_gp import ExactGP
def _get_index(property_dict, parameter_name):
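    """Return the starting offset of `parameter_name` within the flattened
    parameter array described by `property_dict`."""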
idx = 0
for p_name, ta in property_dict.items():
if p_name == parameter_name:
break
idx += ta.shape.numel()
return idx
class TestModuleToArray(BotorchTestCase):
def test_basic(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood()
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
# test the basic case
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(module=mll)
self.assertTrue(np.array_equal(x, np.zeros(5)))
expected_sizes = {
"likelihood.noise_covar.raw_noise": torch.Size([1]),
"model.covar_module.raw_lengthscale": torch.Size([1, 3]),
"model.mean_module.raw_constant": torch.Size(),
}
self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
for pname, val in pdict.items():
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.shape, expected_sizes[pname])
self.assertEqual(val.device.type, self.device.type)
self.assertIsNone(bounds)
def test_exclude(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood()
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
# test the basic case
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(
module=mll, exclude={"model.mean_module.raw_constant"}
)
self.assertTrue(np.array_equal(x, np.zeros(4)))
expected_sizes = {
"likelihood.noise_covar.raw_noise": torch.Size([1]),
"model.covar_module.raw_lengthscale": torch.Size([1, 3]),
}
self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
for pname, val in pdict.items():
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.shape, expected_sizes[pname])
self.assertEqual(val.device.type, self.device.type)
self.assertIsNone(bounds)
def test_manual_bounds(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood()
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
# test the basic case
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(
module=mll,
bounds={"model.covar_module.raw_lengthscale": (0.1, None)},
)
self.assertTrue(np.array_equal(x, np.zeros(5)))
expected_sizes = {
"likelihood.noise_covar.raw_noise": torch.Size([1]),
"model.covar_module.raw_lengthscale": torch.Size([1, 3]),
"model.mean_module.raw_constant": torch.Size(),
}
self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
for pname, val in pdict.items():
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.shape, expected_sizes[pname])
self.assertEqual(val.device.type, self.device.type)
lower_exp = np.full_like(x, 0.1)
for p in (
"likelihood.noise_covar.raw_noise",
"model.mean_module.raw_constant",
):
lower_exp[_get_index(pdict, p)] = -np.inf
self.assertTrue(np.equal(bounds[0], lower_exp).all())
self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(
module=mll,
bounds={
key: (-float("inf"), float("inf"))
for key, _ in mll.named_parameters()
},
)
self.assertIsNone(bounds)
def test_module_bounds(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood(
noise_constraint=GreaterThan(1e-5, transform=None)
)
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
# test the basic case
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(
module=mll,
bounds={"model.covar_module.raw_lengthscale": (0.1, None)},
)
self.assertTrue(np.array_equal(x, np.zeros(5)))
expected_sizes = {
"likelihood.noise_covar.raw_noise": torch.Size([1]),
"model.covar_module.raw_lengthscale": torch.Size([1, 3]),
"model.mean_module.raw_constant": torch.Size(),
}
self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
for pname, val in pdict.items():
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.shape, expected_sizes[pname])
self.assertEqual(val.device.type, self.device.type)
lower_exp = np.full_like(x, 0.1)
lower_exp[_get_index(pdict, "model.mean_module.raw_constant")] = -np.inf
lower_exp[_get_index(pdict, "likelihood.noise_covar.raw_noise")] = 1e-5
self.assertTrue(np.allclose(bounds[0], lower_exp))
self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
class TestSetParamsWithArray(BotorchTestCase):
def test_set_parameters(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood()
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
with catch_warnings():
# Get parameters
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(module=mll)
# Set parameters
mll = set_params_with_array(
mll, np.array([1.0, 2.0, 3.0, 4.0, 5.0]), pdict
)
z = dict(mll.named_parameters())
self.assertTrue(
torch.equal(
z["likelihood.noise_covar.raw_noise"],
torch.tensor([1.0], device=self.device, dtype=dtype),
)
)
self.assertTrue(
torch.equal(
z["model.covar_module.raw_lengthscale"],
torch.tensor([[2.0, 3.0, 4.0]], device=self.device, dtype=dtype),
)
)
self.assertTrue(
torch.equal(
z["model.mean_module.raw_constant"],
torch.tensor(5.0, device=self.device, dtype=dtype),
)
)
# Extract again
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x2, pdict2, bounds2 = module_to_array(module=mll)
self.assertTrue(np.array_equal(x2, np.array([1.0, 2.0, 3.0, 4.0, 5.0])))
class TestScipyObjectiveAndGrad(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
self.mll = ExactMarginalLogLikelihood(model.likelihood, model)
def test_scipy_objective_and_grad(self):
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, property_dict, bounds = module_to_array(module=self.mll)
loss, grad = _scipy_objective_and_grad(x, self.mll, property_dict)
_dist = self.mll.model(*self.mll.model.train_inputs)
_loss = -self.mll(_dist, self.mll.model.train_targets)
_loss.sum().backward()
_grad = torch.concat(
[self.mll.get_parameter(name).grad.view(-1) for name in property_dict]
)
self.assertEqual(loss, _loss.detach().sum().item())
self.assertTrue(np.allclose(grad, _grad.detach().numpy()))
def _getter(*args, **kwargs):
raise RuntimeError("foo")
_handler = MagicMock()
with catch_warnings(), patch.multiple(
numpy_converter,
_get_extra_mll_args=_getter,
_handle_numerical_errors=_handler,
):
simplefilter("ignore", category=DeprecationWarning)
_scipy_objective_and_grad(x, self.mll, property_dict)
self.assertEqual(_handler.call_count, 1)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.optim.stopping import ExpMAStoppingCriterion, StoppingCriterion
from botorch.utils.testing import BotorchTestCase
class TestStoppingCriterion(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
StoppingCriterion()
def test_exponential_moving_average(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
# test max iter
sc = ExpMAStoppingCriterion(maxiter=2)
self.assertEqual(sc.maxiter, 2)
self.assertEqual(sc.n_window, 10)
self.assertEqual(sc.rel_tol, 1e-5)
self.assertFalse(sc.evaluate(fvals=torch.ones(1, **tkwargs)))
self.assertTrue(sc.evaluate(fvals=torch.zeros(1, **tkwargs)))
# test convergence
n_window = 4
for minimize in (True, False):
# test basic
sc = ExpMAStoppingCriterion(
minimize=minimize, n_window=n_window, rel_tol=0.0375
)
self.assertEqual(sc.rel_tol, 0.0375)
self.assertIsNone(sc._prev_fvals)
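                # Expected exponential moving-average weights over the
                # `n_window` most recent values, normalized to sum to one
                # (hard-coded to four decimal places).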
weights_exp = torch.tensor([0.1416, 0.1976, 0.2758, 0.3849])
self.assertAllClose(sc.weights, weights_exp, atol=1e-4)
f_vals = 1 + torch.linspace(1, 0, 25, **tkwargs) ** 2
if not minimize:
f_vals = -f_vals
for i, fval in enumerate(f_vals):
if sc.evaluate(fval):
self.assertEqual(i, 10)
break
# test multiple components
sc = ExpMAStoppingCriterion(
minimize=minimize, n_window=n_window, rel_tol=0.0375
)
df = torch.linspace(0, 0.1, 25, **tkwargs)
if not minimize:
df = -df
f_vals = torch.stack([f_vals, f_vals + df], dim=-1)
for i, fval in enumerate(f_vals):
if sc.evaluate(fval):
self.assertEqual(i, 10)
break
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import warnings
import torch
from botorch import settings
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
)
from botorch.acquisition.multi_objective.max_value_entropy_search import (
qMultiObjectiveMaxValueEntropy,
)
from botorch.acquisition.multi_objective.monte_carlo import (
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.exceptions import BotorchError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models import ModelListGP, SingleTaskGP
from botorch.models.transforms.input import Warp
from botorch.optim.utils import columnwise_clamp, fix_features, get_X_baseline
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestColumnWiseClamp(BotorchTestCase):
def setUp(self):
super().setUp()
self.X = torch.tensor([[-2, 1], [0.5, -0.5]], device=self.device)
self.X_expected = torch.tensor([[-1, 0.5], [0.5, -0.5]], device=self.device)
def test_column_wise_clamp_scalars(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, 1, -1)
X_clmp = columnwise_clamp(X, -1, 0.5)
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, -3, 3)
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_scalar_tensors(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.tensor(1), torch.tensor(-1))
X_clmp = columnwise_clamp(X, torch.tensor(-1), torch.tensor(0.5))
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, torch.tensor(-3), torch.tensor(3))
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_tensors(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.ones(2), torch.zeros(2))
with self.assertRaises(RuntimeError):
X_clmp = columnwise_clamp(X, torch.zeros(3), torch.ones(3))
X_clmp = columnwise_clamp(X, torch.tensor([-1, -1]), torch.tensor([0.5, 0.5]))
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, torch.tensor([-3, -3]), torch.tensor([3, 3]))
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_full_dim_tensors(self):
X = torch.tensor([[[-1, 2, 0.5], [0.5, 3, 1.5]], [[0.5, 1, 0], [2, -2, 3]]])
lower = torch.tensor([[[0, 0.5, 1], [0, 2, 2]], [[0, 2, 0], [1, -1, 0]]])
upper = torch.tensor([[[1, 1.5, 1], [1, 4, 3]], [[1, 3, 0.5], [3, 1, 2.5]]])
X_expected = torch.tensor(
[[[0, 1.5, 1], [0.5, 3, 2]], [[0.5, 2, 0], [2, -1, 2.5]]]
)
X_clmp = columnwise_clamp(X, lower, upper)
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, lower - 5, upper + 5)
self.assertTrue(torch.equal(X_clmp, X))
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.ones_like(X), torch.zeros_like(X))
with self.assertRaises(RuntimeError):
X_clmp = columnwise_clamp(X, lower.unsqueeze(-3), upper.unsqueeze(-3))
def test_column_wise_clamp_raise_on_violation(self):
X = self.X
with self.assertRaises(BotorchError):
X_clmp = columnwise_clamp(
X, torch.zeros(2), torch.ones(2), raise_on_violation=True
)
X_clmp = columnwise_clamp(
X, torch.tensor([-3, -3]), torch.tensor([3, 3]), raise_on_violation=True
)
self.assertTrue(torch.equal(X_clmp, X))
class TestFixFeatures(BotorchTestCase):
def _getTensors(self):
X = torch.tensor([[-2, 1, 3], [0.5, -0.5, 1.0]], device=self.device)
X_null_two = torch.tensor([[-2, 1, 3], [0.5, -0.5, 1.0]], device=self.device)
X_expected = torch.tensor([[-1, 1, -2], [-1, -0.5, -2]], device=self.device)
X_expected_null_two = torch.tensor(
[[-1, 1, 3], [-1, -0.5, 1.0]], device=self.device
)
return X, X_null_two, X_expected, X_expected_null_two
def test_fix_features(self):
X, X_null_two, X_expected, X_expected_null_two = self._getTensors()
X.requires_grad_(True)
X_null_two.requires_grad_(True)
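        # `fix_features` pins each listed column: a numeric value replaces the
        # column with that constant, while `None` keeps the current values but
        # detaches them, so no gradient flows through fixed columns.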
X_fix = fix_features(X, {0: -1, 2: -2})
X_fix_null_two = fix_features(X_null_two, {0: -1, 2: None})
self.assertTrue(torch.equal(X_fix, X_expected))
self.assertTrue(torch.equal(X_fix_null_two, X_expected_null_two))
def f(X):
return X.sum()
f(X).backward()
self.assertTrue(torch.equal(X.grad, torch.ones_like(X)))
X.grad.zero_()
f(X_fix).backward()
self.assertTrue(
torch.equal(
X.grad,
torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], device=self.device),
)
)
f(X_null_two).backward()
self.assertTrue(torch.equal(X_null_two.grad, torch.ones_like(X)))
X_null_two.grad.zero_()
f(X_fix_null_two).backward()
self.assertTrue(
torch.equal(
X_null_two.grad,
torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], device=self.device),
)
)
class TestGetXBaseline(BotorchTestCase):
def test_get_X_baseline(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
X_train = torch.rand(20, 2, **tkwargs)
model = MockModel(
MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
)
# test NEI with X_baseline
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train[:2], prune_baseline=False, cache_root=False
)
X = get_X_baseline(acq_function=acqf)
self.assertTrue(torch.equal(X, acqf.X_baseline))
# test EI without X_baseline
acqf = qExpectedImprovement(model, best_f=0.0)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = get_X_baseline(
acq_function=acqf,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
# set train inputs
model.train_inputs = (X_train,)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
            # test that we fall back to train_inputs if X_baseline is an empty tensor
acqf.register_buffer("X_baseline", X_train[:0])
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test acquisition function without X_baseline or model
acqf = FixedFeatureAcquisitionFunction(acqf, d=2, columns=[0], values=[0])
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = get_X_baseline(
acq_function=acqf,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
Y_train = 2 * X_train[:2] + 1
moo_model = MockModel(MockPosterior(mean=Y_train, samples=Y_train))
ref_point = torch.zeros(2, **tkwargs)
# test NEHVI with X_baseline
acqf = qNoisyExpectedHypervolumeImprovement(
moo_model,
ref_point=ref_point,
X_baseline=X_train[:2],
sampler=IIDNormalSampler(sample_shape=torch.Size([2])),
cache_root=False,
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, acqf.X_baseline))
# test qEHVI without train_inputs
acqf = qExpectedHypervolumeImprovement(
moo_model,
ref_point=ref_point,
partitioning=FastNondominatedPartitioning(
ref_point=ref_point,
Y=Y_train,
),
)
# test extracting train_inputs from model list GP
model_list = ModelListGP(
SingleTaskGP(X_train, Y_train[:, :1]),
SingleTaskGP(X_train, Y_train[:, 1:]),
)
acqf = qExpectedHypervolumeImprovement(
model_list,
ref_point=ref_point,
partitioning=FastNondominatedPartitioning(
ref_point=ref_point,
Y=Y_train,
),
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test MESMO for which we need to use
# `acqf.mo_model`
batched_mo_model = SingleTaskGP(X_train, Y_train)
acqf = qMultiObjectiveMaxValueEntropy(
batched_mo_model,
sample_pareto_frontiers=lambda model: torch.rand(10, 2, **tkwargs),
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test that if there is an input transform that is applied
# to the train_inputs when the model is in eval mode, we
# extract the untransformed train_inputs
model = SingleTaskGP(
X_train, Y_train[:, :1], input_transform=Warp(indices=[0, 1])
)
model.eval()
self.assertFalse(torch.equal(model.train_inputs[0], X_train))
acqf = qExpectedImprovement(model, best_f=0.0)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from unittest.mock import MagicMock
import numpy as np
import torch
from botorch.optim.closures.core import (
as_ndarray,
get_tensors_as_ndarray_1d,
set_tensors_from_ndarray_1d,
)
from botorch.optim.utils import get_bounds_as_ndarray
from botorch.optim.utils.numpy_utils import torch_to_numpy_dtype_dict
from botorch.utils.testing import BotorchTestCase
from torch.nn import Parameter
class TestNumpyUtils(BotorchTestCase):
def setUp(self):
super().setUp()
self.parameters = {"foo": torch.rand(2), "bar": Parameter(torch.rand(3))}
def test_as_ndarray(self):
base = np.random.randn(3)
tnsr = torch.from_numpy(base)
# Test inplace conversion
result = as_ndarray(tnsr)
self.assertTrue(np.shares_memory(base, result))
# Test conversion with memory allocation
result = as_ndarray(tnsr, inplace=False)
self.assertTrue(np.allclose(base, result))
self.assertFalse(np.shares_memory(base, result))
result = as_ndarray(tnsr, dtype=np.float32)
self.assertTrue(np.allclose(base, result))
self.assertFalse(np.shares_memory(base, result))
self.assertEqual(result.dtype, np.float32)
# Test that `clone` does not get called on non-CPU tensors
mock_tensor = MagicMock()
mock_tensor.cpu.return_value = mock_tensor
mock_tensor.device.return_value = "foo"
mock_tensor.clone.return_value = mock_tensor
as_ndarray(mock_tensor)
mock_tensor.cpu.assert_called_once()
mock_tensor.clone.assert_not_called()
mock_tensor.numpy.assert_called_once()
def test_as_ndarray_dtypes(self) -> None:
for torch_dtype, np_dtype in torch_to_numpy_dtype_dict.items():
tens = torch.tensor(0, dtype=torch_dtype, device="cpu")
self.assertEqual(torch_dtype, tens.dtype)
self.assertEqual(tens.numpy().dtype, np_dtype)
self.assertEqual(as_ndarray(tens, np_dtype).dtype, np_dtype)
def test_get_tensors_as_ndarray_1d(self):
with self.assertRaisesRegex(RuntimeError, "Argument `tensors` .* is empty"):
get_tensors_as_ndarray_1d(())
values = get_tensors_as_ndarray_1d(self.parameters)
self.assertTrue(
np.allclose(values, get_tensors_as_ndarray_1d(self.parameters.values()))
)
n = 0
for param in self.parameters.values():
k = param.numel()
self.assertTrue(
np.allclose(values[n : n + k], param.view(-1).detach().cpu().numpy())
)
n += k
with self.assertRaisesRegex(ValueError, "Expected a vector for `out`"):
get_tensors_as_ndarray_1d(self.parameters, out=np.empty((1, 1)))
with self.assertRaisesRegex(ValueError, "Size of `parameters` .* not match"):
get_tensors_as_ndarray_1d(self.parameters, out=np.empty(values.size - 1))
with self.assertRaisesRegex(RuntimeError, "failed while copying values .* foo"):
get_tensors_as_ndarray_1d(
self.parameters,
out=np.empty(values.size),
as_array=MagicMock(side_effect=RuntimeError("foo")),
)
def test_set_tensors_from_ndarray_1d(self):
values = get_tensors_as_ndarray_1d(self.parameters)
others = np.random.rand(*values.shape).astype(values.dtype)
with self.assertRaisesRegex(RuntimeError, "failed while copying values to"):
set_tensors_from_ndarray_1d(self.parameters, np.empty([1]))
set_tensors_from_ndarray_1d(self.parameters, others)
n = 0
for param in self.parameters.values():
k = param.numel()
self.assertTrue(
np.allclose(others[n : n + k], param.view(-1).detach().cpu().numpy())
)
n += k
def test_get_bounds_as_ndarray(self):
params = {"a": torch.rand(1), "b": torch.rand(1), "c": torch.rand(2)}
bounds = {"a": (None, 1), "c": (0, None)}
test = np.full((4, 2), (-float("inf"), float("inf")))
test[0, 1] = 1
test[2, 0] = 0
test[3, 0] = 0
array = get_bounds_as_ndarray(parameters=params, bounds=bounds)
self.assertTrue(np.array_equal(test, array))
# Test with tensor bounds.
bounds = {
"a": (None, torch.tensor(1, device=self.device)),
"c": (torch.tensor(0, device=self.device), None),
}
array = get_bounds_as_ndarray(parameters=params, bounds=bounds)
self.assertTrue(np.array_equal(test, array))
# Test with n-dim tensor bounds.
bounds = {
"a": (None, torch.tensor(1, device=self.device)),
"c": (
torch.tensor([0, 0], device=self.device),
torch.tensor([1, 1], device=self.device),
),
}
test[2:, 1] = 1
array = get_bounds_as_ndarray(parameters=params, bounds=bounds)
self.assertTrue(np.array_equal(test, array))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from functools import partial
from warnings import catch_warnings, warn
import numpy as np
from botorch.optim.utils import (
_filter_kwargs,
_handle_numerical_errors,
_warning_handler_template,
)
from botorch.utils.testing import BotorchTestCase
from linear_operator.utils.errors import NanError, NotPSDError
class TestUtilsCommon(BotorchTestCase):
def test__filter_kwargs(self) -> None:
def mock_adam(params, lr: float = 0.001) -> None:
return # pragma: nocover
kwargs = {"lr": 0.01, "maxiter": 3000}
expected_msg = (
r"Keyword arguments \['maxiter'\] will be ignored because they are "
r"not allowed parameters for function mock_adam. Allowed parameters "
r"are \['params', 'lr'\]."
)
with self.assertWarnsRegex(Warning, expected_msg):
valid_kwargs = _filter_kwargs(mock_adam, **kwargs)
self.assertEqual(set(valid_kwargs.keys()), {"lr"})
mock_partial = partial(mock_adam, lr=2.0)
expected_msg = (
r"Keyword arguments \['maxiter'\] will be ignored because they are "
r"not allowed parameters. Allowed parameters are \['params', 'lr'\]."
)
with self.assertWarnsRegex(Warning, expected_msg):
valid_kwargs = _filter_kwargs(mock_partial, **kwargs)
self.assertEqual(set(valid_kwargs.keys()), {"lr"})
def test_handle_numerical_errors(self):
x = np.zeros(1, dtype=np.float64)
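        # `_handle_numerical_errors` re-raises NotPSDError, converts recognized
        # numerical failures (NaNs, singular or non-positive-definite matrices)
        # into a NaN loss plus a NaN gradient shaped like `x`, and re-raises
        # any other RuntimeError.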
with self.assertRaisesRegex(NotPSDError, "foo"):
_handle_numerical_errors(NotPSDError("foo"), x=x)
for error in (
NanError(),
RuntimeError("singular"),
RuntimeError("input is not positive-definite"),
):
fake_loss, fake_grad = _handle_numerical_errors(error, x=x)
self.assertTrue(np.isnan(fake_loss))
self.assertEqual(fake_grad.shape, x.shape)
self.assertTrue(np.isnan(fake_grad).all())
fake_loss, fake_grad = _handle_numerical_errors(error, x=x, dtype=np.float32)
self.assertEqual(np.float32, fake_loss.dtype)
self.assertEqual(np.float32, fake_grad.dtype)
with self.assertRaisesRegex(RuntimeError, "foo"):
_handle_numerical_errors(RuntimeError("foo"), x=x)
def test_warning_handler_template(self):
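        # `_warning_handler_template` reports whether a warning was handled:
        # warnings matching the `debug` predicate are logged, remaining ones
        # matching `rethrow` are re-issued; with neither predicate supplied,
        # both warnings below go unhandled.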
with catch_warnings(record=True) as ws:
warn(DeprecationWarning("foo"))
warn(RuntimeWarning("bar"))
self.assertFalse(any(_warning_handler_template(w) for w in ws))
handler = partial(
_warning_handler_template,
debug=lambda w: issubclass(w.category, DeprecationWarning),
rethrow=lambda w: True,
)
with self.assertLogs(level="DEBUG") as logs, catch_warnings(record=True) as _ws:
self.assertTrue(all(handler(w) for w in ws))
self.assertEqual(1, len(logs.output))
self.assertTrue("foo" in logs.output[0])
self.assertEqual(1, len(_ws))
self.assertEqual("bar", str(_ws[0].message))
|