#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling.pairwise_samplers import (
PairwiseIIDNormalSampler,
PairwiseSobolQMCNormalSampler,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultivariateNormal
def _get_test_posterior(device, n=3, dtype=torch.float, batched=False):
mean = torch.zeros(n, device=device, dtype=dtype)
cov = torch.eye(n, device=device, dtype=dtype)
if batched:
cov = cov.repeat(3, 1, 1)
mvn = MultivariateNormal(mean, cov)
return GPyTorchPosterior(mvn)
class TestPairwiseIIDNormalSampler(BotorchTestCase):
def test_forward(self):
for dtype in (torch.float, torch.double):
sampler = PairwiseIIDNormalSampler(sample_shape=torch.Size([4]), seed=1234)
self.assertEqual(sampler.seed, 1234)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 3, 2]))
# ensure samples are the same
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# ensure this works with a differently shaped posterior
posterior_batched = _get_test_posterior(
device=self.device, dtype=dtype, batched=True
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 3, 2]))
# ensure this works when changing the dtype
new_dtype = torch.float if dtype == torch.double else torch.double
posterior_batched = _get_test_posterior(
device=self.device, dtype=new_dtype, batched=True
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 3, 2]))
# ensure an error is raised when the number of points is < 2
posterior = _get_test_posterior(device=self.device, n=1, dtype=dtype)
with self.assertRaises(RuntimeError):
sampler(posterior)
# check max_num_comparisons
sampler = PairwiseIIDNormalSampler(
sample_shape=torch.Size([4]), max_num_comparisons=2
)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 2, 2]))
class TestPairwiseSobolQMCNormalSampler(BotorchTestCase):
def test_forward(self):
for dtype in (torch.float, torch.double):
sampler = PairwiseSobolQMCNormalSampler(
sample_shape=torch.Size([4]), seed=1234
)
self.assertEqual(sampler.seed, 1234)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 3, 2]))
# ensure samples are the same
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# ensure this works with a differently shaped posterior
posterior_batched = _get_test_posterior(
device=self.device, dtype=dtype, batched=True
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 3, 2]))
# ensure this works when changing the dtype
new_dtype = torch.float if dtype == torch.double else torch.double
posterior_batched = _get_test_posterior(
device=self.device, dtype=new_dtype, batched=True
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 3, 2]))
# ensure an error is raised when the number of points is < 2
posterior = _get_test_posterior(device=self.device, n=1, dtype=dtype)
with self.assertRaises(RuntimeError):
sampler(posterior)
# check max_num_comparisons
sampler = PairwiseSobolQMCNormalSampler(
sample_shape=torch.Size([4]), max_num_comparisons=2
)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 2, 2]))
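# Illustrative note (added sketch, not part of the original tests): a pairwise
# sampler draws `sample_shape` MC samples over the n posterior points and then
# enumerates comparisons between pairs of points, so the trailing dimensions
# are (num_comparisons, 2). With n = 3 there are C(3, 2) = 3 pairs, which is
# why the assertions above expect shape [4, 3, 2]; `max_num_comparisons=2`
# truncates the comparison dimension, giving [4, 2, 2].
def _example_pairwise_sample_shape():
    # Hypothetical helper mirroring the tests above: n = 4 points yield
    # C(4, 2) = 6 comparisons, so the samples are expected to have shape
    # torch.Size([4, 6, 2]).
    sampler = PairwiseIIDNormalSampler(sample_shape=torch.Size([4]))
    posterior = _get_test_posterior(device=torch.device("cpu"), n=4)
    return sampler(posterior).shape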
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.posteriors.ensemble import EnsemblePosterior
from botorch.sampling.index_sampler import IndexSampler
from botorch.utils.testing import BotorchTestCase
class TestIndexSampler(BotorchTestCase):
def test_index_sampler(self):
# Basic usage.
posterior = EnsemblePosterior(
values=torch.randn(torch.Size((50, 16, 1, 1))).to(self.device)
)
sampler = IndexSampler(sample_shape=torch.Size((128,)))
samples = sampler(posterior)
self.assertTrue(samples.shape == torch.Size((128, 50, 1, 1)))
self.assertTrue(sampler.base_samples.max() < 16)
self.assertTrue(sampler.base_samples.min() >= 0)
# check deterministic nature
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# test construct base samples
sampler = IndexSampler(sample_shape=torch.Size((4, 128)), seed=42)
self.assertTrue(sampler.base_samples is None)
sampler._construct_base_samples(posterior=posterior)
self.assertTrue(sampler.base_samples.shape == torch.Size((4, 128)))
self.assertTrue(
sampler.base_samples.device.type
== posterior.device.type
== self.device.type
)
base_samples = sampler.base_samples
sampler = IndexSampler(sample_shape=torch.Size((4, 128)), seed=42)
sampler._construct_base_samples(posterior=posterior)
self.assertAllClose(base_samples, sampler.base_samples)
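# Illustrative note (added sketch, not part of the original tests): an
# IndexSampler does not transform base samples through the posterior
# distribution; its `base_samples` are integer indices into the ensemble
# dimension (size 16 above, hence the `max() < 16` / `min() >= 0` checks),
# and calling the sampler gathers the indexed ensemble members.
def _example_index_sampler_indices():
    # Hypothetical helper: 8 batch points, 16 ensemble members, 32 draws.
    posterior = EnsemblePosterior(values=torch.randn(8, 16, 1, 1))
    sampler = IndexSampler(sample_shape=torch.Size([32]), seed=0)
    samples = sampler(posterior)  # expected shape: torch.Size([32, 8, 1, 1])
    return samples, sampler.base_samples  # indices lie in [0, 16)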
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling.normal import (
IIDNormalSampler,
NormalMCSampler,
SobolQMCNormalSampler,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultivariateNormal
from linear_operator.operators import DiagLinearOperator
def _get_test_posterior(device, dtype=torch.float):
mean = torch.zeros(2, device=device, dtype=dtype)
cov = torch.eye(2, device=device, dtype=dtype)
mvn = MultivariateNormal(mean, cov)
return GPyTorchPosterior(mvn)
def _get_test_posterior_batched(device, dtype=torch.float):
mean = torch.zeros(3, 2, device=device, dtype=dtype)
cov = torch.eye(2, device=device, dtype=dtype).repeat(3, 1, 1)
mvn = MultivariateNormal(mean, cov)
return GPyTorchPosterior(mvn)
class TestNormalMCSampler(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
NormalMCSampler(sample_shape=torch.Size([4]))
class TestIIDNormalSampler(BotorchTestCase):
def test_forward(self):
for dtype in (torch.float, torch.double):
sampler = IIDNormalSampler(sample_shape=torch.Size([4]), seed=1234)
self.assertEqual(sampler.seed, 1234)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
# ensure samples are the same
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# ensure this works with a differently shaped posterior
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
# ensure this works when changing the dtype
new_dtype = torch.float if dtype == torch.double else torch.double
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=new_dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
# ensure this works with a different batch_range
sampler.batch_range_override = (-3, -1)
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
class TestSobolQMCNormalSampler(BotorchTestCase):
def test_forward(self):
for dtype in (torch.float, torch.double):
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([4]), seed=1234)
self.assertEqual(sampler.seed, 1234)
# check samples non-batched
posterior = _get_test_posterior(device=self.device, dtype=dtype)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
# ensure samples are the same
samples2 = sampler(posterior)
self.assertAllClose(samples, samples2)
# ensure this works with a differently shaped posterior
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
# ensure this works when changing the dtype
new_dtype = torch.float if dtype == torch.double else torch.double
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=new_dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
# ensure this works with a different batch_range
sampler.batch_range_override = (-3, -1)
posterior_batched = _get_test_posterior_batched(
device=self.device, dtype=dtype
)
samples_batched = sampler(posterior_batched)
self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
def test_unsupported_dimension(self):
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
maxdim = torch.quasirandom.SobolEngine.MAXDIM + 1
mean = torch.zeros(maxdim)
cov = DiagLinearOperator(torch.ones(maxdim))
mvn = MultivariateNormal(mean, cov)
posterior = GPyTorchPosterior(mvn)
with self.assertRaises(UnsupportedError) as e:
sampler(posterior)
self.assertIn(f"Requested: {maxdim}", str(e.exception))
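# Illustrative note (added sketch, not part of the original tests): PyTorch's
# Sobol sequences are only tabulated up to
# torch.quasirandom.SobolEngine.MAXDIM dimensions (21201 at the time of
# writing), and a posterior requires one base-sample dimension per event
# entry (n points x m outputs), hence the UnsupportedError above when one
# extra dimension is requested.
def _example_sobol_dimension_limit():
    # Hypothetical helper: drawing at exactly MAXDIM dimensions should still
    # be supported; only MAXDIM + 1 (as in test_unsupported_dimension) raises.
    maxdim = torch.quasirandom.SobolEngine.MAXDIM
    mvn = MultivariateNormal(
        torch.zeros(maxdim), DiagLinearOperator(torch.ones(maxdim))
    )
    sampler = SobolQMCNormalSampler(sample_shape=torch.Size([1]))
    return sampler(GPyTorchPosterior(mvn)).shape  # torch.Size([1, maxdim, 1])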
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions.errors import InputDataError
from botorch.sampling.base import MCSampler
from botorch.utils.testing import BotorchTestCase, MockPosterior
class NonAbstractSampler(MCSampler):
def forward(self, posterior):
raise NotImplementedError
class OtherSampler(MCSampler):
def forward(self, posterior):
raise NotImplementedError
class TestBaseMCSampler(BotorchTestCase):
def test_MCSampler_abstract_raises(self):
with self.assertRaises(TypeError):
MCSampler()
def test_init(self):
with self.assertRaises(TypeError):
NonAbstractSampler()
# Current args.
sampler = NonAbstractSampler(sample_shape=torch.Size([4]), seed=1234)
self.assertEqual(sampler.sample_shape, torch.Size([4]))
self.assertEqual(sampler.seed, 1234)
self.assertIsNone(sampler.base_samples)
# Default seed.
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
self.assertIsInstance(sampler.seed, int)
# Deprecated args & error handling.
with self.assertWarnsRegex(DeprecationWarning, "positional argument"):
NonAbstractSampler(4)
with self.assertRaisesRegex(InputDataError, "sample_shape"):
NonAbstractSampler(4.5)
with self.assertWarnsRegex(DeprecationWarning, "resample"):
NonAbstractSampler(sample_shape=torch.Size([4]), resample=False)
with self.assertRaisesRegex(RuntimeError, "StochasticSampler"):
NonAbstractSampler(sample_shape=torch.Size([4]), resample=True)
with self.assertWarnsRegex(DeprecationWarning, "collapse_batch"):
NonAbstractSampler(sample_shape=torch.Size([4]), collapse_batch_dims=True)
with self.assertRaisesRegex(RuntimeError, "ForkedRNGSampler"):
NonAbstractSampler(sample_shape=torch.Size([4]), collapse_batch_dims=False)
with self.assertRaisesRegex(RuntimeError, "unknown argument"):
NonAbstractSampler(sample_shape=torch.Size([4]), dummy_arg=True)
def test_batch_range(self):
posterior = MockPosterior()
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
# Default: read from the posterior.
self.assertEqual(
sampler._get_batch_range(posterior=posterior), posterior.batch_range
)
# Overwrite.
sampler.batch_range_override = (0, -5)
self.assertEqual(sampler._get_batch_range(posterior=posterior), (0, -5))
def test_get_collapsed_shape(self):
posterior = MockPosterior(base_shape=torch.Size([4, 3, 2]))
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
self.assertEqual(
sampler._get_collapsed_shape(posterior=posterior), torch.Size([4, 1, 3, 2])
)
posterior = MockPosterior(
base_shape=torch.Size([3, 4, 3, 2]), batch_range=(0, 0)
)
self.assertEqual(
sampler._get_collapsed_shape(posterior=posterior),
torch.Size([4, 3, 4, 3, 2]),
)
posterior = MockPosterior(
base_shape=torch.Size([3, 4, 3, 2]), batch_range=(0, -1)
)
self.assertEqual(
sampler._get_collapsed_shape(posterior=posterior),
torch.Size([4, 1, 1, 1, 2]),
)
def test_get_extended_base_sample_shape(self):
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
posterior = MockPosterior(base_shape=torch.Size([3, 2]))
self.assertEqual(
sampler._get_extended_base_sample_shape(posterior=posterior),
torch.Size([4, 3, 2]),
)
posterior = MockPosterior(base_shape=torch.Size([3, 5, 3, 2]))
bss = sampler._get_extended_base_sample_shape(posterior=posterior)
self.assertEqual(bss, torch.Size([4, 3, 5, 3, 2]))
def test_update_base_samples(self):
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
with self.assertRaisesRegex(NotImplementedError, "update_base"):
sampler._update_base_samples(
posterior=MockPosterior(), base_sampler=sampler
)
def test_instance_check(self):
sampler = NonAbstractSampler(sample_shape=torch.Size([4]))
# Same type:
sampler._instance_check(sampler)
# Different type:
other = OtherSampler(sample_shape=torch.Size([4]))
with self.assertRaisesRegex(RuntimeError, "an instance of"):
sampler._instance_check(base_sampler=other)
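# Worked example (added note, not part of the original tests): the collapsed
# shape prepends `sample_shape` and sets every dimension inside the
# posterior's batch range to 1 so that base samples can be shared across
# batches. For base_shape [3, 4, 3, 2] with batch_range (0, -1), dims 0..2
# are batch dims and collapse, giving [4] + [1, 1, 1, 2] = [4, 1, 1, 1, 2];
# with batch_range (0, 0) no dims collapse, giving [4, 3, 4, 3, 2], exactly
# as asserted in test_get_collapsed_shape above.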
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import math
import numpy as np
import torch
from botorch.sampling.qmc import MultivariateNormalQMCEngine, NormalQMCEngine
from botorch.utils.testing import BotorchTestCase
from scipy.stats import shapiro
class NormalQMCTests(BotorchTestCase):
def test_NormalQMCEngine(self):
for d in (1, 2):
engine = NormalQMCEngine(d=d)
samples = engine.draw()
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([1, d]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, d]))
# test double dtype
samples = engine.draw(dtype=torch.double)
self.assertEqual(samples.dtype, torch.double)
self.assertEqual(samples.shape, torch.Size([1, d]))
def test_NormalQMCEngineInvTransform(self):
for d in (1, 2):
engine = NormalQMCEngine(d=d, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([1, d]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, d]))
# test double dtype
samples = engine.draw(dtype=torch.double)
self.assertEqual(samples.dtype, torch.double)
self.assertEqual(samples.shape, torch.Size([1, d]))
def test_NormalQMCEngineSeeded(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([2, 2]))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.shape, torch.Size([2, 3]))
def test_NormalQMCEngineSeededOut(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345)
out = torch.zeros(2, 2)
self.assertIsNone(engine.draw(n=2, out=out))
self.assertTrue(torch.all(out != 0))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345)
out = torch.empty(2, 3)
self.assertIsNone(engine.draw(n=2, out=out))
self.assertTrue(torch.all(out != 0))
def test_NormalQMCEngineSeededInvTransform(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345, inv_transform=True)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([2, 2]))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345, inv_transform=True)
samples = engine.draw(n=2)
self.assertEqual(samples.shape, torch.Size([2, 3]))
def test_NormalQMCEngineShapiro(self):
engine = NormalQMCEngine(d=2, seed=12345)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, torch.float)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
def test_NormalQMCEngineShapiroInvTransform(self):
engine = NormalQMCEngine(d=2, seed=12345, inv_transform=True)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, torch.float)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
class MultivariateNormalQMCTests(BotorchTestCase):
def test_MultivariateNormalQMCEngineShapeErrors(self):
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=torch.zeros(2), cov=torch.zeros(2, 1))
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=torch.zeros(1), cov=torch.eye(2))
def test_MultivariateNormalQMCEngineNonPSD(self):
for dtype in (torch.float, torch.double):
# try with non-psd, non-pd cov and expect a ValueError
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.tensor([[1, 2], [2, 1]], device=self.device, dtype=dtype)
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=mean, cov=cov)
def test_MultivariateNormalQMCEngineNonPD(self):
for dtype in (torch.float, torch.double):
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = torch.tensor(
[[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=self.device, dtype=dtype
)
# try with non-pd but psd cov; should work
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov)
self.assertTrue(engine._corr_matrix is not None)
def test_MultivariateNormalQMCEngineSymmetric(self):
for dtype in (torch.float, torch.double):
# try with non-symmetric cov and expect an error
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.tensor([[1, 0], [2, 1]], device=self.device, dtype=dtype)
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=mean, cov=cov)
def test_MultivariateNormalQMCEngine(self):
for d, dtype in itertools.product((1, 2, 3), (torch.float, torch.double)):
mean = torch.rand(d, device=self.device, dtype=dtype)
cov = torch.eye(d, device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertEqual(samples.shape, torch.Size([1, d]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, d]))
def test_MultivariateNormalQMCEngineInvTransform(self):
for d, dtype in itertools.product((1, 2, 3), (torch.float, torch.double)):
mean = torch.rand(d, device=self.device, dtype=dtype)
cov = torch.eye(d, device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertEqual(samples.shape, torch.Size([1, d]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, d]))
def test_MultivariateNormalQMCEngineSeeded(self):
for dtype in (torch.float, torch.double):
# test even dimension
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
# test odd dimension
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean, cov, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
def test_MultivariateNormalQMCEngineSeededOut(self):
for dtype in (torch.float, torch.double):
# test even dimension
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
out = torch.zeros(2, 2, device=self.device, dtype=dtype)
self.assertIsNone(engine.draw(n=2, out=out))
self.assertTrue(torch.all(out != 0))
# test odd dimension
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean, cov, seed=12345)
out = torch.zeros(2, 3, device=self.device, dtype=dtype)
self.assertIsNone(engine.draw(n=2, out=out))
self.assertTrue(torch.all(out != 0))
def test_MultivariateNormalQMCEngineSeededInvTransform(self):
for dtype in (torch.float, torch.double):
# test even dimension
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
# test odd dimension
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = cov.to(device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
def test_MultivariateNormalQMCEngineShapiro(self):
for dtype in (torch.float, torch.double):
# test the standard case
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.eye(2, device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
# test the correlated, non-zero mean case
mean = torch.tensor([1.0, 2.0], device=self.device, dtype=dtype)
cov = torch.tensor(
[[1.5, 0.5], [0.5, 1.5]], device=self.device, dtype=dtype
)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0) - mean) < 1e-2))
self.assertTrue(
torch.all(torch.abs(samples.std(dim=0) - math.sqrt(1.5)) < 1e-2)
)
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# check covariance
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1] - 0.5), 1e-2)
def test_MultivariateNormalQMCEngineShapiroInvTransform(self):
for dtype in (torch.float, torch.double):
# test the standard case
mean = torch.zeros(2, device=self.device, dtype=dtype)
cov = torch.eye(2, device=self.device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
# test the correlated, non-zero mean case
mean = torch.tensor([1.0, 2.0], device=self.device, dtype=dtype)
cov = torch.tensor(
[[1.5, 0.5], [0.5, 1.5]], device=self.device, dtype=dtype
)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=256)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0) - mean) < 1e-2))
self.assertTrue(
torch.all(torch.abs(samples.std(dim=0) - math.sqrt(1.5)) < 1e-2)
)
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# check covariance
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1] - 0.5), 1e-2)
def test_MultivariateNormalQMCEngineDegenerate(self):
for dtype in (torch.float, torch.double):
# X, Y iid standard Normal and Z = X + Y, random vector (X, Y, Z)
mean = torch.zeros(3, device=self.device, dtype=dtype)
cov = torch.tensor(
[[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=self.device, dtype=dtype
)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=4096)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, self.device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.abs(torch.std(samples[:, 0]) - 1) < 1e-2)
self.assertTrue(torch.abs(torch.std(samples[:, 1]) - 1) < 1e-2)
self.assertTrue(torch.abs(torch.std(samples[:, 2]) - math.sqrt(2)) < 1e-2)
for i in (0, 1, 2):
_, pval = shapiro(samples[:, i].cpu().numpy())
self.assertGreater(pval, 0.9)
cov = np.cov(samples.cpu().numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
self.assertLess(np.abs(cov[0, 2] - 1), 1e-2)
# check to see if X + Y = Z almost exactly
self.assertTrue(
torch.all(
torch.abs(samples[:, 0] + samples[:, 1] - samples[:, 2]) < 1e-5
)
)
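# Usage sketch (added, not part of the original tests): the engine draws
# standard-normal quasi-random points and maps them through a matrix square
# root of `cov` (the `_corr_matrix` checked above), which is why the
# PSD-but-singular "degenerate" case still works and X + Y recovers Z almost
# exactly.
def _example_mvn_qmc_draw():
    mean = torch.tensor([1.0, 2.0])
    cov = torch.tensor([[1.5, 0.5], [0.5, 1.5]])
    engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=0)
    return engine.draw(n=256)  # tensor of shape (256, 2)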
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from unittest.mock import patch
import torch
from botorch.models import SingleTaskGP, SingleTaskVariationalGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.pathwise.utils import (
get_input_transform,
get_output_transform,
get_train_inputs,
get_train_targets,
InverseLengthscaleTransform,
OutcomeUntransformer,
)
from botorch.utils.context_managers import delattr_ctx
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, ScaleKernel
class TestTransforms(BotorchTestCase):
def test_inverse_lengthscale_transform(self):
tkwargs = {"device": self.device, "dtype": torch.float64}
kernel = MaternKernel(nu=2.5, ard_num_dims=3).to(**tkwargs)
with self.assertRaisesRegex(RuntimeError, "does not implement `lengthscale`"):
InverseLengthscaleTransform(ScaleKernel(kernel))
x = torch.rand(3, 3, **tkwargs)
transform = InverseLengthscaleTransform(kernel)
self.assertTrue(transform(x).equal(kernel.lengthscale.reciprocal() * x))
def test_outcome_untransformer(self):
for untransformer in (
OutcomeUntransformer(transform=Standardize(m=1), num_outputs=1),
OutcomeUntransformer(transform=Standardize(m=2), num_outputs=2),
):
with torch.random.fork_rng():
torch.random.manual_seed(0)
y = torch.rand(untransformer.num_outputs, 4, device=self.device)
x = untransformer.transform(y.T)[0].T
self.assertTrue(y.allclose(untransformer(x)))
class TestGetters(BotorchTestCase):
def setUp(self):
super().setUp()
with torch.random.fork_rng():
torch.random.manual_seed(0)
train_X = torch.rand(5, 2)
train_Y = torch.randn(5, 2)
self.models = []
for num_outputs in (1, 2):
self.models.append(
SingleTaskGP(
train_X=train_X,
train_Y=train_Y[:, :num_outputs],
input_transform=Normalize(d=2),
outcome_transform=Standardize(m=num_outputs),
)
)
self.models.append(
SingleTaskVariationalGP(
train_X=train_X,
train_Y=train_Y[:, :num_outputs],
input_transform=Normalize(d=2),
outcome_transform=Standardize(m=num_outputs),
)
)
def test_get_input_transform(self):
for model in self.models:
self.assertIs(get_input_transform(model), model.input_transform)
def test_get_output_transform(self):
for model in self.models:
transform = get_output_transform(model)
self.assertIsInstance(transform, OutcomeUntransformer)
self.assertIs(transform.transform, model.outcome_transform)
def test_get_train_inputs(self):
for model in self.models:
model.train()
X = (
model.model.train_inputs[0]
if isinstance(model, SingleTaskVariationalGP)
else model.train_inputs[0]
)
Z = model.input_transform(X)
train_inputs = get_train_inputs(model, transformed=False)
self.assertIsInstance(train_inputs, tuple)
self.assertEqual(len(train_inputs), 1)
self.assertTrue(X.equal(get_train_inputs(model, transformed=False)[0]))
self.assertTrue(Z.equal(get_train_inputs(model, transformed=True)[0]))
model.eval()
self.assertTrue(X.equal(get_train_inputs(model, transformed=False)[0]))
self.assertTrue(Z.equal(get_train_inputs(model, transformed=True)[0]))
with delattr_ctx(model, "input_transform"), patch.object(
model, "_original_train_inputs", new=None
):
self.assertTrue(Z.equal(get_train_inputs(model, transformed=False)[0]))
self.assertTrue(Z.equal(get_train_inputs(model, transformed=True)[0]))
with self.subTest("test_model_list"):
model_list = ModelListGP(*self.models)
input_list = get_train_inputs(model_list)
self.assertIsInstance(input_list, list)
self.assertEqual(len(input_list), len(self.models))
for model, train_inputs in zip(model_list.models, input_list):
for a, b in zip(train_inputs, get_train_inputs(model)):
self.assertTrue(a.equal(b))
def test_get_train_targets(self):
for model in self.models:
model.train()
if isinstance(model, SingleTaskVariationalGP):
F = model.model.train_targets
Y = model.outcome_transform.untransform(F)[0].squeeze(dim=0)
else:
F = model.train_targets
Y = OutcomeUntransformer(model.outcome_transform, model.num_outputs)(F)
self.assertTrue(F.equal(get_train_targets(model, transformed=True)))
self.assertTrue(Y.equal(get_train_targets(model, transformed=False)))
model.eval()
self.assertTrue(F.equal(get_train_targets(model, transformed=True)))
self.assertTrue(Y.equal(get_train_targets(model, transformed=False)))
with delattr_ctx(model, "outcome_transform"):
self.assertTrue(F.equal(get_train_targets(model, transformed=True)))
self.assertTrue(F.equal(get_train_targets(model, transformed=False)))
with self.subTest("test_model_list"):
model_list = ModelListGP(*self.models)
target_list = get_train_targets(model_list)
self.assertIsInstance(target_list, list)
self.assertEqual(len(target_list), len(self.models))
for model, Y in zip(self.models, target_list):
self.assertTrue(Y.equal(get_train_targets(model)))
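# Illustrative note (added, not part of the original tests): `transformed=True`
# returns the training data as the underlying GP sees it (after the input
# transform / before the outcome untransform), while `transformed=False`
# returns it in the original problem space; the tests above check both views
# in train and eval mode, plus the fallback when a transform is absent.
def _example_train_data_views(model):
    # Hypothetical usage: Normalize preserves shapes, so the two views have
    # matching shapes but live in different spaces.
    (X,) = get_train_inputs(model, transformed=False)
    (Z,) = get_train_inputs(model, transformed=True)
    return X.shape == Z.shape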
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
from itertools import product
import torch
from botorch.models import (
FixedNoiseGP,
ModelListGP,
SingleTaskGP,
SingleTaskVariationalGP,
)
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.pathwise import draw_matheron_paths, MatheronPath, PathList
from botorch.sampling.pathwise.utils import get_train_inputs
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, ScaleKernel
from torch import Size
from torch.nn.functional import pad
from .helpers import get_sample_moments, standardize_moments
class TestPosteriorSamplers(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.models = defaultdict(list)
seed = 0
for kernel in (
ScaleKernel(MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([]))),
):
with torch.random.fork_rng():
torch.manual_seed(seed)
tkwargs = {"device": self.device, "dtype": torch.float64}
base = kernel.base_kernel if isinstance(kernel, ScaleKernel) else kernel
base.lengthscale = 0.1 + 0.3 * torch.rand_like(base.lengthscale)
kernel.to(**tkwargs)
uppers = 1 + 9 * torch.rand(base.lengthscale.shape[-1], **tkwargs)
bounds = pad(uppers.unsqueeze(0), (0, 0, 1, 0))
X = uppers * torch.rand(4, base.lengthscale.shape[-1], **tkwargs)
Y = 10 * kernel(X).cholesky() @ torch.randn(4, 1, **tkwargs)
if kernel.batch_shape:
Y = Y.squeeze(-1).transpose(0, 1) # n x m
input_transform = Normalize(d=X.shape[-1], bounds=bounds)
outcome_transform = Standardize(m=Y.shape[-1])
# SingleTaskGP in eval mode
self.models[SingleTaskGP].append(
SingleTaskGP(
train_X=X,
train_Y=Y,
covar_module=deepcopy(kernel),
input_transform=deepcopy(input_transform),
outcome_transform=deepcopy(outcome_transform),
)
.to(**tkwargs)
.eval()
)
# FixedNoiseGP in train mode
self.models[FixedNoiseGP].append(
FixedNoiseGP(
train_X=X,
train_Y=Y,
train_Yvar=0.01 * torch.rand_like(Y),
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
# SingleTaskVariationalGP in train mode
self.models[SingleTaskVariationalGP].append(
SingleTaskVariationalGP(
train_X=X,
train_Y=Y,
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
seed += 1
def test_draw_matheron_paths(self):
for seed, models in enumerate(self.models.values()):
for model, sample_shape in product(models, [Size([1024]), Size([32, 32])]):
with torch.random.fork_rng():
torch.random.manual_seed(seed)
paths = draw_matheron_paths(model=model, sample_shape=sample_shape)
self.assertIsInstance(paths, MatheronPath)
self._test_draw_matheron_paths(model, paths, sample_shape)
with self.subTest("test_model_list"):
model_list = ModelListGP(
self.models[SingleTaskGP][0], self.models[FixedNoiseGP][0]
)
path_list = draw_matheron_paths(model_list, sample_shape=sample_shape)
(train_X,) = get_train_inputs(model_list.models[0], transformed=False)
X = torch.zeros(
4, train_X.shape[-1], dtype=train_X.dtype, device=self.device
)
sample_list = path_list(X)
self.assertIsInstance(path_list, PathList)
self.assertIsInstance(sample_list, list)
self.assertEqual(len(sample_list), len(path_list.paths))
def _test_draw_matheron_paths(self, model, paths, sample_shape, atol=3):
(train_X,) = get_train_inputs(model, transformed=False)
X = torch.rand(16, train_X.shape[-1], dtype=train_X.dtype, device=self.device)
# Evaluate sample paths and compute sample statistics
samples = paths(X)
batch_shape = (
model.model.covar_module.batch_shape
if isinstance(model, SingleTaskVariationalGP)
else model.covar_module.batch_shape
)
self.assertEqual(samples.shape, sample_shape + batch_shape + X.shape[-2:-1])
sample_moments = get_sample_moments(samples, sample_shape)
if hasattr(model, "outcome_transform"):
# Do this instead of untransforming exact moments
sample_moments = standardize_moments(
model.outcome_transform, *sample_moments
)
if model.training:
model.eval()
mvn = model(model.transform_inputs(X))
model.train()
else:
mvn = model(model.transform_inputs(X))
exact_moments = (mvn.loc, mvn.covariance_matrix)
# Compare moments
num_features = paths["prior_paths"].weight.shape[-1]
tol = atol * (num_features**-0.5 + sample_shape.numel() ** -0.5)
for exact, estimate in zip(exact_moments, sample_moments):
self.assertTrue(exact.allclose(estimate, atol=tol, rtol=0))
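# Illustrative note (added, not part of the original tests): a Matheron path
# is a pathwise posterior sample of the form f_post(x) = f_prior(x) +
# update(x), where the update corrects a finite-feature prior sample path by
# the observed residuals. The tolerance above combines both error sources:
# num_features ** -0.5 for the feature approximation of the prior and
# sample_shape.numel() ** -0.5 for the Monte Carlo error in the moments.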
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.sampling.pathwise.paths import PathDict, PathList, SamplePath
from botorch.utils.testing import BotorchTestCase
from torch.nn import ModuleDict, ModuleList
class IdentityPath(SamplePath):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x
class TestGenericPaths(BotorchTestCase):
def test_path_dict(self):
with self.assertRaisesRegex(UnsupportedError, "must be preceded by a join"):
PathDict(output_transform="foo")
A = IdentityPath()
B = IdentityPath()
# Test __init__
module_dict = ModuleDict({"0": A, "1": B})
path_dict = PathDict(paths={"0": A, "1": B})
self.assertTrue(path_dict.paths is not module_dict)
path_dict = PathDict(paths=module_dict)
self.assertIs(path_dict.paths, module_dict)
# Test __call__
x = torch.rand(3, device=self.device)
output = path_dict(x)
self.assertIsInstance(output, dict)
self.assertTrue(x.equal(output.pop("0")))
self.assertTrue(x.equal(output.pop("1")))
self.assertTrue(not output)
path_dict.join = torch.stack
output = path_dict(x)
self.assertIsInstance(output, torch.Tensor)
self.assertEqual(output.shape, (2,) + x.shape)
self.assertTrue(output.eq(x).all())
# Test `dict` methods
self.assertEqual(len(path_dict), 2)
for key, val, (key_0, val_0), (key_1, val_1), key_2 in zip(
path_dict,
path_dict.values(),
path_dict.items(),
path_dict.paths.items(),
path_dict.keys(),
):
self.assertEqual(1, len({key, key_0, key_1, key_2}))
self.assertEqual(1, len({val, val_0, val_1, path_dict[key]}))
path_dict["1"] = A # test __setitem__
self.assertIs(path_dict.paths["1"], A)
del path_dict["1"] # test __delitem__
self.assertEqual(("0",), tuple(path_dict))
def test_path_list(self):
with self.assertRaisesRegex(UnsupportedError, "must be preceded by a join"):
PathList(output_transform="foo")
# Test __init__
A = IdentityPath()
B = IdentityPath()
module_list = ModuleList((A, B))
path_list = PathList(paths=list(module_list))
self.assertTrue(path_list.paths is not module_list)
path_list = PathList(paths=module_list)
self.assertIs(path_list.paths, module_list)
# Test __call__
x = torch.rand(3, device=self.device)
output = path_list(x)
self.assertIsInstance(output, list)
self.assertTrue(x.equal(output.pop()))
self.assertTrue(x.equal(output.pop()))
self.assertTrue(not output)
path_list.join = torch.stack
output = path_list(x)
self.assertIsInstance(output, torch.Tensor)
self.assertEqual(output.shape, (2,) + x.shape)
self.assertTrue(output.eq(x).all())
# Test `list` methods
self.assertEqual(len(path_list), 2)
for key, (path, path_0) in enumerate(zip(path_list, path_list.paths)):
self.assertEqual(1, len({path, path_0, path_list[key]}))
path_list[1] = A # test __setitem__
self.assertIs(path_list.paths[1], A)
del path_list[1] # test __delitem__
self.assertEqual((A,), tuple(path_list))
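# Minimal sketch (added, not part of the original tests): `join` collapses the
# dict/list of per-path outputs into a single tensor, which is what the
# `path_dict.join = torch.stack` blocks above exercise.
class _DoublePath(SamplePath):
    # Hypothetical path used only for illustration.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return 2 * x
def _example_joined_paths():
    paths = PathList(paths=[IdentityPath(), _DoublePath()])
    paths.join = torch.stack
    x = torch.rand(3)
    return paths(x)  # shape (2, 3): stacked outputs of both paths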
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Tuple
from botorch.models.transforms.outcome import Standardize
from torch import Size, Tensor
def get_sample_moments(samples: Tensor, sample_shape: Size) -> Tuple[Tensor, Tensor]:
sample_dim = len(sample_shape)
samples = samples.view(-1, *samples.shape[sample_dim:])
loc = samples.mean(dim=0)
residuals = (samples - loc).permute(*range(1, samples.ndim), 0)
return loc, (residuals @ residuals.transpose(-2, -1)) / sample_shape.numel()
def standardize_moments(
transform: Standardize,
loc: Tensor,
covariance_matrix: Tensor,
) -> Tuple[Tensor, Tensor]:
m = transform.means.squeeze().unsqueeze(-1)
s = transform.stdvs.squeeze().reciprocal().unsqueeze(-1)
loc = s * (loc - m)
correlation_matrix = s.unsqueeze(-1) * covariance_matrix * s.unsqueeze(-2)
return loc, correlation_matrix
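# Worked note (added, not part of the original helpers): standardize_moments
# maps exact moments into the Standardize-transformed space so they can be
# compared against sample moments of untransformed paths:
#   loc' = (loc - means) / stdvs
#   cov' = cov / (stdvs stdvs^T)
# i.e. the affine change of variables y' = (y - m) / s applied to both moments.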
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
from itertools import product
from unittest.mock import MagicMock
import torch
from botorch.models import (
FixedNoiseGP,
ModelListGP,
SingleTaskGP,
SingleTaskVariationalGP,
)
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.pathwise import (
draw_kernel_feature_paths,
GeneralizedLinearPath,
PathList,
)
from botorch.sampling.pathwise.utils import get_train_inputs
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel
from torch import Size
from torch.nn.functional import pad
from .helpers import get_sample_moments, standardize_moments
class TestPriorSamplers(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.models = defaultdict(list)
self.num_features = 1024
seed = 0
for kernel in (
MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([])),
ScaleKernel(RBFKernel(ard_num_dims=2, batch_shape=Size([2]))),
):
with torch.random.fork_rng():
torch.manual_seed(seed)
tkwargs = {"device": self.device, "dtype": torch.float64}
base = kernel.base_kernel if isinstance(kernel, ScaleKernel) else kernel
base.lengthscale = 0.1 + 0.3 * torch.rand_like(base.lengthscale)
kernel.to(**tkwargs)
uppers = 1 + 9 * torch.rand(base.lengthscale.shape[-1], **tkwargs)
bounds = pad(uppers.unsqueeze(0), (0, 0, 1, 0))
X = uppers * torch.rand(4, base.lengthscale.shape[-1], **tkwargs)
Y = 10 * kernel(X).cholesky() @ torch.randn(4, 1, **tkwargs)
if kernel.batch_shape:
Y = Y.squeeze(-1).transpose(0, 1) # n x m
input_transform = Normalize(d=X.shape[-1], bounds=bounds)
outcome_transform = Standardize(m=Y.shape[-1])
# SingleTaskGP in eval mode
self.models[SingleTaskGP].append(
SingleTaskGP(
train_X=X,
train_Y=Y,
covar_module=deepcopy(kernel),
input_transform=deepcopy(input_transform),
outcome_transform=deepcopy(outcome_transform),
)
.to(**tkwargs)
.eval()
)
# FixedNoiseGP in train mode
self.models[FixedNoiseGP].append(
FixedNoiseGP(
train_X=X,
train_Y=Y,
train_Yvar=0.01 * torch.rand_like(Y),
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
# SingleTaskVariationalGP in train mode
# When batched, SingleTaskVariationalGP uses a multitask format, which breaks the tests below
if not kernel.batch_shape:
self.models[SingleTaskVariationalGP].append(
SingleTaskVariationalGP(
train_X=X,
train_Y=Y,
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
seed += 1
def test_draw_kernel_feature_paths(self):
for seed, models in enumerate(self.models.values()):
for model, sample_shape in product(models, [Size([1024]), Size([2, 512])]):
with torch.random.fork_rng():
torch.random.manual_seed(seed)
paths = draw_kernel_feature_paths(
model=model,
sample_shape=sample_shape,
num_features=self.num_features,
)
self.assertIsInstance(paths, GeneralizedLinearPath)
self._test_draw_kernel_feature_paths(model, paths, sample_shape)
with self.subTest("test_model_list"):
model_list = ModelListGP(
self.models[SingleTaskGP][0], self.models[FixedNoiseGP][0]
)
path_list = draw_kernel_feature_paths(
model=model_list,
sample_shape=sample_shape,
num_features=self.num_features,
)
(train_X,) = get_train_inputs(model_list.models[0], transformed=False)
X = torch.zeros(
4, train_X.shape[-1], dtype=train_X.dtype, device=self.device
)
sample_list = path_list(X)
self.assertIsInstance(path_list, PathList)
self.assertIsInstance(sample_list, list)
self.assertEqual(len(sample_list), len(path_list.paths))
with self.subTest("test_initialization"):
model = self.models[SingleTaskGP][0]
sample_shape = torch.Size([16])
expected_weight_shape = (
sample_shape + model.covar_module.batch_shape + (self.num_features,)
)
weight_generator = MagicMock(
side_effect=lambda _: torch.rand(expected_weight_shape)
)
draw_kernel_feature_paths(
model=model,
sample_shape=sample_shape,
num_features=self.num_features,
weight_generator=weight_generator,
)
weight_generator.assert_called_once_with(expected_weight_shape)
def _test_draw_kernel_feature_paths(self, model, paths, sample_shape, atol=3):
(train_X,) = get_train_inputs(model, transformed=False)
X = torch.rand(16, train_X.shape[-1], dtype=train_X.dtype, device=self.device)
# Evaluate sample paths
samples = paths(X)
batch_shape = (
model.model.covar_module.batch_shape
if isinstance(model, SingleTaskVariationalGP)
else model.covar_module.batch_shape
)
self.assertEqual(samples.shape, sample_shape + batch_shape + X.shape[-2:-1])
# Calculate sample statistics
sample_moments = get_sample_moments(samples, sample_shape)
if hasattr(model, "outcome_transform"):
# Do this instead of untransforming exact moments
sample_moments = standardize_moments(
model.outcome_transform, *sample_moments
)
# Compute prior distribution
prior = model.forward(X if model.training else model.input_transform(X))
exact_moments = (prior.loc, prior.covariance_matrix)
# Compare moments
tol = atol * (paths.weight.shape[-1] ** -0.5 + sample_shape.numel() ** -0.5)
for exact, estimate in zip(exact_moments, sample_moments):
self.assertTrue(exact.allclose(estimate, atol=tol, rtol=0))
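# Illustrative note (added, not part of the original tests): a kernel feature
# path is a generalized linear model f(x) = w^T phi(x), where phi is a random
# Fourier-type feature map of the kernel and w is typically drawn i.i.d.
# standard normal; `weight_generator` is the hook that draws w, which is why
# the mock above is called exactly once with shape
# sample_shape x batch_shape x num_features.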
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
from itertools import chain
from unittest.mock import patch
import torch
from botorch.models import FixedNoiseGP, SingleTaskGP, SingleTaskVariationalGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.pathwise import (
draw_kernel_feature_paths,
gaussian_update,
GeneralizedLinearPath,
KernelEvaluationMap,
)
from botorch.sampling.pathwise.utils import get_train_inputs, get_train_targets
from botorch.utils.context_managers import delattr_ctx
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel
from gpytorch.likelihoods import BernoulliLikelihood
from linear_operator.operators import ZeroLinearOperator
from linear_operator.utils.cholesky import psd_safe_cholesky
from torch import Size
from torch.nn.functional import pad
class TestPathwiseUpdates(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.models = defaultdict(list)
seed = 0
for kernel in (
RBFKernel(ard_num_dims=2),
ScaleKernel(MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([2]))),
):
with torch.random.fork_rng():
torch.manual_seed(seed)
tkwargs = {"device": self.device, "dtype": torch.float64}
base = kernel.base_kernel if isinstance(kernel, ScaleKernel) else kernel
base.lengthscale = 0.1 + 0.3 * torch.rand_like(base.lengthscale)
kernel.to(**tkwargs)
uppers = 1 + 9 * torch.rand(base.lengthscale.shape[-1], **tkwargs)
bounds = pad(uppers.unsqueeze(0), (0, 0, 1, 0))
X = uppers * torch.rand(4, base.lengthscale.shape[-1], **tkwargs)
Y = 10 * kernel(X).cholesky() @ torch.randn(4, 1, **tkwargs)
if kernel.batch_shape:
Y = Y.squeeze(-1).transpose(0, 1) # n x m
input_transform = Normalize(d=X.shape[-1], bounds=bounds)
outcome_transform = Standardize(m=Y.shape[-1])
# SingleTaskGP in eval mode
self.models[SingleTaskGP].append(
SingleTaskGP(
train_X=X,
train_Y=Y,
covar_module=deepcopy(kernel),
input_transform=deepcopy(input_transform),
outcome_transform=deepcopy(outcome_transform),
)
.to(**tkwargs)
.eval()
)
# FixedNoiseGP in train mode
self.models[FixedNoiseGP].append(
FixedNoiseGP(
train_X=X,
train_Y=Y,
train_Yvar=0.01 * torch.rand_like(Y),
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
# SingleTaskVariationalGP in train mode
# When batched, SingleTaskVariationalGP uses a multitask format, which breaks the tests below
if not kernel.batch_shape:
self.models[SingleTaskVariationalGP].append(
SingleTaskVariationalGP(
train_X=X,
train_Y=Y,
covar_module=kernel,
input_transform=input_transform,
outcome_transform=outcome_transform,
).to(**tkwargs)
)
seed += 1
def test_gaussian_updates(self):
for seed, model in enumerate(chain.from_iterable(self.models.values())):
with torch.random.fork_rng():
torch.manual_seed(seed)
self._test_gaussian_updates(model)
def _test_gaussian_updates(self, model):
sample_shape = torch.Size([3])
# Extract exact conditions and precompute covariances
if isinstance(model, SingleTaskVariationalGP):
Z = model.model.variational_strategy.inducing_points
X = (
Z
if model.input_transform is None
else model.input_transform.untransform(Z)
)
U = torch.randn(len(Z), device=Z.device, dtype=Z.dtype)
Kuu = Kmm = model.model.covar_module(Z)
noise_values = None
else:
(X,) = get_train_inputs(model, transformed=False)
(Z,) = get_train_inputs(model, transformed=True)
U = get_train_targets(model, transformed=True)
Kmm = model.forward(X if model.training else Z).lazy_covariance_matrix
Kuu = Kmm + model.likelihood.noise_covar(shape=Z.shape[:-1])
noise_values = torch.randn(
*sample_shape, *U.shape, device=U.device, dtype=U.dtype
)
# Disable sampling of noise variables `e` used to obtain `y = f + e`
with delattr_ctx(model, "outcome_transform"), patch.object(
torch,
"randn_like",
return_value=noise_values,
):
prior_paths = draw_kernel_feature_paths(model, sample_shape=sample_shape)
sample_values = prior_paths(X)
update_paths = gaussian_update(
model=model,
sample_values=sample_values,
target_values=U,
)
# Test initialization
self.assertIsInstance(update_paths, GeneralizedLinearPath)
self.assertIsInstance(update_paths.feature_map, KernelEvaluationMap)
self.assertTrue(update_paths.feature_map.points.equal(Z))
self.assertIs(
update_paths.feature_map.input_transform,
getattr(model, "input_transform", None),
)
# Compare with manually computed update weights `Cov(y, y)^{-1} (y - f - e)`
Luu = psd_safe_cholesky(Kuu.to_dense())
errors = U - sample_values
if noise_values is not None:
errors -= (
model.likelihood.noise_covar(shape=Z.shape[:-1]).cholesky()
@ noise_values.unsqueeze(-1)
).squeeze(-1)
weight = torch.cholesky_solve(errors.unsqueeze(-1), Luu).squeeze(-1)
self.assertTrue(weight.allclose(update_paths.weight))
# Compare with manually computed update values at test locations
Z2 = torch.rand(16, Z.shape[-1], device=self.device, dtype=Z.dtype)
X2 = (
model.input_transform.untransform(Z2)
if hasattr(model, "input_transform")
else Z2
)
features = update_paths.feature_map(X2)
expected_updates = (features @ update_paths.weight.unsqueeze(-1)).squeeze(-1)
actual_updates = update_paths(X2)
self.assertTrue(actual_updates.allclose(expected_updates))
# Test passing `noise_covariance`
m = Z.shape[-2]
update_paths = gaussian_update(
model=model,
sample_values=sample_values,
target_values=U,
noise_covariance=ZeroLinearOperator(m, m, dtype=X.dtype),
)
Lmm = psd_safe_cholesky(Kmm.to_dense())
errors = U - sample_values
weight = torch.cholesky_solve(errors.unsqueeze(-1), Lmm).squeeze(-1)
self.assertTrue(weight.allclose(update_paths.weight))
if isinstance(model, SingleTaskVariationalGP):
# Test passing non-zero `noise_covariance`
with patch.object(model, "likelihood", new=BernoulliLikelihood()):
with self.assertRaisesRegex(NotImplementedError, "not yet supported"):
gaussian_update(
model=model,
sample_values=sample_values,
noise_covariance="foo",
)
else:
# Test exact models with non-Gaussian likelihoods
with patch.object(model, "likelihood", new=BernoulliLikelihood()):
with self.assertRaises(NotImplementedError):
gaussian_update(model=model, sample_values=sample_values)
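# Illustrative note (added, not part of the original tests): the update
# weights verified above solve
#   w = (K + Sigma_noise)^{-1} (y - f - e)
# via a Cholesky factor and `torch.cholesky_solve`; passing
# `noise_covariance=ZeroLinearOperator(m, m)` switches to the noiseless
# interpolation weights w = K^{-1} (y - f), as in the second check.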
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from unittest.mock import MagicMock, patch
import torch
from botorch.sampling.pathwise.features import KernelEvaluationMap, KernelFeatureMap
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel
from torch import Size
class TestFeatureMaps(BotorchTestCase):
def test_kernel_evaluation_map(self):
kernel = MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([2]))
kernel.to(device=self.device)
with torch.random.fork_rng():
torch.manual_seed(0)
kernel.lengthscale = 0.1 + 0.3 * torch.rand_like(kernel.lengthscale)
with self.assertRaisesRegex(RuntimeError, "Shape mismatch"):
KernelEvaluationMap(kernel=kernel, points=torch.rand(4, 3, 2))
for dtype in (torch.float32, torch.float64):
kernel.to(dtype=dtype)
X0, X1 = torch.rand(5, 2, dtype=dtype, device=self.device).split([2, 3])
kernel_map = KernelEvaluationMap(kernel=kernel, points=X1)
self.assertEqual(kernel_map.batch_shape, kernel.batch_shape)
self.assertEqual(kernel_map.num_outputs, X1.shape[-1])
self.assertTrue(kernel_map(X0).to_dense().equal(kernel(X0, X1).to_dense()))
with patch.object(
kernel_map, "output_transform", new=lambda z: torch.concat([z, z], dim=-1)
):
self.assertEqual(kernel_map.num_outputs, 2 * X1.shape[-1])
def test_kernel_feature_map(self):
d = 2
m = 3
weight = torch.rand(m, d, device=self.device)
bias = torch.rand(m, device=self.device)
kernel = MaternKernel(nu=2.5, batch_shape=Size([3])).to(self.device)
feature_map = KernelFeatureMap(
kernel=kernel,
weight=weight,
bias=bias,
input_transform=MagicMock(side_effect=lambda x: x),
output_transform=MagicMock(side_effect=lambda z: z.exp()),
)
X = torch.rand(2, d, device=self.device)
features = feature_map(X)
feature_map.input_transform.assert_called_once_with(X)
feature_map.output_transform.assert_called_once()
self.assertTrue((X @ weight.transpose(-2, -1) + bias).exp().equal(features))
# Test batch_shape and num_outputs
self.assertIs(feature_map.batch_shape, kernel.batch_shape)
self.assertEqual(feature_map.num_outputs, weight.shape[-2])
with patch.object(feature_map, "output_transform", new=None):
self.assertEqual(feature_map.num_outputs, weight.shape[-2])
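# Illustrative note (added, not part of the original tests): a
# KernelEvaluationMap evaluates phi(x) = k(x, points), checked above against
# kernel(X0, X1), while a KernelFeatureMap computes
# output_transform(input_transform(x) @ weight^T + bias), which the
# MagicMock-based assertions verify call by call.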
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from math import ceil
from unittest.mock import patch
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.sampling.pathwise.features import generators
from botorch.sampling.pathwise.features.generators import gen_kernel_features
from botorch.sampling.pathwise.features.maps import FeatureMap
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel
from gpytorch.kernels.kernel import Kernel
from torch import Size, Tensor
class TestFeatureGenerators(BotorchTestCase):
def setUp(self, seed: int = 0) -> None:
super().setUp()
self.kernels = []
self.num_inputs = d = 2
self.num_features = 4096
for kernel in (
MaternKernel(nu=0.5, batch_shape=Size([])),
MaternKernel(nu=1.5, ard_num_dims=1, active_dims=[0]),
ScaleKernel(MaternKernel(nu=2.5, ard_num_dims=d, batch_shape=Size([2]))),
ScaleKernel(
RBFKernel(ard_num_dims=1, batch_shape=Size([2, 2])), active_dims=[1]
),
):
kernel.to(
dtype=torch.float32 if (seed % 2) else torch.float64, device=self.device
)
with torch.random.fork_rng():
torch.manual_seed(seed)
kern = kernel.base_kernel if isinstance(kernel, ScaleKernel) else kernel
kern.lengthscale = 0.1 + 0.2 * torch.rand_like(kern.lengthscale)
seed += 1
self.kernels.append(kernel)
def test_gen_kernel_features(self):
for seed, kernel in enumerate(self.kernels):
with torch.random.fork_rng():
torch.random.manual_seed(seed)
feature_map = gen_kernel_features(
kernel=kernel,
num_inputs=self.num_inputs,
num_outputs=self.num_features,
)
n = 4
m = ceil(n * kernel.batch_shape.numel() ** -0.5)
for input_batch_shape in ((n**2,), (m, *kernel.batch_shape, m)):
X = torch.rand(
(*input_batch_shape, self.num_inputs),
device=kernel.device,
dtype=kernel.dtype,
)
self._test_gen_kernel_features(kernel, feature_map, X)
def _test_gen_kernel_features(
self, kernel: Kernel, feature_map: FeatureMap, X: Tensor, atol: float = 3.0
):
with self.subTest("test_initialization"):
self.assertEqual(feature_map.weight.dtype, kernel.dtype)
self.assertEqual(feature_map.weight.device, kernel.device)
self.assertEqual(
feature_map.weight.shape[-1],
self.num_inputs
if kernel.active_dims is None
else len(kernel.active_dims),
)
with self.subTest("test_covariance"):
features = feature_map(X)
test_shape = torch.broadcast_shapes(
(*X.shape[:-1], self.num_features), kernel.batch_shape + (1, 1)
)
self.assertEqual(features.shape, test_shape)
K0 = features @ features.transpose(-2, -1)
K1 = kernel(X).to_dense()
self.assertTrue(
K0.allclose(K1, atol=atol * self.num_features**-0.5, rtol=0)
)
# Test passing the wrong dimensional shape to `weight_generator`
with self.assertRaisesRegex(UnsupportedError, "2-dim"), patch.object(
generators,
"_gen_fourier_features",
side_effect=lambda **kwargs: kwargs["weight_generator"](Size([])),
):
gen_kernel_features(
kernel=kernel,
num_inputs=self.num_inputs,
num_outputs=self.num_features,
)
# Test requesting an odd number of features
with self.assertRaisesRegex(UnsupportedError, "Expected an even number"):
gen_kernel_features(
kernel=kernel, num_inputs=self.num_inputs, num_outputs=3
)
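# Illustrative sketch (added for clarity, not part of the original suite): the
# property exercised above is the random Fourier feature identity
# k(x, x') ~= phi(x) @ phi(x'), with Monte Carlo error of order
# num_outputs ** -0.5. A minimal standalone demonstration, assuming only
# `gen_kernel_features` and a bare stationary kernel as used in setUp:
demo_kernel = MaternKernel(nu=2.5).to(torch.float64)
demo_map = gen_kernel_features(kernel=demo_kernel, num_inputs=2, num_outputs=4096)
X_demo = torch.rand(8, 2, dtype=torch.float64)
K_approx = demo_map(X_demo) @ demo_map(X_demo).transpose(-2, -1)
K_exact = demo_kernel(X_demo).to_dense()
# The two Gram matrices should agree up to Monte Carlo noise.
print((K_approx - K_exact).abs().max())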
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import numpy as np
import torch
from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError
from botorch.optim.parameter_constraints import (
_arrayify,
_generate_unfixed_lin_constraints,
_generate_unfixed_nonlin_constraints,
_make_linear_constraints,
eval_lin_constraint,
lin_constraint_jac,
make_scipy_bounds,
make_scipy_linear_constraints,
)
from botorch.utils.testing import BotorchTestCase
from scipy.optimize import Bounds
class TestParameterConstraints(BotorchTestCase):
def test_arrayify(self):
for dtype in (torch.float, torch.double, torch.int, torch.long):
t = torch.tensor([[1, 2], [3, 4]], device=self.device).type(dtype)
t_np = _arrayify(t)
self.assertIsInstance(t_np, np.ndarray)
self.assertTrue(t_np.dtype == np.float64)
def test_eval_lin_constraint(self):
res = eval_lin_constraint(
flat_idxr=[0, 2],
coeffs=np.array([1.0, -2.0]),
rhs=0.5,
x=np.array([1.0, 2.0, 3.0]),
)
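# worked arithmetic: 1.0 * x[0] + (-2.0) * x[2] - rhs = 1.0 - 6.0 - 0.5 = -5.5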
self.assertEqual(res, -5.5)
def test_lin_constraint_jac(self):
dummy_array = np.array([1.0])
res = lin_constraint_jac(
dummy_array, flat_idxr=[0, 2], coeffs=np.array([1.0, -2.0]), n=3
)
self.assertTrue(all(np.equal(res, np.array([1.0, 0.0, -2.0]))))
def test_make_linear_constraints(self):
# equality constraints, 1d indices
indices = torch.tensor([1, 2], dtype=torch.long, device=self.device)
for dtype, shapeX in product(
(torch.float, torch.double), (torch.Size([3, 2, 4]), torch.Size([2, 4]))
):
coefficients = torch.tensor([1.0, 2.0], dtype=dtype, device=self.device)
constraints = _make_linear_constraints(
indices=indices,
coefficients=coefficients,
rhs=1.0,
shapeX=shapeX,
eq=True,
)
self.assertTrue(
all(set(c.keys()) == {"fun", "jac", "type"} for c in constraints)
)
self.assertTrue(all(c["type"] == "eq" for c in constraints))
self.assertEqual(len(constraints), shapeX[:-1].numel())
x = np.random.rand(shapeX.numel())
self.assertEqual(constraints[0]["fun"](x), x[1] + 2 * x[2] - 1.0)
jac_exp = np.zeros(shapeX.numel())
jac_exp[[1, 2]] = [1, 2]
self.assertTrue(np.allclose(constraints[0]["jac"](x), jac_exp))
self.assertEqual(constraints[-1]["fun"](x), x[-3] + 2 * x[-2] - 1.0)
jac_exp = np.zeros(shapeX.numel())
jac_exp[[-3, -2]] = [1, 2]
self.assertTrue(np.allclose(constraints[-1]["jac"](x), jac_exp))
# inequality constraints, 1d indices
for shapeX in [torch.Size([1, 1, 2]), torch.Size([1, 2])]:
lcs = _make_linear_constraints(
indices=torch.tensor([1]),
coefficients=torch.tensor([1.0]),
rhs=1.0,
shapeX=shapeX,
eq=False,
)
self.assertEqual(len(lcs), 1)
self.assertEqual(lcs[0]["type"], "ineq")
# constraint across q-batch (2d indices), equality constraint
indices = torch.tensor([[0, 3], [1, 2]], dtype=torch.long, device=self.device)
for dtype, shapeX in product(
(torch.float, torch.double), (torch.Size([3, 2, 4]), torch.Size([2, 4]))
):
q, d = shapeX[-2:]
b = 1 if len(shapeX) == 2 else shapeX[0]
coefficients = torch.tensor([1.0, 2.0], dtype=dtype, device=self.device)
constraints = _make_linear_constraints(
indices=indices,
coefficients=coefficients,
rhs=1.0,
shapeX=shapeX,
eq=True,
)
self.assertTrue(
all(set(c.keys()) == {"fun", "jac", "type"} for c in constraints)
)
self.assertTrue(all(c["type"] == "eq" for c in constraints))
self.assertEqual(len(constraints), b)
x = np.random.rand(shapeX.numel())
offsets = [q * d, d]
# flat-index rule: element [i, j, k] lives at i * offsets[0] + j * offsets[1] + k
for i in range(b):
pos1 = i * offsets[0] + 3
pos2 = i * offsets[0] + 1 * offsets[1] + 2
self.assertEqual(constraints[i]["fun"](x), x[pos1] + 2 * x[pos2] - 1.0)
jac_exp = np.zeros(shapeX.numel())
jac_exp[[pos1, pos2]] = [1, 2]
self.assertTrue(np.allclose(constraints[i]["jac"](x), jac_exp))
# make sure error is raised for scalar tensors
with self.assertRaises(ValueError):
constraints = _make_linear_constraints(
indices=torch.tensor(0),
coefficients=torch.tensor([1.0]),
rhs=1.0,
shapeX=torch.Size([1, 1, 2]),
eq=False,
)
# test that len(shapeX) < 2 raises an error
with self.assertRaises(UnsupportedError):
_make_linear_constraints(
shapeX=torch.Size([2]),
indices=indices,
coefficients=coefficients,
rhs=0.0,
)
def test_make_scipy_linear_constraints(self):
for shapeX in [torch.Size([2, 1, 4]), torch.Size([1, 4])]:
b = shapeX[0] if len(shapeX) == 3 else 1
res = make_scipy_linear_constraints(
shapeX=shapeX, inequality_constraints=None, equality_constraints=None
)
self.assertEqual(res, [])
indices = torch.tensor([0, 1], dtype=torch.long, device=self.device)
coefficients = torch.tensor([1.5, -1.0], device=self.device)
# both inequality and equality constraints
cs = make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
self.assertEqual(len(cs), 2 * b)
self.assertTrue({c["type"] for c in cs} == {"ineq", "eq"})
# inequality only
cs = make_scipy_linear_constraints(
shapeX=shapeX, inequality_constraints=[(indices, coefficients, 1.0)]
)
self.assertEqual(len(cs), b)
self.assertTrue(all(c["type"] == "ineq" for c in cs))
# equality only
cs = make_scipy_linear_constraints(
shapeX=shapeX, equality_constraints=[(indices, coefficients, 1.0)]
)
self.assertEqual(len(cs), b)
self.assertTrue(all(c["type"] == "eq" for c in cs))
# test that 2-dim indices work properly
indices = indices.unsqueeze(0)
cs = make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
self.assertEqual(len(cs), 2 * b)
self.assertTrue({c["type"] for c in cs} == {"ineq", "eq"})
def test_make_scipy_linear_constraints_unsupported(self):
shapeX = torch.Size([2, 1, 4])
coefficients = torch.tensor([1.5, -1.0], device=self.device)
# test that >2-dim indices raises an UnsupportedError
indices = torch.tensor([0, 1], dtype=torch.long, device=self.device)
indices = indices.unsqueeze(0).unsqueeze(0)
with self.assertRaises(UnsupportedError):
make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
# test that out of bounds index raises an error
indices = torch.tensor([0, 4], dtype=torch.long, device=self.device)
with self.assertRaises(RuntimeError):
make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
# test that two-d index out-of-bounds raises an error
# q out of bounds
indices = torch.tensor([[0, 0], [1, 0]], dtype=torch.long, device=self.device)
with self.assertRaises(RuntimeError):
make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
# d out of bounds
indices = torch.tensor([[0, 0], [0, 4]], dtype=torch.long, device=self.device)
with self.assertRaises(RuntimeError):
make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=[(indices, coefficients, 1.0)],
equality_constraints=[(indices, coefficients, 1.0)],
)
def test_generate_unfixed_nonlin_constraints(self):
def nlc1(x):
return 4 - x.sum(dim=-1)
def nlc2(x):
return x[..., 0] - 1
# first test with one constraint
(new_nlc1,) = _generate_unfixed_nonlin_constraints(
constraints=[nlc1], fixed_features={1: 2.0}, dimension=3
)
self.assertAllClose(
nlc1(torch.tensor([[4.0, 2.0, 2.0]], device=self.device)),
new_nlc1(torch.tensor([[4.0, 2.0]], device=self.device)),
)
# test with several constraints
constraints = [nlc1, nlc2]
new_constraints = _generate_unfixed_nonlin_constraints(
constraints=constraints, fixed_features={1: 2.0}, dimension=3
)
for nlc, new_nlc in zip(constraints, new_constraints):
self.assertAllClose(
nlc(torch.tensor([[4.0, 2.0, 2.0]], device=self.device)),
new_nlc(torch.tensor([[4.0, 2.0]], device=self.device)),
)
# test with several constraints and two fixes
constraints = [nlc1, nlc2]
new_constraints = _generate_unfixed_nonlin_constraints(
constraints=constraints, fixed_features={1: 2.0, 2: 1.0}, dimension=3
)
for nlc, new_nlc in zip(constraints, new_constraints):
self.assertAllClose(
nlc(torch.tensor([[4.0, 2.0, 1.0]], device=self.device)),
new_nlc(torch.tensor([[4.0]], device=self.device)),
)
def test_generate_unfixed_lin_constraints(self):
# Case 1: some fixed features are in the indices
indices = [
torch.arange(4, device=self.device),
torch.arange(2, -1, -1, device=self.device),
]
coefficients = [
torch.tensor([-0.1, 0.2, -0.3, 0.4], device=self.device),
torch.tensor([-0.1, 0.3, -0.5], device=self.device),
]
rhs = [0.5, 0.5]
dimension = 4
fixed_features = {1: 1, 3: 2}
new_constraints = _generate_unfixed_lin_constraints(
constraints=list(zip(indices, coefficients, rhs)),
fixed_features=fixed_features,
dimension=dimension,
eq=False,
)
for i, (new_indices, new_coefficients, new_rhs) in enumerate(new_constraints):
if i % 2 == 0: # first list of indices is [0, 1, 2, 3]
self.assertTrue(
torch.equal(new_indices, torch.arange(2, device=self.device))
)
else: # second list of indices is [2, 1, 0]
self.assertTrue(
torch.equal(
new_indices, torch.arange(1, -1, -1, device=self.device)
)
)
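# Fixing feature j at value v drops index j from the constraint and moves
# v * coefficient_j off the left-hand side (the rhs becomes
# rhs - v * coefficient_j); the loop below re-derives that expectation.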
mask = [True] * indices[i].shape[0]
subtract = 0
for j, old_idx in enumerate(indices[i]):
if old_idx.item() in fixed_features:
mask[j] = False
subtract += fixed_features[old_idx.item()] * coefficients[i][j]
self.assertTrue(torch.equal(new_coefficients, coefficients[i][mask]))
self.assertEqual(new_rhs, rhs[i] - subtract)
# Case 2: none of the fixed features are in the indices, but the indices must be renumbered
indices = [
torch.arange(2, 6, device=self.device),
torch.arange(5, 2, -1, device=self.device),
]
fixed_features = {0: -10, 1: 10}
dimension = 6
new_constraints = _generate_unfixed_lin_constraints(
constraints=list(zip(indices, coefficients, rhs)),
fixed_features=fixed_features,
dimension=dimension,
eq=False,
)
for i, (new_indices, new_coefficients, new_rhs) in enumerate(new_constraints):
if i % 2 == 0: # first list of indices is [2, 3, 4, 5]
self.assertTrue(
torch.equal(new_indices, torch.arange(4, device=self.device))
)
else: # second list of indices is [5, 4, 3]
self.assertTrue(
torch.equal(new_indices, torch.arange(3, 0, -1, device=self.device))
)
self.assertTrue(torch.equal(new_coefficients, coefficients[i]))
self.assertEqual(new_rhs, rhs[i])
# Case 3: all fixed features are in the indices
indices = [
torch.arange(4, device=self.device),
torch.arange(2, -1, -1, device=self.device),
]
# Case 3a: problem is feasible
dimension = 4
fixed_features = {0: 2, 1: 1, 2: 1, 3: 2}
for eq in [False, True]:
new_constraints = _generate_unfixed_lin_constraints(
constraints=[(indices[0], coefficients[0], rhs[0])],
fixed_features=fixed_features,
dimension=dimension,
eq=eq,
)
self.assertEqual(new_constraints, [])
# Case 3b: problem is infeasible
for eq in [False, True]:
prefix = "Ineq" if not eq else "Eq"
with self.assertRaisesRegex(CandidateGenerationError, prefix):
new_constraints = _generate_unfixed_lin_constraints(
constraints=[(indices[1], coefficients[1], rhs[1])],
fixed_features=fixed_features,
dimension=dimension,
eq=eq,
)
class TestMakeScipyBounds(BotorchTestCase):
def test_make_scipy_bounds(self):
X = torch.zeros(3, 1, 2)
# both None
self.assertIsNone(make_scipy_bounds(X=X, lower_bounds=None, upper_bounds=None))
# lower None
upper_bounds = torch.ones(2)
bounds = make_scipy_bounds(X=X, lower_bounds=None, upper_bounds=upper_bounds)
self.assertIsInstance(bounds, Bounds)
self.assertTrue(
np.all(np.equal(bounds.lb, np.full((3, 1, 2), float("-inf")).flatten()))
)
self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
# upper None
lower_bounds = torch.zeros(2)
bounds = make_scipy_bounds(X=X, lower_bounds=lower_bounds, upper_bounds=None)
self.assertIsInstance(bounds, Bounds)
self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
self.assertTrue(
np.all(np.equal(bounds.ub, np.full((3, 1, 2), float("inf")).flatten()))
)
# floats
bounds = make_scipy_bounds(X=X, lower_bounds=0.0, upper_bounds=1.0)
self.assertIsInstance(bounds, Bounds)
self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
# 1-d tensors
bounds = make_scipy_bounds(
X=X, lower_bounds=lower_bounds, upper_bounds=upper_bounds
)
self.assertIsInstance(bounds, Bounds)
self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest.mock as mock
import torch
from botorch.acquisition import PosteriorMean
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.models import GenericDeterministicModel
from botorch.optim.homotopy import (
FixedHomotopySchedule,
Homotopy,
HomotopyParameter,
LinearHomotopySchedule,
LogLinearHomotopySchedule,
)
from botorch.optim.optimize_homotopy import optimize_acqf_homotopy, prune_candidates
from botorch.utils.testing import BotorchTestCase
from torch.nn import Parameter
PRUNE_CANDIDATES_PATH = f"{prune_candidates.__module__}"
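# module path where `prune_candidates` is defined; used below as a mock.patch target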
class TestHomotopy(BotorchTestCase):
def _test_schedule(self, schedule, values):
self.assertEqual(schedule.num_steps, len(values))
self.assertEqual(schedule.value, values[0])
self.assertFalse(schedule.should_stop)
for i in range(len(values) - 1):
schedule.step()
self.assertEqual(schedule.value, values[i + 1])
self.assertFalse(schedule.should_stop)
schedule.step()
self.assertTrue(schedule.should_stop)
schedule.restart()
self.assertEqual(schedule.value, values[0])
self.assertFalse(schedule.should_stop)
def test_fixed_schedule(self):
values = [1, 3, 7]
fixed = FixedHomotopySchedule(values=values)
self.assertEqual(fixed._values, values)
self._test_schedule(schedule=fixed, values=values)
def test_linear_schedule(self):
values = [1, 2, 3, 4, 5]
linear = LinearHomotopySchedule(start=1, end=5, num_steps=5)
self.assertEqual(linear._values, values)
self._test_schedule(schedule=linear, values=values)
def test_log_linear_schedule(self):
values = [0.01, 0.1, 1, 10, 100]
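# a log-linear schedule interpolates uniformly in log-space: these values
# equal 10 ** torch.linspace(-2, 2, 5)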
linear = LogLinearHomotopySchedule(start=0.01, end=100, num_steps=5)
self.assertEqual(linear._values, values)
self._test_schedule(schedule=linear, values=values)
def test_homotopy(self):
tkwargs = {"device": self.device, "dtype": torch.double}
p1 = Parameter(-2 * torch.ones(1, **tkwargs))
v1 = [1, 2, 3, 4, 5]
p2 = -3 * torch.ones(1, **tkwargs)
v2 = [0.01, 0.1, 1, 10, 100]
callback = mock.Mock()
homotopy_parameters = [
HomotopyParameter(
parameter=p1,
schedule=LinearHomotopySchedule(start=1, end=5, num_steps=5),
),
HomotopyParameter(
parameter=p2,
schedule=LogLinearHomotopySchedule(start=0.01, end=100, num_steps=5),
),
]
homotopy = Homotopy(
homotopy_parameters=homotopy_parameters, callbacks=[callback]
)
self.assertEqual(homotopy._original_values, [-2, -3])
self.assertEqual(homotopy._homotopy_parameters, homotopy_parameters)
self.assertEqual(homotopy._callbacks, [callback])
self.assertEqual(
[h.parameter.item() for h in homotopy._homotopy_parameters], [v1[0], v2[0]]
)
for i in range(4):
homotopy.step()
self.assertEqual(
[h.parameter.item() for h in homotopy._homotopy_parameters],
[v1[i + 1], v2[i + 1]],
)
self.assertFalse(homotopy.should_stop)
homotopy.step()
self.assertTrue(homotopy.should_stop)
# Restart the schedules
homotopy.restart()
self.assertEqual(
[h.parameter.item() for h in homotopy._homotopy_parameters], [v1[0], v2[0]]
)
# Reset the parameters to their original values
homotopy.reset()
self.assertEqual(
[h.parameter.item() for h in homotopy._homotopy_parameters], [-2, -3]
)
# Expect the call count to be 8: init (1), step (5), restart (1), reset (1).
self.assertEqual(callback.call_count, 8)
def test_optimize_acqf_homotopy(self):
tkwargs = {"device": self.device, "dtype": torch.double}
p = Parameter(-2 * torch.ones(1, **tkwargs))
hp = HomotopyParameter(
parameter=p,
schedule=LinearHomotopySchedule(start=4, end=0, num_steps=5),
)
model = GenericDeterministicModel(f=lambda x: 5 - (x - p) ** 2)
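# The posterior mean 5 - (x - p) ** 2 peaks at x = p; as the homotopy drives
# p from 4 to 0, the rounded maximizer ends at x = 0 with value 5.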
acqf = PosteriorMean(model=model)
candidate, acqf_val = optimize_acqf_homotopy(
q=1,
acq_function=acqf,
bounds=torch.tensor([[-10], [5]]).to(**tkwargs),
homotopy=Homotopy(homotopy_parameters=[hp]),
num_restarts=2,
raw_samples=16,
post_processing_func=lambda x: x.round(),
)
self.assertEqual(candidate, torch.zeros(1, **tkwargs))
self.assertEqual(acqf_val, 5 * torch.ones(1, **tkwargs))
# test fixed feature
fixed_features = {0: 1.0}
model = GenericDeterministicModel(
f=lambda x: 5 - (x - p).sum(dim=-1, keepdims=True) ** 2
)
acqf = PosteriorMean(model=model)
candidate, acqf_val = optimize_acqf_homotopy(
q=1,
acq_function=acqf,
bounds=torch.tensor([[-10, -10], [5, 5]]).to(**tkwargs),
homotopy=Homotopy(homotopy_parameters=[hp]),
num_restarts=2,
raw_samples=16,
fixed_features=fixed_features,
)
self.assertEqual(candidate[0, 0], torch.tensor(1, **tkwargs))
# With q > 1.
acqf = qExpectedImprovement(model=model, best_f=0.0)
candidate, acqf_val = optimize_acqf_homotopy(
q=3,
acq_function=acqf,
bounds=torch.tensor([[-10, -10], [5, 5]]).to(**tkwargs),
homotopy=Homotopy(homotopy_parameters=[hp]),
num_restarts=2,
raw_samples=16,
fixed_features=fixed_features,
)
self.assertEqual(candidate.shape, torch.Size([3, 2]))
self.assertEqual(acqf_val.shape, torch.Size([3]))
def test_prune_candidates(self):
tkwargs = {"device": self.device, "dtype": torch.double}
# no pruning
X = torch.rand(6, 3, **tkwargs)
vals = X.sum(dim=-1)
X_pruned = prune_candidates(candidates=X, acq_values=vals, prune_tolerance=1e-6)
self.assertTrue((X[vals.argsort(descending=True), :] == X_pruned).all())
# pruning
X[1, :] = X[0, :] + 1e-10
X[4, :] = X[2, :] - 1e-10
vals = torch.tensor([1, 6, 3, 4, 2, 5], **tkwargs)
X_pruned = prune_candidates(candidates=X, acq_values=vals, prune_tolerance=1e-6)
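# descending acq order is [1, 5, 3, 2, 4, 0]; rows 4 and 0 duplicate rows 2
# and 1 within tolerance and are pruned, leaving rows [1, 5, 3, 2]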
self.assertTrue((X[[1, 5, 3, 2]] == X_pruned).all())
# invalid shapes
with self.assertRaisesRegex(
ValueError, "`candidates` must be of size `n x d`."
):
prune_candidates(
candidates=torch.zeros(3, 2, 1),
acq_values=torch.zeros(2, 1),
prune_tolerance=1e-6,
)
with self.assertRaisesRegex(ValueError, "`acq_values` must be of size `n`."):
prune_candidates(
candidates=torch.zeros(3, 2),
acq_values=torch.zeros(3, 1),
prune_tolerance=1e-6,
)
with self.assertRaisesRegex(ValueError, "`prune_tolerance` must be >= 0."):
prune_candidates(
candidates=torch.zeros(3, 2),
acq_values=torch.zeros(3),
prune_tolerance=-1.2345,
)
@mock.patch(f"{PRUNE_CANDIDATES_PATH}.prune_candidates", wraps=prune_candidates)
def test_optimize_acqf_homotopy_pruning(self, prune_candidates_mock):
tkwargs = {"device": self.device, "dtype": torch.double}
p = Parameter(torch.zeros(1, **tkwargs))
hp = HomotopyParameter(
parameter=p,
schedule=LinearHomotopySchedule(start=4, end=0, num_steps=5),
)
model = GenericDeterministicModel(f=lambda x: 5 - (x - p) ** 2)
acqf = PosteriorMean(model=model)
candidate, acqf_val = optimize_acqf_homotopy(
q=1,
acq_function=acqf,
bounds=torch.tensor([[-10], [5]]).to(**tkwargs),
homotopy=Homotopy(homotopy_parameters=[hp]),
num_restarts=4,
raw_samples=16,
post_processing_func=lambda x: x.round(),
)
# First time we expect to call `prune_candidates` with 4 candidates
self.assertEqual(
prune_candidates_mock.call_args_list[0][1]["candidates"].shape,
torch.Size([4, 1]),
)
for i in range(1, 5): # The paths should have been pruned to just one path
self.assertEqual(
prune_candidates_mock.call_args_list[i][1]["candidates"].shape,
torch.Size([1, 1]),
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
from inspect import signature
from itertools import product
from unittest import mock
import numpy as np
import torch
from botorch.acquisition.acquisition import (
AcquisitionFunction,
OneShotAcquisitionFunction,
)
from botorch.exceptions import InputDataError, UnsupportedError
from botorch.generation.gen import gen_candidates_scipy, gen_candidates_torch
from botorch.optim.optimize import (
_filter_infeasible,
_filter_invalid,
_gen_batch_initial_conditions_local_search,
_generate_neighbors,
optimize_acqf,
optimize_acqf_cyclic,
optimize_acqf_discrete,
optimize_acqf_discrete_local_search,
optimize_acqf_list,
optimize_acqf_mixed,
)
from botorch.optim.parameter_constraints import (
_arrayify,
_make_f_and_grad_nonlinear_inequality_constraints,
)
from botorch.optim.utils.timeout import minimize_with_timeout
from botorch.utils.testing import BotorchTestCase, MockAcquisitionFunction
from scipy.optimize import OptimizeResult
from torch import Tensor
class MockOneShotAcquisitionFunction(
MockAcquisitionFunction, OneShotAcquisitionFunction
):
def __init__(self, num_fantasies=2):
r"""
Args:
num_fantasies: The number of fantasies.
"""
super().__init__()
self.num_fantasies = num_fantasies
def get_augmented_q_batch_size(self, q: int) -> int:
return q + self.num_fantasies
def extract_candidates(self, X_full: Tensor) -> Tensor:
return X_full[..., : -self.num_fantasies, :]
def forward(self, X):
pass
class SquaredAcquisitionFunction(AcquisitionFunction):
def __init__(self, model=None): # noqa: D107
super().__init__(model=model)
def forward(self, X):
return torch.linalg.norm(X, dim=-1).squeeze(-1)
class MockOneShotEvaluateAcquisitionFunction(MockOneShotAcquisitionFunction):
def evaluate(self, X: Tensor, bounds: Tensor):
return X.sum()
class SinOneOverXAcqusitionFunction(MockAcquisitionFunction):
"""
Acquisition function for sin(1/x).
This is useful for testing because it behaves pathologically only near
zero, so optimization is likely to fail when initializing near zero but
not elsewhere.
"""
def __call__(self, X):
return torch.sin(1 / X[..., 0].max(dim=-1).values)
def rounding_func(X: Tensor) -> Tensor:
batch_shape, d = X.shape[:-1], X.shape[-1]
X_round = torch.stack([x.round() for x in X.view(-1, d)])
return X_round.view(*batch_shape, d)
class TestOptimizeAcqf(BotorchTestCase):
@mock.patch("botorch.generation.gen.gen_candidates_torch")
@mock.patch("botorch.optim.optimize.gen_batch_initial_conditions")
@mock.patch("botorch.optim.optimize.gen_candidates_scipy")
@mock.patch("botorch.optim.utils.common.signature")
def test_optimize_acqf_joint(
self,
mock_signature,
mock_gen_candidates_scipy,
mock_gen_batch_initial_conditions,
mock_gen_candidates_torch,
):
q = 3
num_restarts = 2
raw_samples = 10
options = {}
mock_acq_function = MockAcquisitionFunction()
cnt = 0
for dtype in (torch.float, torch.double):
for mock_gen_candidates in (
mock_gen_candidates_scipy,
mock_gen_candidates_torch,
):
if mock_gen_candidates == mock_gen_candidates_torch:
mock_signature.return_value = signature(gen_candidates_torch)
else:
mock_signature.return_value = signature(gen_candidates_scipy)
mock_gen_batch_initial_conditions.return_value = torch.zeros(
num_restarts, q, 3, device=self.device, dtype=dtype
)
base_cand = torch.arange(3, device=self.device, dtype=dtype).expand(
1, q, 3
)
mock_candidates = torch.cat(
[i * base_cand for i in range(num_restarts)], dim=0
)
mock_acq_values = num_restarts - torch.arange(
num_restarts, device=self.device, dtype=dtype
)
mock_gen_candidates.return_value = (mock_candidates, mock_acq_values)
bounds = torch.stack(
[
torch.zeros(3, device=self.device, dtype=dtype),
4 * torch.ones(3, device=self.device, dtype=dtype),
]
)
mock_gen_candidates.reset_mock()
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
gen_candidates=mock_gen_candidates,
)
mock_gen_candidates.assert_called_once()
self.assertTrue(torch.equal(candidates, mock_candidates[0]))
self.assertTrue(torch.equal(acq_vals, mock_acq_values[0]))
cnt += 1
self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
# test generation with provided initial conditions
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
return_best_only=False,
batch_initial_conditions=torch.zeros(
num_restarts, q, 3, device=self.device, dtype=dtype
),
gen_candidates=mock_gen_candidates,
)
self.assertTrue(torch.equal(candidates, mock_candidates))
self.assertTrue(torch.equal(acq_vals, mock_acq_values))
self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
# test fixed features
fixed_features = {0: 0.1}
mock_candidates[:, 0] = 0.1
mock_gen_candidates.return_value = (mock_candidates, mock_acq_values)
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
fixed_features=fixed_features,
gen_candidates=mock_gen_candidates,
)
self.assertEqual(
mock_gen_candidates.call_args[1]["fixed_features"], fixed_features
)
self.assertTrue(torch.equal(candidates, mock_candidates[0]))
cnt += 1
self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
# test trivial case when all features are fixed
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
fixed_features={0: 0.1, 1: 0.2, 2: 0.3},
gen_candidates=mock_gen_candidates,
)
self.assertTrue(
torch.equal(
candidates,
torch.tensor(
[0.1, 0.2, 0.3], device=self.device, dtype=dtype
).expand(3, 3),
)
)
self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
# test OneShotAcquisitionFunction
mock_acq_function = MockOneShotAcquisitionFunction()
candidates, acq_vals = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
gen_candidates=mock_gen_candidates,
)
self.assertTrue(
torch.equal(
candidates, mock_acq_function.extract_candidates(mock_candidates[0])
)
)
self.assertTrue(torch.equal(acq_vals, mock_acq_values[0]))
# verify ValueError
with self.assertRaisesRegex(ValueError, "Must specify"):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=bounds,
q=q,
num_restarts=num_restarts,
options=options,
gen_candidates=mock_gen_candidates,
)
@mock.patch("botorch.optim.optimize.gen_batch_initial_conditions")
@mock.patch("botorch.optim.optimize.gen_candidates_scipy")
@mock.patch("botorch.generation.gen.gen_candidates_torch")
@mock.patch("botorch.optim.utils.common.signature")
def test_optimize_acqf_sequential(
self,
mock_signature,
mock_gen_candidates_torch,
mock_gen_candidates_scipy,
mock_gen_batch_initial_conditions,
timeout_sec=None,
):
for mock_gen_candidates, timeout_sec in product(
[mock_gen_candidates_scipy, mock_gen_candidates_torch], [None, 1e-4]
):
if mock_gen_candidates == mock_gen_candidates_torch:
mock_signature.return_value = signature(gen_candidates_torch)
else:
mock_signature.return_value = signature(gen_candidates_scipy)
mock_gen_candidates.__name__ = "gen_candidates"
q = 3
num_restarts = 2
raw_samples = 10
options = {}
for dtype, use_rounding in ((torch.float, True), (torch.double, False)):
mock_acq_function = MockAcquisitionFunction()
mock_gen_batch_initial_conditions.side_effect = [
torch.zeros(num_restarts, 1, 3, device=self.device, dtype=dtype)
for _ in range(q)
]
gcs_return_vals = [
(
torch.tensor(
[[[1.1, 2.1, 3.1]]], device=self.device, dtype=dtype
),
torch.tensor([i], device=self.device, dtype=dtype),
)
for i in range(q)
]
mock_gen_candidates.side_effect = gcs_return_vals
bounds = torch.stack(
[
torch.zeros(3, device=self.device, dtype=dtype),
4 * torch.ones(3, device=self.device, dtype=dtype),
]
)
if mock_gen_candidates is mock_gen_candidates_scipy:
# x[2] * 4 >= 5
inequality_constraints = [
(torch.tensor([2]), torch.tensor([4]), torch.tensor(5))
]
equality_constraints = [
(torch.tensor([0, 1]), torch.ones(2), torch.tensor(4.0))
]
# gen_candidates_torch does not support constraints
else:
inequality_constraints = None
equality_constraints = None
mock_gen_candidates.reset_mock()
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
post_processing_func=rounding_func if use_rounding else None,
sequential=True,
timeout_sec=timeout_sec,
gen_candidates=mock_gen_candidates,
)
self.assertEqual(mock_gen_candidates.call_count, q)
base_candidates = torch.cat(
[cands[0] for cands, _ in gcs_return_vals], dim=-2
)
if use_rounding:
expected_candidates = base_candidates.round()
expected_val = mock_acq_function(expected_candidates.unsqueeze(-2))
else:
expected_candidates = base_candidates
expected_val = torch.cat([acqval for _, acqval in gcs_return_vals])
self.assertTrue(torch.equal(candidates, expected_candidates))
self.assertTrue(torch.equal(acq_value, expected_val))
# verify error when using a OneShotAcquisitionFunction
with self.assertRaises(NotImplementedError):
optimize_acqf(
acq_function=mock.Mock(spec=OneShotAcquisitionFunction),
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
sequential=True,
)
# Verify error for passing in incorrect bounds
with self.assertRaisesRegex(
ValueError,
"bounds should be a `2 x d` tensor",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds.T,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
sequential=True,
)
# Verify error when using sequential=True in
# conjunction with user-supplied batch_initial_conditions
with self.assertRaisesRegex(
UnsupportedError,
"`batch_initial_conditions` is not supported for sequential "
"optimization. Either avoid specifying `batch_initial_conditions` "
"to use the custom initializer or use the `ic_generator` kwarg to "
"generate initial conditions for the case of "
"nonlinear inequality constraints.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
batch_initial_conditions=torch.zeros((1, 1, 3)),
sequential=True,
)
@mock.patch(
"botorch.generation.gen.minimize_with_timeout",
wraps=minimize_with_timeout,
)
@mock.patch("botorch.optim.utils.timeout.optimize.minimize")
def test_optimize_acqf_timeout(
self, mock_minimize, mock_minimize_with_timeout
) -> None:
"""
Check that the right value of `timeout_sec` is passed to `minimize_with_timeout`
"""
num_restarts = 2
q = 3
dim = 4
for timeout_sec, sequential, expected_call_count, expected_timeout_arg in [
(1.0, True, num_restarts * q, 1.0 / (num_restarts * q)),
(0.0, True, num_restarts * q, 0.0),
(1.0, False, num_restarts, 1.0 / num_restarts),
(0.0, False, num_restarts, 0.0),
]:
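# with sequential=True the budget is split across num_restarts * q scipy
# calls; with sequential=False, across num_restarts calls (batch_limit is 1)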
with self.subTest(
timeout_sec=timeout_sec,
sequential=sequential,
expected_call_count=expected_call_count,
expected_timeout_arg=expected_timeout_arg,
):
mock_minimize.return_value = OptimizeResult(
{
"x": np.zeros(dim if sequential else dim * q),
"success": True,
"status": 0,
},
)
optimize_acqf(
timeout_sec=timeout_sec,
q=q,
sequential=sequential,
num_restarts=num_restarts,
acq_function=SinOneOverXAcqusitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
raw_samples=7,
options={"batch_limit": 1},
)
self.assertEqual(
mock_minimize_with_timeout.call_count, expected_call_count
)
timeout_times = torch.tensor(
[
elt.kwargs["timeout_sec"]
for elt in mock_minimize_with_timeout.mock_calls
]
)
self.assertGreaterEqual(timeout_times.min(), 0)
self.assertAllClose(
timeout_times,
torch.full_like(timeout_times, expected_timeout_arg),
rtol=float("inf"),
atol=1e-8,
)
mock_minimize_with_timeout.reset_mock()
def test_optimize_acqf_sequential_notimplemented(self):
# Sequential acquisition function optimization is only supported
# when return_best_only=True
with self.assertRaises(NotImplementedError):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
q=3,
num_restarts=2,
raw_samples=10,
return_best_only=False,
sequential=True,
)
def test_optimize_acqf_sequential_q_constraint_notimplemented(self):
# Sequential acquisition function optimization is not supported with
# q-batch constraints (2-d indices)
with self.assertRaises(UnsupportedError):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
equality_constraints=[
(
torch.tensor(
[[0, 0], [1, 0]], device=self.device, dtype=torch.int64
),
torch.tensor(
[1.0, -1.0], device=self.device, dtype=torch.float64
),
0,
),
],
q=3,
num_restarts=2,
raw_samples=10,
return_best_only=True,
sequential=True,
)
with self.assertRaises(UnsupportedError):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
inequality_constraints=[
(
torch.tensor(
[[0, 0], [1, 0]], device=self.device, dtype=torch.int64
),
torch.tensor(
[1.0, -1.0], device=self.device, dtype=torch.float64
),
0,
),
],
q=3,
num_restarts=2,
raw_samples=10,
return_best_only=True,
sequential=True,
)
def test_optimize_acqf_batch_limit(self) -> None:
num_restarts = 3
raw_samples = 5
dim = 4
q = 4
batch_limit = 2
options = {"batch_limit": batch_limit}
initial_conditions = [
torch.ones(shape) for shape in [(1, 2, dim), (2, 1, dim), (1, dim)]
] + [None]
for gen_candidates, ics in zip(
[gen_candidates_scipy, gen_candidates_torch], initial_conditions
):
with self.subTest(gen_candidates=gen_candidates, initial_conditions=ics):
_, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
return_best_only=False,
gen_candidates=gen_candidates,
batch_initial_conditions=ics,
)
expected_shape = (num_restarts,) if ics is None else (ics.shape[0],)
self.assertEqual(acq_value_list.shape, expected_shape)
def test_optimize_acqf_runs_given_batch_initial_conditions(self):
num_restarts, raw_samples, dim = 1, 2, 3
opt_x = 2 / np.pi
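# sin(1/x) attains its maximum of 1 at 1/x = pi / 2, i.e. x = 2 / pi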
# -x[i] * 1 >= -opt_x * 1.01 => x[i] <= opt_x * 1.01
inequality_constraints = [
(torch.tensor([i]), -torch.tensor([1]), -opt_x * 1.01) for i in range(dim)
] + [
# x[i] * 1 >= opt_x * .99
(torch.tensor([i]), torch.tensor([1]), opt_x * 0.99)
for i in range(dim)
]
q = 1
ic_shapes = [(1, 2, dim), (2, 1, dim), (1, dim)]
torch.manual_seed(0)
for shape in ic_shapes:
with self.subTest(shape=shape):
# start near one (of many) optima
initial_conditions = (opt_x * 1.01) * torch.ones(shape)
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
batch_initial_conditions=initial_conditions,
inequality_constraints=inequality_constraints,
)
self.assertAllClose(
batch_candidates,
opt_x * torch.ones_like(batch_candidates),
# must be at least 50% closer to the optimum than it started
atol=0.004,
rtol=0.005,
)
self.assertAlmostEqual(acq_value_list.item(), 1, places=3)
def test_optimize_acqf_wrong_ic_shape_inequality_constraints(self) -> None:
dim = 3
ic_shapes = [(1, 2, dim + 1), (1, 2, dim, 1), (1, dim + 1), (1, 1), (dim,)]
for shape in ic_shapes:
with self.subTest(shape=shape):
initial_conditions = torch.ones(shape)
expected_error = (
rf"batch_initial_conditions.shape\[-1\] must be {dim}\."
if len(shape) in (2, 3)
else r"batch_initial_conditions must be 2\-dimensional or "
)
with self.assertRaisesRegex(ValueError, expected_error):
optimize_acqf(
acq_function=MockAcquisitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
q=4,
batch_initial_conditions=initial_conditions,
num_restarts=1,
)
def test_optimize_acqf_warns_on_opt_failure(self):
"""
Test error handling in `scipy.optimize.minimize`.
Expected behavior is that a warning is raised when optimization fails
in `scipy.optimize.minimize`, and then it restarts and tries again.
This is a test case cooked up to fail. It is trying to optimize
sin(1/x), which is pathological near zero, given a starting point near
zero.
"""
num_restarts, raw_samples, dim = 1, 1, 1
initial_conditions = 1e-8 * torch.ones((num_restarts, raw_samples, dim))
torch.manual_seed(0)
with warnings.catch_warnings(record=True) as ws:
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=torch.stack([-1 * torch.ones(dim), torch.ones(dim)]),
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
batch_initial_conditions=initial_conditions,
)
message = (
"Optimization failed in `gen_candidates_scipy` with the following "
"warning(s):\n[OptimizationWarning('Optimization failed within "
"`scipy.optimize.minimize` with status 2 and message ABNORMAL_TERMINATION"
"_IN_LNSRCH.')]\nBecause you specified `batch_initial_conditions`, "
"optimization will not be retried with new initial conditions and will "
"proceed with the current solution. Suggested remediation: Try again with "
"different `batch_initial_conditions`, or don't provide "
"`batch_initial_conditions.`"
)
expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message in str(w.message)
for w in ws
)
)
self.assertTrue(expected_warning_raised)
def test_optimize_acqf_successfully_restarts_on_opt_failure(self):
"""
Test that `optimize_acqf` can succeed after restarting on opt failure.
With the given seed (5), `optimize_acqf` will choose an initial
condition that causes failure in the first run of
`gen_candidates_scipy`, then re-tries with a new starting point and
succeed.
Also tests that this can be turned off by setting
`retry_on_optimization_warning = False`.
"""
num_restarts, raw_samples, dim = 1, 1, 1
bounds = torch.stack(
[
-1 * torch.ones(dim, dtype=torch.double),
torch.ones(dim, dtype=torch.double),
]
)
torch.manual_seed(5)
with warnings.catch_warnings(record=True) as ws:
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
# shorten the line search to make it faster and make failure
# more likely
options={"maxls": 2},
)
message = (
"Optimization failed in `gen_candidates_scipy` with the following "
"warning(s):\n[OptimizationWarning('Optimization failed within "
"`scipy.optimize.minimize` with status 2 and message ABNORMAL_TERMINATION"
"_IN_LNSRCH.')]\nTrying again with a new set of initial conditions."
)
expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message in str(w.message)
for w in ws
)
)
self.assertTrue(expected_warning_raised)
# check if it succeeded on restart -- the maximum value of sin(1/x) is 1
self.assertAlmostEqual(acq_value_list.item(), 1.0)
# Test with retry_on_optimization_warning = False.
torch.manual_seed(5)
with warnings.catch_warnings(record=True) as ws:
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
# shorten the line search to make it faster and make failure
# more likely
options={"maxls": 2},
retry_on_optimization_warning=False,
)
expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message in str(w.message)
for w in ws
)
)
self.assertFalse(expected_warning_raised)
def test_optimize_acqf_warns_on_second_opt_failure(self):
"""
Test that `optimize_acqf` warns if it fails on a second optimization try.
With the given seed (230), `optimize_acqf` will choose an initial
condition that causes failure in the first run of
`gen_candidates_scipy`, then re-tries and still does not succeed. Since
this doesn't happen with seeds 0 - 229, this test might be broken by
future refactorings affecting calls to `torch`.
"""
num_restarts, raw_samples, dim = 1, 1, 1
bounds = torch.stack(
[
-1 * torch.ones(dim, dtype=torch.double),
torch.ones(dim, dtype=torch.double),
]
)
with warnings.catch_warnings(record=True) as ws:
torch.manual_seed(230)
batch_candidates, acq_value_list = optimize_acqf(
acq_function=SinOneOverXAcqusitionFunction(),
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
# shorten the line search to make it faster and make failure
# more likely
options={"maxls": 2},
)
message_1 = (
"Optimization failed in `gen_candidates_scipy` with the following "
"warning(s):\n[OptimizationWarning('Optimization failed within "
"`scipy.optimize.minimize` with status 2 and message ABNORMAL_TERMINATION"
"_IN_LNSRCH.')]\nTrying again with a new set of initial conditions."
)
message_2 = (
"Optimization failed on the second try, after generating a new set "
"of initial conditions."
)
first_expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message_1 in str(w.message)
for w in ws
)
)
second_expected_warning_raised = any(
(
issubclass(w.category, RuntimeWarning) and message_2 in str(w.message)
for w in ws
)
)
self.assertTrue(first_expected_warning_raised)
self.assertTrue(second_expected_warning_raised)
def test_optimize_acqf_nonlinear_constraints(self):
num_restarts = 2
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
mock_acq_function = SquaredAcquisitionFunction()
bounds = torch.stack(
[torch.zeros(3, **tkwargs), 4 * torch.ones(3, **tkwargs)]
)
# Make sure we find the global optimum [4, 4, 4] without constraints
with torch.random.fork_rng():
torch.manual_seed(0)
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
sequential=True,
raw_samples=16,
)
self.assertAllClose(candidates, 4 * torch.ones(1, 3, **tkwargs))
# Constrain the sum to be <= 4 in which case the solution is a
# permutation of [4, 0, 0]
def nlc1(x):
return 4 - x.sum(dim=-1)
batch_initial_conditions = torch.tensor([[[0.5, 0.5, 3]]], **tkwargs)
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
batch_initial_conditions=batch_initial_conditions,
num_restarts=1,
)
self.assertTrue(
torch.allclose(
torch.sort(candidates).values,
torch.tensor([[0, 0, 4]], **tkwargs),
)
)
self.assertTrue(
torch.allclose(acq_value, torch.tensor([4], **tkwargs), atol=1e-3)
)
# Make sure we return the initial solution if SLSQP fails to return
# a feasible point.
with mock.patch(
"botorch.generation.gen.minimize_with_timeout"
) as mock_minimize:
# By setting "success" to True and "status" to 0, we prevent a
# warning that `minimize` failed, which isn't the behavior
# we're looking to test here.
mock_minimize.return_value = OptimizeResult(
x=np.array([4, 4, 4]), success=True, status=0
)
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
batch_initial_conditions=batch_initial_conditions,
num_restarts=1,
)
self.assertAllClose(candidates, batch_initial_conditions[0, ...])
# Constrain all variables to be >= 1. The global optimum is then
# sqrt(1 + 1 + 4) ~= 2.45, attained by some permutation of [1, 1, 2]
def nlc2(x):
return x[..., 0] - 1
def nlc3(x):
return x[..., 1] - 1
def nlc4(x):
return x[..., 2] - 1
with torch.random.fork_rng():
torch.manual_seed(0)
batch_initial_conditions = 1 + 0.33 * torch.rand(
num_restarts, 1, 3, **tkwargs
)
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1, nlc2, nlc3, nlc4],
batch_initial_conditions=batch_initial_conditions,
num_restarts=num_restarts,
)
self.assertTrue(
torch.allclose(
torch.sort(candidates).values,
torch.tensor([[1, 1, 2]], **tkwargs),
)
)
self.assertTrue(
torch.allclose(acq_value, torch.tensor(2.45, **tkwargs), atol=1e-3)
)
with torch.random.fork_rng():
torch.manual_seed(0)
batch_initial_conditions = torch.rand(num_restarts, 1, 3, **tkwargs)
batch_initial_conditions[..., 0] = 2
# test with fixed features
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1, nlc2],
batch_initial_conditions=batch_initial_conditions,
num_restarts=num_restarts,
fixed_features={0: 2},
)
self.assertEqual(candidates[0, 0], 2.0)
self.assertTrue(
torch.allclose(
torch.sort(candidates).values,
torch.tensor([[0, 2, 2]], **tkwargs),
)
)
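# with x[0] fixed at 2 and the sum constrained to <= 4, the norm is
# maximized by a permutation of [0, 2, 2], giving sqrt(8) ~= 2.8284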
self.assertTrue(
torch.allclose(acq_value, torch.tensor(2.8284, **tkwargs), atol=1e-3)
)
# Test that an ic_generator object with the same API as
# gen_batch_initial_conditions returns candidates of the
# required shape.
with mock.patch(
"botorch.optim.optimize.gen_batch_initial_conditions"
) as ic_generator:
ic_generator.return_value = batch_initial_conditions
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=3,
nonlinear_inequality_constraints=[nlc1],
num_restarts=1,
ic_generator=ic_generator,
)
self.assertEqual(candidates.size(), torch.Size([1, 3]))
# Constraints must be passed in as lists
with self.assertRaisesRegex(
ValueError,
"`nonlinear_inequality_constraints` must be a list of callables, "
"got <class 'function'>.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=nlc1,
num_restarts=num_restarts,
batch_initial_conditions=batch_initial_conditions,
)
# batch_initial_conditions must be feasible
with self.assertRaisesRegex(
ValueError,
"`batch_initial_conditions` must satisfy the non-linear "
"inequality constraints.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
num_restarts=num_restarts,
batch_initial_conditions=4 * torch.ones(1, 1, 3, **tkwargs),
)
# Explicitly setting batch_limit to be >1 should raise
with self.assertRaisesRegex(
ValueError,
"`batch_limit` must be 1 when non-linear inequality constraints "
"are given.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
batch_initial_conditions=torch.rand(5, 1, 3, **tkwargs),
num_restarts=5,
options={"batch_limit": 5},
)
# If there are non-linear inequality constraints an initial condition
# generator object `ic_generator` must be supplied.
with self.assertRaisesRegex(
RuntimeError,
"`ic_generator` must be given if "
"there are non-linear inequality constraints.",
):
optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=1,
nonlinear_inequality_constraints=[nlc1],
num_restarts=1,
raw_samples=16,
)
@mock.patch("botorch.generation.gen.gen_candidates_torch")
@mock.patch("botorch.optim.optimize.gen_batch_initial_conditions")
@mock.patch("botorch.optim.optimize.gen_candidates_scipy")
@mock.patch("botorch.optim.utils.common.signature")
def test_optimize_acqf_non_linear_constraints_sequential(
self,
mock_signature,
mock_gen_candidates_scipy,
mock_gen_batch_initial_conditions,
mock_gen_candidates_torch,
):
def nlc(x):
return 4 * x[..., 2] - 5
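# feasible iff 4 * x[2] - 5 >= 0, i.e. x[2] >= 1.25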
q = 3
num_restarts = 2
raw_samples = 10
options = {}
for mock_gen_candidates in (
mock_gen_candidates_torch,
mock_gen_candidates_scipy,
):
if mock_gen_candidates == mock_gen_candidates_torch:
mock_signature.return_value = signature(gen_candidates_torch)
else:
mock_signature.return_value = signature(gen_candidates_scipy)
for dtype in (torch.float, torch.double):
mock_acq_function = MockAcquisitionFunction()
mock_gen_batch_initial_conditions.side_effect = [
torch.zeros(num_restarts, 1, 3, device=self.device, dtype=dtype)
for _ in range(q)
]
gcs_return_vals = [
(
torch.tensor(
[[[1.0, 2.0, 3.0]]], device=self.device, dtype=dtype
),
torch.tensor([i], device=self.device, dtype=dtype),
)
# for nonlinear inequality constraints the batch_limit variable is
# currently set to 1 by default and hence gen_candidates_scipy is
# called num_restarts*q times
for i in range(num_restarts * q)
]
mock_gen_candidates.side_effect = gcs_return_vals
expected_candidates = torch.cat(
[cands[0] for cands, _ in gcs_return_vals[::num_restarts]], dim=-2
)
bounds = torch.stack(
[
torch.zeros(3, device=self.device, dtype=dtype),
4 * torch.ones(3, device=self.device, dtype=dtype),
]
)
with warnings.catch_warnings(record=True) as ws:
candidates, acq_value = optimize_acqf(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
nonlinear_inequality_constraints=[nlc],
sequential=True,
ic_generator=mock_gen_batch_initial_conditions,
gen_candidates=mock_gen_candidates,
)
if mock_gen_candidates == mock_gen_candidates_torch:
self.assertEqual(len(ws), 3)
message = (
"Keyword arguments ['nonlinear_inequality_constraints']"
" will be ignored because they are not allowed parameters for"
" function gen_candidates. Allowed parameters are "
" ['initial_conditions', 'acquisition_function', "
"'lower_bounds', 'upper_bounds', 'optimizer', 'options',"
" 'callback', 'fixed_features', 'timeout_sec']."
)
expected_warning_raised = any(
issubclass(w.category, UserWarning)
and message == str(w.message)
for w in ws
)
self.assertTrue(expected_warning_raised)
# check message
else:
self.assertEqual(len(ws), 0)
self.assertTrue(torch.equal(candidates, expected_candidates))
# Extract the relevant entries from gcs_return_vals to
# perform comparison with.
self.assertTrue(
torch.equal(
acq_value,
torch.cat(
[
expected_acq_value
for _, expected_acq_value in gcs_return_vals[
num_restarts - 1 :: num_restarts
]
]
),
),
)
def test_constraint_caching(self):
def nlc(x):
return 4 - x.sum(dim=-1)
class FunWrapperWithCallCount:
def __init__(self):
self.call_count = 0
def __call__(self, x, f):
self.call_count += 1
X = torch.from_numpy(x).view(-1).contiguous().requires_grad_(True)
loss = f(X).sum()
gradf = _arrayify(torch.autograd.grad(loss, X)[0].contiguous().view(-1))
return loss.item(), gradf
f_np_wrapper = FunWrapperWithCallCount()
f_obj, f_grad = _make_f_and_grad_nonlinear_inequality_constraints(
f_np_wrapper=f_np_wrapper, nlc=nlc
)
x1, x2 = np.array([1.0, 0.5, 0.25]), np.array([1.0, 0.5, 0.5])
# Call f_obj once, this requires calling f_np_wrapper
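# nlc(x1) = 4 - (1.0 + 0.5 + 0.25) = 2.25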
self.assertEqual(f_obj(x1), 2.25)
self.assertEqual(f_np_wrapper.call_count, 1)
# Call f_obj again, we should use the cached value this time
self.assertEqual(f_obj(x1), 2.25)
self.assertEqual(f_np_wrapper.call_count, 1)
# Call f_grad, we should use the cached value here as well
self.assertTrue(np.array_equal(f_grad(x1), -np.ones(3)))
self.assertEqual(f_np_wrapper.call_count, 1)
# Call f_grad with a new input
self.assertTrue(np.array_equal(f_grad(x2), -np.ones(3)))
self.assertEqual(f_np_wrapper.call_count, 2)
# Call f_obj on the new input, should use the cache
self.assertEqual(f_obj(x2), 2.0)
self.assertEqual(f_np_wrapper.call_count, 2)
class TestOptimizeAcqfCyclic(BotorchTestCase):
@mock.patch("botorch.optim.optimize._optimize_acqf") # noqa: C901
# TODO: make sure this runs without mock
def test_optimize_acqf_cyclic(self, mock_optimize_acqf):
num_restarts = 2
raw_samples = 10
num_cycles = 2
options = {}
tkwargs = {"device": self.device}
bounds = torch.stack([torch.zeros(3), 4 * torch.ones(3)])
inequality_constraints = [
[torch.tensor([2], dtype=int), torch.tensor([4.0]), torch.tensor(5.0)]
]
mock_acq_function = MockAcquisitionFunction()
for q, dtype in itertools.product([1, 3], (torch.float, torch.double)):
tkwargs["dtype"] = dtype
inequality_constraints = [
(
# indices can't be floats or doubles
inequality_constraints[0][0],
inequality_constraints[0][1].to(**tkwargs),
inequality_constraints[0][2].to(**tkwargs),
)
]
mock_optimize_acqf.reset_mock()
bounds = bounds.to(**tkwargs)
candidate_rvs = []
acq_val_rvs = []
for cycle_j in range(num_cycles):
gcs_return_vals = [
(torch.rand(1, 3, **tkwargs), torch.rand(1, **tkwargs))
for _ in range(q)
]
if cycle_j == 0:
# return `q` candidates for first call
candidate_rvs.append(
torch.cat([rv[0] for rv in gcs_return_vals], dim=-2)
)
acq_val_rvs.append(torch.cat([rv[1] for rv in gcs_return_vals]))
else:
# return 1 candidate for subsequent calls
for rv in gcs_return_vals:
candidate_rvs.append(rv[0])
acq_val_rvs.append(rv[1])
mock_optimize_acqf.side_effect = list(zip(candidate_rvs, acq_val_rvs))
orig_candidates = candidate_rvs[0].clone()
# wrap the set_X_pending method to check its call arguments
with mock.patch.object(
MockAcquisitionFunction,
"set_X_pending",
wraps=mock_acq_function.set_X_pending,
) as mock_set_X_pending:
candidates, acq_value = optimize_acqf_cyclic(
acq_function=mock_acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
post_processing_func=rounding_func,
cyclic_options={"maxiter": num_cycles},
)
# check that X_pending is set correctly in cyclic optimization
if q > 1:
x_pending_call_args_list = mock_set_X_pending.call_args_list
idxr = torch.ones(q, dtype=torch.bool, device=self.device)
for i in range(len(x_pending_call_args_list) - 1):
idxr[i] = 0
self.assertTrue(
torch.equal(
x_pending_call_args_list[i][0][0], orig_candidates[idxr]
)
)
idxr[i] = 1
orig_candidates[i] = candidate_rvs[i + 1]
# check reset to base_X_pending
self.assertIsNone(x_pending_call_args_list[-1][0][0])
else:
mock_set_X_pending.assert_not_called()
# check final candidates
expected_candidates = (
torch.cat(candidate_rvs[-q:], dim=0) if q > 1 else candidate_rvs[0]
)
self.assertTrue(torch.equal(candidates, expected_candidates))
# check call arguments for optimize_acqf
call_args_list = mock_optimize_acqf.call_args_list
expected_call_args = {
"acq_function": mock_acq_function,
"bounds": bounds,
"num_restarts": num_restarts,
"raw_samples": raw_samples,
"options": options,
"inequality_constraints": inequality_constraints,
"equality_constraints": None,
"fixed_features": None,
"post_processing_func": rounding_func,
"return_best_only": True,
"sequential": True,
}
orig_candidates = candidate_rvs[0].clone()
for i in range(len(call_args_list)):
if i == 0:
# first cycle
expected_call_args.update(
{"batch_initial_conditions": None, "q": q}
)
else:
expected_call_args.update(
{"batch_initial_conditions": orig_candidates[i - 1 : i], "q": 1}
)
orig_candidates[i - 1] = candidate_rvs[i]
for k, v in call_args_list[i][1].items():
if torch.is_tensor(v):
self.assertTrue(torch.equal(expected_call_args[k], v))
elif k == "acq_function":
self.assertIsInstance(
mock_acq_function, MockAcquisitionFunction
)
else:
self.assertEqual(expected_call_args[k], v)
class TestOptimizeAcqfList(BotorchTestCase):
@mock.patch("botorch.optim.optimize.optimize_acqf") # noqa: C901
@mock.patch("botorch.optim.optimize.optimize_acqf_mixed")
def test_optimize_acqf_list(self, mock_optimize_acqf, mock_optimize_acqf_mixed):
num_restarts = 2
raw_samples = 10
options = {}
tkwargs = {"device": self.device}
bounds = torch.stack([torch.zeros(3), 4 * torch.ones(3)])
inequality_constraints = [
[torch.tensor([3]), torch.tensor([4]), torch.tensor(5)]
]
# reinitialize so that the dtype is handled correctly in each iteration
mock_acq_function_1 = MockAcquisitionFunction()
mock_acq_function_2 = MockAcquisitionFunction()
mock_acq_function_list = [mock_acq_function_1, mock_acq_function_2]
fixed_features_list = [None, [{0: 0.5}]]
for ffl in fixed_features_list:
for num_acqf, dtype in itertools.product(
[1, 2], (torch.float, torch.double)
):
for m in mock_acq_function_list:
# clear previous X_pending
m.set_X_pending(None)
tkwargs["dtype"] = dtype
inequality_constraints[0] = [
t.to(**tkwargs) for t in inequality_constraints[0]
]
mock_optimize_acqf.reset_mock()
mock_optimize_acqf_mixed.reset_mock()
bounds = bounds.to(**tkwargs)
candidate_rvs = []
acq_val_rvs = []
gcs_return_vals = [
(torch.rand(1, 3, **tkwargs), torch.rand(1, **tkwargs))
for _ in range(num_acqf)
]
for rv in gcs_return_vals:
candidate_rvs.append(rv[0])
acq_val_rvs.append(rv[1])
side_effect = list(zip(candidate_rvs, acq_val_rvs))
mock_optimize_acqf.side_effect = side_effect
mock_optimize_acqf_mixed.side_effect = side_effect
orig_candidates = candidate_rvs[0].clone()
                # Wrap the set_X_pending method to check its call arguments
with mock.patch.object(
MockAcquisitionFunction,
"set_X_pending",
wraps=mock_acq_function_1.set_X_pending,
) as mock_set_X_pending_1, mock.patch.object(
MockAcquisitionFunction,
"set_X_pending",
wraps=mock_acq_function_2.set_X_pending,
) as mock_set_X_pending_2:
candidates, _ = optimize_acqf_list(
acq_function_list=mock_acq_function_list[:num_acqf],
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
post_processing_func=rounding_func,
fixed_features_list=ffl,
)
# check that X_pending is set correctly in sequential optimization
if num_acqf > 1:
x_pending_call_args_list = mock_set_X_pending_2.call_args_list
idxr = torch.ones(
num_acqf, dtype=torch.bool, device=self.device
)
for i in range(len(x_pending_call_args_list) - 1):
idxr[i] = 0
self.assertTrue(
torch.equal(
x_pending_call_args_list[i][0][0],
orig_candidates[idxr],
)
)
idxr[i] = 1
orig_candidates[i] = candidate_rvs[i + 1]
else:
mock_set_X_pending_1.assert_not_called()
# check final candidates
expected_candidates = (
torch.cat(candidate_rvs[-num_acqf:], dim=0)
if num_acqf > 1
else candidate_rvs[0]
)
self.assertTrue(torch.equal(candidates, expected_candidates))
# check call arguments for optimize_acqf
if ffl is None:
call_args_list = mock_optimize_acqf.call_args_list
expected_call_args = {
"acq_function": None,
"bounds": bounds,
"q": 1,
"num_restarts": num_restarts,
"raw_samples": raw_samples,
"options": options,
"inequality_constraints": inequality_constraints,
"equality_constraints": None,
"fixed_features": None,
"post_processing_func": rounding_func,
"batch_initial_conditions": None,
"return_best_only": True,
"sequential": False,
}
else:
call_args_list = mock_optimize_acqf_mixed.call_args_list
expected_call_args = {
"acq_function": None,
"bounds": bounds,
"q": 1,
"num_restarts": num_restarts,
"raw_samples": raw_samples,
"options": options,
"inequality_constraints": inequality_constraints,
"equality_constraints": None,
"post_processing_func": rounding_func,
"batch_initial_conditions": None,
"fixed_features_list": ffl,
}
for i in range(len(call_args_list)):
expected_call_args["acq_function"] = mock_acq_function_list[i]
for k, v in call_args_list[i][1].items():
if torch.is_tensor(v):
self.assertTrue(torch.equal(expected_call_args[k], v))
elif k == "acq_function":
self.assertIsInstance(
mock_acq_function_list[i], MockAcquisitionFunction
)
else:
self.assertEqual(expected_call_args[k], v)
def test_optimize_acqf_list_empty_list(self):
with self.assertRaises(ValueError):
optimize_acqf_list(
acq_function_list=[],
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
num_restarts=2,
raw_samples=10,
)
def test_optimize_acqf_list_fixed_features(self):
with self.assertRaises(ValueError):
optimize_acqf_list(
acq_function_list=[
MockAcquisitionFunction(),
MockAcquisitionFunction(),
],
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
num_restarts=2,
raw_samples=10,
fixed_features_list=[{0: 0.5}],
fixed_features={0: 0.5},
)
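# Hedged note on the decorator stacking used above: `mock.patch` decorators
# apply bottom-up, so the mock for the bottom-most patch is passed to the
# test method first. A minimal sketch with hypothetical targets `pkg.mod.f`
# and `pkg.mod.g`:
#
#     @mock.patch("pkg.mod.f")
#     @mock.patch("pkg.mod.g")
#     def test(self, mock_g, mock_f):  # the bottom patch ("g") arrives first
#         ...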
class TestOptimizeAcqfMixed(BotorchTestCase):
@mock.patch("botorch.optim.optimize.optimize_acqf") # noqa: C901
def test_optimize_acqf_mixed_q1(self, mock_optimize_acqf):
num_restarts = 2
raw_samples = 10
q = 1
options = {}
tkwargs = {"device": self.device}
bounds = torch.stack([torch.zeros(3), 4 * torch.ones(3)])
mock_acq_function = MockAcquisitionFunction()
for num_ff, dtype in itertools.product([1, 3], (torch.float, torch.double)):
tkwargs["dtype"] = dtype
mock_optimize_acqf.reset_mock()
bounds = bounds.to(**tkwargs)
candidate_rvs = []
acq_val_rvs = []
for _ in range(num_ff):
candidate_rvs.append(torch.rand(1, 3, **tkwargs))
acq_val_rvs.append(torch.rand(1, **tkwargs))
fixed_features_list = [{i: i * 0.1} for i in range(num_ff)]
side_effect = list(zip(candidate_rvs, acq_val_rvs))
mock_optimize_acqf.side_effect = side_effect
candidates, acq_value = optimize_acqf_mixed(
acq_function=mock_acq_function,
q=q,
fixed_features_list=fixed_features_list,
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
post_processing_func=rounding_func,
)
# compute expected output
ff_acq_values = torch.stack(acq_val_rvs)
best = torch.argmax(ff_acq_values)
expected_candidates = candidate_rvs[best]
expected_acq_value = ff_acq_values[best]
self.assertTrue(torch.equal(candidates, expected_candidates))
self.assertTrue(torch.equal(acq_value, expected_acq_value))
# check call arguments for optimize_acqf
call_args_list = mock_optimize_acqf.call_args_list
expected_call_args = {
"acq_function": None,
"bounds": bounds,
"q": q,
"num_restarts": num_restarts,
"raw_samples": raw_samples,
"options": options,
"inequality_constraints": None,
"equality_constraints": None,
"fixed_features": None,
"post_processing_func": rounding_func,
"batch_initial_conditions": None,
"return_best_only": True,
"sequential": False,
"ic_generator": None,
"nonlinear_inequality_constraints": None,
}
for i in range(len(call_args_list)):
expected_call_args["fixed_features"] = fixed_features_list[i]
for k, v in call_args_list[i][1].items():
if torch.is_tensor(v):
self.assertTrue(torch.equal(expected_call_args[k], v))
elif k == "acq_function":
self.assertIsInstance(v, MockAcquisitionFunction)
else:
self.assertEqual(expected_call_args[k], v)
@mock.patch("botorch.optim.optimize.optimize_acqf") # noqa: C901
def test_optimize_acqf_mixed_q2(self, mock_optimize_acqf):
num_restarts = 2
raw_samples = 10
q = 2
options = {}
tkwargs = {"device": self.device}
bounds = torch.stack([torch.zeros(3), 4 * torch.ones(3)])
mock_acq_functions = [
MockAcquisitionFunction(),
MockOneShotEvaluateAcquisitionFunction(),
]
for num_ff, dtype, mock_acq_function in itertools.product(
[1, 3], (torch.float, torch.double), mock_acq_functions
):
tkwargs["dtype"] = dtype
mock_optimize_acqf.reset_mock()
bounds = bounds.to(**tkwargs)
fixed_features_list = [{i: i * 0.1} for i in range(num_ff)]
candidate_rvs, exp_candidates, acq_val_rvs = [], [], []
# generate mock side effects and compute expected outputs
for _ in range(q):
candidate_rvs_q = [torch.rand(1, 3, **tkwargs) for _ in range(num_ff)]
acq_val_rvs_q = [torch.rand(1, **tkwargs) for _ in range(num_ff)]
best = torch.argmax(torch.stack(acq_val_rvs_q))
exp_candidates.append(candidate_rvs_q[best])
candidate_rvs += candidate_rvs_q
acq_val_rvs += acq_val_rvs_q
side_effect = list(zip(candidate_rvs, acq_val_rvs))
mock_optimize_acqf.side_effect = side_effect
candidates, acq_value = optimize_acqf_mixed(
acq_function=mock_acq_function,
q=q,
fixed_features_list=fixed_features_list,
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
post_processing_func=rounding_func,
)
expected_candidates = torch.cat(exp_candidates, dim=-2)
if isinstance(mock_acq_function, MockOneShotEvaluateAcquisitionFunction):
expected_acq_value = mock_acq_function.evaluate(
expected_candidates, bounds=bounds
)
else:
expected_acq_value = mock_acq_function(expected_candidates)
self.assertTrue(torch.equal(candidates, expected_candidates))
self.assertTrue(torch.equal(acq_value, expected_acq_value))
def test_optimize_acqf_mixed_empty_ff(self):
with self.assertRaises(ValueError):
mock_acq_function = MockAcquisitionFunction()
optimize_acqf_mixed(
acq_function=mock_acq_function,
q=1,
fixed_features_list=[],
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
num_restarts=2,
raw_samples=10,
)
def test_optimize_acqf_one_shot_large_q(self):
with self.assertRaises(ValueError):
mock_acq_function = MockOneShotAcquisitionFunction()
fixed_features_list = [{i: i * 0.1} for i in range(2)]
optimize_acqf_mixed(
acq_function=mock_acq_function,
q=2,
fixed_features_list=fixed_features_list,
bounds=torch.stack([torch.zeros(3), 4 * torch.ones(3)]),
num_restarts=2,
raw_samples=10,
)
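# Hedged sketch (illustrative): for q=1, `optimize_acqf_mixed` is expected to
# solve one continuous sub-problem per fixed-feature configuration and keep
# the argmax, which is what the expected outputs above recompute by hand.
def _best_over_fixed_features_sketch(candidate_list, acq_value_list):
    # candidate_list[i] / acq_value_list[i] come from the i-th sub-problem
    best = torch.argmax(torch.stack(acq_value_list))
    return candidate_list[best], acq_value_list[best]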
class TestOptimizeAcqfDiscrete(BotorchTestCase):
def test_optimize_acqf_discrete(self):
for q, dtype in itertools.product((1, 2), (torch.float, torch.double)):
tkwargs = {"device": self.device, "dtype": dtype}
mock_acq_function = SquaredAcquisitionFunction()
mock_acq_function.set_X_pending(None)
            # ensure proper raising of errors if no choices; the regex below
            # matches the library's error message verbatim, including the
            # "non-emtpy" typo
            with self.assertRaisesRegex(InputDataError, "`choices` must be non-emtpy."):
optimize_acqf_discrete(
acq_function=mock_acq_function,
q=q,
choices=torch.empty(0, 2),
)
choices = torch.rand(5, 2, **tkwargs)
# warning for unsupported keyword arguments
with self.assertWarnsRegex(
DeprecationWarning,
r"`optimize_acqf_discrete` does not support arguments "
r"\['num_restarts'\]. In the future, this will become an error.",
):
optimize_acqf_discrete(
acq_function=mock_acq_function, q=q, choices=choices, num_restarts=8
)
exp_acq_vals = mock_acq_function(choices)
# test unique
candidates, acq_value = optimize_acqf_discrete(
acq_function=mock_acq_function,
q=q,
choices=choices,
)
best_idcs = torch.topk(exp_acq_vals, q).indices
expected_candidates = choices[best_idcs]
expected_acq_value = exp_acq_vals[best_idcs].reshape_as(acq_value)
self.assertAllClose(acq_value, expected_acq_value)
self.assertAllClose(candidates, expected_candidates)
# test non-unique (test does not properly use pending points)
candidates, acq_value = optimize_acqf_discrete(
acq_function=mock_acq_function, q=q, choices=choices, unique=False
)
best_idx = torch.argmax(exp_acq_vals)
expected_candidates = choices[best_idx].repeat(q, 1)
expected_acq_value = exp_acq_vals[best_idx].repeat(q).reshape_as(acq_value)
self.assertAllClose(acq_value, expected_acq_value)
self.assertAllClose(candidates, expected_candidates)
            # test max_batch_size
candidates, acq_value = optimize_acqf_discrete(
acq_function=mock_acq_function, q=q, choices=choices, max_batch_size=3
)
best_idcs = torch.topk(exp_acq_vals, q).indices
expected_candidates = choices[best_idcs]
expected_acq_value = exp_acq_vals[best_idcs].reshape_as(acq_value)
self.assertAllClose(acq_value, expected_acq_value)
self.assertAllClose(candidates, expected_candidates)
            # test max_batch_size with unique=False
candidates, acq_value = optimize_acqf_discrete(
acq_function=mock_acq_function,
q=q,
choices=choices,
unique=False,
max_batch_size=3,
)
best_idx = torch.argmax(exp_acq_vals)
expected_candidates = choices[best_idx].repeat(q, 1)
expected_acq_value = exp_acq_vals[best_idx].repeat(q).reshape_as(acq_value)
self.assertAllClose(acq_value, expected_acq_value)
self.assertAllClose(candidates, expected_candidates)
with self.assertRaises(UnsupportedError):
acqf = MockOneShotAcquisitionFunction()
optimize_acqf_discrete(
acq_function=acqf,
q=1,
choices=torch.tensor([[0.5], [0.2]]),
)
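    # Hedged sketch (illustrative): with unique=True, the behavior encoded by
    # the assertions above is a plain top-k over the evaluated choices:
    #
    #     vals = acq_function(choices)
    #     idcs = torch.topk(vals, q).indices
    #     candidates, acq_value = choices[idcs], vals[idcs]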
def test_optimize_acqf_discrete_local_search(self):
for q, dtype in itertools.product((1, 2), (torch.float, torch.double)):
tkwargs = {"device": self.device, "dtype": dtype}
mock_acq_function = SquaredAcquisitionFunction()
mock_acq_function.set_X_pending(None)
discrete_choices = [
torch.tensor([0, 1, 6], **tkwargs),
torch.tensor([2, 3, 4], **tkwargs),
torch.tensor([5, 6, 9], **tkwargs),
]
# make sure we can find the global optimum
candidates, acq_value = optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=discrete_choices,
raw_samples=1,
num_restarts=1,
)
self.assertTrue(
torch.allclose(candidates[0], torch.tensor([6, 4, 9], **tkwargs))
)
            if q > 1:  # there are three local maxima
self.assertTrue(
torch.allclose(candidates[1], torch.tensor([6, 3, 9], **tkwargs))
or torch.allclose(candidates[1], torch.tensor([1, 4, 9], **tkwargs))
or torch.allclose(candidates[1], torch.tensor([6, 4, 6], **tkwargs))
)
# same but with unique=False
candidates, acq_value = optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=discrete_choices,
raw_samples=1,
num_restarts=1,
unique=False,
)
expected_candidates = torch.tensor([[6, 4, 9], [6, 4, 9]], **tkwargs)
self.assertAllClose(candidates, expected_candidates[:q])
# test X_avoid and batch_initial_conditions
candidates, acq_value = optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=discrete_choices,
X_avoid=torch.tensor([[6, 4, 9]], **tkwargs),
batch_initial_conditions=torch.tensor([[0, 2, 5]], **tkwargs).unsqueeze(
1
),
)
self.assertTrue(
torch.allclose(candidates[0], torch.tensor([6, 3, 9], **tkwargs))
)
            if q > 1:  # there are two local maxima
self.assertTrue(
torch.allclose(candidates[1], torch.tensor([6, 2, 9], **tkwargs))
)
# test inequality constraints
inequality_constraints = [
(
torch.tensor([2], device=self.device),
-1 * torch.ones(1, **tkwargs),
-6 * torch.ones(1, **tkwargs),
)
]
candidates, acq_value = optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=discrete_choices,
raw_samples=1,
num_restarts=1,
inequality_constraints=inequality_constraints,
)
self.assertTrue(
torch.allclose(candidates[0], torch.tensor([6, 4, 6], **tkwargs))
)
            if q > 1:  # there are three local maxima
self.assertTrue(
torch.allclose(candidates[1], torch.tensor([6, 4, 5], **tkwargs))
or torch.allclose(candidates[1], torch.tensor([6, 3, 6], **tkwargs))
or torch.allclose(candidates[1], torch.tensor([1, 4, 6], **tkwargs))
)
# make sure we break if there are no neighbors
optimize_acqf_discrete_local_search(
acq_function=mock_acq_function,
q=q,
discrete_choices=[
torch.tensor([0, 1], **tkwargs),
torch.tensor([1], **tkwargs),
],
raw_samples=1,
num_restarts=1,
)
# test _filter_infeasible
X = torch.tensor([[0, 2, 5], [0, 2, 6], [0, 2, 9]], **tkwargs)
X_filtered = _filter_infeasible(
X=X, inequality_constraints=inequality_constraints
)
self.assertAllClose(X[:2], X_filtered)
# test _filter_invalid
X_filtered = _filter_invalid(X=X, X_avoid=X[1].unsqueeze(0))
self.assertAllClose(X[[0, 2]], X_filtered)
X_filtered = _filter_invalid(X=X, X_avoid=X[[0, 2]])
self.assertAllClose(X[1].unsqueeze(0), X_filtered)
# test _generate_neighbors
X_loc = _generate_neighbors(
x=torch.tensor([0, 2, 6], **tkwargs).unsqueeze(0),
discrete_choices=discrete_choices,
X_avoid=torch.tensor([[0, 3, 6], [0, 2, 5]], **tkwargs),
inequality_constraints=inequality_constraints,
)
self.assertTrue(
torch.allclose(
X_loc, torch.tensor([[1, 2, 6], [6, 2, 6], [0, 4, 6]], **tkwargs)
)
)
# test _gen_batch_initial_conditions_local_search
with self.assertRaisesRegex(RuntimeError, "Failed to generate"):
_gen_batch_initial_conditions_local_search(
discrete_choices=discrete_choices,
raw_samples=1,
X_avoid=torch.zeros(0, 3, **tkwargs),
inequality_constraints=[],
min_points=30,
)
X = _gen_batch_initial_conditions_local_search(
discrete_choices=discrete_choices,
raw_samples=1,
X_avoid=torch.zeros(0, 3, **tkwargs),
inequality_constraints=[],
min_points=20,
)
self.assertEqual(len(X), 20)
self.assertAllClose(torch.unique(X, dim=0), X)
def test_no_precision_loss_with_fixed_features(self) -> None:
acqf = SquaredAcquisitionFunction()
val = 1e-1
fixed_features_list = [{0: val}]
bounds = torch.stack(
[torch.zeros(2, dtype=torch.float64), torch.ones(2, dtype=torch.float64)]
)
candidate, _ = optimize_acqf_mixed(
acqf,
bounds=bounds,
q=1,
num_restarts=1,
raw_samples=1,
fixed_features_list=fixed_features_list,
)
self.assertEqual(candidate[0, 0].item(), val)
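# Hedged note: the precision check above guards against round-tripping fixed
# feature values through a lower-precision dtype. For instance, 0.1 is not
# exactly representable in binary floating point, so
#
#     torch.tensor(0.1, dtype=torch.float64).to(torch.float32).double()
#
# differs from torch.tensor(0.1, dtype=torch.float64).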
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from contextlib import ExitStack
from itertools import product
from random import random
from typing import Optional
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition.analytic import PosteriorMean
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
)
from botorch.acquisition.multi_objective.monte_carlo import (
qNoisyExpectedHypervolumeImprovement,
)
from botorch.exceptions import BadInitialCandidatesWarning, SamplingWarning
from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models import SingleTaskGP
from botorch.optim import initialize_q_batch, initialize_q_batch_nonneg
from botorch.optim.initializers import (
gen_batch_initial_conditions,
gen_one_shot_kg_initial_conditions,
gen_value_function_initial_conditions,
sample_perturbed_subset_dims,
sample_points_around_best,
sample_q_batches_from_polytope,
sample_truncated_normal_perturbations,
transform_constraints,
transform_inter_point_constraint,
transform_intra_point_constraint,
)
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.sampling import draw_sobol_samples, manual_seed
from botorch.utils.testing import (
_get_max_violation_of_bounds,
_get_max_violation_of_constraints,
BotorchTestCase,
MockAcquisitionFunction,
MockModel,
MockPosterior,
)
class TestBoundsAndConstraintCheckers(BotorchTestCase):
def test_bounds_check(self) -> None:
bounds = torch.tensor([[1, 2], [3, 4]], device=self.device)
samples = torch.tensor([[2, 3], [2, 3.1]], device=self.device)[None, :, :]
result = _get_max_violation_of_bounds(samples, bounds)
self.assertAlmostEqual(result, -0.9, delta=1e-6)
samples = torch.tensor([[2, 3], [2, 4.1]], device=self.device)[None, :, :]
result = _get_max_violation_of_bounds(samples, bounds)
self.assertAlmostEqual(result, 0.1, delta=1e-6)
def test_constraint_check(self) -> None:
constraints = [
(
torch.tensor([1], device=self.device),
torch.tensor([1.0], device=self.device),
3,
)
]
samples = torch.tensor([[2, 3], [2, 3.1]], device=self.device)[None, :, :]
result = _get_max_violation_of_constraints(samples, constraints, equality=True)
self.assertAlmostEqual(result, 0.1, delta=1e-6)
result = _get_max_violation_of_constraints(samples, constraints, equality=False)
self.assertAlmostEqual(result, 0.0, delta=1e-6)
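# Hedged sketch (illustrative): a reference computation consistent with the
# assertions above, assuming the helper reports the largest signed distance
# outside the box (negative means all samples are strictly feasible):
def _max_bounds_violation_sketch(samples, bounds):
    lower, upper = bounds[0], bounds[1]
    return torch.max(torch.maximum(lower - samples, samples - upper)).item()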
class TestInitializeQBatch(BotorchTestCase):
def test_initialize_q_batch_nonneg(self):
for dtype in (torch.float, torch.double):
# basic test
X = torch.rand(5, 3, 4, device=self.device, dtype=dtype)
Y = torch.rand(5, device=self.device, dtype=dtype)
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2)
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
# ensure nothing happens if we want all samples
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=5)
self.assertTrue(torch.equal(X, ics))
# make sure things work with constant inputs
Y = torch.ones(5, device=self.device, dtype=dtype)
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2)
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
# ensure raises correct warning
Y = torch.zeros(5, device=self.device, dtype=dtype)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BadInitialCandidatesWarning))
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
with self.assertRaises(RuntimeError):
initialize_q_batch_nonneg(X=X, Y=Y, n=10)
# test less than `n` positive acquisition values
Y = torch.arange(5, device=self.device, dtype=dtype) - 3
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2)
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
# check that we chose the point with the positive acquisition value
self.assertTrue(torch.equal(ics[0], X[-1]) or torch.equal(ics[1], X[-1]))
# test less than `n` alpha_pos values
Y = torch.arange(5, device=self.device, dtype=dtype)
ics = initialize_q_batch_nonneg(X=X, Y=Y, n=2, alpha=1.0)
self.assertEqual(ics.shape, torch.Size([2, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
def test_initialize_q_batch(self):
for dtype in (torch.float, torch.double):
for batch_shape in (torch.Size(), [3, 2], (2,), torch.Size([2, 3, 4]), []):
# basic test
X = torch.rand(5, *batch_shape, 3, 4, device=self.device, dtype=dtype)
Y = torch.rand(5, *batch_shape, device=self.device, dtype=dtype)
ics = initialize_q_batch(X=X, Y=Y, n=2)
self.assertEqual(ics.shape, torch.Size([2, *batch_shape, 3, 4]))
self.assertEqual(ics.device, X.device)
self.assertEqual(ics.dtype, X.dtype)
# ensure nothing happens if we want all samples
ics = initialize_q_batch(X=X, Y=Y, n=5)
self.assertTrue(torch.equal(X, ics))
# ensure raises correct warning
Y = torch.zeros(5, device=self.device, dtype=dtype)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
ics = initialize_q_batch(X=X, Y=Y, n=2)
self.assertEqual(len(w), 1)
self.assertTrue(
issubclass(w[-1].category, BadInitialCandidatesWarning)
)
self.assertEqual(ics.shape, torch.Size([2, *batch_shape, 3, 4]))
with self.assertRaises(RuntimeError):
initialize_q_batch(X=X, Y=Y, n=10)
def test_initialize_q_batch_largeZ(self):
for dtype in (torch.float, torch.double):
# testing large eta*Z
X = torch.rand(5, 3, 4, device=self.device, dtype=dtype)
Y = torch.tensor([-1e12, 0, 0, 0, 1e12], device=self.device, dtype=dtype)
ics = initialize_q_batch(X=X, Y=Y, n=2, eta=100)
self.assertEqual(ics.shape[0], 2)
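# Hedged sketch (illustrative): the heuristic exercised above resembles
# Boltzmann resampling of the raw candidates, which is why a huge `eta * Z`
# must not overflow the sampling weights. A minimal, assumed variant:
def _boltzmann_select_sketch(X, Y, n, eta=1.0):
    std = Y.std()
    Z = (Y - Y.mean()) / std if std > 0 else torch.zeros_like(Y)
    weights = torch.exp((eta * Z).clamp_max(50.0))  # clamp to avoid inf
    return X[torch.multinomial(weights, n)]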
class TestGenBatchInitialCandidates(BotorchTestCase):
def test_gen_batch_initial_inf_bounds(self):
bounds = torch.rand(2, 2)
bounds[0, 1] = float("inf")
with self.assertRaisesRegex(
NotImplementedError,
r"Currently only finite values in `bounds` are supported for "
r"generating initial conditions for optimization.",
):
gen_batch_initial_conditions(
acq_function=mock.Mock(),
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=2,
)
def test_gen_batch_initial_conditions(self):
bounds = torch.stack([torch.zeros(2), torch.ones(2)])
mock_acqf = MockAcquisitionFunction()
mock_acqf.objective = lambda y: y.squeeze(-1)
for dtype in (torch.float, torch.double):
bounds = bounds.to(device=self.device, dtype=dtype)
mock_acqf.X_baseline = bounds # for testing sample_around_best
mock_acqf.model = MockModel(MockPosterior(mean=bounds[:, :1]))
for nonnegative, seed, init_batch_limit, ffs, sample_around_best in product(
[True, False], [None, 1234], [None, 1], [None, {0: 0.5}], [True, False]
):
with mock.patch.object(
MockAcquisitionFunction,
"__call__",
wraps=mock_acqf.__call__,
) as mock_acqf_call, warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=BadInitialCandidatesWarning
)
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=mock_acqf,
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=10,
fixed_features=ffs,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"init_batch_limit": init_batch_limit,
"sample_around_best": sample_around_best,
},
)
expected_shape = torch.Size([2, 1, 2])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertLess(
_get_max_violation_of_bounds(batch_initial_conditions, bounds),
1e-6,
)
raw_samps = mock_acqf_call.call_args[0][0]
batch_shape = (
torch.Size([20 if sample_around_best else 10])
if init_batch_limit is None
else torch.Size([init_batch_limit])
)
expected_raw_samps_shape = batch_shape + torch.Size([1, 2])
self.assertEqual(raw_samps.shape, expected_raw_samps_shape)
if ffs is not None:
for idx, val in ffs.items():
self.assertTrue(
torch.all(batch_initial_conditions[..., idx] == val)
)
def test_gen_batch_initial_conditions_highdim(self):
d = 2200 # 2200 * 10 (q) > 21201 (sobol max dim)
bounds = torch.stack([torch.zeros(d), torch.ones(d)])
ffs_map = {i: random() for i in range(0, d, 2)}
mock_acqf = MockAcquisitionFunction()
mock_acqf.objective = lambda y: y.squeeze(-1)
for dtype in (torch.float, torch.double):
bounds = bounds.to(device=self.device, dtype=dtype)
mock_acqf.X_baseline = bounds # for testing sample_around_best
mock_acqf.model = MockModel(MockPosterior(mean=bounds[:, :1]))
for nonnegative, seed, ffs, sample_around_best in product(
[True, False], [None, 1234], [None, ffs_map], [True, False]
):
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
warnings.simplefilter(
"ignore", category=BadInitialCandidatesWarning
)
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=MockAcquisitionFunction(),
bounds=bounds,
q=10,
num_restarts=1,
raw_samples=2,
fixed_features=ffs,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"sample_around_best": sample_around_best,
},
)
self.assertTrue(
any(issubclass(w.category, SamplingWarning) for w in ws)
)
expected_shape = torch.Size([1, 10, d])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertLess(
_get_max_violation_of_bounds(batch_initial_conditions, bounds), 1e-6
)
if ffs is not None:
for idx, val in ffs.items():
self.assertTrue(
torch.all(batch_initial_conditions[..., idx] == val)
)
def test_gen_batch_initial_conditions_warning(self) -> None:
for dtype in (torch.float, torch.double):
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
samples = torch.zeros(10, 1, 2, device=self.device, dtype=dtype)
with self.assertWarnsRegex(
expected_warning=BadInitialCandidatesWarning,
expected_regex="Unable to find non-zero acquisition",
), mock.patch(
"botorch.optim.initializers.draw_sobol_samples",
return_value=samples,
):
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=MockAcquisitionFunction(),
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=10,
options={"seed": 1234},
)
self.assertTrue(
torch.equal(
batch_initial_conditions,
torch.zeros(2, 1, 2, device=self.device, dtype=dtype),
)
)
def test_gen_batch_initial_conditions_transform_intra_point_constraint(self):
for dtype in (torch.float, torch.double):
constraint = (
torch.tensor([0, 1], dtype=torch.int64, device=self.device),
torch.tensor([-1, -1]).to(dtype=dtype, device=self.device),
-1.0,
)
constraints = transform_intra_point_constraint(
constraint=constraint, d=3, q=3
)
self.assertEqual(len(constraints), 3)
self.assertAllClose(
constraints[0][0],
torch.tensor([0, 1], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
constraints[1][0],
torch.tensor([3, 4], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
constraints[2][0],
torch.tensor([6, 7], dtype=torch.int64, device=self.device),
)
for constraint in constraints:
self.assertAllClose(
torch.tensor([-1, -1], dtype=dtype, device=self.device),
constraint[1],
)
self.assertEqual(constraint[2], -1.0)
# test failure on invalid d
constraint = (
torch.tensor([[0, 3]], dtype=torch.int64, device=self.device),
torch.tensor([-1.0, -1.0], dtype=dtype, device=self.device),
0,
)
with self.assertRaisesRegex(
ValueError,
"Constraint indices cannot exceed the problem dimension d=3.",
):
transform_intra_point_constraint(constraint=constraint, d=3, q=2)
    def test_gen_batch_initial_conditions_transform_inter_point_constraint(self):
for dtype in (torch.float, torch.double):
constraint = (
torch.tensor([[0, 1], [1, 1]], dtype=torch.int64, device=self.device),
torch.tensor([1.0, -1.0], dtype=dtype, device=self.device),
0,
)
transformed = transform_inter_point_constraint(constraint=constraint, d=3)
self.assertAllClose(
transformed[0],
torch.tensor([1, 4], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
transformed[1],
torch.tensor([1.0, -1.0]).to(dtype=dtype, device=self.device),
)
            self.assertEqual(transformed[2], 0.0)
# test failure on invalid d
constraint = (
torch.tensor([[0, 1], [1, 3]], dtype=torch.int64, device=self.device),
torch.tensor([1.0, -1.0], dtype=dtype, device=self.device),
0,
)
with self.assertRaisesRegex(
ValueError,
"Constraint indices cannot exceed the problem dimension d=3.",
):
transform_inter_point_constraint(constraint=constraint, d=3)
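    # Hedged sketch (illustrative): inter-point constraints appear to index
    # into the flattened (q * d)-dimensional joint batch, mapping a 2-d index
    # (q_idx, d_idx) to q_idx * d + d_idx; e.g. [[0, 1], [1, 1]] with d=3
    # maps to [0 * 3 + 1, 1 * 3 + 1] == [1, 4], matching the expected
    # indices above.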
def test_gen_batch_initial_conditions_transform_constraints(self):
for dtype in (torch.float, torch.double):
# test with None
self.assertIsNone(transform_constraints(constraints=None, d=3, q=3))
constraints = [
(
torch.tensor([0, 1], dtype=torch.int64, device=self.device),
torch.tensor([-1.0, -1.0], dtype=dtype, device=self.device),
-1.0,
),
(
torch.tensor(
[[0, 1], [1, 1]], device=self.device, dtype=torch.int64
),
torch.tensor([1.0, -1.0], dtype=dtype, device=self.device),
0,
),
]
transformed = transform_constraints(constraints=constraints, d=3, q=3)
self.assertEqual(len(transformed), 4)
self.assertAllClose(
transformed[0][0],
torch.tensor([0, 1], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
transformed[1][0],
torch.tensor([3, 4], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
transformed[2][0],
torch.tensor([6, 7], dtype=torch.int64, device=self.device),
)
for constraint in transformed[:3]:
self.assertAllClose(
torch.tensor([-1, -1], dtype=dtype, device=self.device),
constraint[1],
)
self.assertEqual(constraint[2], -1.0)
self.assertAllClose(
transformed[-1][0],
torch.tensor([1, 4], dtype=torch.int64, device=self.device),
)
self.assertAllClose(
transformed[-1][1],
torch.tensor([1.0, -1.0], dtype=dtype, device=self.device),
)
self.assertEqual(transformed[-1][2], 0.0)
def test_gen_batch_initial_conditions_sample_q_batches_from_polytope(self):
n = 5
q = 2
d = 3
for dtype in (torch.float, torch.double):
bounds = torch.tensor(
[[0, 0, 0], [1, 1, 1]], device=self.device, dtype=dtype
)
inequality_constraints = [
(
torch.tensor([0, 1], device=self.device, dtype=torch.int64),
torch.tensor([-1, 1], device=self.device, dtype=dtype),
torch.tensor(-0.5, device=self.device, dtype=dtype),
)
]
inter_point_inequality_constraints = [
(
torch.tensor([0, 1], device=self.device, dtype=torch.int64),
torch.tensor([-1, 1], device=self.device, dtype=dtype),
torch.tensor(-0.4, device=self.device, dtype=dtype),
),
(
torch.tensor(
[[0, 1], [1, 1]], device=self.device, dtype=torch.int64
),
torch.tensor([1, 1], device=self.device, dtype=dtype),
torch.tensor(0.3, device=self.device, dtype=dtype),
),
]
equality_constraints = [
(
torch.tensor([0, 1, 2], device=self.device, dtype=torch.int64),
torch.tensor([1, 1, 1], device=self.device, dtype=dtype),
torch.tensor(1, device=self.device, dtype=dtype),
)
]
inter_point_equality_constraints = [
(
torch.tensor([0, 1, 2], device=self.device, dtype=torch.int64),
torch.tensor([1, 1, 1], device=self.device, dtype=dtype),
torch.tensor(1, device=self.device, dtype=dtype),
),
(
torch.tensor(
[[0, 0], [1, 0]], device=self.device, dtype=torch.int64
),
torch.tensor([1.0, -1.0], device=self.device, dtype=dtype),
0,
),
]
for equalities, inequalities in product(
[None, equality_constraints, inter_point_equality_constraints],
[None, inequality_constraints, inter_point_inequality_constraints],
):
samples = sample_q_batches_from_polytope(
n=n,
q=q,
bounds=bounds,
n_burnin=10000,
thinning=32,
seed=42,
inequality_constraints=inequalities,
equality_constraints=equalities,
)
self.assertEqual(samples.shape, torch.Size((n, q, d)))
tol = 4e-7
# samples are always on cpu
def _to_self_device(
x: Optional[torch.Tensor],
) -> Optional[torch.Tensor]:
return None if x is None else x.to(device=self.device)
self.assertLess(
_get_max_violation_of_bounds(_to_self_device(samples), bounds), tol
)
self.assertLess(
_get_max_violation_of_constraints(
_to_self_device(samples), constraints=equalities, equality=True
),
tol,
)
self.assertLess(
_get_max_violation_of_constraints(
_to_self_device(samples),
constraints=inequalities,
equality=False,
),
tol,
)
def test_gen_batch_initial_conditions_constraints(self):
for dtype in (torch.float, torch.double):
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
inequality_constraints = [
(
torch.tensor([1], device=self.device, dtype=torch.int64),
torch.tensor([-4], device=self.device, dtype=dtype),
torch.tensor(-3, device=self.device, dtype=dtype),
)
]
equality_constraints = [
(
torch.tensor([0], device=self.device, dtype=torch.int64),
torch.tensor([1], device=self.device, dtype=dtype),
torch.tensor(0.5, device=self.device, dtype=dtype),
)
]
for nonnegative, seed, init_batch_limit, ffs in product(
[True, False], [None, 1234], [None, 1], [None, {0: 0.5}]
):
mock_acqf = MockAcquisitionFunction()
with mock.patch.object(
MockAcquisitionFunction,
"__call__",
wraps=mock_acqf.__call__,
) as mock_acqf_call, warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=BadInitialCandidatesWarning
)
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=mock_acqf,
bounds=bounds,
q=1,
num_restarts=2,
raw_samples=10,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"init_batch_limit": init_batch_limit,
"thinning": 2,
"n_burnin": 3,
},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
expected_shape = torch.Size([2, 1, 2])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertLess(
_get_max_violation_of_bounds(batch_initial_conditions, bounds),
1e-6,
)
self.assertLess(
_get_max_violation_of_constraints(
batch_initial_conditions,
inequality_constraints,
equality=False,
),
1e-6,
)
self.assertLess(
_get_max_violation_of_constraints(
batch_initial_conditions,
equality_constraints,
equality=True,
),
1e-6,
)
raw_samps = mock_acqf_call.call_args[0][0]
batch_shape = (
torch.Size([10])
if init_batch_limit is None
else torch.Size([init_batch_limit])
)
expected_raw_samps_shape = batch_shape + torch.Size([1, 2])
self.assertEqual(raw_samps.shape, expected_raw_samps_shape)
self.assertTrue((raw_samps[..., 0] == 0.5).all())
self.assertTrue((-4 * raw_samps[..., 1] >= -3).all())
if ffs is not None:
for idx, val in ffs.items():
self.assertTrue(
torch.all(batch_initial_conditions[..., idx] == val)
)
def test_gen_batch_initial_conditions_interpoint_constraints(self):
for dtype in (torch.float, torch.double):
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
inequality_constraints = [
(
torch.tensor([0, 1], device=self.device, dtype=torch.int64),
torch.tensor([-1, -1.0], device=self.device, dtype=dtype),
torch.tensor(-1.0, device=self.device, dtype=dtype),
)
]
equality_constraints = [
(
torch.tensor(
[[0, 0], [1, 0]], device=self.device, dtype=torch.int64
),
torch.tensor([1.0, -1.0], device=self.device, dtype=dtype),
0,
),
(
torch.tensor(
[[0, 0], [2, 0]], device=self.device, dtype=torch.int64
),
torch.tensor([1.0, -1.0], device=self.device, dtype=dtype),
0,
),
]
for nonnegative, seed in product([True, False], [None, 1234]):
mock_acqf = MockAcquisitionFunction()
with mock.patch.object(
MockAcquisitionFunction,
"__call__",
wraps=mock_acqf.__call__,
):
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=mock_acqf,
bounds=bounds,
q=3,
num_restarts=2,
raw_samples=10,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"init_batch_limit": None,
"thinning": 2,
"n_burnin": 3,
},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
expected_shape = torch.Size([2, 3, 2])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertTrue((batch_initial_conditions.sum(dim=-1) <= 1).all())
self.assertAllClose(
batch_initial_conditions[0, 0, 0],
batch_initial_conditions[0, 1, 0],
batch_initial_conditions[0, 2, 0],
atol=1e-7,
)
self.assertAllClose(
batch_initial_conditions[1, 0, 0],
batch_initial_conditions[1, 1, 0],
batch_initial_conditions[1, 2, 0],
)
self.assertLess(
_get_max_violation_of_constraints(
batch_initial_conditions,
inequality_constraints,
equality=False,
),
1e-6,
)
def test_gen_batch_initial_conditions_generator(self):
mock_acqf = MockAcquisitionFunction()
mock_acqf.objective = lambda y: y.squeeze(-1)
for dtype in (torch.float, torch.double):
bounds = torch.tensor(
[[0, 0, 0], [1, 1, 1]], device=self.device, dtype=dtype
)
for nonnegative, seed, init_batch_limit, ffs in product(
[True, False], [None, 1234], [None, 1], [None, {0: 0.5}]
):
def generator(n: int, q: int, seed: int):
with manual_seed(seed):
X_rnd_nlzd = torch.rand(
n,
q,
bounds.shape[-1],
dtype=bounds.dtype,
device=self.device,
)
X_rnd = bounds[0] + (bounds[1] - bounds[0]) * X_rnd_nlzd
X_rnd[..., -1] = 0.42
return X_rnd
mock_acqf = MockAcquisitionFunction()
with mock.patch.object(
MockAcquisitionFunction,
"__call__",
wraps=mock_acqf.__call__,
), warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=BadInitialCandidatesWarning
)
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=mock_acqf,
bounds=bounds,
q=2,
num_restarts=4,
raw_samples=10,
generator=generator,
fixed_features=ffs,
options={
"nonnegative": nonnegative,
"eta": 0.01,
"alpha": 0.1,
"seed": seed,
"init_batch_limit": init_batch_limit,
},
)
expected_shape = torch.Size([4, 2, 3])
self.assertEqual(batch_initial_conditions.shape, expected_shape)
self.assertEqual(batch_initial_conditions.device, bounds.device)
self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
self.assertTrue((batch_initial_conditions[..., -1] == 0.42).all())
self.assertLess(
_get_max_violation_of_bounds(batch_initial_conditions, bounds),
1e-6,
)
if ffs is not None:
for idx, val in ffs.items():
self.assertTrue(
torch.all(batch_initial_conditions[..., idx] == val)
)
def test_error_generator_with_sample_around_best(self):
tkwargs = {"device": self.device, "dtype": torch.double}
def generator(n: int, q: int, seed: int):
return torch.rand(n, q, 3).to(**tkwargs)
        with self.assertRaisesRegex(
            UnsupportedError,
            # the regex matches the library's message verbatim (including
            # the "is be used" wording)
            "Option 'sample_around_best' is not supported when custom "
            "generator is be used.",
        ):
gen_batch_initial_conditions(
MockAcquisitionFunction(),
bounds=torch.tensor([[0, 0], [1, 1]], **tkwargs),
q=1,
num_restarts=1,
raw_samples=1,
generator=generator,
options={"sample_around_best": True},
)
def test_error_equality_constraints_with_sample_around_best(self):
tkwargs = {"device": self.device, "dtype": torch.double}
# this will give something that does not respect the constraints
# TODO: it would be good to have a utils function to check if the
# constraints are obeyed
        with self.assertRaisesRegex(
            UnsupportedError,
            # the regex matches the library's message verbatim; note the
            # missing space between "equality" and "constraints"
            "Option 'sample_around_best' is not supported when equality"
            "constraints are present.",
        ):
gen_batch_initial_conditions(
MockAcquisitionFunction(),
bounds=torch.tensor([[0, 0], [1, 1]], **tkwargs),
q=1,
num_restarts=1,
raw_samples=1,
equality_constraints=[
(
torch.tensor([0], **tkwargs),
torch.tensor([1], **tkwargs),
torch.tensor(0.5, **tkwargs),
)
],
options={"sample_around_best": True},
)
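# Hedged sketch (illustrative): the `generator` hook exercised above is
# assumed to have the signature (n, q, seed) -> Tensor of shape (n, q, d),
# with values already inside `bounds`. A minimal uniform-in-box example:
def _uniform_box_generator_sketch(bounds):
    def generator(n: int, q: int, seed: int):
        with manual_seed(seed):
            u = torch.rand(n, q, bounds.shape[-1], dtype=bounds.dtype)
        return bounds[0] + (bounds[1] - bounds[0]) * u

    return generator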
class TestGenOneShotKGInitialConditions(BotorchTestCase):
def test_gen_one_shot_kg_initial_conditions(self):
num_fantasies = 8
num_restarts = 4
raw_samples = 16
for dtype in (torch.float, torch.double):
mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(mean=mean))
mock_kg = qKnowledgeGradient(model=mm, num_fantasies=num_fantasies)
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
# test option error
with self.assertRaises(ValueError):
gen_one_shot_kg_initial_conditions(
acq_function=mock_kg,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
options={"frac_random": 2.0},
)
# test generation logic
q = 2
mock_random_ics = torch.rand(num_restarts, q + num_fantasies, 2)
mock_fantasy_cands = torch.ones(20, 1, 2)
mock_fantasy_vals = torch.randn(20)
with ExitStack() as es:
mock_gbics = es.enter_context(
mock.patch(
"botorch.optim.initializers.gen_batch_initial_conditions",
return_value=mock_random_ics,
)
)
mock_optacqf = es.enter_context(
mock.patch(
"botorch.optim.optimize.optimize_acqf",
return_value=(mock_fantasy_cands, mock_fantasy_vals),
)
)
ics = gen_one_shot_kg_initial_conditions(
acq_function=mock_kg,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
)
mock_gbics.assert_called_once()
mock_optacqf.assert_called_once()
n_value = int((1 - 0.1) * num_fantasies)
self.assertTrue(
torch.equal(
ics[..., :-n_value, :], mock_random_ics[..., :-n_value, :]
)
)
self.assertTrue(torch.all(ics[..., -n_value:, :] == 1))
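# Hedged note: the split checked above assumes the default frac_random=0.1,
# so the trailing n_value = int((1 - 0.1) * num_fantasies) fantasy coordinates
# of each initial condition come from the (mocked) maximizers of the
# fantasized value function, while the leading block stays random.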
class TestGenValueFunctionInitialConditions(BotorchTestCase):
def test_gen_value_function_initial_conditions(self):
num_fantasies = 2
num_solutions = 3
num_restarts = 4
raw_samples = 5
n_train = 6
dim = 2
dtype = torch.float
# run a thorough test with dtype float
train_X = torch.rand(n_train, dim, device=self.device, dtype=dtype)
train_Y = torch.rand(n_train, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X, train_Y)
fant_X = torch.rand(num_solutions, 1, dim, device=self.device, dtype=dtype)
fantasy_model = model.fantasize(
fant_X, IIDNormalSampler(sample_shape=torch.Size([num_fantasies]))
)
bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
value_function = PosteriorMean(fantasy_model)
# test option error
with self.assertRaises(ValueError):
gen_value_function_initial_conditions(
acq_function=value_function,
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
current_model=model,
options={"frac_random": 2.0},
)
# test output shape
ics = gen_value_function_initial_conditions(
acq_function=value_function,
bounds=bounds,
num_restarts=num_restarts,
raw_samples=raw_samples,
current_model=model,
)
self.assertEqual(
ics.shape, torch.Size([num_restarts, num_fantasies, num_solutions, 1, dim])
)
# test bounds
self.assertTrue(torch.all(ics >= bounds[0]))
self.assertTrue(torch.all(ics <= bounds[1]))
# test dtype
self.assertEqual(dtype, ics.dtype)
# minimal test cases for when all raw samples are random, with dtype double
dtype = torch.double
n_train = 2
dim = 1
num_solutions = 1
train_X = torch.rand(n_train, dim, device=self.device, dtype=dtype)
train_Y = torch.rand(n_train, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X, train_Y)
fant_X = torch.rand(1, 1, dim, device=self.device, dtype=dtype)
fantasy_model = model.fantasize(
fant_X, IIDNormalSampler(sample_shape=torch.Size([num_fantasies]))
)
bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype)
value_function = PosteriorMean(fantasy_model)
ics = gen_value_function_initial_conditions(
acq_function=value_function,
bounds=bounds,
num_restarts=1,
raw_samples=1,
current_model=model,
options={"frac_random": 0.99},
)
self.assertEqual(
ics.shape, torch.Size([1, num_fantasies, num_solutions, 1, dim])
)
# test bounds
self.assertTrue(torch.all(ics >= bounds[0]))
self.assertTrue(torch.all(ics <= bounds[1]))
# test dtype
self.assertEqual(dtype, ics.dtype)
class TestSampleAroundBest(BotorchTestCase):
def test_sample_truncated_normal_perturbations(self):
tkwargs = {"device": self.device}
n_discrete_points = 5
_bounds = torch.ones(2, 4)
_bounds[1] = 2
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
bounds = _bounds.to(**tkwargs)
for n_best in (1, 2):
X = 1 + torch.rand(n_best, 4, **tkwargs)
# basic test
perturbed_X = sample_truncated_normal_perturbations(
X=X,
n_discrete_points=n_discrete_points,
sigma=4,
bounds=bounds,
qmc=False,
)
self.assertEqual(perturbed_X.shape, torch.Size([n_discrete_points, 4]))
self.assertTrue((perturbed_X >= 1).all())
self.assertTrue((perturbed_X <= 2).all())
# test qmc
with mock.patch(
"botorch.optim.initializers.draw_sobol_samples",
wraps=draw_sobol_samples,
) as mock_sobol:
perturbed_X = sample_truncated_normal_perturbations(
X=X,
n_discrete_points=n_discrete_points,
sigma=4,
bounds=bounds,
qmc=True,
)
mock_sobol.assert_called_once()
self.assertEqual(perturbed_X.shape, torch.Size([n_discrete_points, 4]))
self.assertTrue((perturbed_X >= 1).all())
self.assertTrue((perturbed_X <= 2).all())
def test_sample_perturbed_subset_dims(self):
tkwargs = {"device": self.device}
n_discrete_points = 5
# test that errors are raised
with self.assertRaises(BotorchTensorDimensionError):
sample_perturbed_subset_dims(
X=torch.zeros(1, 1),
n_discrete_points=1,
sigma=1e-3,
bounds=torch.zeros(1, 2, 1),
)
with self.assertRaises(BotorchTensorDimensionError):
sample_perturbed_subset_dims(
X=torch.zeros(1, 1, 1),
n_discrete_points=1,
sigma=1e-3,
bounds=torch.zeros(2, 1),
)
for dtype in (torch.float, torch.double):
for n_best in (1, 2):
tkwargs["dtype"] = dtype
bounds = torch.zeros(2, 21, **tkwargs)
bounds[1] = 1
X = torch.rand(n_best, 21, **tkwargs)
# basic test
with mock.patch(
"botorch.optim.initializers.draw_sobol_samples",
) as mock_sobol:
perturbed_X = sample_perturbed_subset_dims(
X=X,
n_discrete_points=n_discrete_points,
qmc=False,
sigma=1e-3,
bounds=bounds,
)
mock_sobol.assert_not_called()
self.assertEqual(perturbed_X.shape, torch.Size([n_discrete_points, 21]))
self.assertTrue((perturbed_X >= 0).all())
self.assertTrue((perturbed_X <= 1).all())
# test qmc
with mock.patch(
"botorch.optim.initializers.draw_sobol_samples",
wraps=draw_sobol_samples,
) as mock_sobol:
perturbed_X = sample_perturbed_subset_dims(
X=X,
n_discrete_points=n_discrete_points,
sigma=1e-3,
bounds=bounds,
)
mock_sobol.assert_called_once()
self.assertEqual(perturbed_X.shape, torch.Size([n_discrete_points, 21]))
self.assertTrue((perturbed_X >= 0).all())
self.assertTrue((perturbed_X <= 1).all())
# for each point in perturbed_X compute the number of
# dimensions it has in common with each point in X
# and take the maximum number
max_equal_dims = (
(perturbed_X.unsqueeze(0) == X.unsqueeze(1))
.sum(dim=-1)
.max(dim=0)
.values
)
# check that at least one dimension is perturbed
self.assertTrue((20 - max_equal_dims >= 1).all())
def test_sample_points_around_best(self):
tkwargs = {"device": self.device}
_bounds = torch.ones(2, 2)
_bounds[1] = 2
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
bounds = _bounds.to(**tkwargs)
X_train = 1 + torch.rand(20, 2, **tkwargs)
model = MockModel(
MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
)
# test NEI with X_baseline
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train, prune_baseline=False, cache_root=False
)
with mock.patch(
"botorch.optim.initializers.sample_perturbed_subset_dims"
) as mock_subset_dims:
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
mock_subset_dims.assert_not_called()
            self.assertEqual(X_rnd.shape, torch.Size([4, 2]))
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
# test model that returns a batched mean
model = MockModel(
MockPosterior(
mean=(2 * X_train + 1).sum(dim=-1, keepdim=True).unsqueeze(0)
)
)
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train, prune_baseline=False, cache_root=False
)
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
            self.assertEqual(X_rnd.shape, torch.Size([4, 2]))
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
# test EI without X_baseline
acqf = qExpectedImprovement(model, best_f=0.0)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
# set train inputs
model.train_inputs = (X_train,)
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
            self.assertEqual(X_rnd.shape, torch.Size([4, 2]))
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
# test an acquisition function that has no posterior_transform
# and maximize=False
pm = PosteriorMean(model, maximize=False)
self.assertIsNone(pm.posterior_transform)
self.assertFalse(pm.maximize)
X_rnd = sample_points_around_best(
acq_function=pm,
n_discrete_points=4,
sigma=0,
bounds=bounds,
best_pct=1e-8, # ensures that we only use best value
)
idx = (-model.posterior(X_train).mean).argmax()
self.assertTrue((X_rnd == X_train[idx : idx + 1]).all(dim=-1).all())
# test acquisition function that has no model
ff = FixedFeatureAcquisitionFunction(pm, d=2, columns=[0], values=[0])
# set X_baseline for testing purposes
ff.X_baseline = X_train
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = sample_points_around_best(
acq_function=ff,
n_discrete_points=4,
sigma=1e-3,
bounds=bounds,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
# test constraints with NEHVI
constraints = [lambda Y: Y[..., 0]]
ref_point = torch.zeros(2, **tkwargs)
# test cases when there are and are not any feasible points
for any_feas in (True, False):
Y_train = torch.stack(
[
torch.linspace(-0.5, 0.5, X_train.shape[0], **tkwargs)
if any_feas
else torch.ones(X_train.shape[0], **tkwargs),
X_train.sum(dim=-1),
],
dim=-1,
)
moo_model = MockModel(MockPosterior(mean=Y_train, samples=Y_train))
acqf = qNoisyExpectedHypervolumeImprovement(
moo_model,
ref_point=ref_point,
X_baseline=X_train,
constraints=constraints,
cache_root=False,
sampler=IIDNormalSampler(sample_shape=torch.Size([2])),
)
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=4,
sigma=0.0,
bounds=bounds,
)
                self.assertEqual(X_rnd.shape, torch.Size([4, 2]))
# this should be true since sigma=0
# and we should only be returning feasible points
violation = constraints[0](Y_train)
neg_violation = -violation.clamp_min(0.0)
feas = neg_violation == 0
                # create an n_train x n_rnd tensor of booleans
                eq_mask = (X_train.unsqueeze(1) == X_rnd.unsqueeze(0)).all(dim=-1)
                if feas.any():
                    # check that all X_rnd correspond to feasible points
                    self.assertEqual(eq_mask[feas].sum(), 4)
else:
idcs = torch.topk(neg_violation, k=2).indices
self.assertEqual(eq_mask[idcs].sum(), 4)
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
# test that subset_dims is called if d>=20
X_train = 1 + torch.rand(10, 20, **tkwargs)
model = MockModel(
MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
)
bounds = torch.ones(2, 20, **tkwargs)
bounds[1] = 2
# test NEI with X_baseline
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train, prune_baseline=False, cache_root=False
)
with mock.patch(
"botorch.optim.initializers.sample_perturbed_subset_dims",
wraps=sample_perturbed_subset_dims,
) as mock_subset_dims:
X_rnd = sample_points_around_best(
acq_function=acqf, n_discrete_points=5, sigma=1e-3, bounds=bounds
)
self.assertEqual(X_rnd.shape, torch.Size([5, 20]))
self.assertTrue((X_rnd >= 1).all())
self.assertTrue((X_rnd <= 2).all())
mock_subset_dims.assert_called_once()
# test tiny prob_perturb to make sure we perturb at least one dimension
X_rnd = sample_points_around_best(
acq_function=acqf,
n_discrete_points=5,
sigma=1e-3,
bounds=bounds,
prob_perturb=1e-8,
)
self.assertTrue(
((X_rnd.unsqueeze(0) == X_train.unsqueeze(1)).all(dim=-1)).sum() == 0
)
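# Hedged sketch (illustrative): `sample_points_around_best` is exercised above
# as "perturb the incumbent(s), then clamp to the box". A minimal variant of
# that idea, assuming plain Gaussian perturbations:
def _perturb_best_sketch(X_best, n, sigma, bounds):
    idx = torch.randint(X_best.shape[0], (n,))
    X = X_best[idx] + sigma * torch.randn(n, X_best.shape[-1])
    return torch.minimum(torch.maximum(X, bounds[0]), bounds[1])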
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from functools import partial
from typing import Dict
from unittest.mock import MagicMock, patch
import torch
from botorch.optim import core
from botorch.optim.closures import ForwardBackwardClosure, NdarrayOptimizationClosure
from botorch.optim.core import (
OptimizationResult,
OptimizationStatus,
scipy_minimize,
torch_minimize,
)
from botorch.utils.testing import BotorchTestCase
from numpy import allclose
from scipy.optimize import OptimizeResult
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim.sgd import SGD
try:
from torch.optim.lr_scheduler import LRScheduler
except ImportError: # pragma: no cover
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # pragma: no cover
class ToyModule(Module):
def __init__(self, b: Parameter, x: Parameter, dummy: Parameter):
r"""Toy module for unit testing."""
super().__init__()
self.x = x
self.b = b
self.dummy = dummy
def forward(self) -> Tensor:
return (self.x - self.b).square().sum()
@property
def free_parameters(self) -> Dict[str, Tensor]:
return {n: p for n, p in self.named_parameters() if p.requires_grad}
def norm_squared(x, delay: float = 0.0):
if x.grad is not None:
x.grad.zero_()
loss = x.square().sum()
loss.backward()
if delay:
time.sleep(delay)
return loss, [x.grad]
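# Hedged note: the minimizers under test consume zero-argument closures that
# return (loss, gradients); `norm_squared` is adapted via `partial` below and
# zeroes stale grads, runs backward, and returns exactly that pair, e.g.:
#
#     x = Parameter(torch.tensor(1.0))
#     closure = partial(norm_squared, x)
#     loss, (grad,) = closure()  # loss == 1.0, grad == 2 * x == 2.0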
class TestScipyMinimize(BotorchTestCase):
def setUp(self):
super().setUp()
module = ToyModule(
x=Parameter(torch.tensor(0.5, device=self.device)),
b=Parameter(torch.tensor(0.0, device=self.device), requires_grad=False),
dummy=Parameter(torch.tensor(1.0, device=self.device)),
).to(self.device)
self.closures = {}
for dtype in ("float32", "float64"):
m = module.to(dtype=getattr(torch, dtype))
self.closures[dtype] = ForwardBackwardClosure(m, m.free_parameters)
def test_basic(self):
x = Parameter(torch.rand([]))
closure = partial(norm_squared, x)
result = scipy_minimize(closure, {"x": x})
self.assertEqual(result.status, OptimizationStatus.SUCCESS)
self.assertTrue(allclose(result.fval, 0.0))
def test_timeout(self):
x = Parameter(torch.tensor(1.0))
        # adding a small delay here to combat some timing issues on Windows
closure = partial(norm_squared, x, delay=1e-2)
result = scipy_minimize(closure, {"x": x}, timeout_sec=1e-4)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
self.assertTrue("Optimization timed out after" in result.message)
def test_main(self):
def _callback(parameters, result, out) -> None:
out.append(result)
for closure in self.closures.values():
for with_wrapper in (True, False):
with torch.no_grad():
cache = {} # cache random starting values
for name, param in closure.parameters.items():
init = cache[name] = torch.rand_like(param)
param.data.copy_(init)
closure_arg = (
NdarrayOptimizationClosure(closure, closure.parameters)
if with_wrapper
else closure
)
result = scipy_minimize(
closure=closure_arg,
parameters=closure.parameters,
bounds={"x": (0, 1)},
)
self.assertIsInstance(result, OptimizationResult)
self.assertEqual(result.status, OptimizationStatus.SUCCESS)
self.assertTrue(allclose(result.fval, 0.0))
self.assertTrue(closure.parameters["dummy"].equal(cache["dummy"]))
self.assertFalse(closure.parameters["x"].equal(cache["x"]))
# Test `bounds` and `callback`
with torch.no_grad(): # closure.forward is a ToyModule instance
closure.forward.b.fill_(0.0)
closure.forward.x.fill_(0.5)
step_results = []
result = scipy_minimize(
closure=closure,
parameters=closure.parameters,
bounds={"x": (0.1, 1.0)},
callback=partial(_callback, out=step_results),
)
self.assertTrue(allclose(0.01, result.fval))
self.assertTrue(allclose(0.1, closure.forward.x.detach().cpu().item()))
self.assertEqual(result.step, len(step_results))
self.assertEqual(result.step, step_results[-1].step)
self.assertEqual(result.fval, step_results[-1].fval)
def test_post_processing(self):
closure = next(iter(self.closures.values()))
wrapper = NdarrayOptimizationClosure(closure, closure.parameters)
with patch.object(core, "minimize_with_timeout") as mock_minimize_with_timeout:
for status, msg in (
(OptimizationStatus.FAILURE, b"ABNORMAL_TERMINATION_IN_LNSRCH"),
(OptimizationStatus.STOPPED, "TOTAL NO. of ITERATIONS REACHED LIMIT"),
):
mock_minimize_with_timeout.return_value = OptimizeResult(
x=wrapper.state,
fun=1.0,
nit=3,
success=False,
message=msg,
)
result = core.scipy_minimize(wrapper, closure.parameters)
self.assertEqual(result.status, status)
self.assertEqual(
result.fval, mock_minimize_with_timeout.return_value.fun
)
self.assertEqual(
result.message, msg if isinstance(msg, str) else msg.decode("ascii")
)
class TestTorchMinimize(BotorchTestCase):
def setUp(self):
super().setUp()
module = ToyModule(
x=Parameter(torch.tensor(0.5, device=self.device)),
b=Parameter(torch.tensor(0.0, device=self.device), requires_grad=False),
dummy=Parameter(torch.tensor(1.0, device=self.device)),
).to(self.device)
self.closures = {}
for dtype in ("float32", "float64"):
m = module.to(dtype=getattr(torch, dtype))
self.closures[dtype] = ForwardBackwardClosure(m, m.free_parameters)
def test_basic(self):
x = Parameter(torch.tensor([0.02]))
closure = partial(norm_squared, x)
result = torch_minimize(closure, {"x": x}, step_limit=100)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
self.assertTrue(allclose(result.fval, 0.0))
def test_timeout(self):
x = Parameter(torch.tensor(1.0))
# Add a small delay here to combat timing issues on Windows.
closure = partial(norm_squared, x, delay=1e-3)
result = torch_minimize(closure, {"x": x}, timeout_sec=1e-4)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
self.assertTrue("stopped due to timeout after" in result.message)
def test_main(self):
def _callback(parameters, result, out) -> None:
out.append(result)
for closure in self.closures.values():
# Test that we error out if no termination conditions are given
with self.assertRaisesRegex(RuntimeError, "No termination conditions"):
torch_minimize(closure=closure, parameters=closure.parameters)
# Test single step behavior
for optimizer in (
SGD(params=list(closure.parameters.values()), lr=0.1), # instance
partial(SGD, lr=0.1), # factory
):
cache = {n: p.detach().clone() for n, p in closure.parameters.items()}
grads = [g if g is None else g.detach().clone() for g in closure()[1]]
result = torch_minimize(
closure=closure,
parameters=closure.parameters,
optimizer=optimizer,
step_limit=1,
)
self.assertIsInstance(result, OptimizationResult)
self.assertEqual(result.fval, closure()[0])
self.assertEqual(result.step, 1)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
self.assertTrue(closure.parameters["dummy"].equal(cache["dummy"]))
self.assertFalse(closure.parameters["x"].equal(cache["x"]))
for (name, param), g in zip(closure.parameters.items(), grads):
self.assertTrue(
param.allclose(cache[name] - (0 if g is None else 0.1 * g))
)
# Test local convergence
with torch.no_grad(): # closure.forward is a ToyModule instance
closure.forward.b.fill_(0.0)
closure.forward.x.fill_(0.02)
result = torch_minimize(closure, closure.parameters, step_limit=100)
self.assertTrue(allclose(0.0, result.fval))
self.assertEqual(result.step, 100)
# Test `bounds` and `callback`
with torch.no_grad(): # closure.forward is a ToyModule instance
closure.forward.b.fill_(0.0)
closure.forward.x.fill_(0.11)
step_results = []
result = torch_minimize(
closure=closure,
parameters=closure.parameters,
bounds={"x": (0.1, 1.0)},
callback=partial(_callback, out=step_results),
step_limit=100,
)
self.assertTrue(allclose(0.01, result.fval))
self.assertEqual(result.step, len(step_results))
# Test `stopping_criterion`
stopping_decisions = iter((False, False, True, False))
result = torch_minimize(
closure=closure,
parameters=closure.parameters,
stopping_criterion=lambda fval: next(stopping_decisions),
)
self.assertEqual(result.step, 3)
self.assertEqual(result.status, OptimizationStatus.STOPPED)
# Test passing `scheduler`
mock_scheduler = MagicMock(spec=LRScheduler)
mock_scheduler.step = MagicMock(side_effect=RuntimeError("foo"))
with self.assertRaisesRegex(RuntimeError, "foo"):
torch_minimize(
closure=closure,
parameters=closure.parameters,
scheduler=mock_scheduler,
step_limit=1,
)
mock_scheduler.step.assert_called_once()
# Test passing `scheduler` as a factory
optimizer = SGD(list(closure.parameters.values()), lr=1e-3)
mock_factory = MagicMock(side_effect=RuntimeError("foo"))
with self.assertRaisesRegex(RuntimeError, "foo"):
torch_minimize(
closure=closure,
parameters=closure.parameters,
optimizer=optimizer,
scheduler=mock_factory,
step_limit=1,
)
mock_factory.assert_called_once_with(optimizer)
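# Illustrative sketch (hedged): `torch_minimize` accepts either an optimizer
# instance or a factory, plus a termination condition such as `step_limit`
# or a `stopping_criterion` callable, as exercised above. The `_demo_` name
# is hypothetical.
def _demo_torch_minimize():
    from functools import partial
    import torch
    from torch.nn import Parameter
    from torch.optim import SGD
    from botorch.optim.core import torch_minimize
    x = Parameter(torch.tensor([1.0]))
    def closure():
        # Zero stale gradients before each forward-backward pass.
        if x.grad is not None:
            x.grad.zero_()
        loss = (x**2).sum()
        loss.backward()
        return loss, [x.grad]
    return torch_minimize(
        closure, {"x": x}, optimizer=partial(SGD, lr=0.1), step_limit=100
    )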
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import re
from unittest.mock import MagicMock, patch
from warnings import catch_warnings
import torch
from botorch.exceptions.warnings import OptimizationWarning
from botorch.models import SingleTaskGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.optim import core, fit
from botorch.optim.core import OptimizationResult
from botorch.settings import debug
from botorch.utils.context_managers import module_rollback_ctx, TensorCheckpoint
from botorch.utils.testing import BotorchTestCase
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from scipy.optimize import OptimizeResult
class TestFitGPyTorchMLLScipy(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.mlls = {}
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * math.pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mlls[SingleTaskGP, 1] = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_mll_scipy(self):
for mll in self.mlls.values():
for dtype in (torch.float32, torch.float64):
self._test_fit_gpytorch_mll_scipy(mll.to(dtype=dtype))
def _test_fit_gpytorch_mll_scipy(self, mll):
options = {"disp": False, "maxiter": 2}
ckpt = {
k: TensorCheckpoint(v.detach().clone(), v.device, v.dtype)
for k, v in mll.state_dict().items()
}
with self.subTest("main"), module_rollback_ctx(mll, checkpoint=ckpt):
with catch_warnings(record=True) as ws, debug(True):
result = fit.fit_gpytorch_mll_scipy(mll, options=options)
# Test only parameters requiring gradients have changed
self.assertTrue(
all(
param.equal(ckpt[name].values) != param.requires_grad
for name, param in mll.named_parameters()
)
)
# Test maxiter warning message
self.assertTrue(any("TOTAL NO. of" in str(w.message) for w in ws))
self.assertTrue(
any(issubclass(w.category, OptimizationWarning) for w in ws)
)
# Test iteration tracking
self.assertIsInstance(result, OptimizationResult)
self.assertLessEqual(result.step, options["maxiter"])
self.assertEqual(sum(1 for w in ws if "TOTAL NO. of" in str(w.message)), 1)
# Test that user provided bounds are respected
with self.subTest("bounds"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_mll_scipy(
mll,
bounds={"likelihood.noise_covar.raw_noise": (123, 456)},
options=options,
)
self.assertTrue(
mll.likelihood.noise_covar.raw_noise >= 123
and mll.likelihood.noise_covar.raw_noise <= 456
)
for name, param in mll.named_parameters():
self.assertNotEqual(param.requires_grad, param.equal(ckpt[name].values))
# Test handling of scipy optimization failures and parameter assignments
mock_x = []
assignments = {}
for name, param in mll.named_parameters():
if not param.requires_grad:
continue # pragma: no cover
values = assignments[name] = torch.rand_like(param)
mock_x.append(values.view(-1))
with module_rollback_ctx(mll, checkpoint=ckpt), patch.object(
core, "minimize_with_timeout"
) as mock_minimize_with_timeout:
mock_minimize_with_timeout.return_value = OptimizeResult(
x=torch.concat(mock_x).tolist(),
success=False,
status=0,
fun=float("nan"),
jac=None,
nfev=1,
njev=1,
nhev=1,
nit=1,
message="ABNORMAL_TERMINATION_IN_LNSRCH".encode(),
)
with catch_warnings(record=True) as ws, debug(True):
fit.fit_gpytorch_mll_scipy(mll, options=options)
# Test that warning gets raised
self.assertTrue(
any("ABNORMAL_TERMINATION_IN_LNSRCH" in str(w.message) for w in ws)
)
# Test that parameter values get assigned correctly
self.assertTrue(
all(
param.equal(assignments[name])
for name, param in mll.named_parameters()
if param.requires_grad
)
)
# Test `closure_kwargs`
with self.subTest("closure_kwargs"):
mock_closure = MagicMock(side_effect=StopIteration("foo"))
with self.assertRaisesRegex(StopIteration, "foo"):
fit.fit_gpytorch_mll_scipy(
mll, closure=mock_closure, closure_kwargs={"ab": "cd"}
)
mock_closure.assert_called_once_with(ab="cd")
def test_fit_with_nans(self) -> None:
"""Test the branch of NdarrayOptimizationClosure that handles errors."""
from botorch.optim.closures import NdarrayOptimizationClosure
def closure():
raise RuntimeError("singular")
for dtype in [torch.float32, torch.float64]:
parameters = {"x": torch.tensor([0.0], dtype=dtype)}
wrapper = NdarrayOptimizationClosure(closure=closure, parameters=parameters)
def _assert_np_array_is_float64_type(array) -> None:
# NdarrayOptimizationClosure works in float64 by default, regardless of
# the parameter dtype, so the array's dtype name should be "float64".
self.assertEqual(str(array.dtype), "float64")
_assert_np_array_is_float64_type(wrapper()[0])
_assert_np_array_is_float64_type(wrapper()[1])
_assert_np_array_is_float64_type(wrapper.state)
_assert_np_array_is_float64_type(wrapper._get_gradient_ndarray())
# Any mll will do
mll = next(iter(self.mlls.values()))
# will error if dtypes are wrong
fit.fit_gpytorch_mll_scipy(mll, closure=wrapper, parameters=parameters)
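# Illustrative sketch (hedged): a minimal end-to-end fit with
# `fit_gpytorch_mll_scipy`, assuming only the public imports used above.
# The `_demo_` name is hypothetical; `options` is forwarded to scipy.
def _demo_fit_gpytorch_mll_scipy():
    import math
    import torch
    from botorch.models import SingleTaskGP
    from botorch.optim.fit import fit_gpytorch_mll_scipy
    from gpytorch.mlls import ExactMarginalLogLikelihood
    train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
    train_Y = torch.sin((2 * math.pi) * train_X)
    model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    return fit_gpytorch_mll_scipy(mll, options={"maxiter": 50})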
class TestFitGPyTorchMLLTorch(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.mlls = {}
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * math.pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mlls[SingleTaskGP, 1] = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_mll_torch(self):
for mll in self.mlls.values():
for dtype in (torch.float32, torch.float64):
self._test_fit_gpytorch_mll_torch(mll.to(dtype=dtype))
def _test_fit_gpytorch_mll_torch(self, mll):
ckpt = {
k: TensorCheckpoint(v.detach().clone(), v.device, v.dtype)
for k, v in mll.state_dict().items()
}
with self.subTest("main"), module_rollback_ctx(mll, checkpoint=ckpt):
with catch_warnings(record=True) as _, debug(True):
result = fit.fit_gpytorch_mll_torch(mll, step_limit=2)
self.assertIsInstance(result, OptimizationResult)
self.assertLessEqual(result.step, 2)
# Test only parameters requiring gradients have changed
self.assertTrue(
all(
param.requires_grad != param.equal(ckpt[name].values)
for name, param in mll.named_parameters()
)
)
# Test that user provided bounds are respected
with self.subTest("bounds"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_mll_torch(
mll,
bounds={"likelihood.noise_covar.raw_noise": (123, 456)},
)
self.assertTrue(
mll.likelihood.noise_covar.raw_noise >= 123
and mll.likelihood.noise_covar.raw_noise <= 456
)
# Test `closure_kwargs`
with self.subTest("closure_kwargs"):
mock_closure = MagicMock(side_effect=StopIteration("foo"))
with self.assertRaisesRegex(StopIteration, "foo"):
fit.fit_gpytorch_mll_torch(
mll, closure=mock_closure, closure_kwargs={"ab": "cd"}
)
mock_closure.assert_called_once_with(ab="cd")
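# Illustrative sketch (hedged): the torch-based analogue, where `step_limit`
# bounds the number of optimizer steps. The `_demo_` name is hypothetical.
def _demo_fit_gpytorch_mll_torch():
    import math
    import torch
    from botorch.models import SingleTaskGP
    from botorch.optim.fit import fit_gpytorch_mll_torch
    from gpytorch.mlls import ExactMarginalLogLikelihood
    train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
    train_Y = torch.sin((2 * math.pi) * train_X)
    model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    return fit_gpytorch_mll_torch(mll, step_limit=100)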
class TestFitGPyTorchScipy(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
self.mlls = {}
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * math.pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mlls[SingleTaskGP, 1] = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_scipy(self):
for mll in self.mlls.values():
for dtype in (torch.float32, torch.float64):
self._test_fit_gpytorch_scipy(mll.to(dtype=dtype))
def _test_fit_gpytorch_scipy(self, mll):
options = {"disp": False, "maxiter": 3, "maxfun": 2}
ckpt = {
k: TensorCheckpoint(v.detach().clone(), v.device, v.dtype)
for k, v in mll.state_dict().items()
}
with self.subTest("main"), module_rollback_ctx(mll, checkpoint=ckpt):
with catch_warnings(record=True) as ws, debug(True):
_, info_dict = fit.fit_gpytorch_scipy(
mll, track_iterations=True, options=options
)
# Test only parameters requiring gradients have changed
self.assertTrue(
all(
param.equal(ckpt[name][0]) != param.requires_grad
for name, param in mll.named_parameters()
)
)
# Test maxiter warning message
self.assertTrue(any("TOTAL NO. of" in str(w.message) for w in ws))
self.assertTrue(
any(issubclass(w.category, OptimizationWarning) for w in ws)
)
# Test iteration tracking
self.assertLessEqual(len(info_dict["iterations"]), options["maxiter"])
self.assertIsInstance(info_dict["iterations"][0], OptimizationResult)
self.assertTrue("fopt" in info_dict)
self.assertTrue("wall_time" in info_dict)
self.assertEqual(sum(1 for w in ws if "TOTAL NO. of" in str(w.message)), 1)
# Test that user provided bounds and `exclude` argument are respected
exclude = "model.mean_module.constant", re.compile("raw_lengthscale$")
with self.subTest("bounds"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_scipy(
mll,
bounds={"likelihood.noise_covar.raw_noise": (123, 456)},
options={**options, "exclude": exclude},
)
self.assertTrue(
mll.likelihood.noise_covar.raw_noise >= 123
and mll.likelihood.noise_covar.raw_noise <= 456
)
for name, param in mll.named_parameters():
if (
name
in (
"model.mean_module.constant",
"model.covar_module.base_kernel.raw_lengthscale",
)
or not param.requires_grad
):
self.assertTrue(param.equal(ckpt[name][0]))
else:
self.assertFalse(param.equal(ckpt[name][0]))
# Test use of `approx_mll` flag
with self.subTest("approx_mll"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_scipy(mll, approx_mll=True, options=options)
self.assertTrue(
all(
param.equal(ckpt[name][0]) != param.requires_grad
for name, param in mll.named_parameters()
)
)
# Test handling of scipy optimization failures and parameter assignments
mock_x = []
assignments = {}
for name, param in mll.named_parameters():
if not param.requires_grad:
continue # pragma: no cover
values = assignments[name] = torch.rand_like(param)
mock_x.append(values.view(-1))
with module_rollback_ctx(mll, checkpoint=ckpt), patch.object(
fit, "minimize"
) as mock_minimize:
mock_minimize.return_value = OptimizeResult(
x=torch.concat(mock_x).tolist(),
success=False,
status=0,
fun=float("nan"),
jac=None,
nfev=1,
njev=1,
nhev=1,
nit=1,
message="ABNORMAL_TERMINATION_IN_LNSRCH".encode(),
)
with catch_warnings(record=True) as ws, debug(True):
fit.fit_gpytorch_scipy(mll, options=options)
# Test that warning gets raised
self.assertTrue(
any("ABNORMAL_TERMINATION_IN_LNSRCH" in str(w.message) for w in ws)
)
# Test that parameter values get assigned correctly
self.assertTrue(
all(
param.equal(assignments[name])
for name, param in mll.named_parameters()
if param.requires_grad
)
)
class TestFitGPyTorchTorch(BotorchTestCase):
def setUp(self):
super().setUp()
self.mlls = {}
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * math.pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
self.mlls[SingleTaskGP, 1] = ExactMarginalLogLikelihood(model.likelihood, model)
def test_fit_gpytorch_torch(self):
for mll in self.mlls.values():
for dtype in (torch.float32, torch.float64):
self._test_fit_gpytorch_torch(mll.to(dtype=dtype))
def _test_fit_gpytorch_torch(self, mll):
options = {"maxiter": 3}
ckpt = {
k: TensorCheckpoint(v.detach().clone(), v.device, v.dtype)
for k, v in mll.state_dict().items()
}
with self.subTest("main"), module_rollback_ctx(mll, checkpoint=ckpt):
with catch_warnings(record=True), debug(True):
_, info_dict = fit.fit_gpytorch_torch(
mll, track_iterations=True, options=options
)
# Test only parameters requiring gradients have changed
self.assertTrue(
all(
param.equal(ckpt[name][0]) != param.requires_grad
for name, param in mll.named_parameters()
)
)
# Test iteration tracking
self.assertEqual(len(info_dict["iterations"]), options["maxiter"])
self.assertIsInstance(info_dict["iterations"][0], OptimizationResult)
self.assertTrue("fopt" in info_dict)
self.assertTrue("wall_time" in info_dict)
# Test that user provided bounds and `exclude` argument are respected
exclude = "model.mean_module.constant", re.compile("raw_lengthscale$")
with self.subTest("bounds"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_torch(
mll,
bounds={"likelihood.noise_covar.raw_noise": (123, 456)},
options={**options, "exclude": exclude},
)
self.assertTrue(
mll.likelihood.noise_covar.raw_noise >= 123
and mll.likelihood.noise_covar.raw_noise <= 456
)
for name, param in mll.named_parameters():
if (
name
in (
"model.mean_module.constant",
"model.covar_module.base_kernel.raw_lengthscale",
)
or not param.requires_grad
):
self.assertTrue(param.equal(ckpt[name][0]))
else:
self.assertFalse(param.equal(ckpt[name][0]))
# Test use of `approx_mll` flag
with self.subTest("approx_mll"), module_rollback_ctx(mll, checkpoint=ckpt):
fit.fit_gpytorch_torch(mll, approx_mll=True, options=options)
self.assertTrue(
all(
param.equal(ckpt[name][0]) != param.requires_grad
for name, param in mll.named_parameters()
)
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from math import pi
from unittest.mock import MagicMock, patch
from warnings import catch_warnings, simplefilter
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.optim import numpy_converter
from botorch.optim.numpy_converter import (
_scipy_objective_and_grad,
module_to_array,
set_params_with_array,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.constraints import GreaterThan
from gpytorch.kernels.rbf_kernel import RBFKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means.constant_mean import ConstantMean
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.models.exact_gp import ExactGP
def _get_index(property_dict, parameter_name):
idx = 0
for p_name, ta in property_dict.items():
if p_name == parameter_name:
break
idx += ta.shape.numel()
return idx
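# Illustrative sketch (hedged): `_get_index` accumulates `shape.numel()` in
# iteration order until it hits `parameter_name`, i.e. it returns the offset
# of that parameter's first entry in the flat array that `module_to_array`
# produces. A toy, runnable example with hypothetical names:
def _demo_get_index():
    from collections import namedtuple
    TA = namedtuple("TA", ["shape"])
    pdict = {"a": TA(torch.Size([2])), "b": TA(torch.Size([1, 3]))}
    # "a" occupies flat entries 0-1, so "b" starts at offset 2.
    assert _get_index(pdict, "b") == 2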
class TestModuleToArray(BotorchTestCase):
def test_basic(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood()
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
# test the basic case
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(module=mll)
self.assertTrue(np.array_equal(x, np.zeros(5)))
expected_sizes = {
"likelihood.noise_covar.raw_noise": torch.Size([1]),
"model.covar_module.raw_lengthscale": torch.Size([1, 3]),
"model.mean_module.raw_constant": torch.Size(),
}
self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
for pname, val in pdict.items():
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.shape, expected_sizes[pname])
self.assertEqual(val.device.type, self.device.type)
self.assertIsNone(bounds)
def test_exclude(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood()
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
# test the basic case
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(
module=mll, exclude={"model.mean_module.raw_constant"}
)
self.assertTrue(np.array_equal(x, np.zeros(4)))
expected_sizes = {
"likelihood.noise_covar.raw_noise": torch.Size([1]),
"model.covar_module.raw_lengthscale": torch.Size([1, 3]),
}
self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
for pname, val in pdict.items():
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.shape, expected_sizes[pname])
self.assertEqual(val.device.type, self.device.type)
self.assertIsNone(bounds)
def test_manual_bounds(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood()
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
# test the basic case
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(
module=mll,
bounds={"model.covar_module.raw_lengthscale": (0.1, None)},
)
self.assertTrue(np.array_equal(x, np.zeros(5)))
expected_sizes = {
"likelihood.noise_covar.raw_noise": torch.Size([1]),
"model.covar_module.raw_lengthscale": torch.Size([1, 3]),
"model.mean_module.raw_constant": torch.Size(),
}
self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
for pname, val in pdict.items():
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.shape, expected_sizes[pname])
self.assertEqual(val.device.type, self.device.type)
lower_exp = np.full_like(x, 0.1)
for p in (
"likelihood.noise_covar.raw_noise",
"model.mean_module.raw_constant",
):
lower_exp[_get_index(pdict, p)] = -np.inf
self.assertTrue(np.equal(bounds[0], lower_exp).all())
self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(
module=mll,
bounds={
key: (-float("inf"), float("inf"))
for key, _ in mll.named_parameters()
},
)
self.assertIsNone(bounds)
def test_module_bounds(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood(
noise_constraint=GreaterThan(1e-5, transform=None)
)
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
# test the basic case
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(
module=mll,
bounds={"model.covar_module.raw_lengthscale": (0.1, None)},
)
self.assertTrue(np.array_equal(x, np.zeros(5)))
expected_sizes = {
"likelihood.noise_covar.raw_noise": torch.Size([1]),
"model.covar_module.raw_lengthscale": torch.Size([1, 3]),
"model.mean_module.raw_constant": torch.Size(),
}
self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
for pname, val in pdict.items():
self.assertEqual(val.dtype, dtype)
self.assertEqual(val.shape, expected_sizes[pname])
self.assertEqual(val.device.type, self.device.type)
lower_exp = np.full_like(x, 0.1)
lower_exp[_get_index(pdict, "model.mean_module.raw_constant")] = -np.inf
lower_exp[_get_index(pdict, "likelihood.noise_covar.raw_noise")] = 1e-5
self.assertTrue(np.allclose(bounds[0], lower_exp))
self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
class TestSetParamsWithArray(BotorchTestCase):
def test_set_parameters(self):
for dtype in (torch.float, torch.double):
# get a test module
train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
likelihood = GaussianLikelihood()
model = ExactGP(train_x, train_y, likelihood)
model.covar_module = RBFKernel(ard_num_dims=3)
model.mean_module = ConstantMean()
model.to(device=self.device, dtype=dtype)
mll = ExactMarginalLogLikelihood(likelihood, model)
with catch_warnings():
# Get parameters
simplefilter("ignore", category=DeprecationWarning)
x, pdict, bounds = module_to_array(module=mll)
# Set parameters
mll = set_params_with_array(
mll, np.array([1.0, 2.0, 3.0, 4.0, 5.0]), pdict
)
z = dict(mll.named_parameters())
self.assertTrue(
torch.equal(
z["likelihood.noise_covar.raw_noise"],
torch.tensor([1.0], device=self.device, dtype=dtype),
)
)
self.assertTrue(
torch.equal(
z["model.covar_module.raw_lengthscale"],
torch.tensor([[2.0, 3.0, 4.0]], device=self.device, dtype=dtype),
)
)
self.assertTrue(
torch.equal(
z["model.mean_module.raw_constant"],
torch.tensor(5.0, device=self.device, dtype=dtype),
)
)
# Extract again
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x2, pdict2, bounds2 = module_to_array(module=mll)
self.assertTrue(np.array_equal(x2, np.array([1.0, 2.0, 3.0, 4.0, 5.0])))
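# Illustrative sketch (hedged): the `module_to_array` /
# `set_params_with_array` round trip used above, with the
# DeprecationWarning silenced since these helpers are deprecated.
def _demo_numpy_converter_round_trip(mll):
    with catch_warnings():
        simplefilter("ignore", category=DeprecationWarning)
        x, pdict, _bounds = module_to_array(module=mll)
        # Perturb the flat array and write it back onto the module.
        mll = set_params_with_array(mll, x + 1.0, pdict)
        x2, _, _ = module_to_array(module=mll)
    return np.allclose(x2, x + 1.0)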
class TestScipyObjectiveAndGrad(BotorchTestCase):
def setUp(self) -> None:
super().setUp()
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
self.mll = ExactMarginalLogLikelihood(model.likelihood, model)
def test_scipy_objective_and_grad(self):
with catch_warnings():
simplefilter("ignore", category=DeprecationWarning)
x, property_dict, bounds = module_to_array(module=self.mll)
loss, grad = _scipy_objective_and_grad(x, self.mll, property_dict)
_dist = self.mll.model(*self.mll.model.train_inputs)
_loss = -self.mll(_dist, self.mll.model.train_targets)
_loss.sum().backward()
_grad = torch.concat(
[self.mll.get_parameter(name).grad.view(-1) for name in property_dict]
)
self.assertEqual(loss, _loss.detach().sum().item())
self.assertTrue(np.allclose(grad, _grad.detach().numpy()))
def _getter(*args, **kwargs):
raise RuntimeError("foo")
_handler = MagicMock()
with catch_warnings(), patch.multiple(
numpy_converter,
_get_extra_mll_args=_getter,
_handle_numerical_errors=_handler,
):
simplefilter("ignore", category=DeprecationWarning)
_scipy_objective_and_grad(x, self.mll, property_dict)
self.assertEqual(_handler.call_count, 1)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.optim.stopping import ExpMAStoppingCriterion, StoppingCriterion
from botorch.utils.testing import BotorchTestCase
class TestStoppingCriterion(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
StoppingCriterion()
def test_exponential_moving_average(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
# test max iter
sc = ExpMAStoppingCriterion(maxiter=2)
self.assertEqual(sc.maxiter, 2)
self.assertEqual(sc.n_window, 10)
self.assertEqual(sc.rel_tol, 1e-5)
self.assertFalse(sc.evaluate(fvals=torch.ones(1, **tkwargs)))
self.assertTrue(sc.evaluate(fvals=torch.zeros(1, **tkwargs)))
# test convergence
n_window = 4
for minimize in (True, False):
# test basic
sc = ExpMAStoppingCriterion(
minimize=minimize, n_window=n_window, rel_tol=0.0375
)
self.assertEqual(sc.rel_tol, 0.0375)
self.assertIsNone(sc._prev_fvals)
weights_exp = torch.tensor([0.1416, 0.1976, 0.2758, 0.3849])
self.assertAllClose(sc.weights, weights_exp, atol=1e-4)
f_vals = 1 + torch.linspace(1, 0, 25, **tkwargs) ** 2
if not minimize:
f_vals = -f_vals
for i, fval in enumerate(f_vals):
if sc.evaluate(fval):
self.assertEqual(i, 10)
break
# test multiple components
sc = ExpMAStoppingCriterion(
minimize=minimize, n_window=n_window, rel_tol=0.0375
)
df = torch.linspace(0, 0.1, 25, **tkwargs)
if not minimize:
df = -df
f_vals = torch.stack([f_vals, f_vals + df], dim=-1)
for i, fval in enumerate(f_vals):
if sc.evaluate(fval):
self.assertEqual(i, 10)
break
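# Illustrative sketch (hedged): using `ExpMAStoppingCriterion` in a plain
# optimization loop; `evaluate` returns True once the exponentially weighted
# moving average of `fvals` stops improving by more than `rel_tol`.
def _demo_stopping_criterion():
    sc = ExpMAStoppingCriterion(n_window=4, rel_tol=0.01)
    for step, fval in enumerate(torch.linspace(1.0, 0.99, 50)):
        if sc.evaluate(fvals=fval):
            return step  # index of the step at which we stopped
    return -1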
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import warnings
import torch
from botorch import settings
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
)
from botorch.acquisition.multi_objective.max_value_entropy_search import (
qMultiObjectiveMaxValueEntropy,
)
from botorch.acquisition.multi_objective.monte_carlo import (
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.exceptions import BotorchError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models import ModelListGP, SingleTaskGP
from botorch.models.transforms.input import Warp
from botorch.optim.utils import columnwise_clamp, fix_features, get_X_baseline
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestColumnWiseClamp(BotorchTestCase):
def setUp(self):
super().setUp()
self.X = torch.tensor([[-2, 1], [0.5, -0.5]], device=self.device)
self.X_expected = torch.tensor([[-1, 0.5], [0.5, -0.5]], device=self.device)
def test_column_wise_clamp_scalars(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, 1, -1)
X_clmp = columnwise_clamp(X, -1, 0.5)
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, -3, 3)
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_scalar_tensors(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.tensor(1), torch.tensor(-1))
X_clmp = columnwise_clamp(X, torch.tensor(-1), torch.tensor(0.5))
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, torch.tensor(-3), torch.tensor(3))
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_tensors(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.ones(2), torch.zeros(2))
with self.assertRaises(RuntimeError):
X_clmp = columnwise_clamp(X, torch.zeros(3), torch.ones(3))
X_clmp = columnwise_clamp(X, torch.tensor([-1, -1]), torch.tensor([0.5, 0.5]))
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, torch.tensor([-3, -3]), torch.tensor([3, 3]))
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_full_dim_tensors(self):
X = torch.tensor([[[-1, 2, 0.5], [0.5, 3, 1.5]], [[0.5, 1, 0], [2, -2, 3]]])
lower = torch.tensor([[[0, 0.5, 1], [0, 2, 2]], [[0, 2, 0], [1, -1, 0]]])
upper = torch.tensor([[[1, 1.5, 1], [1, 4, 3]], [[1, 3, 0.5], [3, 1, 2.5]]])
X_expected = torch.tensor(
[[[0, 1.5, 1], [0.5, 3, 2]], [[0.5, 2, 0], [2, -1, 2.5]]]
)
X_clmp = columnwise_clamp(X, lower, upper)
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, lower - 5, upper + 5)
self.assertTrue(torch.equal(X_clmp, X))
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.ones_like(X), torch.zeros_like(X))
with self.assertRaises(RuntimeError):
X_clmp = columnwise_clamp(X, lower.unsqueeze(-3), upper.unsqueeze(-3))
def test_column_wise_clamp_raise_on_violation(self):
X = self.X
with self.assertRaises(BotorchError):
X_clmp = columnwise_clamp(
X, torch.zeros(2), torch.ones(2), raise_on_violation=True
)
X_clmp = columnwise_clamp(
X, torch.tensor([-3, -3]), torch.tensor([3, 3]), raise_on_violation=True
)
self.assertTrue(torch.equal(X_clmp, X))
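# Illustrative sketch (hedged): `columnwise_clamp` broadcasts scalar,
# per-column, or full-dimension bounds; with `raise_on_violation=True` it
# raises a BotorchError instead of silently clamping, as tested above.
def _demo_columnwise_clamp():
    X = torch.tensor([[-2.0, 1.0], [0.5, -0.5]])
    # Per-column bounds; the first row gets clamped into [-1, 0.5].
    return columnwise_clamp(X, torch.tensor([-1.0, -1.0]), torch.tensor([0.5, 0.5]))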
class TestFixFeatures(BotorchTestCase):
def _getTensors(self):
X = torch.tensor([[-2, 1, 3], [0.5, -0.5, 1.0]], device=self.device)
X_null_two = torch.tensor([[-2, 1, 3], [0.5, -0.5, 1.0]], device=self.device)
X_expected = torch.tensor([[-1, 1, -2], [-1, -0.5, -2]], device=self.device)
X_expected_null_two = torch.tensor(
[[-1, 1, 3], [-1, -0.5, 1.0]], device=self.device
)
return X, X_null_two, X_expected, X_expected_null_two
def test_fix_features(self):
X, X_null_two, X_expected, X_expected_null_two = self._getTensors()
X.requires_grad_(True)
X_null_two.requires_grad_(True)
X_fix = fix_features(X, {0: -1, 2: -2})
X_fix_null_two = fix_features(X_null_two, {0: -1, 2: None})
self.assertTrue(torch.equal(X_fix, X_expected))
self.assertTrue(torch.equal(X_fix_null_two, X_expected_null_two))
def f(X):
return X.sum()
f(X).backward()
self.assertTrue(torch.equal(X.grad, torch.ones_like(X)))
X.grad.zero_()
f(X_fix).backward()
self.assertTrue(
torch.equal(
X.grad,
torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], device=self.device),
)
)
f(X_null_two).backward()
self.assertTrue(torch.equal(X_null_two.grad, torch.ones_like(X)))
X_null_two.grad.zero_()
f(X_fix_null_two).backward()
self.assertTrue(
torch.equal(
X_null_two.grad,
torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], device=self.device),
)
)
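# Illustrative sketch (hedged): `fix_features` maps column indices to fixed
# values; a value of None pins the column at its current value. Fixed
# columns are detached, so gradients through them are zero, as tested above.
def _demo_fix_features():
    X = torch.tensor([[0.1, 0.2, 0.3]], requires_grad=True)
    X_fix = fix_features(X, {0: -1.0, 2: None})
    X_fix.sum().backward()
    return X.grad  # only the middle (unfixed) column receives gradient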
class TestGetXBaseline(BotorchTestCase):
def test_get_X_baseline(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
X_train = torch.rand(20, 2, **tkwargs)
model = MockModel(
MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
)
# test NEI with X_baseline
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train[:2], prune_baseline=False, cache_root=False
)
X = get_X_baseline(acq_function=acqf)
self.assertTrue(torch.equal(X, acqf.X_baseline))
# test EI without X_baseline
acqf = qExpectedImprovement(model, best_f=0.0)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = get_X_baseline(
acq_function=acqf,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
# set train inputs
model.train_inputs = (X_train,)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test that we fall back to train_inputs if X_baseline is an empty tensor
acqf.register_buffer("X_baseline", X_train[:0])
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test acquisition function without X_baseline or model
acqf = FixedFeatureAcquisitionFunction(acqf, d=2, columns=[0], values=[0])
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = get_X_baseline(
acq_function=acqf,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
Y_train = 2 * X_train[:2] + 1
moo_model = MockModel(MockPosterior(mean=Y_train, samples=Y_train))
ref_point = torch.zeros(2, **tkwargs)
# test NEHVI with X_baseline
acqf = qNoisyExpectedHypervolumeImprovement(
moo_model,
ref_point=ref_point,
X_baseline=X_train[:2],
sampler=IIDNormalSampler(sample_shape=torch.Size([2])),
cache_root=False,
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, acqf.X_baseline))
# test qEHVI without train_inputs
acqf = qExpectedHypervolumeImprovement(
moo_model,
ref_point=ref_point,
partitioning=FastNondominatedPartitioning(
ref_point=ref_point,
Y=Y_train,
),
)
# test extracting train_inputs from model list GP
model_list = ModelListGP(
SingleTaskGP(X_train, Y_train[:, :1]),
SingleTaskGP(X_train, Y_train[:, 1:]),
)
acqf = qExpectedHypervolumeImprovement(
model_list,
ref_point=ref_point,
partitioning=FastNondominatedPartitioning(
ref_point=ref_point,
Y=Y_train,
),
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test MESMO for which we need to use
# `acqf.mo_model`
batched_mo_model = SingleTaskGP(X_train, Y_train)
acqf = qMultiObjectiveMaxValueEntropy(
batched_mo_model,
sample_pareto_frontiers=lambda model: torch.rand(10, 2, **tkwargs),
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test that if there is an input transform that is applied
# to the train_inputs when the model is in eval mode, we
# extract the untransformed train_inputs
model = SingleTaskGP(
X_train, Y_train[:, :1], input_transform=Warp(indices=[0, 1])
)
model.eval()
self.assertFalse(torch.equal(model.train_inputs[0], X_train))
acqf = qExpectedImprovement(model, best_f=0.0)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
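# Illustrative sketch (hedged): `get_X_baseline` first looks for an
# `X_baseline` buffer on the acquisition function and otherwise falls back
# to the model's untransformed `train_inputs`, returning None (with a
# BotorchWarning) if neither is available.
def _demo_get_X_baseline(acqf):
    X = get_X_baseline(acq_function=acqf)
    return X  # Tensor of baseline points, or None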
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from unittest.mock import MagicMock
import numpy as np
import torch
from botorch.optim.closures.core import (
as_ndarray,
get_tensors_as_ndarray_1d,
set_tensors_from_ndarray_1d,
)
from botorch.optim.utils import get_bounds_as_ndarray
from botorch.optim.utils.numpy_utils import torch_to_numpy_dtype_dict
from botorch.utils.testing import BotorchTestCase
from torch.nn import Parameter
class TestNumpyUtils(BotorchTestCase):
def setUp(self):
super().setUp()
self.parameters = {"foo": torch.rand(2), "bar": Parameter(torch.rand(3))}
def test_as_ndarray(self):
base = np.random.randn(3)
tnsr = torch.from_numpy(base)
# Test inplace conversion
result = as_ndarray(tnsr)
self.assertTrue(np.shares_memory(base, result))
# Test conversion with memory allocation
result = as_ndarray(tnsr, inplace=False)
self.assertTrue(np.allclose(base, result))
self.assertFalse(np.shares_memory(base, result))
result = as_ndarray(tnsr, dtype=np.float32)
self.assertTrue(np.allclose(base, result))
self.assertFalse(np.shares_memory(base, result))
self.assertEqual(result.dtype, np.float32)
# Test that `clone` does not get called on non-CPU tensors
mock_tensor = MagicMock()
mock_tensor.cpu.return_value = mock_tensor
mock_tensor.device.return_value = "foo"
mock_tensor.clone.return_value = mock_tensor
as_ndarray(mock_tensor)
mock_tensor.cpu.assert_called_once()
mock_tensor.clone.assert_not_called()
mock_tensor.numpy.assert_called_once()
def test_as_ndarray_dtypes(self) -> None:
for torch_dtype, np_dtype in torch_to_numpy_dtype_dict.items():
tens = torch.tensor(0, dtype=torch_dtype, device="cpu")
self.assertEqual(torch_dtype, tens.dtype)
self.assertEqual(tens.numpy().dtype, np_dtype)
self.assertEqual(as_ndarray(tens, np_dtype).dtype, np_dtype)
def test_get_tensors_as_ndarray_1d(self):
with self.assertRaisesRegex(RuntimeError, "Argument `tensors` .* is empty"):
get_tensors_as_ndarray_1d(())
values = get_tensors_as_ndarray_1d(self.parameters)
self.assertTrue(
np.allclose(values, get_tensors_as_ndarray_1d(self.parameters.values()))
)
n = 0
for param in self.parameters.values():
k = param.numel()
self.assertTrue(
np.allclose(values[n : n + k], param.view(-1).detach().cpu().numpy())
)
n += k
with self.assertRaisesRegex(ValueError, "Expected a vector for `out`"):
get_tensors_as_ndarray_1d(self.parameters, out=np.empty((1, 1)))
with self.assertRaisesRegex(ValueError, "Size of `parameters` .* not match"):
get_tensors_as_ndarray_1d(self.parameters, out=np.empty(values.size - 1))
with self.assertRaisesRegex(RuntimeError, "failed while copying values .* foo"):
get_tensors_as_ndarray_1d(
self.parameters,
out=np.empty(values.size),
as_array=MagicMock(side_effect=RuntimeError("foo")),
)
def test_set_tensors_from_ndarray_1d(self):
values = get_tensors_as_ndarray_1d(self.parameters)
others = np.random.rand(*values.shape).astype(values.dtype)
with self.assertRaisesRegex(RuntimeError, "failed while copying values to"):
set_tensors_from_ndarray_1d(self.parameters, np.empty([1]))
set_tensors_from_ndarray_1d(self.parameters, others)
n = 0
for param in self.parameters.values():
k = param.numel()
self.assertTrue(
np.allclose(others[n : n + k], param.view(-1).detach().cpu().numpy())
)
n += k
def test_get_bounds_as_ndarray(self):
params = {"a": torch.rand(1), "b": torch.rand(1), "c": torch.rand(2)}
bounds = {"a": (None, 1), "c": (0, None)}
test = np.full((4, 2), (-float("inf"), float("inf")))
test[0, 1] = 1
test[2, 0] = 0
test[3, 0] = 0
array = get_bounds_as_ndarray(parameters=params, bounds=bounds)
self.assertTrue(np.array_equal(test, array))
# Test with tensor bounds.
bounds = {
"a": (None, torch.tensor(1, device=self.device)),
"c": (torch.tensor(0, device=self.device), None),
}
array = get_bounds_as_ndarray(parameters=params, bounds=bounds)
self.assertTrue(np.array_equal(test, array))
# Test with n-dim tensor bounds.
bounds = {
"a": (None, torch.tensor(1, device=self.device)),
"c": (
torch.tensor([0, 0], device=self.device),
torch.tensor([1, 1], device=self.device),
),
}
test[2:, 1] = 1
array = get_bounds_as_ndarray(parameters=params, bounds=bounds)
self.assertTrue(np.array_equal(test, array))
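# Illustrative sketch (hedged): round-tripping a dict of tensors through a
# single 1-D ndarray, the representation consumed by the scipy-based
# optimizers elsewhere in this suite. The `_demo_` name is hypothetical.
def _demo_ndarray_round_trip():
    params = {"a": torch.rand(2), "b": Parameter(torch.rand(3))}
    flat = get_tensors_as_ndarray_1d(params)  # concatenated, detached copy
    set_tensors_from_ndarray_1d(params, flat * 0.0)  # write values back
    return all(p.eq(0).all() for p in params.values())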
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from functools import partial
from warnings import catch_warnings, warn
import numpy as np
from botorch.optim.utils import (
_filter_kwargs,
_handle_numerical_errors,
_warning_handler_template,
)
from botorch.utils.testing import BotorchTestCase
from linear_operator.utils.errors import NanError, NotPSDError
class TestUtilsCommon(BotorchTestCase):
def test__filter_kwargs(self) -> None:
def mock_adam(params, lr: float = 0.001) -> None:
return # pragma: no cover
kwargs = {"lr": 0.01, "maxiter": 3000}
expected_msg = (
r"Keyword arguments \['maxiter'\] will be ignored because they are "
r"not allowed parameters for function mock_adam. Allowed parameters "
r"are \['params', 'lr'\]."
)
with self.assertWarnsRegex(Warning, expected_msg):
valid_kwargs = _filter_kwargs(mock_adam, **kwargs)
self.assertEqual(set(valid_kwargs.keys()), {"lr"})
mock_partial = partial(mock_adam, lr=2.0)
expected_msg = (
r"Keyword arguments \['maxiter'\] will be ignored because they are "
r"not allowed parameters. Allowed parameters are \['params', 'lr'\]."
)
with self.assertWarnsRegex(Warning, expected_msg):
valid_kwargs = _filter_kwargs(mock_partial, **kwargs)
self.assertEqual(set(valid_kwargs.keys()), {"lr"})
def test_handle_numerical_errors(self):
x = np.zeros(1, dtype=np.float64)
with self.assertRaisesRegex(NotPSDError, "foo"):
_handle_numerical_errors(NotPSDError("foo"), x=x)
for error in (
NanError(),
RuntimeError("singular"),
RuntimeError("input is not positive-definite"),
):
fake_loss, fake_grad = _handle_numerical_errors(error, x=x)
self.assertTrue(np.isnan(fake_loss))
self.assertEqual(fake_grad.shape, x.shape)
self.assertTrue(np.isnan(fake_grad).all())
fake_loss, fake_grad = _handle_numerical_errors(error, x=x, dtype=np.float32)
self.assertEqual(np.float32, fake_loss.dtype)
self.assertEqual(np.float32, fake_grad.dtype)
with self.assertRaisesRegex(RuntimeError, "foo"):
_handle_numerical_errors(RuntimeError("foo"), x=x)
def test_warning_handler_template(self):
with catch_warnings(record=True) as ws:
warn(DeprecationWarning("foo"))
warn(RuntimeWarning("bar"))
self.assertFalse(any(_warning_handler_template(w) for w in ws))
handler = partial(
_warning_handler_template,
debug=lambda w: issubclass(w.category, DeprecationWarning),
rethrow=lambda w: True,
)
with self.assertLogs(level="DEBUG") as logs, catch_warnings(record=True) as _ws:
self.assertTrue(all(handler(w) for w in ws))
self.assertEqual(1, len(logs.output))
self.assertTrue("foo" in logs.output[0])
self.assertEqual(1, len(_ws))
self.assertEqual("bar", str(_ws[0].message))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import re
import warnings
from copy import deepcopy
from string import ascii_lowercase
from unittest.mock import MagicMock, patch
import torch
from botorch import settings
from botorch.models import ModelListGP, SingleTaskGP
from botorch.optim.utils import (
_get_extra_mll_args,
get_data_loader,
get_name_filter,
get_parameters,
get_parameters_and_bounds,
model_utils,
sample_all_priors,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.constraints import GreaterThan
from gpytorch.kernels.matern_kernel import MaternKernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.mlls.marginal_log_likelihood import MarginalLogLikelihood
from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood
from gpytorch.priors import UniformPrior
from gpytorch.priors.prior import Prior
from gpytorch.priors.torch_priors import GammaPrior
class DummyPrior(Prior):
arg_constraints = {}
def rsample(self, sample_shape=torch.Size()): # noqa: B008
raise NotImplementedError
class DummyPriorRuntimeError(Prior):
arg_constraints = {}
def rsample(self, sample_shape=torch.Size()): # noqa: B008
raise RuntimeError("Another runtime error.")
class TestGetExtraMllArgs(BotorchTestCase):
def test_get_extra_mll_args(self):
train_X = torch.rand(3, 5)
train_Y = torch.rand(3, 1)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
# test ExactMarginalLogLikelihood
exact_mll = ExactMarginalLogLikelihood(model.likelihood, model)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
exact_extra_args = _get_extra_mll_args(mll=exact_mll)
self.assertEqual(len(exact_extra_args), 1)
self.assertTrue(torch.equal(exact_extra_args[0], train_X))
# test SumMarginalLogLikelihood
model2 = ModelListGP(model)
sum_mll = SumMarginalLogLikelihood(model2.likelihood, model2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
sum_mll_extra_args = _get_extra_mll_args(mll=sum_mll)
self.assertEqual(len(sum_mll_extra_args), 1)
self.assertEqual(len(sum_mll_extra_args[0]), 1)
self.assertTrue(torch.equal(sum_mll_extra_args[0][0], train_X))
# test unsupported MarginalLogLikelihood type
unsupported_mll = MarginalLogLikelihood(model.likelihood, model)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
unsupported_mll_extra_args = _get_extra_mll_args(mll=unsupported_mll)
self.assertEqual(unsupported_mll_extra_args, [])
class TestGetDataLoader(BotorchTestCase):
def setUp(self):
super().setUp()
with torch.random.fork_rng():
torch.random.manual_seed(0)
train_X = torch.rand(3, 5, device=self.device)
train_Y = torch.rand(3, 1, device=self.device)
self.model = SingleTaskGP(train_X=train_X, train_Y=train_Y).to(torch.float64)
def test_get_data_loader(self):
data_loader = get_data_loader(self.model)
self.assertEqual(data_loader.batch_size, len(self.model.train_targets))
train_X, train_Y = next(iter(data_loader))
self.assertTrue(self.model.train_inputs[0].equal(train_X))
self.assertTrue(self.model.train_targets.equal(train_Y))
_TensorDataset = MagicMock(return_value="foo")
_DataLoader = MagicMock()
with patch.multiple(
model_utils, TensorDataset=_TensorDataset, DataLoader=_DataLoader
):
model_utils.get_data_loader(self.model, batch_size=2, shuffle=True)
_DataLoader.assert_called_once_with(
dataset="foo",
batch_size=2,
shuffle=True,
)
class TestGetParameters(BotorchTestCase):
def setUp(self):
super().setUp()
self.module = GaussianLikelihood(
noise_constraint=GreaterThan(1e-6, initial_value=0.123),
)
def test_get_parameters(self):
self.assertEqual(0, len(get_parameters(self.module, requires_grad=False)))
params = get_parameters(self.module)
self.assertEqual(1, len(params))
self.assertEqual(next(iter(params)), "noise_covar.raw_noise")
self.assertTrue(
self.module.noise_covar.raw_noise.equal(next(iter(params.values())))
)
def test_get_parameters_and_bounds(self):
param_dict, bounds_dict = get_parameters_and_bounds(self.module)
self.assertTrue(1 == len(param_dict) == len(bounds_dict))
name, bounds = next(iter(bounds_dict.items()))
self.assertEqual(name, "noise_covar.raw_noise")
self.assertEqual(bounds, (-float("inf"), float("inf")))
mock_module = torch.nn.Module()
mock_module.named_parameters = MagicMock(
return_value=self.module.named_parameters()
)
param_dict2, bounds_dict2 = get_parameters_and_bounds(mock_module)
self.assertEqual(param_dict, param_dict2)
self.assertTrue(len(bounds_dict2) == 0)
class TestGetNameFilter(BotorchTestCase):
def test_get_name_filter(self):
with self.assertRaisesRegex(TypeError, "Expected `patterns` to contain"):
get_name_filter(("foo", re.compile("bar"), 1))
names = ascii_lowercase
name_filter = get_name_filter(iter(names[1::2]))
self.assertEqual(names[::2], "".join(filter(name_filter, names)))
items = tuple(zip(names, range(len(names))))
self.assertEqual(items[::2], tuple(filter(name_filter, items)))
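# Illustrative sketch (hedged): `get_name_filter` returns a callable that
# drops the given names, usable with `filter` over plain names or over
# `(name, value)` items, e.g. to exclude parameters from optimization.
def _demo_name_filter(module):
    name_filter = get_name_filter(("noise_covar.raw_noise",))
    # Keep only parameters whose names are not listed above.
    return dict(filter(name_filter, module.named_parameters()))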
class TestSampleAllPriors(BotorchTestCase):
def test_sample_all_priors(self):
for dtype in (torch.float, torch.double):
train_X = torch.rand(3, 5, device=self.device, dtype=dtype)
train_Y = torch.rand(3, 1, device=self.device, dtype=dtype)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll.to(device=self.device, dtype=dtype)
original_state_dict = dict(deepcopy(mll.model.state_dict()))
sample_all_priors(model)
# make sure one of the hyperparameters changed
self.assertTrue(
dict(model.state_dict())["likelihood.noise_covar.raw_noise"]
!= original_state_dict["likelihood.noise_covar.raw_noise"]
)
# check that lengthscales are all different
ls = model.covar_module.base_kernel.raw_lengthscale.view(-1).tolist()
self.assertTrue(all(ls[0] != ls[i] for i in range(1, len(ls))))
# change one of the priors to a dummy prior that does not support sampling
model.covar_module = ScaleKernel(
MaternKernel(
nu=2.5,
ard_num_dims=model.train_inputs[0].shape[-1],
batch_shape=model._aug_batch_shape,
lengthscale_prior=DummyPrior(),
),
batch_shape=model._aug_batch_shape,
outputscale_prior=GammaPrior(2.0, 0.15),
)
original_state_dict = dict(deepcopy(mll.model.state_dict()))
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
sample_all_priors(model)
self.assertEqual(len(ws), 1)
self.assertTrue("rsample" in str(ws[0].message))
# change to dummy prior that raises an unrecognized RuntimeError
model.covar_module = ScaleKernel(
MaternKernel(
nu=2.5,
ard_num_dims=model.train_inputs[0].shape[-1],
batch_shape=model._aug_batch_shape,
lengthscale_prior=DummyPriorRuntimeError(),
),
batch_shape=model._aug_batch_shape,
outputscale_prior=GammaPrior(2.0, 0.15),
)
with self.assertRaises(RuntimeError):
sample_all_priors(model)
# the lengthscale should not have changed because sampling is
# not implemented for DummyPrior
self.assertTrue(
torch.equal(
dict(model.state_dict())[
"covar_module.base_kernel.raw_lengthscale"
],
original_state_dict["covar_module.base_kernel.raw_lengthscale"],
)
)
# set setting_closure to None and make sure RuntimeError is raised
prior_tuple = model.likelihood.noise_covar._priors["noise_prior"]
model.likelihood.noise_covar._priors["noise_prior"] = (
prior_tuple[0],
prior_tuple[1],
None,
)
with self.assertRaises(RuntimeError):
sample_all_priors(model)
# test for error when sampling violates constraint
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll.to(device=self.device, dtype=dtype)
model.covar_module = ScaleKernel(
MaternKernel(
nu=2.5,
ard_num_dims=model.train_inputs[0].shape[-1],
batch_shape=model._aug_batch_shape,
lengthscale_prior=GammaPrior(3.0, 6.0),
),
batch_shape=model._aug_batch_shape,
outputscale_prior=UniformPrior(1.0, 2.0),
outputscale_constraint=GreaterThan(3.0),
)
original_state_dict = dict(deepcopy(mll.model.state_dict()))
with self.assertRaises(RuntimeError):
sample_all_priors(model)
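# Illustrative sketch (hedged): `sample_all_priors` draws new values for
# every hyperparameter carrying a prior with a setting closure, e.g. to
# randomize initial conditions before fitting. The `_demo_` name is
# hypothetical.
def _demo_sample_all_priors(model):
    with torch.random.fork_rng():
        torch.manual_seed(0)
        sample_all_priors(model)
    return model.state_dict()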
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import numpy as np
from botorch.optim.utils.timeout import minimize_with_timeout
from botorch.utils.testing import BotorchTestCase
from scipy.optimize import OptimizeResult
class TestMinimizeWithTimeout(BotorchTestCase):
def test_minimize_with_timeout(self):
def f_and_g(x: np.ndarray, sleep_sec: float = 0.0):
time.sleep(sleep_sec)
return x**2, 2 * x
base_kwargs = {
"fun": f_and_g,
"x0": np.array([1.0]),
"method": "L-BFGS-B",
"jac": True,
"bounds": [(-2.0, 2.0)],
}
with self.subTest("test w/o timeout"):
res = minimize_with_timeout(**base_kwargs)
self.assertTrue(res.success)
self.assertAlmostEqual(res.fun, 0.0)
self.assertAlmostEqual(res.x.item(), 0.0)
self.assertEqual(res.nit, 2) # quadratic approx. is exact
with self.subTest("test w/ non-binding timeout"):
res = minimize_with_timeout(**base_kwargs, timeout_sec=1.0)
self.assertTrue(res.success)
self.assertAlmostEqual(res.fun, 0.0)
self.assertAlmostEqual(res.x.item(), 0.0)
self.assertEqual(res.nit, 2) # quadratic approx. is exact
with self.subTest("test w/ binding timeout"):
res = minimize_with_timeout(**base_kwargs, args=(1e-2,), timeout_sec=1e-4)
self.assertFalse(res.success)
self.assertEqual(res.nit, 1) # only one call to the callback is made
# set up callback with mutable object to verify callback execution
check_set = set()
def callback(x: np.ndarray) -> None:
check_set.add("foo")
with self.subTest("test w/ callout argument and non-binding timeout"):
res = minimize_with_timeout(
**base_kwargs, callback=callback, timeout_sec=1.0
)
self.assertTrue(res.success)
self.assertTrue("foo" in check_set)
# set up callback for method `trust-constr` w/ different signature
check_set.clear()
self.assertFalse("foo" in check_set)
def callback_trustconstr(x: np.ndarray, state: OptimizeResult) -> bool:
check_set.add("foo")
return False
with self.subTest("test `trust-constr` method w/ callback"):
res = minimize_with_timeout(
**{**base_kwargs, "method": "trust-constr"},
callback=callback_trustconstr,
)
self.assertTrue(res.success)
self.assertTrue("foo" in check_set)
# reset check set
check_set.clear()
self.assertFalse("foo" in check_set)
with self.subTest("test `trust-constr` method w/ callback and timeout"):
res = minimize_with_timeout(
**{**base_kwargs, "method": "trust-constr"},
args=(1e-3,),
callback=callback_trustconstr,
timeout_sec=1e-4,
)
self.assertFalse(res.success)
self.assertTrue("foo" in check_set)
with self.subTest("verify error if passing callable for `method` w/ timeout"):
with self.assertRaisesRegex(
NotImplementedError, "Custom callable not supported"
):
minimize_with_timeout(
**{**base_kwargs, "method": lambda *args, **kwargs: None},
callback=callback,
timeout_sec=1e-4,
)
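# Illustrative sketch (hedged): `minimize_with_timeout` mirrors
# `scipy.optimize.minimize` but injects a callback that aborts once
# `timeout_sec` has elapsed, returning the last iterate with
# `success=False`, as the binding-timeout subtests above show.
def _demo_minimize_with_timeout():
    res = minimize_with_timeout(
        fun=lambda x: ((x**2).sum(), 2 * x),  # value and gradient
        x0=np.array([1.0]),
        method="L-BFGS-B",
        jac=True,
        timeout_sec=10.0,
    )
    return res.x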
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import zip_longest
from math import pi
import torch
from botorch.models import ModelListGP, SingleTaskGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.optim.closures.model_closures import (
get_loss_closure,
get_loss_closure_with_grads,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch import settings as gpytorch_settings
from gpytorch.mlls import ExactMarginalLogLikelihood, SumMarginalLogLikelihood
from torch.utils.data import DataLoader, TensorDataset
class TestLossClosures(BotorchTestCase):
def setUp(self):
super().setUp()
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
train_Y = torch.sin((2 * pi) * train_X)
train_Y = train_Y + 0.1 * torch.randn_like(train_Y)
self.mlls = {}
model = SingleTaskGP(
train_X=train_X,
train_Y=train_Y,
input_transform=Normalize(d=1),
outcome_transform=Standardize(m=1),
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
self.mlls[type(mll), type(model.likelihood), type(model)] = mll.to(self.device)
model = ModelListGP(model, model)
mll = SumMarginalLogLikelihood(model.likelihood, model)
self.mlls[type(mll), type(model.likelihood), type(model)] = mll.to(self.device)
def test_main(self):
for mll in self.mlls.values():
out = mll.model(*mll.model.train_inputs)
loss = -mll(out, mll.model.train_targets).sum()
loss.backward()
params = {n: p for n, p in mll.named_parameters() if p.requires_grad}
grads = [
torch.zeros_like(p) if p.grad is None else p.grad
for p in params.values()
]
closure = get_loss_closure(mll)
self.assertTrue(loss.equal(closure()))
closure = get_loss_closure_with_grads(mll, params)
_loss, _grads = closure()
self.assertTrue(loss.equal(_loss))
self.assertTrue(all(a.equal(b) for a, b in zip_longest(grads, _grads)))
def test_data_loader(self):
for mll in self.mlls.values():
if type(mll) is not ExactMarginalLogLikelihood:
continue
dataset = TensorDataset(*mll.model.train_inputs, mll.model.train_targets)
loader = DataLoader(dataset, batch_size=len(mll.model.train_targets))
params = {n: p for n, p in mll.named_parameters() if p.requires_grad}
A = get_loss_closure_with_grads(mll, params)
(a, das) = A()
B = get_loss_closure_with_grads(mll, params, data_loader=loader)
with gpytorch_settings.debug(False): # disables GPyTorch's internal check
(b, dbs) = B()
self.assertTrue(a.allclose(b))
for da, db in zip_longest(das, dbs):
self.assertTrue(da.allclose(db))
loader = DataLoader(mll.model.train_targets, len(mll.model.train_targets))
closure = get_loss_closure_with_grads(mll, params, data_loader=loader)
with self.assertRaisesRegex(TypeError, "Expected .* a batch of tensors"):
closure()
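# Illustrative sketch (not part of the test suite): how the closures above are
# typically consumed when fitting a model by hand. It reuses only the imports
# at the top of this file; the function name below is hypothetical.
def _example_loss_closure_usage():
    train_X = torch.linspace(0, 1, 10).unsqueeze(-1)
    train_Y = torch.sin((2 * pi) * train_X)
    model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    params = {n: p for n, p in mll.named_parameters() if p.requires_grad}
    closure = get_loss_closure_with_grads(mll, params)
    loss, grads = closure()  # negative MLL and per-parameter gradients
    return loss, grads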
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import nullcontext
from functools import partial
from typing import Dict
from unittest.mock import MagicMock
import numpy as np
import torch
from botorch.optim.closures.core import (
ForwardBackwardClosure,
get_tensors_as_ndarray_1d,
NdarrayOptimizationClosure,
)
from botorch.optim.utils import as_ndarray
from botorch.utils.context_managers import zero_grad_ctx
from botorch.utils.testing import BotorchTestCase
from linear_operator.utils.errors import NanError, NotPSDError
from torch.nn import Module, Parameter
class ToyModule(Module):
def __init__(self, w: Parameter, b: Parameter, x: Parameter, dummy: Parameter):
r"""Toy module for unit testing."""
super().__init__()
self.w = w
self.b = b
self.x = x
self.dummy = dummy
def forward(self) -> torch.Tensor:
return self.w * self.x + self.b
@property
def free_parameters(self) -> Dict[str, torch.Tensor]:
return {n: p for n, p in self.named_parameters() if p.requires_grad}
class TestForwardBackwardClosure(BotorchTestCase):
def setUp(self):
super().setUp()
module = ToyModule(
w=Parameter(torch.tensor(2.0)),
b=Parameter(torch.tensor(3.0), requires_grad=False),
x=Parameter(torch.tensor(4.0)),
dummy=Parameter(torch.tensor(5.0)),
).to(self.device)
self.modules = {}
for dtype in ("float32", "float64"):
self.modules[dtype] = module.to(dtype=getattr(torch, dtype))
def test_main(self):
for module in self.modules.values():
            # Test __init__
            closure = ForwardBackwardClosure(module, module.free_parameters)
self.assertEqual(module.free_parameters, closure.parameters)
self.assertIsInstance(closure.context_manager, partial)
self.assertEqual(closure.context_manager.func, zero_grad_ctx)
# Test return values
value, (dw, dx, dd) = closure()
self.assertTrue(value.equal(module()))
self.assertTrue(dw.equal(module.x))
self.assertTrue(dx.equal(module.w))
self.assertEqual(dd, None)
            # Test `callback` and `reducer`
closure = ForwardBackwardClosure(module, module.free_parameters)
mock_reducer = MagicMock(return_value=closure.forward())
mock_callback = MagicMock()
closure = ForwardBackwardClosure(
forward=module,
parameters=module.free_parameters,
reducer=mock_reducer,
callback=mock_callback,
)
value, grads = closure()
mock_reducer.assert_called_once_with(value)
mock_callback.assert_called_once_with(value, grads)
            # Test `backward` and `context_manager`
closure = ForwardBackwardClosure(
forward=module,
parameters=module.free_parameters,
backward=partial(torch.Tensor.backward, retain_graph=True),
context_manager=nullcontext,
)
_, (dw, dx, dd) = closure() # x2 because `grad` is no longer zeroed
self.assertTrue(dw.equal(2 * module.x))
self.assertTrue(dx.equal(2 * module.w))
self.assertEqual(dd, None)
class TestNdarrayOptimizationClosure(BotorchTestCase):
def setUp(self):
super().setUp()
self.module = ToyModule(
w=Parameter(torch.tensor(2.0)),
b=Parameter(torch.tensor(3.0), requires_grad=False),
x=Parameter(torch.tensor(4.0)),
dummy=Parameter(torch.tensor(5.0)),
).to(self.device)
self.wrappers = {}
for dtype in ("float32", "float64"):
module = self.module.to(dtype=getattr(torch, dtype))
closure = ForwardBackwardClosure(module, module.free_parameters)
wrapper = NdarrayOptimizationClosure(closure, closure.parameters)
self.wrappers[dtype] = wrapper
def test_main(self):
for wrapper in self.wrappers.values():
# Test setter/getter
state = get_tensors_as_ndarray_1d(wrapper.closure.parameters)
other = np.random.randn(*state.shape).astype(state.dtype)
wrapper.state = other
self.assertTrue(np.allclose(other, wrapper.state))
index = 0
for param in wrapper.closure.parameters.values():
size = param.numel()
self.assertTrue(
np.allclose(
other[index : index + size], wrapper.as_array(param.view(-1))
)
)
index += size
wrapper.state = state
self.assertTrue(np.allclose(state, wrapper.state))
# Test __call__
value, grads = wrapper(other)
self.assertTrue(np.allclose(other, wrapper.state))
self.assertIsInstance(value, np.ndarray)
self.assertIsInstance(grads, np.ndarray)
# Test return values
value_tensor, grad_tensors = wrapper.closure() # get raw Tensor equivalents
self.assertTrue(np.allclose(value, wrapper.as_array(value_tensor)))
index = 0
for x, dx in zip(wrapper.parameters.values(), grad_tensors):
size = x.numel()
grad = grads[index : index + size]
if dx is None:
self.assertTrue((grad == wrapper.fill_value).all())
else:
self.assertTrue(np.allclose(grad, wrapper.as_array(dx)))
index += size
module = wrapper.closure.forward
self.assertTrue(np.allclose(grads[0], as_ndarray(module.x)))
self.assertTrue(np.allclose(grads[1], as_ndarray(module.w)))
self.assertEqual(grads[2], wrapper.fill_value)
# Test persistent buffers
for mode in (False, True):
wrapper.persistent = mode
self.assertEqual(
mode,
wrapper._get_gradient_ndarray() is wrapper._get_gradient_ndarray(),
)
def test_exceptions(self):
for wrapper in self.wrappers.values():
mock_closure = MagicMock(return_value=wrapper.closure())
mock_wrapper = NdarrayOptimizationClosure(
mock_closure, wrapper.closure.parameters
)
with self.assertRaisesRegex(NotPSDError, "foo"):
mock_wrapper.closure.side_effect = NotPSDError("foo")
mock_wrapper()
for exception in (
NanError("foo"),
RuntimeError("singular"),
RuntimeError("input is not positive-definite"),
):
mock_wrapper.closure.side_effect = exception
value, grads = mock_wrapper()
self.assertTrue(np.isnan(value).all())
self.assertTrue(np.isnan(grads).all())
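# Illustrative sketch (not part of the test suite): NdarrayOptimizationClosure
# adapts a torch closure to the ndarray-in/ndarray-out interface expected by
# numpy-based optimizers such as scipy.optimize.minimize. The function name is
# hypothetical; it reuses the ToyModule defined above.
def _example_ndarray_closure_usage():
    module = ToyModule(
        w=Parameter(torch.tensor(2.0)),
        b=Parameter(torch.tensor(3.0), requires_grad=False),
        x=Parameter(torch.tensor(4.0)),
        dummy=Parameter(torch.tensor(5.0)),
    )
    closure = ForwardBackwardClosure(module, module.free_parameters)
    wrapper = NdarrayOptimizationClosure(closure, closure.parameters)
    # calling the wrapper with a flat ndarray sets the parameters from it and
    # returns the loss value and concatenated gradients as ndarrays
    value, grads = wrapper(wrapper.state)
    return value, grads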
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.utils.containers import DenseContainer, SliceContainer
from botorch.utils.datasets import FixedNoiseDataset, RankingDataset, SupervisedDataset
from botorch.utils.testing import BotorchTestCase
from torch import rand, randperm, Size, stack, Tensor, tensor
class TestDatasets(BotorchTestCase):
def test_supervised(self):
# Generate some data
Xs = rand(4, 3, 2)
Ys = rand(4, 3, 1)
# Test `__init__`
dataset = SupervisedDataset(X=Xs[0], Y=Ys[0])
self.assertIsInstance(dataset.X, Tensor)
self.assertIsInstance(dataset._X, Tensor)
self.assertIsInstance(dataset.Y, Tensor)
self.assertIsInstance(dataset._Y, Tensor)
dataset = SupervisedDataset(
X=DenseContainer(Xs[0], Xs[0].shape[-1:]),
Y=DenseContainer(Ys[0], Ys[0].shape[-1:]),
)
self.assertIsInstance(dataset.X, Tensor)
self.assertIsInstance(dataset._X, DenseContainer)
self.assertIsInstance(dataset.Y, Tensor)
self.assertIsInstance(dataset._Y, DenseContainer)
# Test `_validate`
with self.assertRaisesRegex(ValueError, "Batch dimensions .* incompatible."):
SupervisedDataset(X=rand(1, 2), Y=rand(2, 1))
# Test `dict_from_iter` and `__eq__`
datasets = SupervisedDataset.dict_from_iter(X=Xs.unbind(), Y=Ys.unbind())
self.assertIsInstance(datasets, dict)
self.assertEqual(tuple(datasets.keys()), tuple(range(len(Xs))))
for i, dataset in datasets.items():
self.assertEqual(dataset, SupervisedDataset(Xs[i], Ys[i]))
self.assertNotEqual(datasets[0], datasets)
datasets = SupervisedDataset.dict_from_iter(X=Xs[0], Y=Ys.unbind())
self.assertEqual(len(datasets), len(Xs))
for i in range(1, len(Xs)):
self.assertTrue(torch.equal(datasets[0].X, datasets[i].X))
# Test with Yvar.
dataset = SupervisedDataset(
X=Xs[0], Y=Ys[0], Yvar=DenseContainer(Ys[0], Ys[0].shape[-1:])
)
self.assertIsInstance(dataset.X, Tensor)
self.assertIsInstance(dataset._X, Tensor)
self.assertIsInstance(dataset.Y, Tensor)
self.assertIsInstance(dataset._Y, Tensor)
self.assertIsInstance(dataset.Yvar, Tensor)
self.assertIsInstance(dataset._Yvar, DenseContainer)
def test_fixedNoise(self):
# Generate some data
Xs = rand(4, 3, 2)
Ys = rand(4, 3, 1)
Ys_var = rand(4, 3, 1)
# Test `dict_from_iter`
datasets = FixedNoiseDataset.dict_from_iter(
X=Xs.unbind(),
Y=Ys.unbind(),
Yvar=Ys_var.unbind(),
)
for i, dataset in datasets.items():
self.assertTrue(dataset.X.equal(Xs[i]))
self.assertTrue(dataset.Y.equal(Ys[i]))
self.assertTrue(dataset.Yvar.equal(Ys_var[i]))
# Test handling of Tensor-valued arguments to `dict_from_iter`
datasets = FixedNoiseDataset.dict_from_iter(
X=Xs[0],
Y=Ys[1],
Yvar=Ys_var.unbind(),
)
for dataset in datasets.values():
self.assertTrue(Xs[0].equal(dataset.X))
self.assertTrue(Ys[1].equal(dataset.Y))
with self.assertRaisesRegex(
ValueError, "`Y` and `Yvar`"
), self.assertWarnsRegex(DeprecationWarning, "SupervisedDataset"):
FixedNoiseDataset(X=Xs, Y=Ys, Yvar=Ys_var[0])
def test_ranking(self):
# Test `_validate`
X_val = rand(16, 2)
X_idx = stack([randperm(len(X_val))[:3] for _ in range(1)])
X = SliceContainer(X_val, X_idx, event_shape=Size([3 * X_val.shape[-1]]))
with self.assertRaisesRegex(ValueError, "out-of-bounds"):
RankingDataset(X=X, Y=tensor([[-1, 0, 1]]))
RankingDataset(X=X, Y=tensor([[2, 0, 1]]))
with self.assertRaisesRegex(ValueError, "out-of-bounds"):
RankingDataset(X=X, Y=tensor([[0, 1, 3]]))
RankingDataset(X=X, Y=tensor([[0, 1, 2]]))
with self.assertRaisesRegex(ValueError, "missing zero-th rank."):
RankingDataset(X=X, Y=tensor([[1, 2, 2]]))
RankingDataset(X=X, Y=tensor([[0, 1, 1]]))
with self.assertRaisesRegex(ValueError, "ranks not skipped after ties."):
RankingDataset(X=X, Y=tensor([[0, 0, 1]]))
RankingDataset(X=X, Y=tensor([[0, 0, 2]]))
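# Illustrative sketch (not part of the test suite): the minimal way to build a
# SupervisedDataset from plain tensors; the variable names are hypothetical.
def _example_supervised_dataset():
    X = rand(8, 2)  # 8 training points with 2 features each
    Y = rand(8, 1)  # one outcome per training point
    dataset = SupervisedDataset(X=X, Y=Y)
    return dataset.X, dataset.Y  # both are surfaced as Tensors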
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.utils.rounding import (
approximate_round,
IdentitySTEFunction,
OneHotArgmaxSTE,
RoundSTE,
)
from botorch.utils.testing import BotorchTestCase
from torch.nn.functional import one_hot
class DummySTEFunction(IdentitySTEFunction):
@staticmethod
def forward(ctx, X):
return 2 * X
class TestApproximateRound(BotorchTestCase):
def test_approximate_round(self):
for dtype in (torch.float, torch.double):
X = torch.linspace(-2.5, 2.5, 11, device=self.device, dtype=dtype)
exact_rounded_X = X.round()
approx_rounded_X = approximate_round(X)
# check that approximate rounding is closer to rounded values than
# the original inputs
rounded_diffs = (approx_rounded_X - exact_rounded_X).abs()
diffs = (X - exact_rounded_X).abs()
self.assertTrue((rounded_diffs <= diffs).all())
# check that not all gradients are zero
X.requires_grad_(True)
approximate_round(X).sum().backward()
self.assertTrue((X.grad.abs() != 0).any())
class TestIdentitySTEFunction(BotorchTestCase):
def test_identity_ste(self):
for dtype in (torch.float, torch.double):
X = torch.rand(3, device=self.device, dtype=dtype)
with self.assertRaises(NotImplementedError):
IdentitySTEFunction.apply(X)
X = X.requires_grad_(True)
X_out = DummySTEFunction.apply(X)
X_out.sum().backward()
self.assertTrue(torch.equal(2 * X, X_out))
self.assertTrue(torch.equal(X.grad, torch.ones_like(X)))
class TestRoundSTE(BotorchTestCase):
def test_round_ste(self):
for dtype in (torch.float, torch.double):
            # sample uniformly from the interval [-2.5, 2.5]
X = torch.rand(5, 2, device=self.device, dtype=dtype) * 5 - 2.5
expected_rounded_X = X.round()
rounded_X = RoundSTE.apply(X)
# test forward
self.assertTrue(torch.equal(expected_rounded_X, rounded_X))
# test backward
X = X.requires_grad_(True)
output = RoundSTE.apply(X)
            # sample some weights to check that gradients are passed
            # as intended
w = torch.rand_like(X)
(w * output).sum().backward()
self.assertTrue(torch.equal(w, X.grad))
class TestOneHotArgmaxSTE(BotorchTestCase):
def test_one_hot_argmax_ste(self):
for dtype in (torch.float, torch.double):
X = torch.rand(5, 4, device=self.device, dtype=dtype)
expected_discretized_X = one_hot(
X.argmax(dim=-1), num_classes=X.shape[-1]
).to(X)
discretized_X = OneHotArgmaxSTE.apply(X)
# test forward
self.assertTrue(torch.equal(expected_discretized_X, discretized_X))
# test backward
X = X.requires_grad_(True)
output = OneHotArgmaxSTE.apply(X)
            # sample some weights to check that gradients are passed
            # as intended
w = torch.rand_like(X)
(w * output).sum().backward()
self.assertTrue(torch.equal(w, X.grad))
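# Illustrative sketch (not part of the test suite): straight-through estimators
# discretize on the forward pass while passing gradients through unchanged,
# which keeps rounding usable inside a differentiable pipeline. The function
# name is hypothetical.
def _example_ste_gradient_flow():
    X = (torch.rand(4) * 5 - 2.5).requires_grad_(True)
    rounded = RoundSTE.apply(X)  # integer-valued forward output
    rounded.sum().backward()  # identity backward: X.grad is all ones
    return rounded, X.grad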
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import OrderedDict
import torch
from botorch.utils.testing import BotorchTestCase
from botorch.utils.torch import BufferDict
class TestBufferDict(BotorchTestCase):
def test_BufferDict(self):
buffers = OrderedDict(
[
("b1", torch.randn(10, 10)),
("b2", torch.randn(10, 10)),
("b3", torch.randn(10, 10)),
]
)
buffer_dict = BufferDict(buffers)
def check():
self.assertEqual(len(buffer_dict), len(buffers))
for k1, m2 in zip(buffers, buffer_dict.buffers()):
self.assertIs(buffers[k1], m2)
for k1, k2 in zip(buffers, buffer_dict):
self.assertIs(buffers[k1], buffer_dict[k2])
for k in buffer_dict:
self.assertIs(buffer_dict[k], buffers[k])
for k in buffer_dict.keys():
self.assertIs(buffer_dict[k], buffers[k])
for k, v in buffer_dict.items():
self.assertIs(v, buffers[k])
for k1, m2 in zip(buffers, buffer_dict.values()):
self.assertIs(buffers[k1], m2)
for k in buffers.keys():
self.assertTrue(k in buffer_dict)
check()
buffers["b4"] = torch.randn(10, 10)
buffer_dict["b4"] = buffers["b4"]
check()
next_buffers = [("b5", torch.randn(10, 10)), ("b2", torch.randn(10, 10))]
buffers.update(next_buffers)
buffer_dict.update(next_buffers)
check()
next_buffers = OrderedDict(
[("b6", torch.randn(10, 10)), ("b5", torch.randn(10, 10))]
)
buffers.update(next_buffers)
buffer_dict.update(next_buffers)
check()
next_buffers = {"b8": torch.randn(10, 10), "b7": torch.randn(10, 10)}
buffers.update(sorted(next_buffers.items()))
buffer_dict.update(next_buffers)
check()
del buffer_dict["b3"]
del buffers["b3"]
check()
with self.assertRaises(TypeError):
buffer_dict.update(1)
with self.assertRaises(TypeError):
buffer_dict.update([1])
with self.assertRaises(ValueError):
buffer_dict.update(torch.randn(10, 10))
with self.assertRaises(TypeError):
buffer_dict[1] = torch.randn(10, 10)
p_pop = buffer_dict.pop("b4")
self.assertIs(p_pop, buffers["b4"])
buffers.pop("b4")
check()
buffer_dict.clear()
self.assertEqual(len(buffer_dict), 0)
buffers.clear()
check()
# test extra repr
buffer_dict = BufferDict(
OrderedDict(
[
("b1", torch.randn(10, 10)),
("b2", torch.randn(10, 10)),
("b3", torch.randn(10, 10)),
]
)
)
self.assertEqual(
buffer_dict.extra_repr(),
" (b1): Buffer containing: [torch.FloatTensor of size 10x10]\n"
" (b2): Buffer containing: [torch.FloatTensor of size 10x10]\n"
" (b3): Buffer containing: [torch.FloatTensor of size 10x10]",
)
# test that calling a buffer dict raises an exception
with self.assertRaises(RuntimeError):
buffer_dict(1)
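# Illustrative sketch (not part of the test suite): BufferDict mirrors
# torch.nn.ParameterDict but holds non-trainable buffers, so the stored
# tensors move with the module under `.to(...)` without requiring gradients.
# The function name is hypothetical.
def _example_buffer_dict():
    bd = BufferDict(OrderedDict([("scale", torch.ones(3)), ("shift", torch.zeros(3))]))
    bd["scale"] = 2 * torch.ones(3)  # dict-style assignment registers a buffer
    return dict(bd.items())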
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.utils.feasible_volume import (
estimate_feasible_volume,
get_feasible_samples,
get_outcome_feasibility_probability,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestFeasibleVolumeEstimates(BotorchTestCase):
def test_feasible_samples(self):
        # -X[0] + X[1] >= 1
inequality_constraints = [(torch.tensor([0, 1]), torch.tensor([-1.0, 1.0]), 1)]
box_samples = torch.tensor([[1.1, 2.0], [0.9, 2.1], [1.5, 2], [1.8, 2.2]])
feasible_samples, p_linear = get_feasible_samples(
samples=box_samples, inequality_constraints=inequality_constraints
)
feasible = box_samples[:, 1] - box_samples[:, 0] >= 1
self.assertTrue(
torch.all(torch.eq(feasible_samples, box_samples[feasible])).item()
)
self.assertEqual(p_linear, feasible.sum(0).float().item() / feasible.size(0))
def test_outcome_feasibility_probability(self):
for dtype in (torch.float, torch.double):
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
X = torch.zeros(1, 1, device=self.device, dtype=torch.double)
for outcome_constraints in [
[lambda y: y[..., 0] - 0.5],
[lambda y: y[..., 0] + 1.0],
]:
p_outcome = get_outcome_feasibility_probability(
model=mm,
X=X,
outcome_constraints=outcome_constraints,
nsample_outcome=2,
)
feasible = outcome_constraints[0](samples) <= 0
self.assertEqual(p_outcome, feasible)
def test_estimate_feasible_volume(self):
for dtype in (torch.float, torch.double):
for samples in (
torch.zeros(1, 2, 1, device=self.device, dtype=dtype),
torch.ones(1, 1, 1, device=self.device, dtype=dtype),
):
mm = MockModel(MockPosterior(samples=samples))
bounds = torch.ones((2, 1))
outcome_constraints = [lambda y: y[..., 0] - 0.5]
p_linear, p_outcome = estimate_feasible_volume(
bounds=bounds,
model=mm,
outcome_constraints=outcome_constraints,
nsample_feature=2,
nsample_outcome=1,
dtype=dtype,
)
self.assertEqual(p_linear, 1.0)
self.assertEqual(p_outcome, 1.0 - samples[0, 0].item())
p_linear, p_outcome = estimate_feasible_volume(
bounds=bounds,
model=mm,
outcome_constraints=None,
nsample_feature=2,
nsample_outcome=1,
dtype=dtype,
)
self.assertEqual(p_linear, 1.0)
self.assertEqual(p_outcome, 1.0)
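# Illustrative sketch (not part of the test suite): get_feasible_samples keeps
# the samples satisfying linear constraints of the form
# `coefficients @ x[indices] >= rhs` and reports the fraction retained. The
# function name is hypothetical.
def _example_feasible_samples():
    # constraint: -x[0] + x[1] >= 1
    inequality_constraints = [(torch.tensor([0, 1]), torch.tensor([-1.0, 1.0]), 1)]
    samples = torch.tensor([[0.0, 2.0], [0.0, 0.5]])
    feasible, p_linear = get_feasible_samples(
        samples=samples, inequality_constraints=inequality_constraints
    )
    return feasible, p_linear  # keeps only [0., 2.]; p_linear == 0.5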
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions.errors import BotorchError
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.low_rank import extract_batch_covar, sample_cached_cholesky
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions.multitask_multivariate_normal import (
MultitaskMultivariateNormal,
)
from linear_operator.operators import BlockDiagLinearOperator, to_linear_operator
from linear_operator.utils.errors import NanError
class TestExtractBatchCovar(BotorchTestCase):
def test_extract_batch_covar(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
base_covar = torch.tensor(
[[1.0, 0.6, 0.9], [0.6, 1.0, 0.5], [0.9, 0.5, 1.0]], **tkwargs
)
lazy_covar = to_linear_operator(
torch.stack([base_covar, base_covar * 2], dim=0)
)
block_diag_covar = BlockDiagLinearOperator(lazy_covar)
mt_mvn = MultitaskMultivariateNormal(
torch.zeros(3, 2, **tkwargs), block_diag_covar
)
batch_covar = extract_batch_covar(mt_mvn=mt_mvn)
self.assertTrue(torch.equal(batch_covar.to_dense(), lazy_covar.to_dense()))
# test non BlockDiagLinearOperator
mt_mvn = MultitaskMultivariateNormal(
torch.zeros(3, 2, **tkwargs), block_diag_covar.to_dense()
)
with self.assertRaises(BotorchError):
extract_batch_covar(mt_mvn=mt_mvn)
class TestSampleCachedCholesky(BotorchTestCase):
def test_sample_cached_cholesky(self):
torch.manual_seed(0)
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
train_X = torch.rand(10, 2, **tkwargs)
train_Y = torch.randn(10, 2, **tkwargs)
for m in (1, 2):
model_list_values = (True, False) if m == 2 else (False,)
for use_model_list in model_list_values:
if use_model_list:
model = ModelListGP(
SingleTaskGP(
train_X,
train_Y[..., :1],
),
SingleTaskGP(
train_X,
train_Y[..., 1:],
),
)
else:
model = SingleTaskGP(
train_X,
train_Y[:, :m],
)
sampler = IIDNormalSampler(sample_shape=torch.Size([3]))
base_sampler = IIDNormalSampler(sample_shape=torch.Size([3]))
for q in (1, 3, 9):
# test batched baseline_L
for train_batch_shape in (
torch.Size([]),
torch.Size([3]),
torch.Size([3, 2]),
):
# test batched test points
for test_batch_shape in (
torch.Size([]),
torch.Size([4]),
torch.Size([4, 2]),
):
if len(train_batch_shape) > 0:
train_X_ex = train_X.unsqueeze(0).expand(
train_batch_shape + train_X.shape
)
else:
train_X_ex = train_X
if len(test_batch_shape) > 0:
test_X = train_X_ex.unsqueeze(0).expand(
test_batch_shape + train_X_ex.shape
)
else:
test_X = train_X_ex
with torch.no_grad():
base_posterior = model.posterior(
train_X_ex[..., :-q, :]
)
mvn = base_posterior.distribution
lazy_covar = mvn.lazy_covariance_matrix
if m == 2:
lazy_covar = lazy_covar.base_linear_op
baseline_L = lazy_covar.root_decomposition()
baseline_L = baseline_L.root.to_dense()
# Sample with base sampler to construct
# the base samples.
baseline_samples = base_sampler(base_posterior)
test_X = test_X.clone().requires_grad_(True)
new_posterior = model.posterior(test_X)
# Mimicking _set_sampler to update base
# samples of the sampler.
sampler._update_base_samples(
posterior=new_posterior, base_sampler=base_sampler
)
samples = sampler(new_posterior)
samples[..., -q:, :].sum().backward()
test_X2 = test_X.detach().clone().requires_grad_(True)
new_posterior2 = model.posterior(test_X2)
q_samples = sample_cached_cholesky(
posterior=new_posterior2,
baseline_L=baseline_L,
q=q,
base_samples=sampler.base_samples.detach().clone(),
sample_shape=sampler.sample_shape,
)
q_samples.sum().backward()
all_close_kwargs = (
{
"atol": 1e-4,
"rtol": 1e-2,
}
if dtype == torch.float
else {}
)
self.assertTrue(
torch.allclose(
q_samples.detach(),
samples[..., -q:, :].detach(),
**all_close_kwargs,
)
)
self.assertTrue(
torch.allclose(
test_X2.grad[..., -q:, :],
test_X.grad[..., -q:, :],
**all_close_kwargs,
)
)
                                # Test that adding a new point and base sample
                                # does not change the posterior samples for
                                # previous points. This checks that we properly
                                # account for non-interleaved covariances.
new_batch_shape = samples.shape[
1 : -baseline_samples.ndim + 1
]
expanded_baseline_samples = baseline_samples.view(
baseline_samples.shape[0],
*[1] * len(new_batch_shape),
*baseline_samples.shape[1:],
).expand(
baseline_samples.shape[0],
*new_batch_shape,
*baseline_samples.shape[1:],
)
self.assertTrue(
torch.allclose(
expanded_baseline_samples,
samples[..., :-q, :],
**all_close_kwargs,
)
)
# test nans
with torch.no_grad():
test_posterior = model.posterior(test_X2)
test_posterior.distribution.loc = torch.full_like(
test_posterior.distribution.loc, float("nan")
)
with self.assertRaises(NanError):
sample_cached_cholesky(
posterior=test_posterior,
baseline_L=baseline_L,
q=q,
base_samples=sampler.base_samples.detach().clone(),
sample_shape=sampler.sample_shape,
)
# test infs
test_posterior.distribution.loc = torch.full_like(
test_posterior.distribution.loc, float("inf")
)
with self.assertRaises(NanError):
sample_cached_cholesky(
posterior=test_posterior,
baseline_L=baseline_L,
q=q,
base_samples=sampler.base_samples.detach().clone(),
sample_shape=sampler.sample_shape,
)
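# Illustrative sketch (not part of the test suite): extract_batch_covar pulls
# the per-task batch covariance out of a MultitaskMultivariateNormal whose
# covariance is block-diagonal, mirroring the first test above. The function
# name is hypothetical.
def _example_extract_batch_covar():
    base = torch.eye(3)
    lazy_covar = to_linear_operator(torch.stack([base, 2 * base]))
    mt_mvn = MultitaskMultivariateNormal(
        torch.zeros(3, 2), BlockDiagLinearOperator(lazy_covar)
    )
    return extract_batch_covar(mt_mvn=mt_mvn).to_dense()  # shape (2, 3, 3)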
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from string import ascii_lowercase
import torch
from botorch.utils.context_managers import (
delattr_ctx,
module_rollback_ctx,
parameter_rollback_ctx,
requires_grad_ctx,
TensorCheckpoint,
zero_grad_ctx,
)
from botorch.utils.testing import BotorchTestCase
from torch.nn import Module, Parameter
class TestContextManagers(BotorchTestCase):
def setUp(self):
super().setUp()
module = self.module = Module()
for i, name in enumerate(ascii_lowercase[:3], start=1):
values = torch.rand(2).to(torch.float16)
param = Parameter(values.to(torch.float64), requires_grad=bool(i % 2))
module.register_parameter(name, param)
def test_delattr_ctx(self):
# Test temporary removal of attributes
a = self.module.a
b = self.module.b
with delattr_ctx(self.module, "a", "b"):
self.assertIsNone(getattr(self.module, "a", None))
self.assertIsNone(getattr(self.module, "b", None))
self.assertTrue(self.module.c is not None)
# Test that removed attributes get restored
self.assertTrue(self.module.a.equal(a))
self.assertTrue(self.module.b.equal(b))
with self.assertRaisesRegex(ValueError, "Attribute .* missing"):
with delattr_ctx(self.module, "z", enforce_hasattr=True):
pass # pragma: no cover
def test_requires_grad_ctx(self):
# Test temporary setting of requires_grad field
with requires_grad_ctx(self.module, assignments={"a": False, "b": True}):
self.assertTrue(not self.module.a.requires_grad)
self.assertTrue(self.module.b.requires_grad)
self.assertTrue(self.module.c.requires_grad)
# Test that requires_grad fields get restored
self.assertTrue(self.module.a.requires_grad)
self.assertTrue(not self.module.b.requires_grad)
self.assertTrue(self.module.c.requires_grad)
def test_parameter_rollback_ctx(self):
# Test that only unfiltered parameters get rolled back
a = self.module.a.detach().clone()
b = self.module.b.detach().clone()
c = self.module.c.detach().clone()
parameters = dict(self.module.named_parameters())
with parameter_rollback_ctx(parameters, dtype=torch.float16) as ckpt:
for (tnsr, _, __) in ckpt.values(): # test whether dtype is obeyed
self.assertEqual(torch.float16, tnsr.dtype)
self.module.a.data[...] = 0
self.module.b.data[...] = 0
self.module.c.data[...] = 0
del ckpt["c"] # test whether changes to checkpoint dict are respected
self.assertTrue(self.module.a.equal(a))
self.assertTrue(self.module.b.equal(b))
self.assertTrue(self.module.c.eq(0).all())
# Test rolling back to a user-provided checkpoint
with parameter_rollback_ctx(
parameters, checkpoint={"c": TensorCheckpoint(c, c.device, c.dtype)}
):
pass
self.assertTrue(self.module.c.equal(c))
def test_module_rollback_ctx(self):
# Test that only unfiltered objects get rolled back
a = self.module.a.detach().clone()
b = self.module.b.detach().clone()
c = self.module.c.detach().clone()
with module_rollback_ctx(
self.module, lambda name: name == "a", dtype=torch.float16
) as ckpt:
for (tnsr, _, __) in ckpt.values(): # test whether dtype is obeyed
self.assertEqual(torch.float16, tnsr.dtype)
self.module.a.data[...] = 0
self.module.b.data[...] = 0
self.module.c.data[...] = 0
self.assertTrue(self.module.a.equal(a))
self.assertTrue(self.module.b.eq(0).all())
self.assertTrue(self.module.c.eq(0).all())
# Test that changes to checkpoint dict are reflected in rollback state
with module_rollback_ctx(self.module) as ckpt:
self.module.a.data[...] = 1
self.module.b.data[...] = 1
self.module.c.data[...] = 1
del ckpt["a"]
self.assertTrue(self.module.a.eq(1).all())
self.assertTrue(self.module.b.eq(0).all())
self.assertTrue(self.module.c.eq(0).all())
# Test rolling back to a user-provided checkpoint
checkpoint = {
"a": TensorCheckpoint(a, a.device, a.dtype),
"b": TensorCheckpoint(b, b.device, b.dtype),
"c": TensorCheckpoint(c, c.device, c.dtype),
}
with module_rollback_ctx(module=self.module, checkpoint=checkpoint):
pass
self.assertTrue(self.module.a.equal(a))
self.assertTrue(self.module.b.equal(b))
self.assertTrue(self.module.c.equal(c))
# Test that items in checkpoint get inserted into state_dict
with delattr_ctx(self.module, "a"):
with self.assertRaisesRegex( # should fail when attempting to rollback
RuntimeError, r'Unexpected key\(s\) in state_dict: "a"'
):
with module_rollback_ctx(module=self.module, checkpoint=checkpoint):
pass
def test_zero_grad_ctx(self):
params = (Parameter(torch.rand(1)), Parameter(torch.rand(1)))
sum(params).backward()
with zero_grad_ctx(params, zero_on_enter=False, zero_on_exit=True):
self.assertFalse(any(x.grad.eq(0).all() for x in params))
self.assertTrue(all(x.grad.eq(0).all() for x in params))
sum(params).backward()
with zero_grad_ctx(params, zero_on_enter=True, zero_on_exit=False):
self.assertTrue(all(x.grad.eq(0).all() for x in params))
sum(params).backward()
self.assertFalse(any(x.grad.eq(0).all() for x in params))
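# Illustrative sketch (not part of the test suite): zero_grad_ctx brackets a
# block of code with gradient zeroing, which is how ForwardBackwardClosure
# obtains fresh gradients on every call. The function name is hypothetical.
def _example_zero_grad_ctx():
    param = Parameter(torch.rand(2))
    param.sum().backward()  # param.grad is now all ones
    with zero_grad_ctx((param,), zero_on_enter=True, zero_on_exit=False):
        # stale gradients were cleared on entry
        param.sum().backward()
    return param.grad  # gradients accumulated inside the block survive exit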
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import itertools
import math
from abc import abstractmethod
from itertools import combinations, product
from typing import Callable
import torch
from botorch.exceptions import UnsupportedError
from botorch.utils import safe_math
from botorch.utils.constants import get_constants_like
from botorch.utils.objective import compute_smoothed_feasibility_indicator
from botorch.utils.safe_math import (
_pareto,
cauchy,
fatmax,
fatmaximum,
fatminimum,
fatmoid,
fatplus,
log_fatmoid,
log_fatplus,
log_softplus,
logdiffexp,
logexpit,
logmeanexp,
logplusexp,
sigmoid,
smooth_amax,
)
from botorch.utils.testing import BotorchTestCase
from torch import finfo, Tensor
from torch.nn.functional import softplus
INF = float("inf")
def sum_constraint(samples: Tensor) -> Tensor:
"""Represents the constraint `samples.sum(dim=-1) > 0`.
Args:
samples: A `b x q x m`-dim Tensor.
Returns:
A `b x q`-dim Tensor representing constraint feasibility.
"""
return -samples.sum(dim=-1)
class UnaryOpTestMixin:
op: Callable[[Tensor], Tensor]
safe_op: Callable[[Tensor], Tensor]
def __init_subclass__(cls, op: Callable, safe_op: Callable):
cls.op = staticmethod(op)
cls.safe_op = staticmethod(safe_op)
def test_generic(self, m: int = 3, n: int = 4):
for dtype in (torch.float32, torch.float64):
# Test forward
x = torch.rand(n, m, dtype=dtype, requires_grad=True, device=self.device)
y = self.safe_op(x)
_x = x.detach().clone().requires_grad_(True)
_y = self.op(_x)
self.assertTrue(y.equal(_y))
# Test backward
y.sum().backward()
_y.sum().backward()
self.assertTrue(x.grad.equal(_x.grad))
# Test passing in pre-allocated `out`
with torch.no_grad():
y.zero_()
self.safe_op(x, out=y)
self.assertTrue(y.equal(_y))
@abstractmethod
def test_special(self):
pass # pragma: no cover
class BinaryOpTestMixin:
op: Callable[[Tensor, Tensor], Tensor]
safe_op: Callable[[Tensor, Tensor], Tensor]
def __init_subclass__(cls, op: Callable, safe_op: Callable):
cls.op = staticmethod(op)
cls.safe_op = staticmethod(safe_op)
def test_generic(self, m: int = 3, n: int = 4):
for dtype in (torch.float32, torch.float64):
# Test equality for generic cases
a = torch.rand(n, m, dtype=dtype, requires_grad=True, device=self.device)
b = torch.rand(n, m, dtype=dtype, requires_grad=True, device=self.device)
y = self.safe_op(a, b)
_a = a.detach().clone().requires_grad_(True)
_b = b.detach().clone().requires_grad_(True)
_y = self.op(_a, _b)
self.assertTrue(y.equal(_y))
# Test backward
y.sum().backward()
_y.sum().backward()
self.assertTrue(a.grad.equal(_a.grad))
self.assertTrue(b.grad.equal(_b.grad))
@abstractmethod
def test_special(self):
pass # pragma: no cover
class TestSafeExp(
BotorchTestCase, UnaryOpTestMixin, op=torch.exp, safe_op=safe_math.exp
):
def test_special(self):
for dtype in (torch.float32, torch.float64):
x = torch.full([], INF, dtype=dtype, requires_grad=True, device=self.device)
y = self.safe_op(x)
self.assertEqual(
y, get_constants_like(math.log(finfo(dtype).max) - 1e-4, x).exp()
)
y.backward()
self.assertEqual(x.grad, 0)
class TestSafeLog(
BotorchTestCase, UnaryOpTestMixin, op=torch.log, safe_op=safe_math.log
):
def test_special(self):
for dtype in (torch.float32, torch.float64):
x = torch.zeros([], dtype=dtype, requires_grad=True, device=self.device)
y = self.safe_op(x)
self.assertEqual(y, math.log(finfo(dtype).tiny))
y.backward()
self.assertEqual(x.grad, 0)
class TestSafeAdd(
BotorchTestCase, BinaryOpTestMixin, op=torch.add, safe_op=safe_math.add
):
def test_special(self):
for dtype in (torch.float32, torch.float64):
for _a in (INF, -INF):
a = torch.tensor(
_a, dtype=dtype, requires_grad=True, device=self.device
)
b = torch.tensor(
INF, dtype=dtype, requires_grad=True, device=self.device
)
out = self.safe_op(a, b)
self.assertEqual(out, 0 if a != b else b)
out.backward()
self.assertEqual(a.grad, 0 if a != b else 1)
self.assertEqual(b.grad, 0 if a != b else 1)
class TestSafeSub(
BotorchTestCase, BinaryOpTestMixin, op=torch.sub, safe_op=safe_math.sub
):
def test_special(self):
for dtype in (torch.float32, torch.float64):
for _a in (INF, -INF):
a = torch.tensor(
_a, dtype=dtype, requires_grad=True, device=self.device
)
b = torch.tensor(
INF, dtype=dtype, requires_grad=True, device=self.device
)
out = self.safe_op(a, b)
self.assertEqual(out, 0 if a == b else -b)
out.backward()
self.assertEqual(a.grad, 0 if a == b else 1)
self.assertEqual(b.grad, 0 if a == b else -1)
class TestSafeMul(
BotorchTestCase, BinaryOpTestMixin, op=torch.mul, safe_op=safe_math.mul
):
def test_special(self):
for dtype in (torch.float32, torch.float64):
for _a, _b in product([0, 2], [INF, -INF]):
a = torch.tensor(
_a, dtype=dtype, requires_grad=True, device=self.device
)
b = torch.tensor(
_b, dtype=dtype, requires_grad=True, device=self.device
)
out = self.safe_op(a, b)
self.assertEqual(out, a if a == 0 else b)
out.backward()
self.assertEqual(a.grad, 0 if a == 0 else b)
self.assertEqual(b.grad, 0 if a == 0 else a)
class TestSafeDiv(
BotorchTestCase, BinaryOpTestMixin, op=torch.div, safe_op=safe_math.div
):
def test_special(self):
for dtype in (torch.float32, torch.float64):
for _a, _b in combinations([0, INF, -INF], 2):
a = torch.tensor(
_a, dtype=dtype, requires_grad=True, device=self.device
)
b = torch.tensor(
_b, dtype=dtype, requires_grad=True, device=self.device
)
out = self.safe_op(a, b)
if a == b:
self.assertEqual(out, 1)
elif a == -b:
self.assertEqual(out, -1)
else:
self.assertEqual(out, a / b)
out.backward()
if ((a == 0) & (b == 0)) | (a.isinf() & b.isinf()):
self.assertEqual(a.grad, 0)
self.assertEqual(b.grad, 0)
else:
self.assertEqual(a.grad, 1 / b)
self.assertEqual(b.grad, -a * b**-2)
class TestLogMeanExp(BotorchTestCase):
def test_log_mean_exp(self):
for dtype in (torch.float32, torch.float64):
X = torch.rand(3, 2, 5, dtype=dtype, device=self.device) + 0.1
# test single-dimension reduction
self.assertAllClose(logmeanexp(X.log(), dim=-1).exp(), X.mean(dim=-1))
self.assertAllClose(logmeanexp(X.log(), dim=-2).exp(), X.mean(dim=-2))
# test tuple of dimensions
self.assertAllClose(
logmeanexp(X.log(), dim=(0, -1)).exp(), X.mean(dim=(0, -1))
)
# with keepdim
self.assertAllClose(
logmeanexp(X.log(), dim=-1, keepdim=True).exp(),
X.mean(dim=-1, keepdim=True),
)
self.assertAllClose(
logmeanexp(X.log(), dim=-2, keepdim=True).exp(),
X.mean(dim=-2, keepdim=True),
)
self.assertAllClose(
logmeanexp(X.log(), dim=(0, -1), keepdim=True).exp(),
X.mean(dim=(0, -1), keepdim=True),
)
class TestSmoothNonLinearities(BotorchTestCase):
def test_smooth_non_linearities(self):
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
n = 17
X = torch.randn(n, **tkwargs)
self.assertAllClose(cauchy(X), 1 / (X.square() + 1))
# test monotonicity of pareto for X < 0
a = 10.0
n = 32
X = torch.arange(-a, a, step=2 * a / n, requires_grad=True, **tkwargs)
pareto_X = _pareto(X, alpha=2.0, check=False)
self.assertTrue((pareto_X > 0).all())
pareto_X.sum().backward()
self.assertTrue((X.grad[X >= 0] < 0).all())
self.assertFalse(
(X.grad[X < 0] >= 0).all() or (X.grad[X < 0] <= 0).all()
) # only monotonic for X >= 0.
zero = torch.tensor(0, requires_grad=True, **tkwargs)
pareto_zero = _pareto(zero, alpha=2.0, check=False)
# testing that value and first two derivatives are one at x = 0.
self.assertAllClose(pareto_zero, torch.ones_like(zero))
zero.backward()
self.assertAllClose(zero.grad, torch.ones_like(zero))
H = torch.autograd.functional.hessian(
lambda X: _pareto(X, alpha=2.0, check=False), zero
)
self.assertAllClose(H, torch.ones_like(zero))
# testing non-negativity check
with self.assertRaisesRegex(
ValueError, "Argument `x` must be non-negative"
):
_pareto(torch.tensor(-1, **tkwargs), alpha=2.0, check=True)
# testing softplus and fatplus
tau = 1e-2
fatplus_X = fatplus(X, tau=tau)
self.assertAllClose(fatplus_X, X.clamp(0), atol=tau)
self.assertTrue((fatplus_X > 0).all())
self.assertAllClose(fatplus_X.log(), log_fatplus(X, tau=tau))
self.assertAllClose(
softplus(X, beta=1 / tau), log_softplus(X, tau=tau).exp()
)
# testing fatplus differentiability
X = torch.randn(n, **tkwargs)
X.requires_grad = True
log_fatplus(X, tau=tau).sum().backward()
self.assertFalse(X.grad.isinf().any())
self.assertFalse(X.grad.isnan().any())
            # always increasing; could also test convexity (mathematically guaranteed)
self.assertTrue((X.grad > 0).all())
X_soft = X.detach().clone()
X_soft.requires_grad = True
log_softplus(X_soft, tau=tau).sum().backward()
# for positive values away from zero, log_softplus and log_fatplus are close
is_positive = X > 100 * tau # i.e. 1 for tau = 1e-2
self.assertAllClose(X.grad[is_positive], 1 / X[is_positive], atol=tau)
self.assertAllClose(X_soft.grad[is_positive], 1 / X[is_positive], atol=tau)
is_negative = X < -100 * tau # i.e. -1
# the softplus has very large gradients, which can saturate the smooth
# approximation to the maximum over the q-batch.
asym_val = torch.full_like(X_soft.grad[is_negative], 1 / tau)
self.assertAllClose(X_soft.grad[is_negative], asym_val, atol=tau, rtol=tau)
            # the fatplus, on the other hand, has smaller though non-vanishing gradients.
self.assertTrue((X_soft.grad[is_negative] > X.grad[is_negative]).all())
# testing smoothmax and fatmax
for test_max in (smooth_amax, fatmax):
with self.subTest(test_max=test_max):
n, q, d = 7, 5, 3
X = torch.randn(n, q, d, **tkwargs)
for dim, keepdim in itertools.product(
(-1, -2, -3, (-1, -2), (0, 2), (0, 1, 2)), (True, False)
):
test_max_X = test_max(X, dim=dim, keepdim=keepdim, tau=tau)
# getting the number of elements that are reduced over, required
# to set an accurate tolerance parameter for the test below.
numel = (
X.shape[dim]
if isinstance(dim, int)
else math.prod(X.shape[i] for i in dim)
)
self.assertAllClose(
test_max_X,
X.amax(dim=dim, keepdim=keepdim),
atol=math.log(numel) * tau,
)
# special case for d = 1
d = 1
X = torch.randn(n, q, d, **tkwargs)
tau = 1.0
test_max_X = test_max(X, dim=-1, tau=tau)
self.assertAllClose(test_max_X, X[..., 0])
# testing fatmax differentiability
n = 64
a = 10.0
X = torch.arange(-a, a, step=2 * a / n, **tkwargs)
X.requires_grad = True
test_max(X, dim=-1, tau=tau).sum().backward()
self.assertFalse(X.grad.isinf().any())
self.assertFalse(X.grad.isnan().any())
self.assertTrue(X.grad.min() > 0)
                    # the derivative should be an increasing function of the input
X_sorted, sort_indices = X.sort()
self.assertTrue((X.grad[sort_indices].diff() > 0).all())
# the gradient of the fat approximation is a soft argmax, similar to
# how the gradient of logsumexp is the canonical softmax function.
places = 12 if dtype == torch.double else 6
self.assertAlmostEqual(X.grad.sum().item(), 1.0, places=places)
# testing special cases with infinities
# case 1: all inputs are positive infinity
n = 5
X = torch.full((n,), torch.inf, **tkwargs, requires_grad=True)
test_max_X = test_max(X, dim=-1, tau=tau)
self.assertAllClose(test_max_X, torch.tensor(torch.inf, **tkwargs))
test_max_X.backward()
self.assertFalse(X.grad.isnan().any())
# since all elements are equal, their gradients should be equal too
self.assertAllClose(X.grad, torch.ones_like(X.grad))
# case 2: there's a mix of positive and negative infinity
X = torch.randn((n,), **tkwargs)
X[1] = torch.inf
X[2] = -torch.inf
X.requires_grad = True
test_max_X = test_max(X, dim=-1, tau=tau)
self.assertAllClose(test_max_X, torch.tensor(torch.inf, **tkwargs))
test_max_X.backward()
expected_grad = torch.zeros_like(X.grad)
expected_grad[1] = 1
self.assertAllClose(X.grad, expected_grad)
# case 3: all inputs are negative infinity
X = torch.full((n,), -torch.inf, **tkwargs, requires_grad=True)
test_max_X = test_max(X, dim=-1, tau=tau)
self.assertAllClose(test_max_X, torch.tensor(-torch.inf, **tkwargs))
# since all elements are equal, their gradients should be equal too
test_max_X.backward()
self.assertAllClose(X.grad, torch.ones_like(X.grad))
# testing logplusexp
n = 17
x, y = torch.randn(n, d, **tkwargs), torch.randn(n, d, **tkwargs)
tol = 1e-12 if dtype == torch.double else 1e-6
self.assertAllClose(logplusexp(x, y), (x.exp() + y.exp()).log(), atol=tol)
# testing logdiffexp
y = 2 * x.abs()
self.assertAllClose(logdiffexp(x, y), (y.exp() - x.exp()).log(), atol=tol)
# testing fatmaximum
tau = 1e-2
self.assertAllClose(fatmaximum(x, y, tau=tau), x.maximum(y), atol=tau)
# testing fatminimum
self.assertAllClose(fatminimum(x, y, tau=tau), x.minimum(y), atol=tau)
# testing fatmoid
X = torch.arange(-a, a, step=2 * a / n, requires_grad=True, **tkwargs)
fatmoid_X = fatmoid(X, tau=tau)
# output is in [0, 1]
self.assertTrue((fatmoid_X > 0).all())
self.assertTrue((fatmoid_X < 1).all())
# skew symmetry
atol = 1e-6 if dtype == torch.float32 else 1e-12
self.assertAllClose(1 - fatmoid_X, fatmoid(-X, tau=tau), atol=atol)
zero = torch.tensor(0.0, **tkwargs)
half = torch.tensor(0.5, **tkwargs)
self.assertAllClose(fatmoid(zero), half, atol=atol)
self.assertAllClose(fatmoid_X.log(), log_fatmoid(X, tau=tau))
is_center = X.abs() < 100 * tau
self.assertAllClose(
fatmoid_X[~is_center], (X[~is_center] > 0).to(fatmoid_X), atol=1e-3
)
# testing differentiability
X.requires_grad = True
log_fatmoid(X, tau=tau).sum().backward()
self.assertFalse(X.grad.isinf().any())
self.assertFalse(X.grad.isnan().any())
self.assertTrue((X.grad > 0).all())
# testing constraint indicator
constraints = [sum_constraint]
b = 3
q = 4
m = 5
samples = torch.randn(b, q, m, **tkwargs)
eta = 1e-3
fat = True
log_feas_vals = compute_smoothed_feasibility_indicator(
constraints=constraints,
samples=samples,
eta=eta,
log=True,
fat=fat,
)
self.assertTrue(log_feas_vals.shape == torch.Size([b, q]))
expected_feas_vals = sum_constraint(samples) < 0
hard_feas_vals = log_feas_vals.exp() > 1 / 2
self.assertAllClose(hard_feas_vals, expected_feas_vals)
# with deterministic inputs:
samples = torch.ones(1, 1, m, **tkwargs) # sum is greater than 0
log_feas_vals = compute_smoothed_feasibility_indicator(
constraints=constraints,
samples=samples,
eta=eta,
log=True,
fat=fat,
)
self.assertTrue((log_feas_vals.exp() > 1 / 2).item())
# with deterministic inputs:
samples = -torch.ones(1, 1, m, **tkwargs) # sum is smaller than 0
log_feas_vals = compute_smoothed_feasibility_indicator(
constraints=constraints,
samples=samples,
eta=eta,
log=True,
fat=fat,
)
self.assertFalse((log_feas_vals.exp() > 1 / 2).item())
# testing sigmoid wrapper function
X = torch.randn(3, 4, 5, **tkwargs)
sigmoid_X = torch.sigmoid(X)
self.assertAllClose(sigmoid(X), sigmoid_X)
self.assertAllClose(sigmoid(X, log=True), logexpit(X))
self.assertAllClose(sigmoid(X, log=True).exp(), sigmoid_X)
fatmoid_X = fatmoid(X)
self.assertAllClose(sigmoid(X, fat=True), fatmoid_X)
self.assertAllClose(sigmoid(X, log=True, fat=True).exp(), fatmoid_X)
with self.assertRaisesRegex(UnsupportedError, "Only dtypes"):
log_softplus(torch.randn(2, dtype=torch.float16))
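# Illustrative sketch (not part of the test suite): the log-space helpers above
# exist to avoid overflow; e.g. logmeanexp agrees with the naive computation
# where the latter is finite, but stays finite for large inputs. The function
# name is hypothetical.
def _example_logmeanexp_stability():
    x = torch.tensor([1000.0, 1000.0])  # exp(1000.) overflows to inf
    naive = x.exp().mean().log()  # inf
    stable = logmeanexp(x, dim=-1)  # 1000.0
    return naive, stable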
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.utils import get_outcome_constraint_transforms
from botorch.utils.constraints import get_monotonicity_constraints
from botorch.utils.testing import BotorchTestCase
class TestConstraintUtils(BotorchTestCase):
def setUp(self):
super().setUp()
self.A = torch.tensor([[-1.0, 0.0, 0.0], [0.0, 1.0, 1.0]])
self.b = torch.tensor([[-0.5], [1.0]])
self.Ys = torch.tensor([[0.75, 1.0, 0.5], [0.25, 1.5, 1.0]]).unsqueeze(0)
self.results = torch.tensor([[-0.25, 0.5], [0.25, 1.5]]).view(1, 2, 2)
def test_get_outcome_constraint_transforms(self):
# test None
self.assertIsNone(get_outcome_constraint_transforms(None))
# test basic evaluation
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
A = self.A.to(**tkwargs)
b = self.b.to(**tkwargs)
Ys = self.Ys.to(**tkwargs)
results = self.results.to(**tkwargs)
ocs = get_outcome_constraint_transforms((A, b))
self.assertEqual(len(ocs), 2)
for i in (0, 1):
for j in (0, 1):
self.assertTrue(torch.equal(ocs[j](Ys[:, i]), results[:, i, j]))
# test broadcasted evaluation
k, t = 3, 4
mc_samples, b, q = 6, 4, 5
A_ = torch.randn(k, t, **tkwargs)
b_ = torch.randn(k, 1, **tkwargs)
Y = torch.randn(mc_samples, b, q, t, **tkwargs)
ocs = get_outcome_constraint_transforms((A_, b_))
self.assertEqual(len(ocs), k)
self.assertEqual(ocs[0](Y).shape, torch.Size([mc_samples, b, q]))
def test_get_monotonicity_constraints(self):
for dtype in (torch.float, torch.double):
tkwargs = {"dtype": dtype, "device": self.device}
for d in (3, 17):
with self.subTest(dtype=dtype, d=d):
A, b = get_monotonicity_constraints(d, **tkwargs)
self.assertEqual(A.shape, (d - 1, d))
self.assertEqual(A.dtype, dtype)
self.assertEqual(A.device.type, self.device.type)
self.assertEqual(b.shape, (d - 1, 1))
self.assertEqual(b.dtype, dtype)
self.assertEqual(b.device.type, self.device.type)
unique_vals = torch.tensor([-1, 0, 1], **tkwargs)
self.assertAllClose(A.unique(), unique_vals)
self.assertAllClose(b, torch.zeros_like(b))
self.assertTrue(
torch.equal(A.sum(dim=-1), torch.zeros(d - 1, **tkwargs))
)
n_test = 3
X_test = torch.randn(d, n_test, **tkwargs)
X_diff_true = -X_test.diff(dim=0) # x[i] - x[i+1] < 0
X_diff = A @ X_test
self.assertAllClose(X_diff, X_diff_true)
is_monotonic_true = (X_diff_true < 0).all(dim=0)
is_monotonic = (X_diff < b).all(dim=0)
self.assertAllClose(is_monotonic, is_monotonic_true)
Ad, bd = get_monotonicity_constraints(d, descending=True, **tkwargs)
self.assertAllClose(Ad, -A)
self.assertAllClose(bd, b)
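# Illustrative sketch (not part of the test suite): get_monotonicity_constraints
# returns (A, b) such that `A @ x < b` holds exactly for coordinate-wise
# ascending x (or for descending x when `descending=True`). The function name
# is hypothetical.
def _example_monotonicity_constraints():
    d = 4
    A, b = get_monotonicity_constraints(d, dtype=torch.double)
    x = torch.arange(d, dtype=torch.double).unsqueeze(-1)  # strictly ascending
    return bool((A @ x < b).all())  # True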
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from unittest.mock import patch
import torch
from botorch.utils.containers import BotorchContainer, DenseContainer, SliceContainer
from botorch.utils.testing import BotorchTestCase
from torch import Size
@dataclass
class BadContainer(BotorchContainer):
def __call__(self) -> None:
pass # pragma: nocover
def __eq__(self, other) -> bool:
pass # pragma: nocover
@property
def shape(self):
pass # pragma: nocover
@property
def device(self):
pass # pragma: nocover
@property
def dtype(self):
pass # pragma: nocover
class TestContainers(BotorchTestCase):
def test_base(self):
with self.assertRaisesRegex(TypeError, "Can't instantiate abstract class"):
BotorchContainer()
with self.assertRaisesRegex(AttributeError, "Missing .* `event_shape`"):
BadContainer()
with patch.multiple(BotorchContainer, __abstractmethods__=set()):
container = BotorchContainer()
with self.assertRaises(NotImplementedError):
container()
with self.assertRaises(NotImplementedError):
container.device
with self.assertRaises(NotImplementedError):
container.dtype
with self.assertRaises(NotImplementedError):
container.shape
with self.assertRaises(NotImplementedError):
container.__eq__(None)
def test_dense(self):
for values in (
torch.rand(3, 2, dtype=torch.float16),
torch.rand(5, 4, 3, 2, dtype=torch.float64),
):
event_shape = values.shape[values.ndim // 2 :]
# Test some invalid shapes
with self.assertRaisesRegex(ValueError, "Shape .* incompatible"):
X = DenseContainer(values=values, event_shape=Size([3]))
with self.assertRaisesRegex(ValueError, "Shape .* incompatible"):
X = DenseContainer(values=values, event_shape=torch.Size([2, 3]))
            # Test some basic properties
X = DenseContainer(values=values, event_shape=event_shape)
self.assertEqual(X.device, values.device)
self.assertEqual(X.dtype, values.dtype)
# Test `shape` property
self.assertEqual(X.shape, values.shape)
# Test `__eq__`
self.assertEqual(X, DenseContainer(values, event_shape))
self.assertNotEqual(X, DenseContainer(torch.rand_like(values), event_shape))
# Test `__call__`
self.assertTrue(X().equal(values))
def test_slice(self):
for arity in (2, 4):
for vals in (
torch.rand(8, 2, dtype=torch.float16),
torch.rand(8, 3, 2, dtype=torch.float16),
):
indices = torch.stack(
[torch.randperm(len(vals))[:arity] for _ in range(4)]
)
event_shape = (arity * vals.shape[1],) + vals.shape[2:]
with self.assertRaisesRegex(ValueError, "Shapes .* incompatible"):
SliceContainer(
values=vals,
indices=indices,
event_shape=(10 * event_shape[0],) + event_shape[1:],
)
                # Test some basic properties
groups = SliceContainer(vals, indices, event_shape=event_shape)
self.assertEqual(groups.device, vals.device)
self.assertEqual(groups.dtype, vals.dtype)
self.assertEqual(groups.shape, groups().shape)
# Test `__eq__`
self.assertEqual(groups, SliceContainer(vals, indices, event_shape))
self.assertNotEqual(
groups, SliceContainer(torch.rand_like(vals), indices, event_shape)
)
# Test `__call__`
dense = groups()
index = int(torch.randint(high=len(dense), size=()))
other = torch.cat([vals[i] for i in indices[index]])
self.assertTrue(dense[index].equal(other))
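# Illustrative sketch (not part of the test suite): SliceContainer gathers rows
# of `values` by `indices` and concatenates them along the event dimension when
# materialized via `__call__`. The function name is hypothetical.
def _example_slice_container():
    values = torch.rand(6, 2)
    indices = torch.stack([torch.randperm(6)[:2] for _ in range(3)])
    groups = SliceContainer(values, indices, event_shape=Size([4]))
    return groups()  # shape (3, 4): each row is two concatenated value rows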
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.utils import apply_constraints, get_objective_weights_transform
from botorch.utils.objective import (
compute_feasibility_indicator,
compute_smoothed_feasibility_indicator,
soft_eval_constraint,
)
from botorch.utils.testing import BotorchTestCase
from torch import Tensor
def ones_f(samples: Tensor) -> Tensor:
return torch.ones(samples.shape[0:-1], device=samples.device, dtype=samples.dtype)
def zeros_f(samples: Tensor) -> Tensor:
return torch.zeros(samples.shape[0:-1], device=samples.device, dtype=samples.dtype)
def nonzeros_f(samples: Tensor) -> Tensor:
t = torch.zeros(samples.shape[0:-1], device=samples.device, dtype=samples.dtype)
t[:] = 0.1
return t
def minus_one_f(samples: Tensor) -> Tensor:
return -(
torch.ones(samples.shape[0:-1], device=samples.device, dtype=samples.dtype)
)
class TestApplyConstraints(BotorchTestCase):
def test_apply_constraints(self):
# nonnegative objective, one constraint
samples = torch.randn(1)
obj = ones_f(samples)
obj = apply_constraints(
obj=obj, constraints=[zeros_f], samples=samples, infeasible_cost=0.0
)
self.assertTrue(torch.equal(obj, ones_f(samples) * 0.5))
        # nonnegative objective, two constraints
samples = torch.randn(1)
obj = ones_f(samples)
obj = apply_constraints(
obj=obj,
constraints=[zeros_f, zeros_f],
samples=samples,
infeasible_cost=0.0,
)
self.assertTrue(torch.equal(obj, ones_f(samples) * 0.5 * 0.5))
# negative objective, one constraint, infeasible_cost
samples = torch.randn(1)
obj = minus_one_f(samples)
obj = apply_constraints(
obj=obj, constraints=[zeros_f], samples=samples, infeasible_cost=2.0
)
self.assertTrue(torch.equal(obj, ones_f(samples) * 0.5 - 2.0))
# nonnegative objective, one constraint, eta = 0
samples = torch.randn(1)
obj = ones_f(samples)
with self.assertRaisesRegex(ValueError, "eta must be positive."):
apply_constraints(
obj=obj,
constraints=[zeros_f],
samples=samples,
infeasible_cost=0.0,
eta=0.0,
)
# soft_eval_constraint is not in the path of apply_constraints, adding this test
# for coverage.
with self.assertRaisesRegex(ValueError, "eta must be positive."):
soft_eval_constraint(lhs=obj, eta=0.0)
ind = soft_eval_constraint(lhs=ones_f(samples), eta=1e-6)
self.assertAllClose(ind, torch.zeros_like(ind))
ind = soft_eval_constraint(lhs=-ones_f(samples), eta=1e-6)
self.assertAllClose(ind, torch.ones_like(ind))
def test_apply_constraints_multi_output(self):
# nonnegative objective, one constraint
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
samples = torch.rand(3, 2, **tkwargs)
obj = samples.clone()
obj = apply_constraints(
obj=obj, constraints=[zeros_f], samples=samples, infeasible_cost=0.0
)
self.assertTrue(torch.equal(obj, samples * 0.5))
            # nonnegative objective, two constraints
obj = samples.clone()
obj = apply_constraints(
obj=obj,
constraints=[zeros_f, zeros_f],
samples=samples,
infeasible_cost=0.0,
)
self.assertTrue(torch.equal(obj, samples * 0.5 * 0.5))
            # nonnegative objective, two constraints, explicit eta
obj = samples.clone()
obj = apply_constraints(
obj=obj,
constraints=[zeros_f, zeros_f],
samples=samples,
infeasible_cost=0.0,
eta=torch.tensor([10e-3, 10e-3]).to(**tkwargs),
)
self.assertTrue(torch.equal(obj, samples * 0.5 * 0.5))
            # nonnegative objective, two constraints, explicit different eta
obj = samples.clone()
obj = apply_constraints(
obj=obj,
constraints=[nonzeros_f, nonzeros_f],
samples=samples,
infeasible_cost=0.0,
eta=torch.tensor([10e-1, 10e-2]).to(**tkwargs),
)
self.assertTrue(
torch.allclose(
obj,
samples
* torch.sigmoid(torch.as_tensor(-0.1) / 10e-1)
* torch.sigmoid(torch.as_tensor(-0.1) / 10e-2),
)
)
            # nonnegative objective, two constraints, explicit different eta,
            # using ones_f
obj = samples.clone()
obj = apply_constraints(
obj=obj,
constraints=[ones_f, ones_f],
samples=samples,
infeasible_cost=0.0,
eta=torch.tensor([1, 10]).to(**tkwargs),
)
self.assertTrue(
torch.allclose(
obj,
samples
* torch.sigmoid(torch.as_tensor(-1.0) / 1.0)
* torch.sigmoid(torch.as_tensor(-1.0) / 10.0),
)
)
# negative objective, one constraint, infeasible_cost
obj = samples.clone().clamp_min(-1.0)
obj = apply_constraints(
obj=obj, constraints=[zeros_f], samples=samples, infeasible_cost=2.0
)
self.assertAllClose(obj, samples.clamp_min(-1.0) * 0.5 - 1.0)
# negative objective, one constraint, infeasible_cost, explicit eta
obj = samples.clone().clamp_min(-1.0)
obj = apply_constraints(
obj=obj,
constraints=[zeros_f],
samples=samples,
infeasible_cost=2.0,
eta=torch.tensor([10e-3]).to(**tkwargs),
)
self.assertAllClose(obj, samples.clamp_min(-1.0) * 0.5 - 1.0)
# nonnegative objective, one constraint, eta = 0
obj = samples
with self.assertRaisesRegex(ValueError, "eta must be positive"):
apply_constraints(
obj=obj,
constraints=[zeros_f],
samples=samples,
infeasible_cost=0.0,
eta=0.0,
)
def test_apply_constraints_wrong_eta_dim(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
samples = torch.rand(3, 2, **tkwargs)
obj = samples.clone()
with self.assertRaisesRegex(ValueError, "Number of provided constraints"):
obj = apply_constraints(
obj=obj,
constraints=[zeros_f, zeros_f],
samples=samples,
infeasible_cost=0.0,
eta=torch.tensor([0.1]).to(**tkwargs),
)
with self.assertRaisesRegex(ValueError, "Number of provided constraints"):
obj = apply_constraints(
obj=obj,
constraints=[zeros_f, zeros_f],
samples=samples,
infeasible_cost=0.0,
eta=torch.tensor([0.1, 0.1, 0.3]).to(**tkwargs),
)
def test_constraint_indicators(self):
# nonnegative objective, one constraint
samples = torch.randn(1)
ind = compute_feasibility_indicator(constraints=[zeros_f], samples=samples)
self.assertAllClose(ind, torch.zeros_like(ind))
self.assertEqual(ind.dtype, torch.bool)
smoothed_ind = compute_smoothed_feasibility_indicator(
constraints=[zeros_f], samples=samples, eta=1e-3
)
self.assertAllClose(smoothed_ind, ones_f(samples) / 2)
# two constraints
samples = torch.randn(1)
smoothed_ind = compute_smoothed_feasibility_indicator(
constraints=[zeros_f, zeros_f],
samples=samples,
eta=1e-3,
)
self.assertAllClose(smoothed_ind, ones_f(samples) * 0.5 * 0.5)
# feasible
samples = torch.randn(1)
ind = compute_feasibility_indicator(
constraints=[minus_one_f],
samples=samples,
)
self.assertAllClose(ind, torch.ones_like(ind))
smoothed_ind = compute_smoothed_feasibility_indicator(
constraints=[minus_one_f], samples=samples, eta=1e-3
)
self.assertTrue((smoothed_ind > 3 / 4).all())
with self.assertRaisesRegex(ValueError, "Number of provided constraints"):
compute_smoothed_feasibility_indicator(
constraints=[zeros_f, zeros_f],
samples=samples,
eta=torch.tensor([0.1], device=self.device),
)
class TestGetObjectiveWeightsTransform(BotorchTestCase):
def test_NoWeights(self):
Y = torch.ones(5, 2, 4, 1)
objective_transform = get_objective_weights_transform(None)
Y_transformed = objective_transform(Y)
self.assertTrue(torch.equal(Y.squeeze(-1), Y_transformed))
def test_OneWeightBroadcasting(self):
Y = torch.ones(5, 2, 4, 1)
objective_transform = get_objective_weights_transform(torch.tensor([0.5]))
Y_transformed = objective_transform(Y)
self.assertTrue(torch.equal(0.5 * Y.sum(dim=-1), Y_transformed))
def test_IncompatibleNumberOfWeights(self):
Y = torch.ones(5, 2, 4, 3)
objective_transform = get_objective_weights_transform(torch.tensor([1.0, 2.0]))
with self.assertRaises(RuntimeError):
objective_transform(Y)
def test_MultiTaskWeights(self):
Y = torch.ones(5, 2, 4, 2)
objective_transform = get_objective_weights_transform(torch.tensor([1.0, 1.0]))
Y_transformed = objective_transform(Y)
self.assertTrue(torch.equal(torch.sum(Y, dim=-1), Y_transformed))
def test_NoMCSamples(self):
Y = torch.ones(2, 4, 2)
objective_transform = get_objective_weights_transform(torch.tensor([1.0, 1.0]))
Y_transformed = objective_transform(Y)
self.assertTrue(torch.equal(torch.sum(Y, dim=-1), Y_transformed))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
from math import pi
from unittest import mock
import torch
from botorch.models.converter import batched_to_model_list
from botorch.models.deterministic import DeterministicModel
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.models.model import ModelList
from botorch.models.multitask import MultiTaskGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.utils.gp_sampling import (
get_deterministic_model,
get_deterministic_model_list,
get_deterministic_model_multi_samples,
get_gp_samples,
get_weights_posterior,
GPDraw,
RandomFourierFeatures,
)
from botorch.utils.testing import BotorchTestCase
from botorch.utils.transforms import is_fully_bayesian
from gpytorch.kernels import MaternKernel, PeriodicKernel, RBFKernel, ScaleKernel
from torch.distributions import MultivariateNormal
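# Helper that builds a SingleTaskGP (optionally multi-output, batched, and/or
# with input/outcome transforms) and loads fixed hyperparameters from a state
# dict, so that the GP-sampling tests below are deterministic. The training
# data is returned alongside the model for posterior comparisons.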
def _get_model(
dtype, device, multi_output=False, use_transforms=False, batched_inputs=False
):
tkwargs = {"dtype": dtype, "device": device}
train_X = torch.tensor(
[
[-0.1000],
[0.4894],
[1.0788],
[1.6681],
[2.2575],
[2.8469],
[3.4363],
[4.0257],
[4.6150],
[5.2044],
[5.7938],
[6.3832],
],
**tkwargs,
)
train_Y = torch.tensor(
[
[-0.0274],
[0.2612],
[0.8114],
[1.1916],
[1.4870],
[0.8611],
[-0.9226],
[-0.5916],
[-1.3301],
[-1.8847],
[0.0647],
[1.0900],
],
**tkwargs,
)
state_dict = {
"likelihood.noise_covar.raw_noise": torch.tensor([0.0214], **tkwargs),
"likelihood.noise_covar.noise_prior.concentration": torch.tensor(
1.1000, **tkwargs
),
"likelihood.noise_covar.noise_prior.rate": torch.tensor(0.0500, **tkwargs),
"mean_module.raw_constant": torch.tensor(0.1398, **tkwargs),
"covar_module.raw_outputscale": torch.tensor(0.6933, **tkwargs),
"covar_module.base_kernel.raw_lengthscale": torch.tensor(
[[-0.0444]], **tkwargs
),
"covar_module.base_kernel.lengthscale_prior.concentration": torch.tensor(
3.0, **tkwargs
),
"covar_module.base_kernel.lengthscale_prior.rate": torch.tensor(6.0, **tkwargs),
"covar_module.outputscale_prior.concentration": torch.tensor(2.0, **tkwargs),
"covar_module.outputscale_prior.rate": torch.tensor(0.1500, **tkwargs),
}
if multi_output:
train_Y2 = torch.tensor(
[
[0.9723],
[1.0652],
[0.7667],
[-0.5542],
[-0.6266],
[-0.5350],
[-0.8854],
[-1.3024],
[1.0408],
[0.2485],
[1.4924],
[1.5393],
],
**tkwargs,
)
train_Y = torch.cat([train_Y, train_Y2], dim=-1)
state_dict["likelihood.noise_covar.raw_noise"] = torch.stack(
[
state_dict["likelihood.noise_covar.raw_noise"],
torch.tensor([0.0745], **tkwargs),
]
)
state_dict["mean_module.raw_constant"] = torch.stack(
[state_dict["mean_module.raw_constant"], torch.tensor(0.3276, **tkwargs)]
)
state_dict["covar_module.raw_outputscale"] = torch.stack(
[
state_dict["covar_module.raw_outputscale"],
torch.tensor(0.4394, **tkwargs),
],
dim=-1,
)
state_dict["covar_module.base_kernel.raw_lengthscale"] = torch.stack(
[
state_dict["covar_module.base_kernel.raw_lengthscale"],
torch.tensor([[-0.4617]], **tkwargs),
]
)
if batched_inputs:
# Both combinations are supported, but they are not covered by these unit tests.
assert not (multi_output or use_transforms)
state_dict["likelihood.noise_covar.raw_noise"] = torch.tensor(
[[0.0214], [0.001]], **tkwargs
)
state_dict["mean_module.raw_constant"] = torch.tensor([0.1398, 0.5], **tkwargs)
state_dict["covar_module.raw_outputscale"] = torch.tensor(
[0.6933, 1.0], **tkwargs
)
state_dict["covar_module.base_kernel.raw_lengthscale"] = torch.tensor(
[[[-0.0444]], [[5.0]]], **tkwargs
)
train_X = train_X.expand(2, -1, -1)
train_Y = train_Y.expand(2, -1, -1)
if use_transforms:
bounds = torch.zeros(2, 1, **tkwargs)
bounds[1] = 10.0
intf = Normalize(d=1, bounds=bounds)
octf = Standardize(m=train_Y.shape[-1])
state_dict["likelihood.noise_covar.raw_noise"] = torch.tensor(
[[0.1743], [0.3132]] if multi_output else [0.1743], **tkwargs
)
state_dict["mean_module.raw_constant"] = torch.tensor(
[0.2560, 0.6714] if multi_output else 0.2555, **tkwargs
)
state_dict["covar_module.raw_outputscale"] = torch.tensor(
[2.4396, 2.6821] if multi_output else 2.4398, **tkwargs
)
state_dict["covar_module.base_kernel.raw_lengthscale"] = torch.tensor(
[[[-1.6197]], [[-1.0532]]] if multi_output else [[-1.6198]], **tkwargs
)
state_dict["outcome_transform.means"] = torch.tensor(
[[0.0842, 0.2685]] if multi_output else [[0.0842]], **tkwargs
)
state_dict["outcome_transform.stdvs"] = torch.tensor(
[[1.0757, 1.0005]] if multi_output else [[1.0757]], **tkwargs
)
state_dict["outcome_transform._stdvs_sq"] = torch.tensor(
[[1.1572, 1.0010]] if multi_output else [[1.1572]], **tkwargs
)
else:
intf = None
octf = None
model = SingleTaskGP(
train_X, train_Y, outcome_transform=octf, input_transform=intf
).eval()
model.load_state_dict(state_dict, strict=False)
return model, train_X, train_Y
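# GPDraw wraps a (deep-copied) fitted model and exposes a single consistent
# sample path; repeated calls condition on previously evaluated points, as the
# base-sample checks below verify. A minimal usage sketch (assuming a fitted
# `model`):
#   gp = GPDraw(model, seed=0)
#   y1 = gp(torch.rand(1, 1))  # evaluate the sample path at a new point
#   y2 = gp(torch.rand(1, 1))  # consistent with y1 along the same path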
class TestGPDraw(BotorchTestCase):
def test_gp_draw_single_output(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
model, _, _ = _get_model(**tkwargs)
mean = model.mean_module.raw_constant.detach().clone()
gp = GPDraw(model)
# test initialization
self.assertIsNone(gp.Xs)
self.assertIsNone(gp.Ys)
self.assertIsNotNone(gp._seed)
# make sure model is actually deepcopied
model.mean_module.constant = float("inf")
self.assertTrue(torch.equal(gp._model.mean_module.raw_constant, mean))
# test basic functionality
test_X1 = torch.rand(1, 1, **tkwargs, requires_grad=True)
Y1 = gp(test_X1)
self.assertEqual(Y1.shape, torch.Size([1, 1]))
Y1.backward()
self.assertIsNotNone(test_X1.grad)
initial_base_samples = gp._base_samples
with torch.no_grad():
Y2 = gp(torch.rand(1, 1, **tkwargs))
self.assertEqual(Y2.shape, torch.Size([1, 1]))
new_base_samples = gp._base_samples
self.assertTrue(
torch.equal(initial_base_samples, new_base_samples[..., :1, :])
)
# evaluate in batch mode (need a new model for this!)
model, _, _ = _get_model(**tkwargs)
gp = GPDraw(model)
with torch.no_grad():
Y_batch = gp(torch.rand(2, 1, 1, **tkwargs))
self.assertEqual(Y_batch.shape, torch.Size([2, 1, 1]))
# test random seed
test_X = torch.rand(1, 1, **tkwargs)
model, _, _ = _get_model(**tkwargs)
gp_a = GPDraw(model=model, seed=0)
self.assertEqual(int(gp_a._seed), 0)
with torch.no_grad():
Ya = gp_a(test_X)
self.assertEqual(int(gp_a._seed), 1)
model, _, _ = _get_model(**tkwargs)
gp_b = GPDraw(model=model, seed=0)
with torch.no_grad():
Yb = gp_b(test_X)
self.assertAllClose(Ya, Yb)
def test_gp_draw_multi_output(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
model, _, _ = _get_model(**tkwargs, multi_output=True)
mean = model.mean_module.raw_constant.detach().clone()
gp = GPDraw(model)
# test initialization
self.assertIsNone(gp.Xs)
self.assertIsNone(gp.Ys)
# make sure model is actually deepcopied
model.mean_module.constant = float("inf")
self.assertTrue(torch.equal(gp._model.mean_module.raw_constant, mean))
# test basic functionality
test_X1 = torch.rand(1, 1, **tkwargs, requires_grad=True)
Y1 = gp(test_X1)
self.assertEqual(Y1.shape, torch.Size([1, 2]))
Y1[:, 1].backward()
self.assertIsNotNone(test_X1.grad)
initial_base_samples = gp._base_samples
with torch.no_grad():
Y2 = gp(torch.rand(1, 1, **tkwargs))
self.assertEqual(Y2.shape, torch.Size([1, 2]))
new_base_samples = gp._base_samples
self.assertTrue(
torch.equal(initial_base_samples, new_base_samples[..., :1, :])
)
# evaluate in batch mode (need a new model for this!)
model, _, _ = _get_model(**tkwargs, multi_output=True)
gp = GPDraw(model)
with torch.no_grad():
Y_batch = gp(torch.rand(2, 1, 1, **tkwargs))
self.assertEqual(Y_batch.shape, torch.Size([2, 1, 2]))
class TestRandomFourierFeatures(BotorchTestCase):
def test_random_fourier_features(self):
# test kernel that is not Scale, RBF, or Matern
with self.assertRaises(NotImplementedError):
RandomFourierFeatures(
kernel=PeriodicKernel(),
input_dim=2,
num_rff_features=3,
)
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
# test init
# test ScaleKernel
base_kernel = RBFKernel(ard_num_dims=2)
kernel = ScaleKernel(base_kernel).to(**tkwargs)
rff = RandomFourierFeatures(
kernel=kernel,
input_dim=2,
num_rff_features=3,
)
self.assertTrue(torch.equal(rff.outputscale, kernel.outputscale))
# check that rff makes a copy
self.assertFalse(rff.outputscale is kernel.outputscale)
self.assertTrue(torch.equal(rff.lengthscale, base_kernel.lengthscale))
# check that rff makes a copy
self.assertFalse(rff.lengthscale is kernel.lengthscale)
for sample_shape in [torch.Size(), torch.Size([5])]:
# test not ScaleKernel
rff = RandomFourierFeatures(
kernel=base_kernel,
input_dim=2,
num_rff_features=3,
sample_shape=sample_shape,
)
self.assertTrue(
torch.equal(rff.outputscale, torch.tensor(1, **tkwargs))
)
self.assertTrue(torch.equal(rff.lengthscale, base_kernel.lengthscale))
# check that rff makes a copy
self.assertFalse(rff.lengthscale is kernel.lengthscale)
self.assertEqual(rff.weights.shape, torch.Size([*sample_shape, 2, 3]))
self.assertEqual(rff.bias.shape, torch.Size([*sample_shape, 3]))
self.assertTrue(((rff.bias <= 2 * pi) & (rff.bias >= 0.0)).all())
# test forward
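# The feature map evaluated here is phi(X) = sqrt(2 * outputscale / num_rff)
# * cos(X / lengthscale @ weights + bias); `expected_Y` below is computed
# directly from this definition and compared against the module's output.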
for sample_shape in [torch.Size(), torch.Size([7])]:
rff = RandomFourierFeatures(
kernel=kernel,
input_dim=2,
num_rff_features=3,
sample_shape=sample_shape,
)
for input_batch_shape in [torch.Size([]), torch.Size([5])]:
X = torch.rand(*input_batch_shape, *sample_shape, 1, 2, **tkwargs)
Y = rff(X)
self.assertEqual(
Y.shape, torch.Size([*input_batch_shape, *sample_shape, 1, 1])
)
_constant = torch.sqrt(2 * rff.outputscale / rff.weights.shape[-1])
_arg_to_cos = X / base_kernel.lengthscale @ rff.weights
_bias_expanded = rff.bias.unsqueeze(-2)
expected_Y = _constant * (torch.cos(_arg_to_cos + _bias_expanded))
self.assertAllClose(Y, expected_Y)
# test get_weights
for sample_shape in [torch.Size(), torch.Size([5])]:
with mock.patch("torch.randn", wraps=torch.randn) as mock_randn:
rff._get_weights(
base_kernel=base_kernel,
input_dim=2,
num_rff_features=3,
sample_shape=sample_shape,
)
mock_randn.assert_called_once_with(
*sample_shape,
2,
3,
dtype=base_kernel.lengthscale.dtype,
device=base_kernel.lengthscale.device,
)
# test get_weights with Matern kernel
with mock.patch(
"torch.randn", wraps=torch.randn
) as mock_randn, mock.patch(
"torch.distributions.Gamma", wraps=torch.distributions.Gamma
) as mock_gamma:
base_kernel = MaternKernel(ard_num_dims=2).to(**tkwargs)
rff._get_weights(
base_kernel=base_kernel,
input_dim=2,
num_rff_features=3,
sample_shape=sample_shape,
)
mock_randn.assert_called_once_with(
*sample_shape,
2,
3,
dtype=base_kernel.lengthscale.dtype,
device=base_kernel.lengthscale.device,
)
mock_gamma.assert_called_once_with(
base_kernel.nu,
base_kernel.nu,
)
def test_get_deterministic_model(self):
tkwargs = {"device": self.device}
# test is known to be non-flaky for each of these seeds
torch.manual_seed(torch.randint(10, torch.Size([])).item())
for dtype, m in product((torch.float, torch.double), (1, 2)):
tkwargs["dtype"] = dtype
use_model_list_vals = [False]
if m == 2:
use_model_list_vals.append(True)
for use_model_list in use_model_list_vals:
weights = []
bases = []
get_model = get_deterministic_model
if use_model_list:
get_model = get_deterministic_model_list
for i in range(m):
num_rff = 2 * (i + 2)
weights.append(torch.rand(num_rff, **tkwargs))
kernel = ScaleKernel(RBFKernel(ard_num_dims=2)).to(**tkwargs)
kernel.outputscale = 0.3 + torch.rand(1, **tkwargs).view(
kernel.outputscale.shape
)
kernel.base_kernel.lengthscale = 0.3 + torch.rand(
2, **tkwargs
).view(kernel.base_kernel.lengthscale.shape)
bases.append(
RandomFourierFeatures(
kernel=kernel,
input_dim=2,
num_rff_features=num_rff,
)
)
model = get_model(weights=weights, bases=bases)
self.assertIsInstance(
model, DeterministicModel if not use_model_list else ModelList
)
self.assertEqual(model.num_outputs, m)
for batch_shape in (torch.Size([]), torch.Size([3])):
X = torch.rand(*batch_shape, 1, 2, **tkwargs)
Y = model.posterior(X).mean
expected_Y = torch.stack(
[basis(X) @ w for w, basis in zip(weights, bases)], dim=-1
)
self.assertAllClose(Y, expected_Y, atol=1e-7, rtol=2e-5)
self.assertEqual(Y.shape, torch.Size([*batch_shape, 1, m]))
def test_get_deterministic_model_multi_samples(self):
tkwargs = {"device": self.device}
n_samples = 5
for dtype, m, batch_shape_w, batch_shape_x in (
(torch.float, 1, torch.Size([]), torch.Size([])),
(torch.double, 2, torch.Size([]), torch.Size([3])),
(torch.double, 1, torch.Size([3]), torch.Size([3])),
(torch.float, 2, torch.Size([3]), torch.Size([5, 3])),
):
tkwargs["dtype"] = dtype
with self.subTest(
dtype=dtype,
m=m,
batch_shape_w=batch_shape_w,
batch_shape_x=batch_shape_x,
):
weights = []
bases = []
for i in range(m):
num_rff = 2 * (i + 2)
# we require weights to be of shape
# `(batch_shape) x n_samples x num_rff`
weights.append(
torch.rand(*batch_shape_w, n_samples, num_rff, **tkwargs)
)
kernel = ScaleKernel(RBFKernel(ard_num_dims=2)).to(**tkwargs)
kernel.outputscale = 0.3 + torch.rand(1, **tkwargs).view(
kernel.outputscale.shape
)
kernel.base_kernel.lengthscale = 0.3 + torch.rand(
2, **tkwargs
).view(kernel.base_kernel.lengthscale.shape)
bases.append(
RandomFourierFeatures(
kernel=kernel,
input_dim=2,
num_rff_features=num_rff,
sample_shape=torch.Size([n_samples]),
)
)
model = get_deterministic_model_multi_samples(
weights=weights, bases=bases
)
self.assertIsInstance(model, DeterministicModel)
self.assertEqual(model.num_outputs, m)
X = torch.rand(*batch_shape_x, n_samples, 1, 2, **tkwargs)
Y = model(X)
for i in range(m):
expected_Yi = (bases[i](X) @ weights[i].unsqueeze(-1)).squeeze(-1)
self.assertAllClose(Y[..., i], expected_Yi)
self.assertEqual(
Y.shape,
torch.Size([*batch_shape_x, n_samples, 1, m]),
)
def test_get_weights_posterior(self):
tkwargs = {"device": self.device}
sigma = 0.01
input_dim = 2
for dtype, input_batch_shape, sample_shape in (
(torch.float, torch.Size(), torch.Size()),
(torch.double, torch.Size(), torch.Size([5])),
(torch.float, torch.Size([3]), torch.Size()),
(torch.double, torch.Size([3]), torch.Size([5])),
):
with self.subTest(
dtype=dtype,
input_batch_shape=input_batch_shape,
sample_shape=sample_shape,
):
tkwargs["dtype"] = dtype
X = torch.rand(*input_batch_shape, 40, input_dim, **tkwargs)
w = torch.rand(*sample_shape, input_dim, **tkwargs)
# Each sample of weights is shared across the batch of X, so the
# effective shape of w is
# (sample_shape) x (input_batch_shape) x input_dim.
for _ in range(len(input_batch_shape)):
w.unsqueeze_(-2)
w = w.expand(*sample_shape, *input_batch_shape, input_dim)
Y_true = (X @ w.unsqueeze(-1)).squeeze(-1)
Y = Y_true + sigma * torch.randn_like(Y_true)
posterior = get_weights_posterior(X=X, y=Y, sigma_sq=sigma**2)
self.assertIsInstance(posterior, MultivariateNormal)
self.assertAllClose(w, posterior.mean, atol=1e-1)
w_samp = posterior.sample()
self.assertEqual(w_samp.shape, w.shape)
def test_get_gp_samples(self):
# test multi-task model
with torch.random.fork_rng():
torch.manual_seed(0)
X = torch.stack([torch.rand(3), torch.tensor([1.0, 0.0, 1.0])], dim=-1)
Y = torch.rand(3, 1)
with self.assertRaises(NotImplementedError):
gp_samples = get_gp_samples(
model=MultiTaskGP(X, Y, task_feature=1),
num_outputs=1,
n_samples=20,
num_rff_features=512,
)
tkwargs = {"device": self.device}
for dtype, m, use_tf, use_batch_model, batched_inputs, n_samples in (
(torch.float, 1, True, False, False, 20),
(torch.float, 1, False, True, False, 20),
(torch.float, 1, False, False, True, 20),
(torch.double, 2, False, True, False, 10),
(torch.double, 2, True, False, False, 30),
):
with self.subTest(
dtype=dtype,
m=m,
use_tf=use_tf,
use_batch_model=use_batch_model,
batched_inputs=batched_inputs,
n_samples=n_samples,
):
tkwargs["dtype"] = dtype
model, X, Y = _get_model(
**tkwargs,
multi_output=m == 2,
use_transforms=use_tf,
batched_inputs=batched_inputs,
)
with torch.random.fork_rng():
torch.manual_seed(0)
gp_samples = get_gp_samples(
model=batched_to_model_list(model)
if ((not use_batch_model) and (m > 1))
else model,
num_outputs=m,
n_samples=n_samples,
num_rff_features=512,
)
samples = gp_samples.posterior(X).mean
self.assertEqual(samples.shape[0], n_samples)
if batched_inputs:
self.assertEqual(samples.shape[1], 2)
self.assertIsInstance(
gp_samples,
ModelList
if ((not use_batch_model) and (m > 1))
else DeterministicModel,
)
Y_hat_rff = samples.mean(dim=0)
with torch.no_grad():
Y_hat = model.posterior(X).mean
self.assertAllClose(Y_hat_rff, Y_hat, atol=5e-1)
# test batched evaluation
test_X = torch.randn(13, n_samples, 3, X.shape[-1], **tkwargs)
if batched_inputs:
test_X = test_X.unsqueeze(-3)
expected_shape = torch.Size([13, n_samples, 2, 3, m])
else:
expected_shape = torch.Size([13, n_samples, 3, m])
Y_batched = gp_samples.posterior(test_X).mean
self.assertEqual(Y_batched.shape, expected_shape)
if use_tf:
# check transforms on sample
if isinstance(gp_samples, DeterministicModel):
self.assertEqual(
model.outcome_transform, gp_samples.outcome_transform
)
self.assertEqual(
model.input_transform, gp_samples.input_transform
)
elif isinstance(gp_samples, ModelList):
model_list = batched_to_model_list(model)
for i in range(model_list.num_outputs):
self.assertTrue(
torch.equal(
model_list.models[i].outcome_transform.means,
gp_samples.models[i].outcome_transform.means,
)
)
self.assertTrue(
torch.equal(
model_list.models[i].outcome_transform.stdvs,
gp_samples.models[i].outcome_transform.stdvs,
)
)
self.assertEqual(
model_list.models[i].input_transform,
gp_samples.models[i].input_transform,
)
# test incorrect batch shape check
with self.assertRaises(ValueError):
gp_samples.posterior(
torch.randn(13, 23, 3, X.shape[-1], **tkwargs)
).mean
# test single sample
means = []
with torch.random.fork_rng():
torch.manual_seed(28)
for _ in range(10):
gp_samples = get_gp_samples(
model=batched_to_model_list(model)
if ((not use_batch_model) and (m > 1))
else model,
num_outputs=m,
n_samples=1,
num_rff_features=512,
)
with torch.no_grad():
means.append(model.posterior(X).mean)
samples = gp_samples.posterior(X).mean
self.assertEqual(samples.shape[:-1], X.shape[:-1])
self.assertIsInstance(
gp_samples,
ModelList
if ((not use_batch_model) and (m > 1))
else DeterministicModel,
)
Y_hat_rff = torch.stack(means, dim=0).mean(dim=0)
with torch.no_grad():
Y_hat = model.posterior(X).mean
self.assertAllClose(Y_hat_rff, Y_hat, atol=5e-1)
# test batched evaluation
test_X = torch.randn(13, 5, 3, X.shape[-1], **tkwargs)
if batched_inputs:
test_X = test_X.unsqueeze(-3)
expected = torch.Size([13, 5, 2, 3, m])
else:
expected = torch.Size([13, 5, 3, m])
Y_batched = gp_samples.posterior(test_X).mean
self.assertEqual(Y_batched.shape, expected)
def test_with_fixed_noise(self):
for n_samples in (1, 20):
gp_samples = get_gp_samples(
model=FixedNoiseGP(
torch.rand(5, 3, dtype=torch.double),
torch.randn(5, 1, dtype=torch.double),
torch.rand(5, 1, dtype=torch.double) * 0.1,
),
num_outputs=1,
n_samples=n_samples,
)
samples = gp_samples(torch.rand(2, 3))
expected_shape = (
torch.Size([2, 1]) if n_samples == 1 else torch.Size([n_samples, 2, 1])
)
self.assertEqual(samples.shape, expected_shape)
def test_with_saas_models(self):
# Construct a SAAS model.
tkwargs = {"dtype": torch.double, "device": self.device}
num_samples = 4
model = SaasFullyBayesianSingleTaskGP(
train_X=torch.rand(10, 4, **tkwargs), train_Y=torch.randn(10, 1, **tkwargs)
)
mcmc_samples = {
"lengthscale": torch.rand(num_samples, 1, 4, **tkwargs),
"outputscale": torch.rand(num_samples, **tkwargs),
"mean": torch.randn(num_samples, **tkwargs),
"noise": torch.rand(num_samples, 1, **tkwargs),
}
model.load_mcmc_samples(mcmc_samples)
# Test proper setup & sampling support.
gp_samples = get_gp_samples(
model=model,
num_outputs=1,
n_samples=1,
)
self.assertTrue(is_fully_bayesian(gp_samples))
# Non-batch evaluation.
samples = gp_samples(torch.rand(2, 4, **tkwargs))
self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
# Batch evaluation.
samples = gp_samples(torch.rand(5, 2, 4, **tkwargs))
self.assertEqual(samples.shape, torch.Size([5, 4, 2, 1]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any
import torch
from botorch.models import (
GenericDeterministicModel,
ModelList,
ModelListGP,
SaasFullyBayesianSingleTaskGP,
SingleTaskGP,
)
from botorch.models.model import Model
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from botorch.utils.transforms import (
_verify_output_shape,
concatenate_pending_points,
is_fully_bayesian,
match_batch_shape,
normalize,
normalize_indices,
standardize,
t_batch_mode_transform,
unnormalize,
)
from torch import Tensor
class TestStandardize(BotorchTestCase):
def test_standardize(self):
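# standardize() rescales each column of Y to zero mean and unit sample
# standard deviation, e.g. [0, 1, 1, 1] maps to [-1.5, 0.5, 0.5, 0.5] as
# asserted below; constant columns are left at zero.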
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
Y = torch.tensor([0.0, 0.0], **tkwargs)
self.assertTrue(torch.equal(Y, standardize(Y)))
Y2 = torch.tensor([0.0, 1.0, 1.0, 1.0], **tkwargs)
expected_Y2_stdized = torch.tensor([-1.5, 0.5, 0.5, 0.5], **tkwargs)
self.assertTrue(torch.equal(expected_Y2_stdized, standardize(Y2)))
Y3 = torch.tensor(
[[0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], **tkwargs
).transpose(1, 0)
Y3_stdized = standardize(Y3)
self.assertTrue(torch.equal(Y3_stdized[:, 0], expected_Y2_stdized))
self.assertTrue(torch.equal(Y3_stdized[:, 1], torch.zeros(4, **tkwargs)))
Y4 = torch.cat([Y3, Y2.unsqueeze(-1)], dim=-1)
Y4_stdized = standardize(Y4)
self.assertTrue(torch.equal(Y4_stdized[:, 0], expected_Y2_stdized))
self.assertTrue(torch.equal(Y4_stdized[:, 1], torch.zeros(4, **tkwargs)))
self.assertTrue(torch.equal(Y4_stdized[:, 2], expected_Y2_stdized))
class TestNormalizeAndUnnormalize(BotorchTestCase):
def test_normalize_unnormalize(self):
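# normalize() maps X into the unit cube via (X - lower) / (upper - lower),
# and unnormalize() inverts this map; the round-trip assertions below verify
# both directions against hand-computed values.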
for dtype in (torch.float, torch.double):
X = torch.tensor([0.0, 0.25, 0.5], device=self.device, dtype=dtype).view(
-1, 1
)
expected_X_normalized = torch.tensor(
[0.0, 0.5, 1.0], device=self.device, dtype=dtype
).view(-1, 1)
bounds = torch.tensor([0.0, 0.5], device=self.device, dtype=dtype).view(
-1, 1
)
X_normalized = normalize(X, bounds=bounds)
self.assertTrue(torch.equal(expected_X_normalized, X_normalized))
self.assertTrue(torch.equal(X, unnormalize(X_normalized, bounds=bounds)))
X2 = torch.tensor(
[[0.25, 0.125, 0.0], [0.25, 0.0, 0.5]], device=self.device, dtype=dtype
).transpose(1, 0)
expected_X2_normalized = torch.tensor(
[[1.0, 0.5, 0.0], [0.5, 0.0, 1.0]], device=self.device, dtype=dtype
).transpose(1, 0)
bounds2 = torch.tensor(
[[0.0, 0.0], [0.25, 0.5]], device=self.device, dtype=dtype
)
X2_normalized = normalize(X2, bounds=bounds2)
self.assertTrue(torch.equal(X2_normalized, expected_X2_normalized))
self.assertTrue(torch.equal(X2, unnormalize(X2_normalized, bounds=bounds2)))
class BMIMTestClass(BotorchTestCase):
@t_batch_mode_transform(assert_output_shape=False)
def q_method(self, X: Tensor) -> None:
return X
@t_batch_mode_transform(expected_q=1, assert_output_shape=False)
def q1_method(self, X: Tensor) -> None:
return X
@t_batch_mode_transform(assert_output_shape=False)
def kw_method(self, X: Tensor, dummy_arg: Any = None):
self.assertIsNotNone(dummy_arg)
return X
@t_batch_mode_transform(assert_output_shape=True)
def wrong_shape_method(self, X: Tensor):
return X
@t_batch_mode_transform(assert_output_shape=True)
def correct_shape_method(self, X: Tensor):
return X.mean(dim=(-1, -2)).squeeze(-1)
@concatenate_pending_points
def dummy_method(self, X: Tensor) -> Tensor:
return X
@t_batch_mode_transform(assert_output_shape=True)
def broadcast_batch_shape_method(self, X: Tensor):
return X.mean(dim=(-1, -2)).repeat(2, *[1] * (X.dim() - 2))
class NotSoAbstractBaseModel(Model):
def posterior(self, X, output_indices, observation_noise, **kwargs):
pass
@property
def batch_shape(self) -> torch.Size:
if hasattr(self, "_batch_shape"):
return self._batch_shape
else:
return super().batch_shape
class TestBatchModeTransform(BotorchTestCase):
def test_verify_output_shape(self):
# output shape matching t-batch shape of X
self.assertTrue(
_verify_output_shape(acqf=None, X=torch.ones(3, 2, 1), output=torch.ones(3))
)
# output shape is [], t-batch shape of X is [1]
X = torch.ones(1, 1, 1)
self.assertTrue(_verify_output_shape(acqf=None, X=X, output=torch.tensor(1)))
# shape mismatch and cls does not have model attribute
acqf = BMIMTestClass()
with self.assertWarns(RuntimeWarning):
self.assertTrue(_verify_output_shape(acqf=acqf, X=X, output=X))
# shape mismatch and cls.model does not define batch shape
acqf.model = NotSoAbstractBaseModel()
with self.assertWarns(RuntimeWarning):
self.assertTrue(_verify_output_shape(acqf=acqf, X=X, output=X))
# Output matches model batch shape.
acqf.model._batch_shape = torch.Size([3, 5])
self.assertTrue(_verify_output_shape(acqf=acqf, X=X, output=torch.empty(3, 5)))
# Output has additional dimensions beyond model batch shape.
for X_batch in [(2, 3, 5), (2, 1, 5), (2, 1, 1)]:
self.assertTrue(
_verify_output_shape(
acqf=acqf,
X=torch.empty(*X_batch, 1, 1),
output=torch.empty(2, 3, 5),
)
)
def test_t_batch_mode_transform(self):
c = BMIMTestClass()
# test with q != 1
# non-batch
X = torch.rand(3, 2)
Xout = c.q_method(X)
self.assertTrue(torch.equal(Xout, X.unsqueeze(0)))
# test with expected_q = 1
with self.assertRaises(AssertionError):
c.q1_method(X)
# batch
X = X.unsqueeze(0)
Xout = c.q_method(X)
self.assertTrue(torch.equal(Xout, X))
# test with expected_q = 1
with self.assertRaises(AssertionError):
c.q1_method(X)
# test with q = 1
X = torch.rand(1, 2)
Xout = c.q_method(X)
self.assertTrue(torch.equal(Xout, X.unsqueeze(0)))
# test with expected_q = 1
Xout = c.q1_method(X)
self.assertTrue(torch.equal(Xout, X.unsqueeze(0)))
# batch
X = X.unsqueeze(0)
Xout = c.q_method(X)
self.assertTrue(torch.equal(Xout, X))
# test with expected_q = 1
Xout = c.q1_method(X)
self.assertTrue(torch.equal(Xout, X))
# test single-dim
X = torch.zeros(1)
with self.assertRaises(ValueError):
c.q_method(X)
# test with kwargs
X = torch.rand(1, 2)
with self.assertRaises(AssertionError):
c.kw_method(X)
Xout = c.kw_method(X, dummy_arg=5)
self.assertTrue(torch.equal(Xout, X.unsqueeze(0)))
# test assert_output_shape
X = torch.rand(5, 1, 2)
with self.assertWarns(RuntimeWarning):
c.wrong_shape_method(X)
Xout = c.correct_shape_method(X)
self.assertEqual(Xout.shape, X.shape[:-2])
# test when output shape is torch.Size()
Xout = c.correct_shape_method(torch.rand(1, 2))
self.assertEqual(Xout.shape, torch.Size())
# test with model batch shape
c.model = MockModel(MockPosterior(mean=X))
with self.assertRaises(AssertionError):
c.broadcast_batch_shape_method(X)
c.model = MockModel(MockPosterior(mean=X.repeat(2, *[1] * X.dim())))
Xout = c.broadcast_batch_shape_method(X)
self.assertEqual(Xout.shape, c.model.batch_shape)
# test with non-tensor argument
X = ((3, 4), {"foo": True})
Xout = c.q_method(X)
self.assertEqual(X, Xout)
class TestConcatenatePendingPoints(BotorchTestCase):
def test_concatenate_pending_points(self):
c = BMIMTestClass()
# test if no pending points
c.X_pending = None
X = torch.rand(1, 2)
self.assertTrue(torch.equal(c.dummy_method(X), X))
# basic test
X_pending = torch.rand(2, 2)
c.X_pending = X_pending
X_expected = torch.cat([X, X_pending], dim=-2)
self.assertTrue(torch.equal(c.dummy_method(X), X_expected))
# batch test
X = torch.rand(2, 1, 2)
X_expected = torch.cat([X, X_pending.expand(2, 2, 2)], dim=-2)
self.assertTrue(torch.equal(c.dummy_method(X), X_expected))
class TestMatchBatchShape(BotorchTestCase):
def test_match_batch_shape(self):
X = torch.rand(3, 2)
Y = torch.rand(1, 3, 2)
X_tf = match_batch_shape(X, Y)
self.assertTrue(torch.equal(X_tf, X.unsqueeze(0)))
X = torch.rand(1, 3, 2)
Y = torch.rand(2, 3, 2)
X_tf = match_batch_shape(X, Y)
self.assertTrue(torch.equal(X_tf, X.repeat(2, 1, 1)))
X = torch.rand(2, 3, 2)
Y = torch.rand(1, 3, 2)
with self.assertRaises(RuntimeError):
match_batch_shape(X, Y)
def test_match_batch_shape_multi_dim(self):
X = torch.rand(1, 3, 2)
Y = torch.rand(5, 4, 3, 2)
X_tf = match_batch_shape(X, Y)
self.assertTrue(torch.equal(X_tf, X.expand(5, 4, 3, 2)))
X = torch.rand(4, 3, 2)
Y = torch.rand(5, 4, 3, 2)
X_tf = match_batch_shape(X, Y)
self.assertTrue(torch.equal(X_tf, X.repeat(5, 1, 1, 1)))
X = torch.rand(2, 1, 3, 2)
Y = torch.rand(2, 4, 3, 2)
X_tf = match_batch_shape(X, Y)
self.assertTrue(torch.equal(X_tf, X.repeat(1, 4, 1, 1)))
X = torch.rand(4, 2, 3, 2)
Y = torch.rand(4, 3, 3, 2)
with self.assertRaises(RuntimeError):
match_batch_shape(X, Y)
class TorchNormalizeIndices(BotorchTestCase):
def test_normalize_indices(self):
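# normalize_indices maps negative (Python-style) indices into [0, d),
# e.g. -1 -> 2 for d=3, and raises a ValueError for out-of-range indices.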
self.assertIsNone(normalize_indices(None, 3))
indices = [0, 2]
nlzd_indices = normalize_indices(indices, 3)
self.assertEqual(nlzd_indices, indices)
nlzd_indices = normalize_indices(indices, 4)
self.assertEqual(nlzd_indices, indices)
indices = [0, -1]
nlzd_indices = normalize_indices(indices, 3)
self.assertEqual(nlzd_indices, [0, 2])
with self.assertRaises(ValueError):
nlzd_indices = normalize_indices([3], 3)
with self.assertRaises(ValueError):
nlzd_indices = normalize_indices([-4], 3)
class TestIsFullyBayesian(BotorchTestCase):
def test_is_fully_bayesian(self):
X, Y = torch.rand(3, 2), torch.randn(3, 1)
saas = SaasFullyBayesianSingleTaskGP(train_X=X, train_Y=Y)
vanilla_gp = SingleTaskGP(train_X=X, train_Y=Y)
deterministic = GenericDeterministicModel(f=lambda x: x)
# Single model
self.assertTrue(is_fully_bayesian(model=saas))
self.assertFalse(is_fully_bayesian(model=vanilla_gp))
self.assertFalse(is_fully_bayesian(model=deterministic))
# ModelListGP
self.assertTrue(is_fully_bayesian(model=ModelListGP(saas, saas)))
self.assertTrue(is_fully_bayesian(model=ModelListGP(saas, vanilla_gp)))
self.assertFalse(is_fully_bayesian(model=ModelListGP(vanilla_gp, vanilla_gp)))
# ModelList
self.assertTrue(is_fully_bayesian(model=ModelList(saas, saas)))
self.assertTrue(is_fully_bayesian(model=ModelList(saas, deterministic)))
self.assertFalse(is_fully_bayesian(model=ModelList(vanilla_gp, deterministic)))
# Nested ModelList
self.assertTrue(is_fully_bayesian(model=ModelList(ModelList(saas), saas)))
self.assertTrue(
is_fully_bayesian(model=ModelList(ModelList(saas), deterministic))
)
self.assertFalse(
is_fully_bayesian(model=ModelList(ModelList(vanilla_gp), deterministic))
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from contextlib import redirect_stdout
from inspect import getsource, getsourcefile
from io import StringIO
from itertools import product
from unittest.mock import patch
from botorch.utils.dispatcher import Dispatcher, MDNotImplementedError
from botorch.utils.testing import BotorchTestCase
def _helper_test_source(val):
"""Helper method for testing `Dispatcher._source`."""
... # pragma: nocover
class TestDispatcher(BotorchTestCase):
def setUp(self):
super().setUp()
self.dispatcher = Dispatcher(name="test")
def test_encoder(self):
self.assertEqual((int, str, list), self.dispatcher.encode_args((1, "a", [])))
with patch.object(self.dispatcher, "_encoder", str.upper):
self.assertEqual(("A", "B"), self.dispatcher.encode_args(("a", "b")))
def test_getitem(self):
with patch.dict(self.dispatcher.funcs, {}):
self.dispatcher.add(signature=(int, str), func=lambda *_: None)
args = 0, "a"
types = self.dispatcher.encode_args(args)
with self.assertRaisesRegex(RuntimeError, "One of `args` or `types`"):
self.dispatcher.__getitem__(args=None, types=None)
with self.assertRaisesRegex(RuntimeError, "Only one of `args` or `types`"):
self.dispatcher.__getitem__(args=args, types=types)
self.assertEqual(
self.dispatcher[args], self.dispatcher.__getitem__(args=args)
)
self.assertEqual(
self.dispatcher[args], self.dispatcher.__getitem__(types=types)
)
def test_register(self):
signature = (int, float), (int, float)
with patch.dict(self.dispatcher.funcs, {}):
@self.dispatcher.register(*signature)
def _pow(a: int, b: int):
return a**b
for type_a, type_b in product(*signature):
args = type_a(2), type_b(3)
self.assertEqual(self.dispatcher[args], _pow)
retval = self.dispatcher(*args)
test_type = float if (type_a is float or type_b is float) else int
self.assertIs(type(retval), test_type)
self.assertEqual(retval, test_type(8))
def test_notImplemented(self):
with self.assertRaisesRegex(NotImplementedError, "Could not find signature"):
self.dispatcher[0]
with self.assertRaisesRegex(NotImplementedError, "Could not find signature"):
self.dispatcher(0)
def test_inheritance(self):
IntSubclass = type("IntSubclass", (int,), {})
with patch.dict(self.dispatcher.funcs, {}):
self.dispatcher.add(signature=(int,), func=lambda val: -val)
self.assertEqual(self.dispatcher(IntSubclass(1)), -1)
def test_MDNotImplementedError(self):
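# MDNotImplementedError lets a registered method defer to the next-most
# specific implementation, so Child values can fall back to the Parent
# handler; if every handler defers, a "none completed" error is raised.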
Parent = type("Parent", (int,), {})
Child = type("Child", (Parent,), {})
with patch.dict(self.dispatcher.funcs, {}):
@self.dispatcher.register(Parent)
def _method_parent(val) -> str:
if val < 0:
raise MDNotImplementedError # defer to nothing
return "parent"
@self.dispatcher.register(Child)
def _method_child(val) -> str:
if val % 2:
return "child"
raise MDNotImplementedError # defer to parent
self.assertEqual(self.dispatcher(Child(1)), "child")
self.assertEqual(self.dispatcher(Child(2)), "parent")
self.assertEqual(self.dispatcher(Child(-1)), "child")
with self.assertRaisesRegex(NotImplementedError, "none completed"):
self.dispatcher(Child(-2))
def test_help(self):
with patch.dict(self.dispatcher.funcs, {}):
@self.dispatcher.register(int)
def _method(val) -> None:
"""docstring"""
... # pragma: nocover
self.assertEqual(self.dispatcher._help(0), "docstring")
with redirect_stdout(StringIO()) as buffer:
self.dispatcher.help(0)
self.assertEqual(buffer.getvalue().rstrip(), "docstring")
def test_source(self):
source = (
f"File: {getsourcefile(_helper_test_source)}"
f"\n\n{getsource(_helper_test_source)}"
)
with patch.dict(self.dispatcher.funcs, {}):
self.dispatcher.add(signature=(int,), func=_helper_test_source)
self.assertEqual(self.dispatcher._source(0), source)
with redirect_stdout(StringIO()) as buffer:
self.dispatcher.source(0)
# buffer.getvalue() has two newlines at the end, one due to `print`
self.assertEqual(buffer.getvalue()[:-1], source)
with self.assertRaisesRegex(TypeError, "No function found"):
self.dispatcher._source(0.5)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import itertools
import warnings
from typing import Any, Dict, Type
from unittest import mock
import numpy as np
import torch
from botorch.exceptions.errors import BotorchError
from botorch.models import FixedNoiseGP
from botorch.sampling.pathwise import draw_matheron_paths
from botorch.utils.sampling import (
_convert_bounds_to_inequality_constraints,
batched_multinomial,
DelaunayPolytopeSampler,
draw_sobol_samples,
find_interior_point,
get_polytope_samples,
HitAndRunPolytopeSampler,
manual_seed,
normalize_linear_constraints,
optimize_posterior_samples,
PolytopeSampler,
sample_hypersphere,
sample_simplex,
sparse_to_dense_constraints,
)
from botorch.utils.testing import BotorchTestCase
class TestManualSeed(BotorchTestCase):
def test_manual_seed(self):
initial_state = torch.random.get_rng_state()
with manual_seed():
self.assertTrue(torch.all(torch.random.get_rng_state() == initial_state))
with manual_seed(1234):
self.assertFalse(torch.all(torch.random.get_rng_state() == initial_state))
self.assertTrue(torch.all(torch.random.get_rng_state() == initial_state))
class TestSampleUtils(BotorchTestCase):
def test_draw_sobol_samples(self):
batch_shapes = [None, [3, 5], torch.Size([2]), (5, 3, 2, 3), []]
for d, q, n, batch_shape, seed, dtype in itertools.product(
(1, 3),
(1, 2),
(2, 5),
batch_shapes,
(None, 1234),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
bounds = torch.stack([torch.rand(d), 1 + torch.rand(d)]).to(**tkwargs)
samples = draw_sobol_samples(
bounds=bounds, n=n, q=q, batch_shape=batch_shape, seed=seed
)
batch_shape = batch_shape or torch.Size()
self.assertEqual(samples.shape, torch.Size([n, *batch_shape, q, d]))
self.assertTrue(torch.all(samples >= bounds[0]))
self.assertTrue(torch.all(samples <= bounds[1]))
self.assertEqual(samples.device.type, self.device.type)
self.assertEqual(samples.dtype, dtype)
def test_sample_simplex(self):
for d, n, qmc, seed, dtype in itertools.product(
(1, 2, 3), (2, 5), (False, True), (None, 1234), (torch.float, torch.double)
):
samples = sample_simplex(
d=d, n=n, qmc=qmc, seed=seed, device=self.device, dtype=dtype
)
self.assertEqual(samples.shape, torch.Size([n, d]))
self.assertTrue(torch.all(samples >= 0))
self.assertTrue(torch.all(samples <= 1))
self.assertTrue(torch.max((samples.sum(dim=-1) - 1).abs()) < 1e-5)
self.assertEqual(samples.device.type, self.device.type)
self.assertEqual(samples.dtype, dtype)
def test_sample_hypersphere(self):
for d, n, qmc, seed, dtype in itertools.product(
(1, 2, 3), (2, 5), (False, True), (None, 1234), (torch.float, torch.double)
):
samples = sample_hypersphere(
d=d, n=n, qmc=qmc, seed=seed, device=self.device, dtype=dtype
)
self.assertEqual(samples.shape, torch.Size([n, d]))
self.assertTrue(torch.max((samples.pow(2).sum(dim=-1) - 1).abs()) < 1e-5)
self.assertEqual(samples.device.type, self.device.type)
self.assertEqual(samples.dtype, dtype)
def test_batched_multinomial(self):
num_categories = 5
num_samples = 4
Trulse = (True, False)
for batch_shape, dtype, replacement, use_gen, use_out in itertools.product(
([], [3], [2, 3]), (torch.float, torch.double), Trulse, Trulse, Trulse
):
weights = torch.rand(*batch_shape, num_categories, dtype=dtype)
out = None
if use_out:
out = torch.empty(*batch_shape, num_samples, dtype=torch.long)
samples = batched_multinomial(
weights,
num_samples,
replacement=replacement,
generator=torch.Generator() if use_gen else None,
out=out,
)
self.assertEqual(samples.shape, torch.Size([*batch_shape, num_samples]))
if use_out:
self.assertTrue(torch.equal(samples, out))
if not replacement:
for s in samples.view(-1, num_samples):
self.assertEqual(torch.unique(s).size(0), num_samples)
def test_convert_bounds_to_inequality_constraints(self):
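# The helper encodes box bounds as A @ x <= b with A = [-I; I] and
# b = [-lower; upper]; rows corresponding to infinite bounds are dropped
# (second case below).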
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
# test basic case with no indefinite bounds
lower_bounds = torch.rand(3, **tkwargs)
upper_bounds = torch.rand_like(lower_bounds) + lower_bounds
bounds = torch.stack([lower_bounds, upper_bounds], dim=0)
A, b = _convert_bounds_to_inequality_constraints(bounds=bounds)
identity = torch.eye(3, **tkwargs)
self.assertTrue(torch.equal(A[:3], -identity))
self.assertTrue(torch.equal(A[3:], identity))
self.assertTrue(torch.equal(b[:3], -bounds[:1].t()))
self.assertTrue(torch.equal(b[3:], bounds[1:].t()))
# test filtering of indefinite bounds
inf = float("inf")
bounds = torch.tensor(
[[-3.0, -inf, -inf], [inf, 2.0, inf]],
**tkwargs,
)
A, b = _convert_bounds_to_inequality_constraints(bounds=bounds)
A_xpct = torch.tensor([[-1.0, -0.0, -0.0], [0.0, 1.0, 0.0]], **tkwargs)
b_xpct = torch.tensor([[3.0], [2.0]], **tkwargs)
self.assertTrue(torch.equal(A, A_xpct))
self.assertTrue(torch.equal(b, b_xpct))
def test_sparse_to_dense_constraints(self):
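# Sparse constraints are (indices, coefficients, rhs) triples meaning
# sum_i coefficients[i] * x[indices[i]] >= rhs; the helper expands them
# into a dense (A, b) pair over all d dimensions.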
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
inequality_constraints = [
(
torch.tensor([3], **tkwargs),
torch.tensor([4], **tkwargs),
3,
)
]
(A, b) = sparse_to_dense_constraints(
d=4, constraints=inequality_constraints
)
expected_A = torch.tensor([[0.0, 0.0, 0.0, 4.0]], **tkwargs)
self.assertTrue(torch.equal(A, expected_A))
expected_b = torch.tensor([[3.0]], **tkwargs)
self.assertTrue(torch.equal(b, expected_b))
def test_normalize_linear_constraints(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
constraints = [
(
torch.tensor([1, 2, 0], dtype=torch.int64, device=self.device),
torch.tensor([1.0, 1.0, 1.0], **tkwargs),
1.0,
)
]
bounds = torch.tensor(
[[0.1, 0.3, 0.1, 30.0], [0.6, 0.7, 0.7, 700.0]], **tkwargs
)
new_constraints = normalize_linear_constraints(bounds, constraints)
expected_coefficients = torch.tensor([0.4000, 0.6000, 0.5000], **tkwargs)
self.assertTrue(
torch.allclose(new_constraints[0][1], expected_coefficients)
)
expected_rhs = 0.5
self.assertAlmostEqual(new_constraints[0][-1], expected_rhs)
def test_normalize_linear_constraints_wrong_dtype(self):
for dtype in (torch.float, torch.double):
with self.subTest(dtype=dtype):
tkwargs = {"device": self.device, "dtype": dtype}
constraints = [
(
torch.ones(3, dtype=torch.float, device=self.device),
torch.ones(3, **tkwargs),
1.0,
)
]
bounds = torch.zeros(2, 4, **tkwargs)
msg = "tensors used as indices must be long, byte or bool tensors"
with self.assertRaises(IndexError, msg=msg):
normalize_linear_constraints(bounds, constraints)
def test_find_interior_point(self):
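# find_interior_point returns a strictly feasible point of {x | A @ x <= b};
# for bounded problems this appears to be the analytic center of the box
# (the midpoints checked below).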
# basic problem: 1 <= x_1 <= 2, 2 <= x_2 <= 3
A = np.concatenate([np.eye(2), -np.eye(2)], axis=0)
b = np.array([2.0, 3.0, -1.0, -2.0])
x = find_interior_point(A=A, b=b)
self.assertTrue(np.allclose(x, np.array([1.5, 2.5])))
# problem w/ negatives variables: -2 <= x_1 <= -1, -3 <= x_2 <= -2
b = np.array([-1.0, -2.0, 2.0, 3.0])
x = find_interior_point(A=A, b=b)
self.assertTrue(np.allclose(x, np.array([-1.5, -2.5])))
# problem with bound on a single variable: x_1 <= 0
A = np.array([[1.0, 0.0]])
b = np.zeros(1)
x = find_interior_point(A=A, b=b)
self.assertLessEqual(x[0].item(), 0.0)
# unbounded problem: x >= 3
A = np.array([[-1.0]])
b = np.array([-3.0])
x = find_interior_point(A=A, b=b)
self.assertAlmostEqual(x.item(), 5.0, places=4)
def test_get_polytope_samples(self):
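# get_polytope_samples draws n * thinning points with a
# HitAndRunPolytopeSampler and keeps every `thinning`-th one, which is why
# the reference computations below draw 15 samples and slice with [::3].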
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
bounds = torch.zeros(2, 4, **tkwargs)
bounds[1] = 1
inequality_constraints = [
(
torch.tensor([3], dtype=torch.int64, device=self.device),
torch.tensor([-4], **tkwargs),
-3,
)
]
equality_constraints = [
(
torch.tensor([0], dtype=torch.int64, device=self.device),
torch.tensor([1], **tkwargs),
0.5,
)
]
dense_equality_constraints = sparse_to_dense_constraints(
d=4, constraints=equality_constraints
)
with manual_seed(0):
samps = get_polytope_samples(
n=5,
bounds=bounds,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
seed=0,
thinning=3,
n_burnin=2,
)
(A, b) = sparse_to_dense_constraints(
d=4, constraints=inequality_constraints
)
dense_inequality_constraints = (-A, -b)
with manual_seed(0):
expected_samps = HitAndRunPolytopeSampler(
bounds=bounds,
inequality_constraints=dense_inequality_constraints,
equality_constraints=dense_equality_constraints,
n_burnin=2,
).draw(15, seed=0)[::3]
self.assertTrue(torch.equal(samps, expected_samps))
# test no equality constraints
with manual_seed(0):
samps = get_polytope_samples(
n=5,
bounds=bounds,
inequality_constraints=inequality_constraints,
seed=0,
thinning=3,
n_burnin=2,
)
with manual_seed(0):
expected_samps = HitAndRunPolytopeSampler(
bounds=bounds,
inequality_constraints=dense_inequality_constraints,
n_burnin=2,
).draw(15, seed=0)[::3]
self.assertTrue(torch.equal(samps, expected_samps))
# test no inequality constraints
with manual_seed(0):
samps = get_polytope_samples(
n=5,
bounds=bounds,
equality_constraints=equality_constraints,
seed=0,
thinning=3,
n_burnin=2,
)
with manual_seed(0):
expected_samps = HitAndRunPolytopeSampler(
bounds=bounds,
equality_constraints=dense_equality_constraints,
n_burnin=2,
).draw(15, seed=0)[::3]
self.assertTrue(torch.equal(samps, expected_samps))
class PolytopeSamplerTestBase:
sampler_class: Type[PolytopeSampler]
sampler_kwargs: Dict[str, Any] = {}
def setUp(self):
super().setUp()
self.bounds = torch.zeros(2, 3, device=self.device)
self.bounds[1] = 1
self.A = torch.tensor(
[
[-1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 4.0, 1.0],
],
device=self.device,
)
self.b = torch.tensor([[0.0], [1.0], [0.0], [0.0], [1.0]], device=self.device)
self.x0 = torch.tensor([0.1, 0.1, 0.1], device=self.device).unsqueeze(-1)
def test_sample_polytope(self):
for dtype in (torch.float, torch.double):
A = self.A.to(dtype)
b = self.b.to(dtype)
x0 = self.x0.to(dtype)
bounds = self.bounds.to(dtype)
for interior_point in [x0, None]:
sampler = self.sampler_class(
inequality_constraints=(A, b),
bounds=bounds,
interior_point=interior_point,
**self.sampler_kwargs,
)
samples = sampler.draw(n=10, seed=1)
self.assertEqual(((A @ samples.t() - b) > 0).sum().item(), 0)
self.assertTrue((samples <= bounds[1]).all())
self.assertTrue((samples >= bounds[0]).all())
# make sure we can draw multiple samples
more_samples = sampler.draw(n=5)
self.assertEqual(((A @ more_samples.t() - b) > 0).sum().item(), 0)
self.assertTrue((more_samples <= bounds[1]).all())
self.assertTrue((more_samples >= bounds[0]).all())
def test_sample_polytope_with_seed(self):
for dtype in (torch.float, torch.double):
A = self.A.to(dtype)
b = self.b.to(dtype)
x0 = self.x0.to(dtype)
bounds = self.bounds.to(dtype)
for interior_point in [x0, None]:
sampler1 = self.sampler_class(
inequality_constraints=(A, b),
bounds=bounds,
interior_point=interior_point,
**self.sampler_kwargs,
)
sampler2 = self.sampler_class(
inequality_constraints=(A, b),
bounds=bounds,
interior_point=interior_point,
**self.sampler_kwargs,
)
samples1 = sampler1.draw(n=10, seed=42)
samples2 = sampler2.draw(n=10, seed=42)
self.assertTrue(torch.allclose(samples1, samples2))
def test_sample_polytope_with_eq_constraints(self):
for dtype in (torch.float, torch.double):
A = self.A.to(dtype)
b = self.b.to(dtype)
x0 = self.x0.to(dtype)
bounds = self.bounds.to(dtype)
C = torch.tensor([[1.0, -1, 0.0]], device=self.device, dtype=dtype)
d = torch.zeros(1, 1, device=self.device, dtype=dtype)
for interior_point in [x0, None]:
sampler = self.sampler_class(
inequality_constraints=(A, b),
equality_constraints=(C, d),
bounds=bounds,
interior_point=interior_point,
**self.sampler_kwargs,
)
samples = sampler.draw(n=10, seed=1)
inequality_satisfied = ((A @ samples.t() - b) > 0).sum().item() == 0
equality_satisfied = (C @ samples.t() - d).abs().sum().item() < 1e-6
self.assertTrue(inequality_satisfied)
self.assertTrue(equality_satisfied)
self.assertTrue((samples <= bounds[1]).all())
self.assertTrue((samples >= bounds[0]).all())
# test no inequality constraints
sampler = self.sampler_class(
equality_constraints=(C, d),
bounds=bounds,
interior_point=interior_point,
**self.sampler_kwargs,
)
samples = sampler.draw(n=10, seed=1)
equality_satisfied = (C @ samples.t() - d).abs().sum().item() < 1e-6
self.assertTrue(equality_satisfied)
self.assertTrue((samples <= bounds[1]).all())
self.assertTrue((samples >= bounds[0]).all())
# test no inequality constraints or bounds
with self.assertRaises(BotorchError):
self.sampler_class(
equality_constraints=(C, d),
interior_point=interior_point,
**self.sampler_kwargs,
)
def test_sample_polytope_1d(self):
for dtype in (torch.float, torch.double):
A = torch.tensor(
[[-1.0, 0.0], [0.0, -1.0], [1.0, 1.0]], device=self.device, dtype=dtype
)
b = torch.tensor([[0.0], [0.0], [1.0]], device=self.device, dtype=dtype)
C = torch.tensor([[1.0, -1.0]], device=self.device, dtype=dtype)
x0 = torch.tensor([[0.1], [0.1]], device=self.device, dtype=dtype)
d = torch.tensor([[0.0]], device=self.device, dtype=dtype)
bounds = self.bounds[:, :2].to(dtype=dtype)
for interior_point in [x0, None]:
sampler = self.sampler_class(
inequality_constraints=(A, b),
equality_constraints=(C, d),
bounds=bounds,
interior_point=interior_point,
**self.sampler_kwargs,
)
samples = sampler.draw(n=10, seed=1)
inequality_satisfied = ((A @ samples.t() - b) > 0).sum().item() == 0
equality_satisfied = (C @ samples.t() - d).abs().sum().item() < 1e-6
self.assertTrue(inequality_satisfied)
self.assertTrue(equality_satisfied)
self.assertTrue((samples <= bounds[1]).all())
self.assertTrue((samples >= bounds[0]).all())
def test_initial_point(self):
for dtype in (torch.float, torch.double):
A = torch.tensor(
[[0.0, -1.0, 0.0], [0.0, -1.0, 0.0], [0.0, 4.0, 0.0]],
device=self.device,
dtype=dtype,
)
b = torch.tensor([[0.0], [-1.0], [1.0]], device=self.device, dtype=dtype)
x0 = self.x0.to(dtype)
# testing for infeasibility of the initial point and
# infeasibility of the original LP (status 2 of the linprog output).
for interior_point in [x0, None]:
with self.assertRaises(ValueError):
self.sampler_class(
inequality_constraints=(A, b), interior_point=interior_point
)
class Result:
status = 1
message = "mock status 1"
# test handling of linprog status 1 (iteration limit reached)
with mock.patch("scipy.optimize.linprog") as mock_linprog:
mock_linprog.return_value = Result()
with self.assertRaises(ValueError):
self.sampler_class(inequality_constraints=(A, b))
class TestHitAndRunPolytopeSampler(PolytopeSamplerTestBase, BotorchTestCase):
sampler_class = HitAndRunPolytopeSampler
sampler_kwargs = {"n_burnin": 2}
class TestDelaunayPolytopeSampler(PolytopeSamplerTestBase, BotorchTestCase):
sampler_class = DelaunayPolytopeSampler
def test_sample_polytope_unbounded(self):
A = torch.tensor(
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [0.0, 4.0, 1.0]],
device=self.device,
)
b = torch.tensor([[0.0], [0.0], [0.0], [1.0]], device=self.device)
with self.assertRaises(ValueError):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.sampler_class(
inequality_constraints=(A, b),
interior_point=self.x0,
**self.sampler_kwargs,
)
class TestOptimizePosteriorSamples(BotorchTestCase):
def test_optimize_posterior_samples(self):
# Restrict the random seed to prevent flaky failures.
seed = torch.randint(high=5, size=(1,)).item()
torch.manual_seed(seed)
dims = 2
dtype = torch.float64
eps = 1e-6
for_testing_speed_kwargs = {"raw_samples": 512, "num_restarts": 10}
nums_optima = (1, 7)
batch_shapes = ((), (3,), (5, 2))
for num_optima, batch_shape in itertools.product(nums_optima, batch_shapes):
bounds = torch.tensor([[0, 1]] * dims, dtype=dtype).T
X = torch.rand(*batch_shape, 13, dims, dtype=dtype)
Y = torch.pow(X - 0.5, 2).sum(dim=-1, keepdim=True)
# having a noiseless model all but guarantees that the found optima
# will be better than the observations
model = FixedNoiseGP(X, Y, torch.full_like(Y, eps))
paths = draw_matheron_paths(
model=model, sample_shape=torch.Size([num_optima])
)
X_opt, f_opt = optimize_posterior_samples(
paths, bounds, **for_testing_speed_kwargs
)
correct_X_shape = (num_optima,) + batch_shape + (dims,)
correct_f_shape = (num_optima,) + batch_shape + (1,)
self.assertEqual(X_opt.shape, correct_X_shape)
self.assertEqual(f_opt.shape, correct_f_shape)
self.assertTrue(torch.all(X_opt >= bounds[0]))
self.assertTrue(torch.all(X_opt <= bounds[1]))
# Check that the all found optima are larger than the observations
# This is not 100% deterministic, but just about.
self.assertTrue(torch.all((f_opt > Y.max(dim=-2).values)))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from unittest.mock import patch
import torch
from botorch.utils import constants
from botorch.utils.testing import BotorchTestCase
class TestConstants(BotorchTestCase):
def test_get_constants(self):
tkwargs = {"device": self.device, "dtype": torch.float16}
const = constants.get_constants(0.123, **tkwargs)
self.assertEqual(const, 0.123)
self.assertEqual(const.device.type, tkwargs["device"].type)
self.assertEqual(const.dtype, tkwargs["dtype"])
try: # test in-place modification
const.add_(1)
const2 = constants.get_constants(0.123, **tkwargs)
self.assertEqual(const2, 1.123)
finally:
const.sub_(1)
# Test fetching of multiple constants
const_tuple = constants.get_constants(values=(0, 1, 2), **tkwargs)
self.assertIsInstance(const_tuple, tuple)
self.assertEqual(len(const_tuple), 3)
for i, const in enumerate(const_tuple):
self.assertEqual(const, i)
def test_get_constants_like(self):
def mock_get_constants(values: torch.Tensor, **kwargs):
return kwargs
tkwargs = {"device": self.device, "dtype": torch.float16}
with patch.object(constants, "get_constants", new=mock_get_constants):
ref = torch.tensor([123], **tkwargs)
other = constants.get_constants_like(0.123, ref=ref)
self.assertEqual(other["device"].type, tkwargs["device"].type)
self.assertEqual(other["dtype"], tkwargs["dtype"])
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestMock(BotorchTestCase):
def test_MockPosterior(self):
# test basic logic
mp = MockPosterior()
self.assertEqual(mp.device.type, "cpu")
self.assertEqual(mp.dtype, torch.float32)
self.assertEqual(mp._extended_shape(), torch.Size())
self.assertEqual(
MockPosterior(variance=torch.rand(2))._extended_shape(), torch.Size([2])
)
# test passing in tensors
mean = torch.rand(2)
variance = torch.eye(2)
samples = torch.rand(1, 2)
mp = MockPosterior(mean=mean, variance=variance, samples=samples)
self.assertEqual(mp.device.type, "cpu")
self.assertEqual(mp.dtype, torch.float32)
self.assertTrue(torch.equal(mp.mean, mean))
self.assertTrue(torch.equal(mp.variance, variance))
self.assertTrue(torch.all(mp.rsample() == samples.unsqueeze(0)))
self.assertTrue(
torch.all(mp.rsample(torch.Size([2])) == samples.repeat(2, 1, 1))
)
with self.assertRaises(RuntimeError):
mp.rsample(sample_shape=torch.Size([2]), base_samples=torch.rand(3))
def test_MockModel(self):
mp = MockPosterior()
mm = MockModel(mp)
X = torch.empty(0)
self.assertEqual(mm.posterior(X), mp)
self.assertEqual(mm.num_outputs, 0)
mm.state_dict()
mm.load_state_dict()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from functools import partial
from itertools import count
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
from unittest.mock import patch
import torch
from botorch.utils.probability.linalg import PivotedCholesky
from botorch.utils.probability.mvnxpb import MVNXPB
from botorch.utils.testing import BotorchTestCase
from linear_operator.utils.errors import NotPSDError
from torch import Tensor
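# Monte Carlo helper: draws `num_samples` Gaussian samples with covariance
# sqrt_cov @ sqrt_cov.T (in chunks of at most `batch_limit`), feeds each chunk
# to `estimator`, and returns the accumulated numerator / denominator ratio
# together with the total denominator.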
def run_gaussian_estimator(
estimator: Callable[[Tensor], Tuple[Tensor, Union[Tensor, float, int]]],
sqrt_cov: Tensor,
num_samples: int,
batch_limit: Optional[int] = None,
seed: Optional[int] = None,
) -> Tensor:
if batch_limit is None:
batch_limit = num_samples
ndim = sqrt_cov.shape[-1]
tkwargs = {"dtype": sqrt_cov.dtype, "device": sqrt_cov.device}
counter = 0
numerator = 0
denominator = 0
with torch.random.fork_rng():
if seed:
torch.random.manual_seed(seed)
while counter < num_samples:
batch_size = min(batch_limit, num_samples - counter)
samples = torch.tensordot(
torch.randn(batch_size, ndim, **tkwargs),
sqrt_cov,
dims=([1], [-1]),
)
batch_numerator, batch_denominator = estimator(samples)
counter = counter + batch_size
numerator = numerator + batch_numerator
denominator = denominator + batch_denominator
return numerator / denominator, denominator
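# Illustrative sketch (not exercised by the tests below): estimating the
# orthant probability P(X > 0) of a standard bivariate normal, which should
# come out near 0.25:
#
#   prob, n = run_gaussian_estimator(
#       estimator=lambda s: ((s > 0).all(-1).sum(), s.shape[0]),
#       sqrt_cov=torch.eye(2),
#       num_samples=10_000,
#       seed=0,
#   )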
class TestMVNXPB(BotorchTestCase):
def setUp(
self,
ndims: Sequence[int] = (4, 8),
batch_shape: Sequence[int] = (4,),
bound_range: Tuple[float, float] = (-5.0, 5.0),
mc_num_samples: int = 100000,
mc_batch_limit: int = 10000,
mc_atol_multiplier: float = 4.0,
seed: int = 1,
dtype: torch.dtype = torch.float64,
):
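        """Generate random integration problems and a toy `MVNXPB` instance.

        One set of `bounds` and one `sqrt_covariance` are created per entry
        of `ndims`; the toy solver is assembled from random tensors (with
        `cumsum` ensuring upper bounds exceed lower bounds) and is used only
        for API-level tests such as `clone`, `concat`, and `expand`.
        """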
super().setUp()
self.dtype = dtype
self.seed_generator = count(seed)
self.mc_num_samples = mc_num_samples
self.mc_batch_limit = mc_batch_limit
self.mc_atol_multiplier = mc_atol_multiplier
self.bounds = []
self.sqrt_covariances = []
with torch.random.fork_rng():
torch.random.manual_seed(next(self.seed_generator))
for n in ndims:
self.bounds.append(self.gen_bounds(n, batch_shape, bound_range))
self.sqrt_covariances.append(
self.gen_covariances(n, batch_shape, as_sqrt=True)
)
# Create a toy MVNXPB instance for API testing
tril = torch.rand([4, 2, 3, 3], **self.tkwargs)
diag = torch.rand([4, 2, 3], **self.tkwargs)
perm = torch.stack([torch.randperm(3) for _ in range(8)])
perm = perm.reshape(4, 2, 3).to(**self.tkwargs)
self.toy_solver = MVNXPB.build(
step=0,
perm=perm.clone(),
bounds=torch.rand(4, 2, 3, 2, **self.tkwargs).cumsum(dim=-1),
piv_chol=PivotedCholesky(tril=tril, perm=perm, diag=diag, step=0),
plug_ins=torch.randn(4, 2, 3, **self.tkwargs),
log_prob=torch.rand(4, 2, **self.tkwargs),
)
def gen_covariances(
self,
ndim: int,
batch_shape: Sequence[int] = (),
as_sqrt: bool = False,
) -> Tensor:
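        # Build a random square root: orthonormal columns from the SVD of a
        # Gaussian matrix, scaled by square roots of exponentially
        # distributed eigenvalues, so the implied covariance is positive
        # definite almost surely.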
shape = tuple(batch_shape) + (ndim, ndim)
eigvals = -torch.rand(shape[:-1], **self.tkwargs).log() # exponential rvs
orthmat = torch.linalg.svd(torch.randn(shape, **self.tkwargs)).U
sqrt_covar = orthmat * torch.sqrt(eigvals).unsqueeze(-2)
return sqrt_covar if as_sqrt else sqrt_covar @ sqrt_covar.transpose(-2, -1)
def gen_bounds(
self,
ndim: int,
batch_shape: Sequence[int] = (),
bound_range: Optional[Tuple[float, float]] = None,
) -> Tuple[Tensor, Tensor]:
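        # Draw lower < upper uniformly in [0, 1), then map both endpoints
        # affinely into `bound_range` when one is given.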
shape = tuple(batch_shape) + (ndim,)
lower = torch.rand(shape, **self.tkwargs)
upper = lower + (1 - lower) * torch.rand_like(lower)
if bound_range is not None:
lower = bound_range[0] + (bound_range[1] - bound_range[0]) * lower
upper = bound_range[0] + (bound_range[1] - bound_range[0]) * upper
return torch.stack([lower, upper], dim=-1)
@property
def tkwargs(self) -> Dict[str, Any]:
return {"dtype": self.dtype, "device": self.device}
def assertEqualMXNBPB(self, A: MVNXPB, B: MVNXPB):
for key, a in A.asdict().items():
b = getattr(B, key)
if isinstance(a, PivotedCholesky):
continue
elif isinstance(a, torch.Tensor):
self.assertTrue(a.allclose(b, equal_nan=True))
else:
self.assertEqual(a, b)
for key in ("perm", "tril", "diag"):
a = getattr(A.piv_chol, key)
b = getattr(B.piv_chol, key)
self.assertTrue(a.allclose(b, equal_nan=True))
def test_solve(self):
r"""Monte Carlo unit test for `solve`."""
def _estimator(samples, bounds):
accept = torch.logical_and(
(samples > bounds[..., 0]).all(-1),
(samples < bounds[..., 1]).all(-1),
)
numerator = torch.count_nonzero(accept, dim=0).double()
denominator = len(samples)
return numerator, denominator
for sqrt_cov, bounds in zip(self.sqrt_covariances, self.bounds):
estimates, _ = run_gaussian_estimator(
estimator=partial(_estimator, bounds=bounds),
sqrt_cov=sqrt_cov,
num_samples=self.mc_num_samples,
batch_limit=self.mc_batch_limit,
seed=next(self.seed_generator),
)
cov = sqrt_cov @ sqrt_cov.transpose(-2, -1)
solver = MVNXPB(cov, bounds)
solver.solve()
atol = self.mc_atol_multiplier * (self.mc_num_samples**-0.5)
for est, prob in zip(estimates, solver.log_prob.exp()):
if est == 0.0:
continue
self.assertAllClose(est, prob, rtol=0, atol=atol)
def test_augment(self):
r"""Test `augment`."""
with torch.random.fork_rng():
torch.random.manual_seed(next(self.seed_generator))
# Pick a set of subproblems at random
index = torch.randint(
low=0,
high=len(self.sqrt_covariances),
size=(),
device=self.device,
)
sqrt_cov = self.sqrt_covariances[index]
cov = sqrt_cov @ sqrt_cov.transpose(-2, -1)
bounds = self.bounds[index]
# Partially solve for `N`-dimensional integral
N = cov.shape[-1]
n = torch.randint(low=1, high=N - 2, size=())
full = MVNXPB(cov, bounds=bounds)
full.solve(num_steps=n)
# Compare with solver run using a pre-computed `piv_chol`
_perm = torch.arange(0, N, device=self.device)
other = MVNXPB.build(
step=0,
perm=_perm.expand(*cov.shape[:-2], N).clone(),
bounds=cov.diagonal(dim1=-2, dim2=-1).rsqrt().unsqueeze(-1)
* bounds.clone(),
piv_chol=full.piv_chol,
plug_ins=full.plug_ins,
log_prob=torch.zeros_like(full.log_prob),
)
other.solve(num_steps=n)
self.assertTrue(full.perm.equal(other.perm))
self.assertTrue(full.bounds.allclose(other.bounds))
self.assertTrue(full.log_prob.allclose(other.log_prob))
            # Reorder terms according to `full.perm`
perm = full.perm.detach().clone()
_cov = cov.gather(-2, perm.unsqueeze(-1).repeat(1, 1, N))
_cov = _cov.gather(-1, perm.unsqueeze(-2).repeat(1, N, 1))
_istd = _cov.diagonal(dim1=-2, dim2=-1).rsqrt()
_bounds = bounds.gather(-2, perm.unsqueeze(-1).repeat(1, 1, 2))
# Solve for same `n`-dimensional integral as `full.solve(num_steps=n)`
init = MVNXPB(_cov[..., :n, :n], _bounds[..., :n, :])
init.solve()
# Augment solver with adaptive pivoting disabled
with patch.object(init.piv_chol, "diag", new=None):
_corr = _istd[..., n:, None] * _cov[..., n:, :] * _istd[:, None, :]
temp = init.augment(
covariance_matrix=_corr[..., n:],
cross_covariance_matrix=_corr[..., :n],
bounds=_istd[..., n:, None] * _bounds[..., n:, :],
disable_pivoting=True,
)
self.assertTrue(temp.piv_chol.diag[..., :n].eq(1).all())
self.assertEqual(temp.step, n)
self.assertEqual(temp.piv_chol.step, N)
self.assertTrue(temp.piv_chol.perm[..., n:].eq(_perm[n:]).all())
del temp
# Augment solver again, this time with pivoting enabled
augm = init.clone().augment(
covariance_matrix=_cov[..., n:, n:],
cross_covariance_matrix=_cov[..., n:, :n],
bounds=_bounds[..., n:, :],
)
# Patch `perm` to account for different starting points
augm_perm = augm.perm
temp_perm = perm.gather(-1, augm_perm)
self.assertTrue(augm_perm.equal(augm.piv_chol.perm))
with patch.object(augm, "perm", new=temp_perm), patch.object(
augm.piv_chol, "perm", new=temp_perm
):
self.assertEqualMXNBPB(full, augm)
# Run both solvers
augm.piv_chol = full.piv_chol.clone()
augm.piv_chol.perm = augm_perm.clone()
full.solve()
augm.solve()
# Patch `perm` to account for different starting points
augm_perm = augm.perm
temp_perm = perm.gather(-1, augm_perm)
self.assertTrue(augm_perm.equal(augm.piv_chol.perm))
with patch.object(augm, "perm", new=temp_perm), patch.object(
augm.piv_chol, "perm", new=temp_perm
):
self.assertEqualMXNBPB(full, augm)
# testing errors
fake_init = deepcopy(init)
fake_init.piv_chol.step = fake_init.perm.shape[-1] + 1
error_msg = "Augmentation of incomplete solutions not implemented yet."
with self.assertRaisesRegex(NotImplementedError, error_msg):
augm = fake_init.augment(
covariance_matrix=_cov[..., n:, n:],
cross_covariance_matrix=_cov[..., n:, :n],
bounds=_bounds[..., n:, :],
)
            # Test that the solver tries to recover when it encounters a
            # non-PSD matrix, even though it ultimately fails in this case
error_msg = (
"Matrix not positive definite after repeatedly adding jitter up to.*"
)
with self.assertRaisesRegex(NotPSDError, error_msg):
fake_cov = torch.ones_like(_cov[..., n:, n:])
augm = init.augment(
covariance_matrix=fake_cov,
cross_covariance_matrix=_cov[..., n:, :n],
bounds=_bounds[..., n:, :],
)
def test_getitem(self):
with torch.random.fork_rng():
torch.random.manual_seed(1)
mask = torch.rand(self.toy_solver.log_prob.shape) > 0.5
other = self.toy_solver[mask]
for key, b in other.asdict().items():
a = getattr(self.toy_solver, key)
if isinstance(b, PivotedCholesky):
continue
elif isinstance(b, torch.Tensor):
self.assertTrue(a[mask].equal(b))
else:
self.assertEqual(a, b)
for key in ("perm", "tril", "diag"):
a = getattr(self.toy_solver.piv_chol, key)[mask]
b = getattr(other.piv_chol, key)
self.assertTrue(a.equal(b))
fake_solver = deepcopy(self.toy_solver)
fake_solver.log_prob_extra = torch.tensor([-1])
fake_solver_1 = fake_solver[:1]
self.assertEqual(fake_solver_1.log_prob_extra, fake_solver.log_prob_extra[:1])
def test_concat(self):
split = len(self.toy_solver.log_prob) // 2
A = self.toy_solver[:split]
B = self.toy_solver[split:]
other = A.concat(B, dim=0)
self.assertEqualMXNBPB(self.toy_solver, other)
# Test exception handling
with patch.object(A, "step", new=A.step + 1), self.assertRaisesRegex(
ValueError, "`self.step` does not equal `other.step`."
):
A.concat(B, dim=0)
with self.assertRaisesRegex(ValueError, "not a valid batch dimension"):
A.concat(B, dim=9)
with self.assertRaisesRegex(ValueError, "not a valid batch dimension"):
A.concat(B, dim=-9)
with patch.object(A, "plug_ins", new=None), self.assertRaisesRegex(
TypeError, "Concatenation failed: `self.plug_ins` has type"
):
A.concat(B, dim=0)
def test_clone(self):
self.toy_solver.bounds.requires_grad_(True)
try:
other = self.toy_solver.clone()
self.assertEqualMXNBPB(self.toy_solver, other)
for key, a in self.toy_solver.asdict().items():
if a is None or isinstance(a, int):
continue
b = getattr(other, key)
self.assertFalse(a is b)
other.bounds.sum().backward()
self.assertTrue(self.toy_solver.bounds.grad.eq(1).all())
finally:
self.toy_solver.bounds.requires_grad_(False)
def test_detach(self):
self.toy_solver.bounds.requires_grad_(True)
try:
other = self.toy_solver.detach()
self.assertEqualMXNBPB(self.toy_solver, other)
for key, a in self.toy_solver.asdict().items():
if a is None or isinstance(a, int):
continue
b = getattr(other, key)
self.assertFalse(a is b)
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.bounds.sum().backward()
finally:
self.toy_solver.bounds.requires_grad_(False)
def test_expand(self):
other = self.toy_solver.expand(2, 4, 2)
self.assertEqualMXNBPB(self.toy_solver, other[0])
self.assertEqualMXNBPB(self.toy_solver, other[1])
def test_asdict(self):
for key, val in self.toy_solver.asdict().items():
self.assertTrue(val is getattr(self.toy_solver, key))
def test_build(self):
other = MVNXPB.build(**self.toy_solver.asdict())
self.assertEqualMXNBPB(self.toy_solver, other)
def test_exceptions(self):
# in solve
fake_solver = deepcopy(self.toy_solver)
fake_solver.step = fake_solver.piv_chol.step + 1
error_msg = "Invalid state: solver ran ahead of matrix decomposition."
with self.assertRaises(ValueError, msg=error_msg):
fake_solver.solve()
# in _pivot
with self.assertRaises(ValueError):
pivot = torch.LongTensor([-1]) # this will not be used before the raise
fake_solver.pivot_(pivot)
error_msg = f"Expected `other` to be {type(fake_solver)} typed but was.*"
with self.assertRaisesRegex(TypeError, error_msg):
fake_solver.concat(1, 1)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
import torch
from botorch.utils.probability.linalg import augment_cholesky, PivotedCholesky
from botorch.utils.testing import BotorchTestCase
class TestPivotedCholesky(BotorchTestCase):
def setUp(self):
super().setUp()
n = 5
with torch.random.fork_rng():
torch.random.manual_seed(0)
matrix = torch.randn(2, n, n)
matrix = matrix @ matrix.transpose(-1, -2)
diag = matrix.diagonal(dim1=-2, dim2=-1).sqrt()
idiag = diag.reciprocal().unsqueeze(-1)
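        # Normalize to a correlation matrix: `diag` keeps the original
        # scales while `tril` starts as the correlation matrix's lower
        # triangle and is factorized in place by `update_`.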
piv_chol = PivotedCholesky(
step=0,
tril=(idiag * matrix * idiag.transpose(-2, -1)).tril(),
perm=torch.arange(n)[None].expand(len(matrix), n).contiguous(),
diag=diag.clone(),
)
self.diag = diag
self.matrix = matrix
self.piv_chol = piv_chol
self.piv_chol.update_()
self.piv_chol.pivot_(torch.tensor([2, 3]))
self.piv_chol.update_()
def test_update_(self):
# Construct permuted matrices A
n = self.matrix.shape[-1]
A = (1 / self.diag).unsqueeze(-1) * self.matrix * (1 / self.diag).unsqueeze(-2)
A = A.gather(-1, self.piv_chol.perm.unsqueeze(-2).repeat(1, n, 1))
A = A.gather(-2, self.piv_chol.perm.unsqueeze(-1).repeat(1, 1, n))
# Test upper left block
L = torch.linalg.cholesky(A[..., :2, :2])
self.assertTrue(L.allclose(self.piv_chol.tril[..., :2, :2]))
# Test lower left block
        beta = torch.linalg.solve_triangular(L, A[..., :2, 2:], upper=False)
self.assertTrue(
beta.transpose(-1, -2).allclose(self.piv_chol.tril[..., 2:, :2])
)
# Test lower right block
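        # Schur complement identity: A22 - A21 @ A11^{-1} @ A12 equals
        # A22 - beta.T @ beta, since beta = L^{-1} @ A12 and A11 = L @ L.T.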
schur = A[..., 2:, 2:] - beta.transpose(-1, -2) @ beta
self.assertTrue(schur.tril().allclose(self.piv_chol.tril[..., 2:, 2:]))
def test_pivot_(self):
piv_chol = deepcopy(self.piv_chol)
self.assertEqual(piv_chol.perm.tolist(), [[0, 2, 1, 3, 4], [0, 3, 2, 1, 4]])
piv_chol.pivot_(torch.tensor([2, 3]))
self.assertEqual(piv_chol.perm.tolist(), [[0, 2, 1, 3, 4], [0, 3, 1, 2, 4]])
self.assertTrue(piv_chol.tril[0].equal(self.piv_chol.tril[0]))
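        # NOTE: the regex below is kept verbatim (wording included) so it
        # matches the message actually raised by `pivot_`.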
error_msg = "Argument `pivot` does to match with batch shape`."
with self.assertRaisesRegex(ValueError, error_msg):
piv_chol.pivot_(torch.tensor([1, 2, 3]))
A = self.piv_chol.tril[1]
B = piv_chol.tril[1]
self.assertTrue(A[2:4, :2].equal(B[2:4, :2].roll(1, 0)))
self.assertTrue(A[4:, 2:4].equal(B[4:, 2:4].roll(1, 1)))
def test_concat(self):
A = self.piv_chol.expand(2, 2)
B = self.piv_chol.expand(1, 2)
C = B.concat(B, dim=0)
for key in ("tril", "perm", "diag"):
self.assertTrue(getattr(A, key).equal(getattr(C, key)))
B.step = A.step + 1
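        # NOTE: the misspelling below is kept verbatim so the regex matches
        # the message actually raised by `concat`.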
error_msg = "Cannot conncatenate decompositions at different steps."
with self.assertRaisesRegex(ValueError, error_msg):
A.concat(B, dim=0)
B.step = A.step
B.perm = None
error_msg = "Types of field perm do not match."
with self.assertRaisesRegex(NotImplementedError, error_msg):
A.concat(B, dim=0)
def test_clone(self):
self.piv_chol.diag.requires_grad_(True)
try:
other = self.piv_chol.clone()
for key in ("tril", "perm", "diag"):
a = getattr(self.piv_chol, key)
b = getattr(other, key)
self.assertTrue(a.equal(b))
self.assertFalse(a is b)
other.diag.sum().backward()
self.assertTrue(self.piv_chol.diag.grad.eq(1).all())
finally:
self.piv_chol.diag.requires_grad_(False)
def test_detach(self):
self.piv_chol.diag.requires_grad_(True)
try:
other = self.piv_chol.detach()
for key in ("tril", "perm", "diag"):
a = getattr(self.piv_chol, key)
b = getattr(other, key)
self.assertTrue(a.equal(b))
self.assertFalse(a is b)
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.diag.sum().backward()
finally:
self.piv_chol.diag.requires_grad_(False)
def test_expand(self):
other = self.piv_chol.expand(3, 2)
for key in ("tril", "perm", "diag"):
a = getattr(self.piv_chol, key)
b = getattr(other, key)
self.assertEqual(b.shape[: -a.ndim], (3,))
self.assertTrue(b._base is a)
def test_augment(self):
K = self.matrix
n = K.shape[-1]
m = n // 2
Kaa = K[:, 0:m, 0:m]
Laa = torch.linalg.cholesky(Kaa)
Kbb = K[:, m:, m:]
error_msg = "One and only one of `Kba` or `Lba` must be provided."
with self.assertRaisesRegex(ValueError, error_msg):
augment_cholesky(Laa, Kbb)
Kba = K[:, m:, 0:m]
L_augmented = augment_cholesky(Laa, Kbb, Kba)
L = torch.linalg.cholesky(K)
self.assertAllClose(L_augmented, L)
# with jitter
jitter = 3e-2
Laa = torch.linalg.cholesky(Kaa + jitter * torch.eye(m).unsqueeze(0))
L_augmented = augment_cholesky(Laa, Kbb, Kba, jitter=jitter)
L = torch.linalg.cholesky(K + jitter * torch.eye(n).unsqueeze(0))
self.assertAllClose(L_augmented, L)
def test_errors(self):
matrix = self.matrix
        diag = matrix.diagonal(dim1=-2, dim2=-1).sqrt()
idiag = diag.reciprocal().unsqueeze(-1)
n = matrix.shape[-1]
# testing with erroneous inputs
wrong_matrix = matrix[..., 0]
error_msg = "Expected square matrices but `matrix` has shape.*"
with self.assertRaisesRegex(ValueError, error_msg):
PivotedCholesky(
step=0,
tril=wrong_matrix,
perm=torch.arange(n)[None].expand(len(matrix), n).contiguous(),
diag=diag.clone(),
validate_init=True,
)
wrong_perm = torch.arange(n)[None].expand(2 * len(matrix), n).contiguous()
error_msg = "`perm` of shape .* incompatible with `matrix` of shape .*"
with self.assertRaisesRegex(ValueError, error_msg):
PivotedCholesky(
step=0,
tril=(idiag * matrix * idiag.transpose(-2, -1)).tril(),
perm=wrong_perm,
diag=diag.clone(),
)
wrong_diag = torch.ones(2 * len(diag))
error_msg = "`diag` of shape .* incompatible with `matrix` of shape .*"
with self.assertRaises(ValueError, msg=error_msg):
PivotedCholesky(
step=0,
tril=(idiag * matrix * idiag.transpose(-2, -1)).tril(),
perm=torch.arange(n)[None].expand(len(matrix), n).contiguous(),
diag=wrong_diag,
)
# testing without validation, should pass,
# even though input does not have correct shape
piv_chol = PivotedCholesky(
step=0,
tril=matrix[..., 0],
perm=torch.arange(n)[None].expand(len(matrix), n).contiguous(),
diag=diag.clone(),
validate_init=False,
)
self.assertTrue(isinstance(piv_chol, PivotedCholesky))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.utils.probability import ndtr, utils
from botorch.utils.probability.utils import (
log_erfc,
log_erfcx,
log_ndtr,
log_phi,
log_prob_normal_in,
phi,
standard_normal_log_hazard,
)
from botorch.utils.testing import BotorchTestCase
from numpy.polynomial.legendre import leggauss as numpy_leggauss
class TestProbabilityUtils(BotorchTestCase):
def test_case_dispatcher(self):
with torch.random.fork_rng():
torch.random.manual_seed(0)
values = torch.rand([32])
# Test default case
output = utils.case_dispatcher(
out=torch.full_like(values, float("nan")),
default=lambda mask: 0,
)
self.assertTrue(output.eq(0).all())
# Test randomized value assignments
levels = 0.25, 0.5, 0.75
cases = [ # switching cases
(lambda level=level: values < level, lambda mask, i=i: i)
for i, level in enumerate(levels)
]
cases.append( # dummy case whose predicate is always False
(lambda: torch.full(values.shape, False), lambda mask: float("nan"))
)
output = utils.case_dispatcher(
out=torch.full_like(values, float("nan")),
cases=cases,
default=lambda mask: len(levels),
)
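        # Each element takes the value of the first case whose predicate it
        # satisfies; elements matching no case fall through to the default,
        # here `len(levels)`.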
self.assertTrue(output.isfinite().all())
active = torch.full(values.shape, True)
for i, level in enumerate(levels):
mask = active & (values < level)
self.assertTrue(output[mask].eq(i).all())
active[mask] = False
self.assertTrue(~active.any() or output[active].eq(len(levels)).all())
# testing mask.all() branch
edge_cases = [
(lambda: torch.full(values.shape, True), lambda mask: float("nan"))
]
output = utils.case_dispatcher(
out=torch.full_like(values, float("nan")),
cases=edge_cases,
default=lambda mask: len(levels),
)
# testing if not active.any() branch
pred = torch.full(values.shape, True)
pred[0] = False
edge_cases = [
(lambda: pred, lambda mask: False),
(lambda: torch.full(values.shape, True), lambda mask: False),
]
output = utils.case_dispatcher(
out=torch.full_like(values, float("nan")),
cases=edge_cases,
default=lambda mask: len(levels),
)
def test_build_positional_indices(self):
with torch.random.fork_rng():
torch.random.manual_seed(0)
values = torch.rand(3, 2, 5)
for dim in (values.ndim, -values.ndim - 1):
with self.assertRaisesRegex(ValueError, r"dim=(-?\d+) invalid for shape"):
utils.build_positional_indices(shape=values.shape, dim=dim)
start = utils.build_positional_indices(shape=values.shape, dim=-2)
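        # `start` holds the flattened offset of each row along `dim`, i.e.
        # multiples of `values.shape[-1]`, so `start + idx` indexes directly
        # into `values.view(-1)`.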
self.assertEqual(start.shape, values.shape[:-1])
self.assertTrue(start.remainder(values.shape[-1]).eq(0).all())
max_values, max_indices = values.max(dim=-1)
self.assertTrue(values.view(-1)[start + max_indices].equal(max_values))
def test_leggaus(self):
for a, b in zip(utils.leggauss(20, dtype=torch.float64), numpy_leggauss(20)):
self.assertEqual(a.dtype, torch.float64)
self.assertTrue((a.numpy() == b).all())
def test_swap_along_dim_(self):
with torch.random.fork_rng():
torch.random.manual_seed(0)
values = torch.rand(3, 2, 5)
start = utils.build_positional_indices(shape=values.shape, dim=-2)
min_values, i = values.min(dim=-1)
max_values, j = values.max(dim=-1)
out = utils.swap_along_dim_(values.clone(), i=i, j=j, dim=-1)
# Verify that positions of minimum and maximum values were swapped
for vec, min_val, min_idx, max_val, max_idx in zip(
out.view(-1, values.shape[-1]),
min_values.ravel(),
i.ravel(),
max_values.ravel(),
j.ravel(),
):
self.assertEqual(vec[min_idx], max_val)
self.assertEqual(vec[max_idx], min_val)
start = utils.build_positional_indices(shape=values.shape, dim=-2)
i_lidx = (start + i).ravel()
j_lidx = (start + j).ravel()
# Test passing in a pre-allocated copy buffer
temp = values.view(-1).clone()[i_lidx]
buff = torch.empty_like(temp)
out2 = utils.swap_along_dim_(values.clone(), i=i, j=j, dim=-1, buffer=buff)
self.assertTrue(out.equal(out2))
self.assertTrue(temp.equal(buff))
# Test homogeneous swaps
temp = utils.swap_along_dim_(values.clone(), i=0, j=2, dim=-1)
self.assertTrue(values[..., 0].equal(temp[..., 2]))
self.assertTrue(values[..., 2].equal(temp[..., 0]))
# Test exception handling
with self.assertRaisesRegex(ValueError, "Batch shapes of `i`"):
utils.swap_along_dim_(values, i=i.unsqueeze(-1), j=j, dim=-1)
with self.assertRaisesRegex(ValueError, "Batch shapes of `j`"):
utils.swap_along_dim_(values, i=i, j=j.unsqueeze(-1), dim=-1)
with self.assertRaisesRegex(ValueError, "at most 1-dimensional"):
utils.swap_along_dim_(values.view(-1), i=i, j=j_lidx, dim=0)
with self.assertRaisesRegex(ValueError, "at most 1-dimensional"):
utils.swap_along_dim_(values.view(-1), i=i_lidx, j=j, dim=0)
def test_gaussian_probabilities(self) -> None:
# test passes for each possible seed
torch.manual_seed(torch.randint(high=1000, size=(1,)))
# testing Gaussian probability functions
for dtype in (torch.float, torch.double):
rtol = 1e-12 if dtype == torch.double else 1e-6
atol = rtol
n = 16
x = 3 * torch.randn(n, device=self.device, dtype=dtype)
# first, test consistency between regular and log versions
self.assertAllClose(phi(x), log_phi(x).exp(), atol=atol, rtol=rtol)
self.assertAllClose(ndtr(x), log_ndtr(x).exp(), atol=atol, rtol=rtol)
# test correctness of log_erfc and log_erfcx
for special_f, custom_log_f in zip(
(torch.special.erfc, torch.special.erfcx), (log_erfc, log_erfcx)
):
with self.subTest(custom_log_f.__name__):
# first, testing for moderate values
n = 16
x = torch.rand(n, dtype=dtype, device=self.device)
x = torch.cat((-x, x))
x.requires_grad = True
custom_log_fx = custom_log_f(x)
special_log_fx = special_f(x).log()
self.assertAllClose(
custom_log_fx, special_log_fx, atol=atol, rtol=rtol
)
# testing backward passes
custom_log_fx.sum().backward()
x_grad = x.grad
x.grad[:] = 0
special_log_fx.sum().backward()
special_x_grad = x.grad
self.assertAllClose(x_grad, special_x_grad, atol=atol, rtol=rtol)
# testing robustness of log_erfc for large inputs
# large positive numbers are difficult for a naive implementation
x = torch.tensor(
[1e100 if dtype == torch.float64 else 1e10],
dtype=dtype,
device=self.device,
)
x = torch.cat((-x, x)) # looking at both tails
x.requires_grad = True
custom_log_fx = custom_log_f(x)
self.assertAllClose(
custom_log_fx.exp(),
special_f(x),
atol=atol,
rtol=rtol,
)
self.assertFalse(custom_log_fx.isnan().any())
self.assertFalse(custom_log_fx.isinf().any())
# we can't just take the log of erfc because the tail will be -inf
self.assertTrue(special_f(x).log().isinf().any())
# testing that gradients are usable floats
custom_log_fx.sum().backward()
self.assertFalse(x.grad.isnan().any())
self.assertFalse(x.grad.isinf().any())
# test limit behavior of log_ndtr
digits = 100 if dtype == torch.float64 else 20
ten = torch.tensor(10, dtype=dtype, device=self.device)
digits_tensor = torch.arange(0, digits, dtype=dtype, device=self.device)
# large negative values
            x_large_neg = -(ten ** digits_tensor.flip(-1))  # from -10**(digits-1) down to -1
            x_large_pos = ten**digits_tensor  # from 1 up to 10**(digits-1)
x = torch.cat((x_large_neg, x_large_pos))
x.requires_grad = True
torch_log_ndtr_x = torch.special.log_ndtr(x)
log_ndtr_x = log_ndtr(x)
self.assertTrue(
torch.allclose(log_ndtr_x, torch_log_ndtr_x, atol=atol, rtol=rtol)
)
# let's test gradients too
# first, note that the standard implementation exhibits numerical problems:
# 1) it contains -Inf for reasonable parameter ranges, and
# 2) the gradient is not strictly increasing, even ignoring Infs, and
            # takes nonsensical values (e.g. ~4e-01 at x = -1e100 in single precision,
# and similar for some large negative x in double precision).
torch_log_ndtr_x = torch.special.log_ndtr(x)
torch_log_ndtr_x.sum().backward()
torch_grad = x.grad.clone()
self.assertTrue(torch_grad.isinf().any())
            # in contrast, our implementation permits numerically accurate gradients
            # throughout the tested range:
x.grad[:] = 0 # zero out gradient
log_ndtr_x.sum().backward()
grad = x.grad.clone()
# it does not contain Infs or NaNs
self.assertFalse(grad.isinf().any())
self.assertFalse(grad.isnan().any())
# gradients are non-negative everywhere (approach zero as x goes to inf)
self.assertTrue((grad[:digits] > 0).all())
self.assertTrue((grad[digits:] >= 0).all())
# gradients are strictly decreasing for x < 0
self.assertTrue((grad.diff()[:digits] < 0).all())
self.assertTrue((grad.diff()[digits:] <= 0).all())
n = 16
# first test is easiest: a < 0 < b
a = -5 / 2 * torch.rand(n, dtype=dtype, device=self.device) - 1 / 2
b = 5 / 2 * torch.rand(n, dtype=dtype, device=self.device) + 1 / 2
self.assertTrue(
torch.allclose(
log_prob_normal_in(a, b).exp(),
ndtr(b) - ndtr(a),
atol=atol,
rtol=rtol,
)
)
# 0 < a < b, uses the a < b < 0 under the hood
a = ten ** digits_tensor[:-1]
b = ten ** digits_tensor[-1]
a.requires_grad, b.requires_grad = True, True
log_prob = log_prob_normal_in(a, b)
self.assertTrue((log_prob < 0).all())
self.assertTrue((log_prob.diff() < 0).all())
# test gradients
log_prob.sum().backward()
# checking that both gradients give non-Inf, non-NaN results everywhere
self.assertFalse(a.grad.isinf().any())
self.assertFalse(a.grad.isnan().any())
self.assertFalse(b.grad.isinf().any())
self.assertFalse(b.grad.isnan().any())
# since the upper bound is satisfied, relevant gradients are in lower bound
self.assertTrue((a.grad.diff() < 0).all())
# testing error raising for invalid inputs
a = torch.randn(3, 4, dtype=dtype, device=self.device)
b = torch.randn(3, 4, dtype=dtype, device=self.device)
a[2, 3] = b[2, 3]
with self.assertRaisesRegex(
ValueError,
"Received input tensors a, b for which not all a < b.",
):
log_prob_normal_in(a, b)
# testing gaussian hazard function
n = 16
x = torch.rand(n, dtype=dtype, device=self.device)
x = torch.cat((-x, x))
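            # The standard normal hazard is phi(x) / (1 - Phi(x)), i.e.
            # phi(x) / Phi(-x), so its log should equal
            # log_phi(x) - log_ndtr(-x).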
log_hx = standard_normal_log_hazard(x)
expected_log_hx = log_phi(x) - log_ndtr(-x)
self.assertAllClose(
expected_log_hx,
log_hx,
atol=1e-8 if dtype == torch.double else 1e-7,
) # correctness
# NOTE: Could extend tests here similarly to log_erfc(x) tests above, but
# since the hazard functions are built on log_erfcx, not urgent.
float16_msg = (
"only supports torch.float32 and torch.float64 dtypes, but received "
"x.dtype = torch.float16."
)
with self.assertRaisesRegex(TypeError, expected_regex=float16_msg):
log_erfc(torch.tensor(1.0, dtype=torch.float16, device=self.device))
with self.assertRaisesRegex(TypeError, expected_regex=float16_msg):
log_ndtr(torch.tensor(1.0, dtype=torch.float16, device=self.device))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from itertools import count
from typing import Any, Dict, Optional, Sequence, Tuple
import torch
from botorch.utils.probability.mvnxpb import MVNXPB
from botorch.utils.probability.truncated_multivariate_normal import (
TruncatedMultivariateNormal,
)
from botorch.utils.probability.unified_skew_normal import UnifiedSkewNormal
from botorch.utils.testing import BotorchTestCase
from linear_operator.operators import DenseLinearOperator
from torch import Tensor
from torch.distributions import MultivariateNormal
from torch.special import ndtri
class TestUnifiedSkewNormal(BotorchTestCase):
def setUp(
self,
ndims: Sequence[Tuple[int, int]] = ((1, 1), (2, 3), (3, 2), (3, 3)),
lower_quantile_max: float = 0.9, # if these get too far into the tail, naive
upper_quantile_min: float = 0.1, # MC methods will not produce any samples.
num_log_probs: int = 4,
mc_num_samples: int = 100000,
mc_num_rsamples: int = 1000,
mc_atol_multiplier: float = 4.0,
seed: int = 1,
dtype: torch.dtype = torch.float64,
device: Optional[torch.device] = None,
):
super().setUp()
self.dtype = dtype
self.seed_generator = count(seed)
self.num_log_probs = num_log_probs
self.mc_num_samples = mc_num_samples
self.mc_num_rsamples = mc_num_rsamples
self.mc_atol_multiplier = mc_atol_multiplier
self.distributions = []
self.sqrt_covariances = []
with torch.random.fork_rng():
torch.random.manual_seed(next(self.seed_generator))
for ndim_x, ndim_y in ndims:
ndim_xy = ndim_x + ndim_y
sqrt_covariance = self.gen_covariances(ndim_xy, as_sqrt=True)
covariance = sqrt_covariance @ sqrt_covariance.transpose(-1, -2)
loc_x = torch.randn(ndim_x, **self.tkwargs)
cov_x = covariance[:ndim_x, :ndim_x]
std_x = cov_x.diag().sqrt()
lb = lower_quantile_max * torch.rand(ndim_x, **self.tkwargs)
ub = lb.clip(min=upper_quantile_min) # scratch variable
ub = ub + (1 - ub) * torch.rand(ndim_x, **self.tkwargs)
bounds_x = loc_x.unsqueeze(-1) + std_x.unsqueeze(-1) * ndtri(
torch.stack([lb, ub], dim=-1)
)
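                # `ndtri` (the inverse standard normal CDF) turns the sampled
                # quantiles (lb, ub) into real-valued truncation bounds.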
xcov = covariance[:ndim_x, ndim_x:]
trunc = TruncatedMultivariateNormal(
loc=loc_x,
covariance_matrix=cov_x,
bounds=bounds_x,
validate_args=True,
)
gauss = MultivariateNormal(
loc=torch.randn(ndim_y, **self.tkwargs),
covariance_matrix=covariance[ndim_x:, ndim_x:],
)
self.sqrt_covariances.append(sqrt_covariance)
self.distributions.append(
UnifiedSkewNormal(
trunc=trunc, gauss=gauss, cross_covariance_matrix=xcov
)
)
@property
def tkwargs(self) -> Dict[str, Any]:
return {"dtype": self.dtype, "device": self.device}
def gen_covariances(
self,
ndim: int,
batch_shape: Sequence[int] = (),
as_sqrt: bool = False,
) -> Tensor:
shape = tuple(batch_shape) + (ndim, ndim)
eigvals = -torch.rand(shape[:-1], **self.tkwargs).log() # exponential rvs
orthmat = torch.linalg.svd(torch.randn(shape, **self.tkwargs)).U
sqrt_covar = orthmat * torch.sqrt(eigvals).unsqueeze(-2)
return sqrt_covar if as_sqrt else sqrt_covar @ sqrt_covar.transpose(-2, -1)
def test_log_prob(self):
with torch.random.fork_rng():
torch.random.manual_seed(next(self.seed_generator))
for usn in self.distributions:
shape = torch.Size([self.num_log_probs])
vals = usn.gauss.rsample(sample_shape=shape)
# Manually compute log probabilities
alpha = torch.cholesky_solve(
usn.cross_covariance_matrix.T, usn.gauss.scale_tril
)
loc_condx = usn.trunc.loc + (vals - usn.gauss.loc) @ alpha
cov_condx = (
usn.trunc.covariance_matrix - usn.cross_covariance_matrix @ alpha
)
solver = MVNXPB(
covariance_matrix=cov_condx.repeat(self.num_log_probs, 1, 1),
bounds=usn.trunc.bounds - loc_condx.unsqueeze(-1),
)
log_probs = (
solver.solve() + usn.gauss.log_prob(vals) - usn.trunc.log_partition
)
# Compare with log probabilities returned by class
self.assertTrue(log_probs.allclose(usn.log_prob(vals)))
# checking error handling when incorrectly shaped value is passed
wrong_vals = torch.cat((vals, vals), dim=-1)
error_msg = ".*with shape.*does not comply with the instance.*"
with self.assertRaisesRegex(ValueError, error_msg):
usn.log_prob(wrong_vals)
def test_rsample(self):
# TODO: Replace with e.g. two-sample test.
with torch.random.fork_rng():
torch.random.manual_seed(next(self.seed_generator))
# Pick a USN distribution at random
index = torch.randint(low=0, high=len(self.distributions), size=())
usn = self.distributions[index]
sqrt_covariance = self.sqrt_covariances[index]
# Generate draws using `rsample`
samples_y = usn.rsample(sample_shape=torch.Size([self.mc_num_rsamples]))
means = samples_y.mean(0)
covar = samples_y.T.cov()
# Generate draws using rejection sampling
ndim = sqrt_covariance.shape[-1]
base_rvs = torch.randn(self.mc_num_samples, ndim, **self.tkwargs)
_samples_x, _samples_y = (base_rvs @ sqrt_covariance.T).split(
usn.trunc.event_shape + usn.gauss.event_shape, dim=-1
)
_accept = torch.logical_and(
(_samples_x > usn.trunc.bounds[..., 0] - usn.trunc.loc).all(-1),
(_samples_x < usn.trunc.bounds[..., 1] - usn.trunc.loc).all(-1),
)
_means = usn.gauss.loc + _samples_y[_accept].mean(0)
_covar = _samples_y[_accept].T.cov()
atol = self.mc_atol_multiplier * (
_accept.count_nonzero() ** -0.5 + self.mc_num_rsamples**-0.5
)
self.assertAllClose(_means, means, rtol=0, atol=atol)
self.assertAllClose(_covar, covar, rtol=0, atol=atol)
def test_expand(self):
usn = next(iter(self.distributions))
        # Call these lazy properties to cache them and hit the associated
        # branches in `expand`.
usn._orthogonalized_gauss
usn.covariance_matrix
other = usn.expand(torch.Size([2]))
for key in ("loc", "covariance_matrix"):
a = getattr(usn.gauss, key)
self.assertTrue(all(a.equal(b) for b in getattr(other.gauss, key).unbind()))
for key in ("loc", "covariance_matrix", "bounds", "log_partition"):
a = getattr(usn.trunc, key)
self.assertTrue(all(a.equal(b) for b in getattr(other.trunc, key).unbind()))
for b in other.cross_covariance_matrix.unbind():
self.assertTrue(usn.cross_covariance_matrix.equal(b))
fake_usn = deepcopy(usn)
fake_usn.covariance_matrix = -1
error_msg = (
f"Type {type(-1)} of UnifiedSkewNormal's lazy property "
"covariance_matrix not supported.*"
)
with self.assertRaisesRegex(TypeError, error_msg):
other = fake_usn.expand(torch.Size([2]))
def test_validate_args(self):
for d in self.distributions:
error_msg = ".*is only well-defined for positive definite.*"
with self.assertRaisesRegex(ValueError, error_msg):
gauss = deepcopy(d.gauss)
gauss.covariance_matrix *= -1
UnifiedSkewNormal(d.trunc, gauss, d.cross_covariance_matrix)
error_msg = ".*-dimensional `trunc` incompatible with.*-dimensional `gauss"
with self.assertRaisesRegex(ValueError, error_msg):
gauss = deepcopy(d.gauss)
gauss._event_shape = (*gauss._event_shape, 1)
UnifiedSkewNormal(d.trunc, gauss, d.cross_covariance_matrix)
error_msg = "Incompatible batch shapes"
with self.assertRaisesRegex(ValueError, error_msg):
gauss = deepcopy(d.gauss)
trunc = deepcopy(d.trunc)
gauss._batch_shape = (*gauss._batch_shape, 2)
trunc._batch_shape = (*trunc._batch_shape, 3)
UnifiedSkewNormal(trunc, gauss, d.cross_covariance_matrix)
def test_properties(self):
orth = "_orthogonalized_gauss"
scal = "scale_tril"
for d in self.distributions:
# testing calling orthogonalized_gauss and scale_tril
usn = UnifiedSkewNormal(
d.trunc, d.gauss, d.cross_covariance_matrix, validate_args=False
)
self.assertTrue(orth not in usn.__dict__)
self.assertTrue(scal not in usn.__dict__)
usn._orthogonalized_gauss
self.assertTrue(orth in usn.__dict__)
self.assertTrue(scal not in usn.__dict__)
usn.scale_tril
self.assertTrue(orth in usn.__dict__)
self.assertTrue(scal in usn.__dict__)
# testing calling orthogonalized_gauss and scale_tril in reverse order
usn = UnifiedSkewNormal(
d.trunc, d.gauss, d.cross_covariance_matrix, validate_args=False
)
usn.scale_tril
self.assertTrue(orth not in usn.__dict__)
self.assertTrue(scal in usn.__dict__)
usn._orthogonalized_gauss
self.assertTrue(orth in usn.__dict__)
self.assertTrue(scal in usn.__dict__)
def test_covariance_matrix(self):
for d in self.distributions:
cov = d.covariance_matrix
self.assertTrue(isinstance(cov, Tensor))
# testing for symmetry
self.assertAllClose(cov, cov.mT)
# testing for positive-definiteness
ispd = False
try:
torch.linalg.cholesky(cov)
ispd = True
except RuntimeError:
pass
self.assertTrue(ispd)
# checking that linear operator to tensor conversion
# leads to same covariance matrix
xcov_linop = DenseLinearOperator(d.cross_covariance_matrix)
usn_linop = UnifiedSkewNormal(
trunc=d.trunc, gauss=d.gauss, cross_covariance_matrix=xcov_linop
)
cov_linop = usn_linop.covariance_matrix
self.assertTrue(isinstance(cov_linop, Tensor))
self.assertAllClose(cov, cov_linop)
def test_repr(self):
for d in self.distributions:
r = repr(d)
self.assertTrue(f"trunc: {d.trunc}" in r)
self.assertTrue(f"gauss: {d.gauss}" in r)
self.assertTrue(
f"cross_covariance_matrix: {d.cross_covariance_matrix.shape}" in r
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import itertools
import math
from unittest.mock import patch
import torch
from botorch.exceptions.errors import BotorchError
from botorch.utils.constraints import get_monotonicity_constraints
from botorch.utils.probability.lin_ess import LinearEllipticalSliceSampler
from botorch.utils.testing import BotorchTestCase
from torch import Tensor
class TestLinearEllipticalSliceSampler(BotorchTestCase):
def test_univariate(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
# test input validation
with self.assertRaises(BotorchError) as e:
LinearEllipticalSliceSampler()
self.assertTrue(
"requires either inequality constraints or bounds" in str(e)
)
# special case: N(0, 1) truncated to negative numbers
A = torch.ones(1, 1, **tkwargs)
b = torch.zeros(1, 1, **tkwargs)
x0 = -torch.rand(1, 1, **tkwargs)
sampler = LinearEllipticalSliceSampler(
inequality_constraints=(A, b), interior_point=x0
)
self.assertIsNone(sampler._mean)
self.assertIsNone(sampler._covariance_root)
self.assertTrue(torch.equal(sampler._x, x0))
self.assertTrue(torch.equal(sampler.x0, x0))
samples = sampler.draw(n=3)
self.assertEqual(samples.shape, torch.Size([3, 1]))
self.assertLessEqual(samples.max().item(), 0.0)
self.assertFalse(torch.equal(sampler._x, x0))
# same case as above, but instantiated with bounds
sampler = LinearEllipticalSliceSampler(
bounds=torch.tensor([[-float("inf")], [0.0]], **tkwargs),
interior_point=x0,
)
self.assertIsNone(sampler._mean)
self.assertIsNone(sampler._covariance_root)
self.assertTrue(torch.equal(sampler._x, x0))
self.assertTrue(torch.equal(sampler.x0, x0))
samples = sampler.draw(n=3)
self.assertEqual(samples.shape, torch.Size([3, 1]))
self.assertLessEqual(samples.max().item(), 0.0)
self.assertFalse(torch.equal(sampler._x, x0))
# same case as above, but with redundant constraints
sampler = LinearEllipticalSliceSampler(
inequality_constraints=(A, b),
bounds=torch.tensor([[-float("inf")], [1.0]], **tkwargs),
interior_point=x0,
)
self.assertIsNone(sampler._mean)
self.assertIsNone(sampler._covariance_root)
self.assertTrue(torch.equal(sampler._x, x0))
self.assertTrue(torch.equal(sampler.x0, x0))
samples = sampler.draw(n=3)
self.assertEqual(samples.shape, torch.Size([3, 1]))
self.assertLessEqual(samples.max().item(), 0.0)
self.assertFalse(torch.equal(sampler._x, x0))
# narrow feasible region, automatically find interior point
sampler = LinearEllipticalSliceSampler(
inequality_constraints=(A, b),
bounds=torch.tensor([[-0.25], [float("inf")]], **tkwargs),
)
self.assertIsNone(sampler._mean)
self.assertIsNone(sampler._covariance_root)
self.assertTrue(torch.all(sampler._is_feasible(sampler.x0)))
samples = sampler.draw(n=3)
self.assertEqual(samples.shape, torch.Size([3, 1]))
self.assertLessEqual(samples.max().item(), 0.0)
self.assertGreaterEqual(samples.min().item(), -0.25)
self.assertFalse(torch.equal(sampler._x, x0))
# non-standard mean / variance
mean = torch.tensor([[0.25]], **tkwargs)
covariance_matrix = torch.tensor([[4.0]], **tkwargs)
error_msg = ".*either covariance_matrix or covariance_root, not both.*"
with self.assertRaisesRegex(ValueError, error_msg):
LinearEllipticalSliceSampler(
bounds=torch.tensor([[0.0], [float("inf")]], **tkwargs),
covariance_matrix=covariance_matrix,
covariance_root=covariance_matrix.sqrt(),
)
error_msg = ".*Covariance matrix is not positive definite.*"
with self.assertRaisesRegex(ValueError, error_msg):
LinearEllipticalSliceSampler(
bounds=torch.tensor([[0.0], [float("inf")]], **tkwargs),
covariance_matrix=-covariance_matrix,
)
sampler = LinearEllipticalSliceSampler(
bounds=torch.tensor([[0.0], [float("inf")]], **tkwargs),
mean=mean,
covariance_matrix=covariance_matrix,
)
self.assertTrue(torch.equal(sampler._mean, mean))
self.assertTrue(
torch.equal(sampler._covariance_root, covariance_matrix.sqrt())
)
self.assertTrue(torch.all(sampler._is_feasible(sampler.x0)))
samples = sampler.draw(n=4)
self.assertEqual(samples.shape, torch.Size([4, 1]))
self.assertGreaterEqual(samples.min().item(), 0.0)
self.assertFalse(torch.equal(sampler._x, x0))
def test_bivariate(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
# special case: N(0, I) truncated to positive numbers
A = -torch.eye(2, **tkwargs)
b = torch.zeros(2, 1, **tkwargs)
sampler = LinearEllipticalSliceSampler(inequality_constraints=(A, b))
self.assertIsNone(sampler._mean)
self.assertIsNone(sampler._covariance_root)
self.assertTrue(torch.all(sampler._is_feasible(sampler.x0)))
samples = sampler.draw(n=3)
self.assertEqual(samples.shape, torch.Size([3, 2]))
self.assertGreaterEqual(samples.min().item(), 0.0)
self.assertFalse(torch.equal(sampler._x, sampler.x0))
# same case as above, but instantiated with bounds
sampler = LinearEllipticalSliceSampler(
bounds=torch.tensor(
[[0.0, 0.0], [float("inf"), float("inf")]], **tkwargs
),
)
self.assertIsNone(sampler._mean)
self.assertIsNone(sampler._covariance_root)
self.assertTrue(torch.all(sampler._is_feasible(sampler.x0)))
samples = sampler.draw(n=3)
self.assertEqual(samples.shape, torch.Size([3, 2]))
self.assertGreaterEqual(samples.min().item(), 0.0)
self.assertFalse(torch.equal(sampler._x, sampler.x0))
# A case with bounded domain and non-standard mean and covariance
mean = -3.0 * torch.ones(2, 1, **tkwargs)
covariance_matrix = torch.tensor([[4.0, 2.0], [2.0, 2.0]], **tkwargs)
bounds = torch.tensor(
[[-float("inf"), -float("inf")], [0.0, 0.0]], **tkwargs
)
A = torch.ones(1, 2, **tkwargs)
b = torch.tensor([[-2.0]], **tkwargs)
sampler = LinearEllipticalSliceSampler(
inequality_constraints=(A, b),
bounds=bounds,
mean=mean,
covariance_matrix=covariance_matrix,
)
self.assertTrue(torch.equal(sampler._mean, mean))
covar_root_xpct = torch.tensor([[2.0, 0.0], [1.0, 1.0]], **tkwargs)
self.assertTrue(torch.equal(sampler._covariance_root, covar_root_xpct))
samples = sampler.draw(n=3)
self.assertEqual(samples.shape, torch.Size([3, 2]))
self.assertTrue(sampler._is_feasible(samples.t()).all())
self.assertFalse(torch.equal(sampler._x, sampler.x0))
def test_multivariate(self):
torch.manual_seed(torch.randint(100, torch.Size([])).item())
rtol = 1e-3
for dtype, atol in zip((torch.float, torch.double), (2e-5, 1e-12)):
d = 5
tkwargs = {"device": self.device, "dtype": dtype}
# special case: N(0, I) truncated to greater than lower_bound
A = -torch.eye(d, **tkwargs)
lower_bound = 1
b = -torch.full((d, 1), lower_bound, **tkwargs)
sampler = LinearEllipticalSliceSampler(
inequality_constraints=(A, b), check_feasibility=True
)
self.assertIsNone(sampler._mean)
self.assertIsNone(sampler._covariance_root)
self.assertTrue(torch.all(sampler._is_feasible(sampler.x0)))
samples = sampler.draw(n=3)
self.assertEqual(samples.shape, torch.Size([3, d]))
self.assertGreaterEqual(samples.min().item(), lower_bound)
self.assertFalse(torch.equal(sampler._x, sampler.x0))
# same case as above, but instantiated with bounds
sampler = LinearEllipticalSliceSampler(
bounds=torch.tensor(
[[lower_bound for _ in range(d)], [float("inf") for _ in range(d)]],
**tkwargs,
),
)
self.assertIsNone(sampler._mean)
self.assertIsNone(sampler._covariance_root)
self.assertTrue(torch.all(sampler._is_feasible(sampler.x0)))
num_samples = 3
samples = sampler.draw(n=num_samples)
self.assertEqual(samples.shape, torch.Size([3, d]))
self.assertGreaterEqual(samples.min().item(), lower_bound)
self.assertFalse(torch.equal(sampler._x, sampler.x0))
self.assertEqual(sampler.lifetime_samples, num_samples)
samples = sampler.draw(n=num_samples)
self.assertEqual(sampler.lifetime_samples, 2 * num_samples)
# checking sampling from non-standard normal
lower_bound = -2
b = -torch.full((d, 1), lower_bound, **tkwargs)
mean = torch.arange(d, **tkwargs).view(d, 1)
cov_matrix = torch.randn(d, d, **tkwargs)
cov_matrix = cov_matrix @ cov_matrix.T
            # normalize so the largest variance equals 1; the SEM math below
            # then applies
cov_matrix /= cov_matrix.max()
interior_point = torch.ones_like(mean)
means_and_covs = [
(None, None),
(mean, None),
(None, cov_matrix),
(mean, cov_matrix),
]
fixed_indices = [None, [1, 3]]
for (mean_i, cov_i), ff_i in itertools.product(
means_and_covs,
fixed_indices,
):
with self.subTest(mean=mean_i, cov=cov_i, fixed_indices=ff_i):
sampler = LinearEllipticalSliceSampler(
inequality_constraints=(A, b),
interior_point=interior_point,
check_feasibility=True,
mean=mean_i,
covariance_matrix=cov_i,
fixed_indices=ff_i,
)
# checking standardized system of constraints
mean_i = torch.zeros(d, 1, **tkwargs) if mean_i is None else mean_i
cov_i = torch.eye(d, **tkwargs) if cov_i is None else cov_i
# Transform the system to incorporate equality constraints and non-
# standard mean and covariance.
Az_i, bz_i = A, b
if ff_i is None:
is_fixed = []
not_fixed = range(d)
else:
is_fixed = sampler._is_fixed
not_fixed = sampler._not_fixed
self.assertIsInstance(is_fixed, Tensor)
self.assertIsInstance(not_fixed, Tensor)
self.assertEqual(is_fixed.shape, (len(ff_i),))
self.assertEqual(not_fixed.shape, (d - len(ff_i),))
self.assertTrue(all(i in ff_i for i in is_fixed))
self.assertFalse(any(i in ff_i for i in not_fixed))
# Modifications to constraint system
Az_i = A[:, not_fixed]
bz_i = b - A[:, is_fixed] @ interior_point[is_fixed]
mean_i = mean_i[not_fixed]
cov_i = cov_i[not_fixed.unsqueeze(-1), not_fixed.unsqueeze(0)]
cov_root_i = torch.linalg.cholesky_ex(cov_i)[0]
bz_i = bz_i - Az_i @ mean_i
Az_i = Az_i @ cov_root_i
self.assertAllClose(sampler._Az, Az_i, atol=atol)
self.assertAllClose(sampler._bz, bz_i, atol=atol)
# testing standardization of non-fixed elements
x = torch.randn_like(mean_i)
z = sampler._standardize(x)
self.assertAllClose(
z,
torch.linalg.solve_triangular(
cov_root_i, x - mean_i, upper=False
),
atol=atol,
)
self.assertAllClose(sampler._unstandardize(z), x, atol=atol)
# testing transformation
x = torch.randn(d, 1, **tkwargs)
x[is_fixed] = interior_point[is_fixed] # fixed dimensions
z = sampler._transform(x)
self.assertAllClose(
z,
torch.linalg.solve_triangular(
cov_root_i, x[not_fixed] - mean_i, upper=False
),
atol=atol,
)
self.assertAllClose(sampler._untransform(z), x, atol=atol)
# checking rejection-free property
num_samples = 32
samples = sampler.draw(num_samples)
self.assertEqual(len(samples.unique(dim=0)), num_samples)
# checking mean is approximately equal to unconstrained distribution
# mean if the constraint boundary is far away from the unconstrained
# mean. NOTE: Expected failure rate due to statistical fluctuations
# of 5 sigma is 1 in 1.76 million.
                    # sem = 5 / sqrt(32) ~ 0.88 -> can differentiate from zero mean
sem = 5 / math.sqrt(num_samples)
sample_mean = samples.mean(dim=0).unsqueeze(-1)
self.assertAllClose(sample_mean[not_fixed], mean_i, atol=sem)
# testing the samples have correctly fixed features
self.assertTrue(
torch.equal(sample_mean[is_fixed], interior_point[is_fixed])
)
# checking that transformation does not change feasibility values
X_test = 3 * torch.randn(d, num_samples, **tkwargs)
X_test[is_fixed] = interior_point[is_fixed]
self.assertAllClose(
sampler._Az @ sampler._transform(X_test) - sampler._bz,
A @ X_test - b,
atol=atol,
rtol=rtol,
)
self.assertAllClose(
sampler._is_feasible(
sampler._transform(X_test), transformed=True
),
sampler._is_feasible(X_test, transformed=False),
atol=atol,
)
            # thinning and burn-in tests
burnin = 7
thinning = 2
sampler = LinearEllipticalSliceSampler(
inequality_constraints=(A, b),
check_feasibility=True,
burnin=burnin,
thinning=thinning,
)
self.assertEqual(sampler.lifetime_samples, burnin)
num_samples = 2
samples = sampler.draw(n=num_samples)
self.assertEqual(samples.shape, torch.Size([num_samples, d]))
self.assertEqual(
sampler.lifetime_samples, burnin + num_samples * (thinning + 1)
)
samples = sampler.draw(n=num_samples)
self.assertEqual(
sampler.lifetime_samples, burnin + 2 * num_samples * (thinning + 1)
)
# two special cases of _find_intersection_angles below:
# 1) testing _find_intersection_angles with a proposal "nu"
# that ensures that the full ellipse is feasible
# setting lower bound below the mean to ensure there's no intersection
lower_bound = -2
b = -torch.full((d, 1), lower_bound, **tkwargs)
nu = torch.full((d, 1), lower_bound + 1, **tkwargs)
sampler = LinearEllipticalSliceSampler(
interior_point=nu,
inequality_constraints=(A, b),
check_feasibility=True,
)
nu = torch.full((d, 1), lower_bound + 2, **tkwargs)
theta_active = sampler._find_active_intersections(nu)
self.assertTrue(
torch.equal(theta_active, sampler._full_angular_range.view(-1))
)
rot_angle, slices = sampler._find_rotated_intersections(nu)
self.assertEqual(rot_angle, 0.0)
self.assertAllClose(
slices, torch.tensor([[0.0, 2 * torch.pi]], **tkwargs), atol=atol
)
# 2) testing tangential intersection of ellipse with constraint
nu = torch.full((d, 1), lower_bound, **tkwargs)
sampler = LinearEllipticalSliceSampler(
interior_point=nu,
inequality_constraints=(A, b),
check_feasibility=True,
)
nu = torch.full((d, 1), lower_bound + 1, **tkwargs)
theta_active = sampler._find_active_intersections(nu)
self.assertTrue(theta_active.numel() % 2 == 0)
# testing error message for infeasible sample
sampler.check_feasibility = True
infeasible_x = torch.full((d, 1), lower_bound - 1, **tkwargs)
with patch.object(
sampler, "_draw_angle", return_value=torch.tensor(0.0, **tkwargs)
):
with patch.object(
sampler,
"_get_cart_coords",
return_value=infeasible_x,
):
with self.assertRaisesRegex(
RuntimeError, "Sampling resulted in infeasible point"
):
sampler.step()
# testing error for fixed features with no interior point
with self.assertRaisesRegex(
ValueError,
".*an interior point must also be provided in order to infer feasible ",
):
LinearEllipticalSliceSampler(
inequality_constraints=(A, b),
fixed_indices=[0],
)
with self.assertRaisesRegex(
ValueError,
"Provide either covariance_root or fixed_indices, not both.",
):
LinearEllipticalSliceSampler(
inequality_constraints=(A, b),
interior_point=interior_point,
fixed_indices=[0],
covariance_root=torch.eye(d, **tkwargs),
)
# high dimensional test case
# Encodes order constraints on all d variables: Ax < b <-> x[i] < x[i + 1]
d = 128
A, b = get_monotonicity_constraints(d=d, **tkwargs)
interior_point = torch.arange(d, **tkwargs).unsqueeze(-1) / d - 1 / 2
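            # Strictly increasing and contained in [-1/2, 1/2), so the point
            # satisfies every ordering constraint.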
sampler = LinearEllipticalSliceSampler(
inequality_constraints=(A, b),
interior_point=interior_point,
check_feasibility=True,
)
num_samples = 16
X_high_d = sampler.draw(n=num_samples)
self.assertEqual(X_high_d.shape, torch.Size([16, d]))
self.assertTrue(sampler._is_feasible(X_high_d.T).all())
self.assertEqual(sampler.lifetime_samples, num_samples)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import count
from typing import Sequence, Tuple
import torch
from botorch.utils.probability.mvnxpb import MVNXPB
from botorch.utils.probability.truncated_multivariate_normal import (
TruncatedMultivariateNormal,
)
from botorch.utils.testing import BotorchTestCase
from torch import Tensor
from torch.distributions import MultivariateNormal
from torch.special import ndtri
class TestTruncatedMultivariateNormal(BotorchTestCase):
def setUp(
self,
ndims: Sequence[Tuple[int, int]] = (2, 4),
lower_quantile_max: float = 0.9, # if these get too far into the tail, naive
upper_quantile_min: float = 0.1, # MC methods will not produce any samples.
num_log_probs: int = 4,
seed: int = 1,
) -> None:
super().setUp()
self.seed_generator = count(seed)
self.num_log_probs = num_log_probs
tkwargs = {"dtype": torch.float64}
self.distributions = []
self.sqrt_covariances = []
with torch.random.fork_rng():
torch.random.manual_seed(next(self.seed_generator))
for ndim in ndims:
loc = torch.randn(ndim, **tkwargs)
sqrt_covariance = self.gen_covariances(ndim, as_sqrt=True).to(**tkwargs)
covariance_matrix = sqrt_covariance @ sqrt_covariance.transpose(-1, -2)
std = covariance_matrix.diag().sqrt()
lb = lower_quantile_max * torch.rand(ndim, **tkwargs)
ub = lb.clip(min=upper_quantile_min) # scratch variable
ub = ub + (1 - ub) * torch.rand(ndim, **tkwargs)
bounds = loc.unsqueeze(-1) + std.unsqueeze(-1) * ndtri(
torch.stack([lb, ub], dim=-1)
)
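                # `ndtri` (the inverse standard normal CDF) turns the sampled
                # quantiles (lb, ub) into real-valued truncation bounds.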
self.distributions.append(
TruncatedMultivariateNormal(
loc=loc,
covariance_matrix=covariance_matrix,
bounds=bounds,
validate_args=True,
)
)
self.sqrt_covariances.append(sqrt_covariance)
def gen_covariances(
self,
ndim: int,
batch_shape: Sequence[int] = (),
as_sqrt: bool = False,
) -> Tensor:
shape = tuple(batch_shape) + (ndim, ndim)
eigvals = -torch.rand(shape[:-1]).log() # exponential rvs
orthmat = torch.linalg.svd(torch.randn(shape)).U
sqrt_covar = orthmat * torch.sqrt(eigvals).unsqueeze(-2)
return sqrt_covar if as_sqrt else sqrt_covar @ sqrt_covar.transpose(-2, -1)
def test_init(self):
trunc = next(iter(self.distributions))
with self.assertRaisesRegex(SyntaxError, "Missing required argument `bounds`"):
TruncatedMultivariateNormal(
loc=trunc.loc, covariance_matrix=trunc.covariance_matrix
)
with self.assertRaisesRegex(ValueError, r"Expected bounds.shape\[-1\] to be 2"):
TruncatedMultivariateNormal(
loc=trunc.loc,
covariance_matrix=trunc.covariance_matrix,
bounds=torch.empty(trunc.covariance_matrix.shape[:-1] + (1,)),
)
with self.assertRaisesRegex(ValueError, "`bounds` must be strictly increasing"):
TruncatedMultivariateNormal(
loc=trunc.loc,
covariance_matrix=trunc.covariance_matrix,
bounds=trunc.bounds.roll(shifts=1, dims=-1),
)
def test_solver(self):
for trunc in self.distributions:
# Test that solver was setup properly
solver = trunc.solver
self.assertIsInstance(solver, MVNXPB)
self.assertTrue(solver.perm.equal(solver.piv_chol.perm))
self.assertEqual(solver.step, trunc.covariance_matrix.shape[-1])
bounds = torch.gather(
trunc.covariance_matrix.diag().rsqrt().unsqueeze(-1)
* (trunc.bounds - trunc.loc.unsqueeze(-1)),
dim=-2,
index=solver.perm.unsqueeze(-1).expand(*trunc.bounds.shape),
)
self.assertTrue(solver.bounds.allclose(bounds))
# Test that (permuted) covariance matrices match
A = solver.piv_chol.diag.unsqueeze(-1) * solver.piv_chol.tril
A = A @ A.transpose(-2, -1)
n = A.shape[-1]
B = trunc.covariance_matrix
B = B.gather(-1, solver.perm.unsqueeze(-2).repeat(n, 1))
B = B.gather(-2, solver.perm.unsqueeze(-1).repeat(1, n))
self.assertTrue(A.allclose(B))
def test_log_prob(self):
with torch.random.fork_rng():
torch.random.manual_seed(next(self.seed_generator))
for trunc in self.distributions:
# Test generic values
vals = trunc.rsample(sample_shape=torch.Size([self.num_log_probs]))
test = MultivariateNormal.log_prob(trunc, vals) - trunc.log_partition
self.assertTrue(test.equal(trunc.log_prob(vals)))
# Test out of bounds
m = trunc.bounds.shape[-2] // 2
oob = torch.concat(
[trunc.bounds[..., :m, 0] - 1, trunc.bounds[..., m:, 1] + 1], dim=-1
)
self.assertTrue(trunc.log_prob(oob).eq(-float("inf")).all())
def test_expand(self):
trunc = next(iter(self.distributions))
other = trunc.expand(torch.Size([2]))
for key in ("loc", "covariance_matrix", "bounds", "log_partition"):
a = getattr(trunc, key)
self.assertTrue(all(a.allclose(b) for b in getattr(other, key).unbind()))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import count
from typing import Any, Callable, Dict, Optional, Tuple, Union
import torch
from botorch.exceptions import UnsupportedError
from botorch.utils.probability.bvn import (
_bvnu_polar,
_bvnu_taylor,
bvn,
bvnmom,
bvnu,
Phi,
)
from botorch.utils.testing import BotorchTestCase
from torch import Tensor
def run_gaussian_estimator(
estimator: Callable[[Tensor], Tuple[Tensor, Union[Tensor, float, int]]],
sqrt_cov: Tensor,
num_samples: int,
batch_limit: Optional[int] = None,
seed: Optional[int] = None,
) -> Tensor:
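    """Duplicate of the Monte Carlo helper from the MVNXPB tests: accumulate
    an estimator over batches of Gaussian samples drawn with covariance
    square root `sqrt_cov` and return numerator / denominator (plus the
    denominator)."""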
if batch_limit is None:
batch_limit = num_samples
ndim = sqrt_cov.shape[-1]
tkwargs = {"dtype": sqrt_cov.dtype, "device": sqrt_cov.device}
counter = 0
numerator = 0
denominator = 0
with torch.random.fork_rng():
if seed is not None:
torch.random.manual_seed(seed)
while counter < num_samples:
batch_size = min(batch_limit, num_samples - counter)
samples = torch.tensordot(
torch.randn(batch_size, ndim, **tkwargs),
sqrt_cov,
dims=([1], [-1]),
)
batch_numerator, batch_denominator = estimator(samples)
counter = counter + batch_size
numerator = numerator + batch_numerator
denominator = denominator + batch_denominator
return numerator / denominator, denominator
class TestBVN(BotorchTestCase):
def setUp(
self,
nprobs_per_coeff: int = 3,
bound_range: Tuple[float, float] = (-3.0, 3.0),
mc_num_samples: int = 10000,
mc_batch_limit: int = 1000,
mc_atol_multiplier: float = 4.0,
seed: int = 1,
dtype: torch.dtype = torch.float64,
device: Optional[torch.device] = None,
):
super().setUp()
self.dtype = dtype
self.seed_generator = count(seed)
self.nprobs_per_coeff = nprobs_per_coeff
self.mc_num_samples = mc_num_samples
self.mc_batch_limit = mc_batch_limit
self.mc_atol_multiplier = mc_atol_multiplier
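# include correlations on both sides of the |r| = 0.925 cutoff separating
# the polar and Taylor branches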
pos_coeffs = torch.cat(
[
torch.linspace(0, 1, 5, **self.tkwargs),
torch.tensor([0.01, 0.05, 0.924, 0.925, 0.99], **self.tkwargs),
]
)
self.correlations = torch.cat([pos_coeffs, -pos_coeffs[1:]])
with torch.random.fork_rng():
torch.manual_seed(next(self.seed_generator))
_lower = torch.rand(
nprobs_per_coeff, len(self.correlations), 2, **self.tkwargs
)
_upper = _lower + (1 - _lower) * torch.rand_like(_lower)
self.lower_bounds = bound_range[0] + (bound_range[1] - bound_range[0]) * _lower
self.upper_bounds = bound_range[0] + (bound_range[1] - bound_range[0]) * _upper
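# lower-triangular square roots [[1, 0], [r, sqrt(1 - r^2)]] of 2x2 correlation matrices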
self.sqrt_covariances = torch.zeros(
len(self.correlations), 2, 2, **self.tkwargs
)
self.sqrt_covariances[:, 0, 0] = 1
self.sqrt_covariances[:, 1, 0] = self.correlations
self.sqrt_covariances[:, 1, 1] = (1 - self.correlations**2) ** 0.5
@property
def tkwargs(self) -> Dict[str, Any]:
return {"dtype": self.dtype, "device": self.device}
@property
def xl(self):
return self.lower_bounds[..., 0]
@property
def xu(self):
return self.upper_bounds[..., 0]
@property
def yl(self):
return self.lower_bounds[..., 1]
@property
def yu(self):
return self.upper_bounds[..., 1]
def test_bvnu_polar(self) -> None:
r"""Test special cases where bvnu admits closed-form solutions.
Note: inf should not be passed to _bvnu as a bound; use large finite numbers instead.
"""
use_polar = self.correlations.abs() < 0.925
r = self.correlations[use_polar]
xl = self.xl[..., use_polar]
yl = self.yl[..., use_polar]
with self.subTest(msg="exact_unconstrained"):
prob = _bvnu_polar(r, torch.full_like(r, -1e16), torch.full_like(r, -1e16))
self.assertAllClose(prob, torch.ones_like(prob))
with self.subTest(msg="exact_marginal"):
prob = _bvnu_polar(
r.expand_as(yl),
torch.full_like(xl, -1e16),
yl,
)
test = Phi(-yl) # same as: 1 - P(y < yl)
self.assertAllClose(prob, test)
with self.subTest(msg="exact_independent"):
prob = _bvnu_polar(torch.zeros_like(xl), xl, yl)
test = Phi(-xl) * Phi(-yl)
self.assertAllClose(prob, test)
def test_bvnu_taylor(self) -> None:
r"""Test special cases where bvnu admits closed-form solutions.
Note: inf should not be passed to _bvnu as a bound; use large finite numbers instead.
"""
use_taylor = self.correlations.abs() >= 0.925
r = self.correlations[use_taylor]
xl = self.xl[..., use_taylor]
yl = self.yl[..., use_taylor]
with self.subTest(msg="exact_unconstrained"):
prob = _bvnu_taylor(r, torch.full_like(r, -1e16), torch.full_like(r, -1e16))
self.assertAllClose(prob, torch.ones_like(prob))
with self.subTest(msg="exact_marginal"):
prob = _bvnu_taylor(
r.expand_as(yl),
torch.full_like(xl, -1e16),
yl,
)
test = Phi(-yl) # same as: 1 - P(y < yl)
self.assertAllClose(prob, test)
with self.subTest(msg="exact_independent"):
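# r = 0 lies outside the Taylor regime (|r| >= 0.925), so the independent
# case is checked with the polar routine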
prob = _bvnu_polar(torch.zeros_like(xl), xl, yl)
test = Phi(-xl) * Phi(-yl)
self.assertAllClose(prob, test)
def test_bvn(self):
r"""Monte Carlo unit test for `bvn`."""
r = self.correlations.repeat(self.nprobs_per_coeff, 1)
solves = bvn(r, self.xl, self.yl, self.xu, self.yu)
with self.assertRaisesRegex(UnsupportedError, "same shape"):
bvn(r[..., :1], self.xl, self.yl, self.xu, self.yu)
with self.assertRaisesRegex(UnsupportedError, "same shape"):
bvnu(r[..., :1], r, r)
def _estimator(samples):
accept = torch.logical_and(
(samples > self.lower_bounds.unsqueeze(1)).all(-1),
(samples < self.upper_bounds.unsqueeze(1)).all(-1),
)
numerator = torch.count_nonzero(accept, dim=1).double()
denominator = len(samples)
return numerator, denominator
estimates, _ = run_gaussian_estimator(
estimator=_estimator,
sqrt_cov=self.sqrt_covariances,
num_samples=self.mc_num_samples,
batch_limit=self.mc_batch_limit,
seed=next(self.seed_generator),
)
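# the Monte Carlo error decays as O(num_samples^{-1/2}), so scale the tolerance accordingly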
atol = self.mc_atol_multiplier * (self.mc_num_samples**-0.5)
self.assertAllClose(estimates, solves, rtol=0, atol=atol)
def test_bvnmom(self):
r"""Monte Carlo unit test for `bvn`."""
r = self.correlations.repeat(self.nprobs_per_coeff, 1)
Ex, Ey = bvnmom(r, self.xl, self.yl, self.xu, self.yu)
with self.assertRaisesRegex(UnsupportedError, "same shape"):
bvnmom(r[..., :1], self.xl, self.yl, self.xu, self.yu)
def _estimator(samples):
accept = torch.logical_and(
(samples > self.lower_bounds.unsqueeze(1)).all(-1),
(samples < self.upper_bounds.unsqueeze(1)).all(-1),
)
numerator = torch.einsum("snd,psn->pnd", samples, accept.to(samples.dtype))
denominator = torch.count_nonzero(accept, dim=1).to(samples.dtype)
return numerator, denominator.unsqueeze(-1)
estimates, num_samples = run_gaussian_estimator(
estimator=_estimator,
sqrt_cov=self.sqrt_covariances,
num_samples=self.mc_num_samples,
batch_limit=self.mc_batch_limit,
seed=next(self.seed_generator),
)
for n, ex, ey, _ex, _ey in zip(
*map(torch.ravel, (num_samples.squeeze(-1), Ex, Ey, *estimates.unbind(-1)))
):
if n:
atol = self.mc_atol_multiplier * (n**-0.5)
self.assertAllClose(ex, _ex, rtol=0, atol=atol)
self.assertAllClose(ey, _ey, rtol=0, atol=atol)
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.testing import BotorchTestCase
from botorch.utils.transforms import normalize
class TestGetChebyshevScalarization(BotorchTestCase):
def test_get_chebyshev_scalarization(self):
tkwargs = {"device": self.device}
Y_train = torch.rand(4, 2, **tkwargs)
neg_Y_train = -Y_train
neg_Y_bounds = torch.stack(
[
neg_Y_train.min(dim=-2, keepdim=True).values,
neg_Y_train.max(dim=-2, keepdim=True).values,
],
dim=0,
)
for dtype in (torch.float, torch.double):
for batch_shape in (torch.Size([]), torch.Size([3])):
tkwargs["dtype"] = dtype
Y_test = torch.rand(batch_shape + torch.Size([5, 2]), **tkwargs)
neg_Y_test = -Y_test
Y_train = Y_train.to(**tkwargs)
neg_Y_bounds = neg_Y_bounds.to(**tkwargs)
normalized_neg_Y_test = normalize(neg_Y_test, neg_Y_bounds)
# test wrong shape
with self.assertRaises(BotorchTensorDimensionError):
get_chebyshev_scalarization(
weights=torch.zeros(3, **tkwargs), Y=Y_train
)
weights = torch.ones(2, **tkwargs)
# test batch Y
with self.assertRaises(NotImplementedError):
get_chebyshev_scalarization(weights=weights, Y=Y_train.unsqueeze(0))
# basic test
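# the scalarization negates Y (minimization convention), normalizes to
# [0, 1] using the training bounds, and returns -(max + alpha * sum)
# with alpha = 0.05 by default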
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=Y_train
)
Y_transformed = objective_transform(Y_test)
expected_Y_transformed = -(
normalized_neg_Y_test.max(dim=-1).values
+ 0.05 * normalized_neg_Y_test.sum(dim=-1)
)
self.assertTrue(torch.equal(Y_transformed, expected_Y_transformed))
# check that using negative objectives and negative weights
# yields an equivalent scalarized outcome
objective_transform2 = get_chebyshev_scalarization(
weights=-weights, Y=-Y_train
)
Y_transformed2 = objective_transform2(-Y_test)
self.assertAllClose(Y_transformed, Y_transformed2)
# test different alpha
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=Y_train, alpha=1.0
)
Y_transformed = objective_transform(Y_test)
expected_Y_transformed = -(
normalized_neg_Y_test.max(dim=-1).values
+ normalized_neg_Y_test.sum(dim=-1)
)
self.assertTrue(torch.equal(Y_transformed, expected_Y_transformed))
# Test different weights
weights = torch.tensor([0.3, 0.7], **tkwargs)
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=Y_train
)
Y_transformed = objective_transform(Y_test)
expected_Y_transformed = -(
(weights * normalized_neg_Y_test).max(dim=-1).values
+ 0.05 * (weights * normalized_neg_Y_test).sum(dim=-1)
)
self.assertTrue(torch.equal(Y_transformed, expected_Y_transformed))
# test that when minimizing an objective (i.e. with a negative weight),
# normalized Y values are shifted from [0,1] to [-1,0]
weights = torch.tensor([0.3, -0.7], **tkwargs)
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=Y_train
)
Y_transformed = objective_transform(Y_test)
normalized_neg_Y_test[..., -1] = normalized_neg_Y_test[..., -1] - 1
expected_Y_transformed = -(
(weights * normalized_neg_Y_test).max(dim=-1).values
+ 0.05 * (weights * normalized_neg_Y_test).sum(dim=-1)
)
self.assertTrue(torch.equal(Y_transformed, expected_Y_transformed))
# test that with no observations there is no normalization
weights = torch.tensor([0.3, 0.7], **tkwargs)
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=Y_train[:0]
)
Y_transformed = objective_transform(Y_test)
expected_Y_transformed = -(
(weights * neg_Y_test).max(dim=-1).values
+ 0.05 * (weights * neg_Y_test).sum(dim=-1)
)
self.assertTrue(torch.equal(Y_transformed, expected_Y_transformed))
# test that error is raised with negative weights and empty Y
with self.assertRaises(UnsupportedError):
get_chebyshev_scalarization(weights=-weights, Y=Y_train[:0])
# test that with a single observation, normalization just subtracts
# that (negated) observation
single_Y_train = Y_train[:1]
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=single_Y_train
)
Y_transformed = objective_transform(Y_test)
normalized_neg_Y_test = neg_Y_test + single_Y_train
expected_Y_transformed = -(
(weights * normalized_neg_Y_test).max(dim=-1).values
+ 0.05 * (weights * normalized_neg_Y_test).sum(dim=-1)
)
self.assertAllClose(Y_transformed, expected_Y_transformed)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import BotorchError, BotorchTensorDimensionError
from botorch.utils.multi_objective.hypervolume import Hypervolume, infer_reference_point
from botorch.utils.testing import BotorchTestCase
EPS = 1e-4
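# 5-d Pareto front from DTLZ2; exercises several edge cases of the m=5
# hypervolume sweep below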
pareto_Y_5d = [
[
-0.42890000759972685,
-0.1446377658556118,
-0.10335085850913295,
-0.49502106785623134,
-0.7344368200145969,
],
[
-0.5124511265981003,
-0.5332028064973291,
-0.36775794432917486,
-0.5261970836251835,
-0.20238412378158688,
],
[
-0.5960106882406603,
-0.32491865590163566,
-0.5815435820797972,
-0.08375675085018466,
-0.44044408882261904,
],
[
-0.6135323874039154,
-0.5658986040644925,
-0.39684098121151284,
-0.3798488823307603,
-0.03960860698719982,
],
[
-0.3957157311550265,
-0.4045394517331393,
-0.07282417302694655,
-0.5699496614967537,
-0.5912790502720109,
],
[
-0.06392539039575441,
-0.17204800894814581,
-0.6620860391018546,
-0.7241037454151875,
-0.06024010111083461,
],
]
class TestHypervolume(BotorchTestCase):
def test_hypervolume(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
ref_point = torch.tensor([0.0, 0.0], **tkwargs)
hv = Hypervolume(ref_point)
# test ref point
self.assertTrue(torch.equal(ref_point, hv.ref_point))
self.assertTrue(torch.equal(-ref_point, hv._ref_point))
# test dimension errors
with self.assertRaises(BotorchTensorDimensionError):
hv.compute(pareto_Y=torch.empty(2, **tkwargs))
with self.assertRaises(BotorchTensorDimensionError):
hv.compute(pareto_Y=torch.empty(1, 1, 2, **tkwargs))
with self.assertRaises(BotorchTensorDimensionError):
hv.compute(pareto_Y=torch.empty(1, 3, **tkwargs))
# test no pareto points
pareto_Y = (ref_point - 1).view(1, 2)
volume = hv.compute(pareto_Y)
self.assertEqual(volume, 0.0)
# test 1-d
hv = Hypervolume(ref_point[:1])
volume = hv.compute(pareto_Y=torch.ones(1, 1, **tkwargs))
self.assertEqual(volume, 1.0)
# test m=2
hv = Hypervolume(ref_point)
pareto_Y = torch.tensor(
[[8.5, 3.0], [8.5, 3.5], [5.0, 5.0], [9.0, 1.0], [4.0, 5.0]], **tkwargs
)
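# non-dominated points are (9, 1), (8.5, 3.5), (5, 5);
# HV = 9 * 1 + 8.5 * 2.5 + 5 * 1.5 = 37.75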
volume = hv.compute(pareto_Y=pareto_Y)
self.assertTrue(abs(volume - 37.75) < EPS)
# test nonzero reference point
ref_point = torch.tensor([1.0, 0.5], **tkwargs)
hv = Hypervolume(ref_point)
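# with reference point (1, 0.5): HV = 8 * 0.5 + 7.5 * 2.5 + 4 * 1.5 = 28.75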
volume = hv.compute(pareto_Y=pareto_Y)
self.assertTrue(abs(volume - 28.75) < EPS)
# test m=3
ref_point = torch.tensor([-2.1, -2.5, -2.3], **tkwargs)
hv = Hypervolume(ref_point)
pareto_Y = torch.tensor(
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], **tkwargs
)
volume = hv.compute(pareto_Y=pareto_Y)
self.assertTrue(abs(volume - 11.075) < EPS)
# test m=4
ref_point = torch.tensor([-2.1, -2.5, -2.3, -2.0], **tkwargs)
hv = Hypervolume(ref_point)
pareto_Y = torch.tensor(
[
[-1.0, 0.0, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.0],
[0.0, 0.0, -1.0, 0.0],
[0.0, 0.0, 0.0, -1.0],
],
**tkwargs,
)
volume = hv.compute(pareto_Y=pareto_Y)
self.assertTrue(abs(volume - 23.15) < EPS)
# test m=5
# this pareto front is from DTLZ2 and covers several edge cases
ref_point = torch.full(torch.Size([5]), -1.1, **tkwargs)
hv = Hypervolume(ref_point)
pareto_Y = torch.tensor(pareto_Y_5d, **tkwargs)
volume = hv.compute(pareto_Y)
self.assertTrue(abs(volume - 0.42127855991587) < EPS)
class TestGetReferencePoint(BotorchTestCase):
def test_infer_reference_point(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
Y = torch.tensor(
[
[-13.9599, -24.0326],
[-19.6755, -11.4721],
[-18.7742, -11.9193],
[-16.6614, -12.3283],
[-17.7663, -11.9941],
[-17.4367, -12.2948],
[-19.4244, -11.9158],
[-14.0806, -22.0004],
],
**tkwargs,
)
# test empty pareto_Y and no max_ref_point
with self.assertRaises(BotorchError):
infer_reference_point(pareto_Y=Y[:0])
# test max_ref_point does not change when there exists a better Y point
max_ref_point = Y.min(dim=0).values
ref_point = infer_reference_point(max_ref_point=max_ref_point, pareto_Y=Y)
self.assertTrue(torch.equal(max_ref_point, ref_point))
# test scale_max_ref_point
ref_point = infer_reference_point(
max_ref_point=max_ref_point, pareto_Y=Y, scale_max_ref_point=True
)
better_than_ref = (Y > max_ref_point).all(dim=-1)
Y_better_than_ref = Y[better_than_ref]
ideal_better_than_ref = Y_better_than_ref.max(dim=0).values
self.assertTrue(
torch.equal(
max_ref_point - 0.1 * (ideal_better_than_ref - max_ref_point),
ref_point,
)
)
# test case when there does not exist a better Y point
max_ref_point = torch.tensor([-2.2, -2.3], **tkwargs)
ref_point = infer_reference_point(max_ref_point=max_ref_point, pareto_Y=Y)
self.assertTrue((ref_point < Y).all(dim=-1).any())
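# with no point better than max_ref_point, the fallback is
# nadir - scale * (ideal - nadir) with the default scale of 0.1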
nadir = Y.min(dim=0).values
ideal = Y.max(dim=0).values
expected_ref_point = nadir - 0.1 * (ideal - nadir)
self.assertAllClose(ref_point, expected_ref_point)
# test with scale
expected_ref_point = nadir - 0.2 * (ideal - nadir)
ref_point = infer_reference_point(
max_ref_point=max_ref_point, pareto_Y=Y, scale=0.2
)
self.assertAllClose(ref_point, expected_ref_point)
# test case when one objective is better than max_ref_point, and
# one objective is worse
max_ref_point = torch.tensor([-2.2, -12.1], **tkwargs)
expected_ref_point = nadir - 0.1 * (ideal - nadir)
expected_ref_point = torch.min(expected_ref_point, max_ref_point)
ref_point = infer_reference_point(max_ref_point=max_ref_point, pareto_Y=Y)
self.assertTrue(torch.equal(expected_ref_point, ref_point))
# test case when one objective is better than max_ref_point, and
# one objective is worse with scale_max_ref_point
ref_point = infer_reference_point(
max_ref_point=max_ref_point, pareto_Y=Y, scale_max_ref_point=True
)
nadir2 = torch.min(nadir, max_ref_point)
expected_ref_point = nadir2 - 0.1 * (ideal - nadir2)
self.assertTrue(torch.equal(expected_ref_point, ref_point))
# test case when size of pareto_Y is 0
ref_point = infer_reference_point(
max_ref_point=max_ref_point, pareto_Y=Y[:0]
)
self.assertTrue(torch.equal(max_ref_point, ref_point))
# test case when size of pareto_Y is 0 with scale_max_ref_point
ref_point = infer_reference_point(
max_ref_point=max_ref_point,
pareto_Y=Y[:0],
scale_max_ref_point=True,
scale=0.2,
)
self.assertTrue(
torch.equal(max_ref_point - 0.2 * max_ref_point.abs(), ref_point)
)
# test case when size of pareto_Y is 1
ref_point = infer_reference_point(
max_ref_point=max_ref_point, pareto_Y=Y[:1]
)
expected_ref_point = Y[0] - 0.1 * Y[0].abs()
self.assertTrue(torch.equal(expected_ref_point, ref_point))
# test case when size of pareto_Y is 1 with scale parameter
ref_point = infer_reference_point(
max_ref_point=max_ref_point, pareto_Y=Y[:1], scale=0.2
)
expected_ref_point = Y[0] - 0.2 * Y[0].abs()
self.assertTrue(torch.equal(expected_ref_point, ref_point))
# test no max_ref_point specified
expected_ref_point = nadir - 0.2 * (ideal - nadir)
ref_point = infer_reference_point(pareto_Y=Y, scale=0.2)
self.assertAllClose(ref_point, expected_ref_point)
ref_point = infer_reference_point(pareto_Y=Y)
expected_ref_point = nadir - 0.1 * (ideal - nadir)
self.assertAllClose(ref_point, expected_ref_point)
# Test all NaN max_ref_point.
ref_point = infer_reference_point(
pareto_Y=Y,
max_ref_point=torch.tensor([float("nan"), float("nan")], **tkwargs),
)
self.assertAllClose(ref_point, expected_ref_point)
# Test partial NaN, partial worse than nadir.
expected_ref_point = nadir.clone()
expected_ref_point[1] = -1e5
ref_point = infer_reference_point(
pareto_Y=Y,
max_ref_point=torch.tensor([float("nan"), -1e5], **tkwargs),
scale=0.0,
)
self.assertAllClose(ref_point, expected_ref_point)
# Test partial NaN, partial better than nadir.
expected_ref_point = nadir.clone()  # clone: the in-place edit below must not mutate nadir
ref_point = infer_reference_point(
pareto_Y=Y,
max_ref_point=torch.tensor([float("nan"), 1e5], **tkwargs),
scale=0.0,
)
self.assertAllClose(ref_point, expected_ref_point)
# Test partial NaN, partial worse than nadir with scale_max_ref_point.
expected_ref_point[1] = -1e5
expected_ref_point = expected_ref_point - 0.2 * (ideal - expected_ref_point)
ref_point = infer_reference_point(
pareto_Y=Y,
max_ref_point=torch.tensor([float("nan"), -1e5], **tkwargs),
scale=0.2,
scale_max_ref_point=True,
)
self.assertAllClose(ref_point, expected_ref_point)
# Test with single point in Pareto_Y, worse than ref point.
ref_point = infer_reference_point(
pareto_Y=Y[:1],
max_ref_point=torch.tensor([float("nan"), 1e5], **tkwargs),
)
expected_ref_point = Y[0] - 0.1 * Y[0].abs()
self.assertTrue(torch.equal(expected_ref_point, ref_point))
# Test with single point in Pareto_Y, better than ref point.
ref_point = infer_reference_point(
pareto_Y=Y[:1],
max_ref_point=torch.tensor([float("nan"), -1e5], **tkwargs),
scale_max_ref_point=True,
)
expected_ref_point[1] = -1e5 - 0.1 * Y[0, 1].abs()
self.assertTrue(torch.equal(expected_ref_point, ref_point))
# Empty pareto_Y with nan ref point.
with self.assertRaisesRegex(BotorchError, "ref point includes NaN"):
ref_point = infer_reference_point(
pareto_Y=Y[:0],
max_ref_point=torch.tensor([float("nan"), -1e5], **tkwargs),
)
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import product
from unittest.mock import patch
import torch
from botorch.utils.multi_objective.pareto import (
_is_non_dominated_loop,
is_non_dominated,
)
from botorch.utils.testing import BotorchTestCase
class TestPareto(BotorchTestCase):
def test_is_non_dominated(self) -> None:
tkwargs = {"device": self.device}
Y = torch.tensor(
[
[1.0, 5.0],
[10.0, 3.0],
[4.0, 5.0],
[4.0, 5.0],
[5.0, 5.0],
[8.5, 3.5],
[8.5, 3.5],
[8.5, 3.0],
[9.0, 1.0],
]
)
expected_nondom_Y = torch.tensor([[10.0, 3.0], [5.0, 5.0], [8.5, 3.5]])
Yb = Y.clone()
Yb[1] = 0
expected_nondom_Yb = torch.tensor([[5.0, 5.0], [8.5, 3.5], [9.0, 1.0]])
Y3 = torch.tensor(
[
[4.0, 2.0, 3.0],
[2.0, 4.0, 1.0],
[3.0, 5.0, 1.0],
[2.0, 4.0, 2.0],
[2.0, 4.0, 2.0],
[1.0, 3.0, 4.0],
[1.0, 2.0, 4.0],
[1.0, 2.0, 6.0],
]
)
Y3b = Y3.clone()
Y3b[0] = 0
expected_nondom_Y3 = torch.tensor(
[
[4.0, 2.0, 3.0],
[3.0, 5.0, 1.0],
[2.0, 4.0, 2.0],
[1.0, 3.0, 4.0],
[1.0, 2.0, 6.0],
]
)
expected_nondom_Y3b = expected_nondom_Y3[1:]
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
Y = Y.to(**tkwargs)
expected_nondom_Y = expected_nondom_Y.to(**tkwargs)
Yb = Yb.to(**tkwargs)
expected_nondom_Yb = expected_nondom_Yb.to(**tkwargs)
Y3 = Y3.to(**tkwargs)
expected_nondom_Y3 = expected_nondom_Y3.to(**tkwargs)
Y3b = Y3b.to(**tkwargs)
expected_nondom_Y3b = expected_nondom_Y3b.to(**tkwargs)
# test 2d
nondom_Y = Y[is_non_dominated(Y)]
self.assertTrue(torch.equal(expected_nondom_Y, nondom_Y))
# test deduplicate=False
expected_nondom_Y_no_dedup = torch.cat(
[expected_nondom_Y, expected_nondom_Y[-1:]], dim=0
)
nondom_Y = Y[is_non_dominated(Y, deduplicate=False)]
self.assertTrue(torch.equal(expected_nondom_Y_no_dedup, nondom_Y))
# test batch
batch_Y = torch.stack([Y, Yb], dim=0)
nondom_mask = is_non_dominated(batch_Y)
self.assertTrue(torch.equal(batch_Y[0][nondom_mask[0]], expected_nondom_Y))
self.assertTrue(torch.equal(batch_Y[1][nondom_mask[1]], expected_nondom_Yb))
# test deduplicate=False
expected_nondom_Yb_no_dedup = torch.cat(
[expected_nondom_Yb[:-1], expected_nondom_Yb[-2:]], dim=0
)
nondom_mask = is_non_dominated(batch_Y, deduplicate=False)
self.assertTrue(
torch.equal(batch_Y[0][nondom_mask[0]], expected_nondom_Y_no_dedup)
)
self.assertTrue(
torch.equal(batch_Y[1][nondom_mask[1]], expected_nondom_Yb_no_dedup)
)
# test 3d
nondom_Y3 = Y3[is_non_dominated(Y3)]
self.assertTrue(torch.equal(expected_nondom_Y3, nondom_Y3))
# test deduplicate=False
expected_nondom_Y3_no_dedup = torch.cat(
[expected_nondom_Y3[:3], expected_nondom_Y3[2:]], dim=0
)
nondom_Y3 = Y3[is_non_dominated(Y3, deduplicate=False)]
self.assertTrue(torch.equal(expected_nondom_Y3_no_dedup, nondom_Y3))
# test batch
batch_Y3 = torch.stack([Y3, Y3b], dim=0)
nondom_mask3 = is_non_dominated(batch_Y3)
self.assertTrue(
torch.equal(batch_Y3[0][nondom_mask3[0]], expected_nondom_Y3)
)
self.assertTrue(
torch.equal(batch_Y3[1][nondom_mask3[1]], expected_nondom_Y3b)
)
# test deduplicate=False
nondom_mask3 = is_non_dominated(batch_Y3, deduplicate=False)
self.assertTrue(
torch.equal(batch_Y3[0][nondom_mask3[0]], expected_nondom_Y3_no_dedup)
)
expected_nondom_Y3b_no_dedup = torch.cat(
[expected_nondom_Y3b[:2], expected_nondom_Y3b[1:]], dim=0
)
self.assertTrue(
torch.equal(batch_Y3[1][nondom_mask3[1]], expected_nondom_Y3b_no_dedup)
)
# test empty pareto
mask = is_non_dominated(Y3[:0])
expected_mask = torch.zeros(0, dtype=torch.bool, device=Y3.device)
self.assertTrue(torch.equal(expected_mask, mask))
mask = is_non_dominated(batch_Y3[:, :0])
expected_mask = torch.zeros(
*batch_Y3.shape[:-2], 0, dtype=torch.bool, device=Y3.device
)
self.assertTrue(torch.equal(expected_mask, mask))
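# for large inputs is_non_dominated should dispatch to the loop-based
# implementation; the 1001-point tensor below is assumed to exceed that threshold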
with patch(
"botorch.utils.multi_objective.pareto._is_non_dominated_loop"
) as mock_is_non_dominated_loop:
y = torch.rand(1001, 2, dtype=dtype, device=Y3.device)
is_non_dominated(y)
mock_is_non_dominated_loop.assert_called_once()
cargs = mock_is_non_dominated_loop.call_args[0]
self.assertTrue(torch.equal(cargs[0], y))
def test_is_non_dominated_loop(self):
n = 20
tkwargs = {"device": self.device}
for dtype, batch_shape, m, maximize in product(
(torch.float, torch.double),
(torch.Size([]), torch.Size([2])),
(1, 2, 3),
(True, False),
):
tkwargs["dtype"] = dtype
Y = torch.rand(batch_shape + torch.Size([n, m]), **tkwargs)
pareto_mask = _is_non_dominated_loop(
# this is so that we can assume maximization in the test
# code
Y=Y if maximize else -Y,
maximize=maximize,
)
self.assertEqual(pareto_mask.shape, Y.shape[:-1])
self.assertEqual(pareto_mask.dtype, torch.bool)
self.assertEqual(pareto_mask.device.type, self.device.type)
if len(batch_shape) > 0:
pareto_masks = [pareto_mask[i] for i in range(pareto_mask.shape[0])]
else:
pareto_masks = [pareto_mask]
Y = Y.unsqueeze(0)
for i, mask in enumerate(pareto_masks):
pareto_Y = Y[i][mask]
pareto_indices = mask.nonzero().view(-1)
if pareto_Y.shape[0] > 1:
# compare against other pareto points
point_mask = torch.zeros(
pareto_Y.shape[0], dtype=torch.bool, device=self.device
)
Y_not_j_mask = torch.ones(
Y[i].shape[0], dtype=torch.bool, device=self.device
)
for j in range(pareto_Y.shape[0]):
point_mask[j] = True
# check each pareto point is non-dominated
Y_idx = pareto_indices[j].item()
Y_not_j_mask[Y_idx] = False
self.assertFalse(
(pareto_Y[point_mask] <= Y[i][Y_not_j_mask])
.all(dim=-1)
.any()
)
Y_not_j_mask[Y_idx] = True
if pareto_Y.shape[0] > 1:
# check that each point is better than
# pareto_Y[j] in some objective
j_better_than_Y = (
pareto_Y[point_mask] > pareto_Y[~point_mask]
)
best_obj_mask = torch.zeros(
m, dtype=torch.bool, device=self.device
)
for k in range(m):
best_obj_mask[k] = True
j_k_better_than_Y = j_better_than_Y[:, k]
if j_k_better_than_Y.any():
self.assertTrue(
(
pareto_Y[point_mask, ~best_obj_mask]
< pareto_Y[~point_mask][j_k_better_than_Y][
:, ~best_obj_mask
]
)
.any(dim=-1)
.all()
)
best_obj_mask[k] = False
point_mask[j] = False
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import BotorchError
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase
class TestDominatedPartitioning(BotorchTestCase):
def test_dominated_partitioning(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
ref_point = torch.zeros(2, **tkwargs)
partitioning = DominatedPartitioning(ref_point=ref_point)
# assert error is raised if pareto_Y has not been computed
with self.assertRaises(BotorchError):
partitioning.pareto_Y
partitioning = DominatedPartitioning(ref_point=ref_point)
# test _reset_pareto_Y
Y = torch.ones(1, 2, **tkwargs)
partitioning.update(Y=Y)
partitioning._neg_Y = -Y
partitioning.batch_shape = torch.Size([])
self.assertFalse(partitioning._reset_pareto_Y())
# test m=2
arange = torch.arange(3, 9, **tkwargs)
pareto_Y = torch.stack([arange, 11 - arange], dim=-1)
Y = torch.cat(
[
pareto_Y,
torch.tensor(
[[8.0, 2.0], [7.0, 1.0]], **tkwargs
), # add some non-pareto elements
],
dim=0,
)
partitioning = DominatedPartitioning(ref_point=ref_point, Y=Y)
sorting = torch.argsort(pareto_Y[:, 0], descending=True)
self.assertTrue(torch.equal(pareto_Y[sorting], partitioning.pareto_Y))
expected_cell_bounds = torch.tensor(
[
[
[0.0, 0.0],
[3.0, 0.0],
[4.0, 0.0],
[5.0, 0.0],
[6.0, 0.0],
[7.0, 0.0],
],
[
[3.0, 8.0],
[4.0, 7.0],
[5.0, 6.0],
[6.0, 5.0],
[7.0, 4.0],
[8.0, 3.0],
],
],
**tkwargs,
)
cell_bounds = partitioning.get_hypercell_bounds()
self.assertTrue(torch.equal(cell_bounds, expected_cell_bounds))
# test compute hypervolume
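# HV is the summed volume of the dominated cells: 24 + 7 + 6 + 5 + 4 + 3 = 49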
hv = partitioning.compute_hypervolume()
self.assertEqual(hv.item(), 49.0)
# test no pareto points better than the reference point
partitioning = DominatedPartitioning(
ref_point=pareto_Y.max(dim=-2).values + 1, Y=Y
)
self.assertTrue(torch.equal(partitioning.pareto_Y, Y[:0]))
self.assertEqual(partitioning.compute_hypervolume().item(), 0)
Y = torch.rand(3, 10, 2, **tkwargs)
# test batched m=2
partitioning = DominatedPartitioning(ref_point=ref_point, Y=Y)
cell_bounds = partitioning.get_hypercell_bounds()
partitionings = []
for i in range(Y.shape[0]):
partitioning_i = DominatedPartitioning(ref_point=ref_point, Y=Y[i])
partitionings.append(partitioning_i)
# check pareto_Y
pareto_set1 = {tuple(x) for x in partitioning_i.pareto_Y.tolist()}
pareto_set2 = {tuple(x) for x in partitioning.pareto_Y[i].tolist()}
self.assertEqual(pareto_set1, pareto_set2)
expected_cell_bounds_i = partitioning_i.get_hypercell_bounds()
# remove padding
no_padding_cell_bounds_i = cell_bounds[:, i][
:, ((cell_bounds[1, i] - cell_bounds[0, i]) != 0).all(dim=-1)
]
self.assertTrue(
torch.equal(expected_cell_bounds_i, no_padding_cell_bounds_i)
)
# test batch ref point
partitioning = DominatedPartitioning(
ref_point=ref_point.unsqueeze(0).expand(3, *ref_point.shape), Y=Y
)
cell_bounds2 = partitioning.get_hypercell_bounds()
self.assertTrue(torch.equal(cell_bounds, cell_bounds2))
# test batched where batches have different numbers of pareto points
partitioning = DominatedPartitioning(
ref_point=pareto_Y.max(dim=-2).values,
Y=torch.stack(
[pareto_Y, pareto_Y + pareto_Y.max(dim=-2).values], dim=0
),
)
hv = partitioning.compute_hypervolume()
self.assertEqual(hv[0].item(), 0.0)
self.assertEqual(hv[1].item(), 49.0)
cell_bounds = partitioning.get_hypercell_bounds()
self.assertEqual(cell_bounds.shape, torch.Size([2, 2, 6, 2]))
# test batched m>2
ref_point = torch.zeros(3, **tkwargs)
with self.assertRaises(NotImplementedError):
DominatedPartitioning(
ref_point=ref_point, Y=torch.cat([Y, Y[..., :1]], dim=-1)
)
# test m=3
pareto_Y = torch.tensor(
[[1.0, 6.0, 8.0], [2.0, 4.0, 10.0], [3.0, 5.0, 7.0]], **tkwargs
)
ref_point = torch.tensor([-1.0, -2.0, -3.0], **tkwargs)
partitioning = DominatedPartitioning(ref_point=ref_point, Y=pareto_Y)
self.assertTrue(torch.equal(pareto_Y, partitioning.pareto_Y))
# test compute hypervolume
hv = partitioning.compute_hypervolume()
self.assertEqual(hv.item(), 358.0)
# test no pareto points better than the reference point, non-batched
partitioning = DominatedPartitioning(
ref_point=pareto_Y.max(dim=-2).values + 1, Y=pareto_Y
)
self.assertTrue(torch.equal(partitioning.pareto_Y, pareto_Y[:0]))
self.assertEqual(
partitioning.get_hypercell_bounds().shape,
torch.Size([2, 1, pareto_Y.shape[-1]]),
)
self.assertEqual(partitioning.compute_hypervolume().item(), 0)
# Test that updating the partitioning does not lead to a buffer error.
partitioning = DominatedPartitioning(
ref_point=torch.zeros(3), Y=-torch.ones(1, 3)
)
self.assertTrue(
torch.equal(partitioning.hypercell_bounds, torch.zeros(2, 1, 3))
)
partitioning.update(Y=torch.ones(1, 3))
self.assertEqual(partitioning.compute_hypervolume().item(), 1)
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError
from botorch.utils.multi_objective.box_decompositions.utils import (
_expand_ref_point,
_pad_batch_pareto_frontier,
compute_dominated_hypercell_bounds_2d,
compute_local_upper_bounds,
compute_non_dominated_hypercell_bounds_2d,
get_partition_bounds,
update_local_upper_bounds_incremental,
)
from botorch.utils.testing import BotorchTestCase
class TestUtils(BotorchTestCase):
def test_expand_ref_point(self):
ref_point = torch.tensor([1.0, 2.0], device=self.device)
for dtype in (torch.float, torch.double):
ref_point = ref_point.to(dtype=dtype)
# test non-batch
self.assertTrue(
torch.equal(
_expand_ref_point(ref_point, batch_shape=torch.Size([])),
ref_point,
)
)
self.assertTrue(
torch.equal(
_expand_ref_point(ref_point, batch_shape=torch.Size([3])),
ref_point.unsqueeze(0).expand(3, -1),
)
)
# test ref point with wrong shape batch_shape
with self.assertRaises(BotorchTensorDimensionError):
_expand_ref_point(ref_point.unsqueeze(0), batch_shape=torch.Size([]))
with self.assertRaises(BotorchTensorDimensionError):
_expand_ref_point(ref_point.unsqueeze(0).expand(3, -1), torch.Size([2]))
def test_pad_batch_pareto_frontier(self):
for dtype in (torch.float, torch.double):
Y1 = torch.tensor(
[
[1.0, 5.0],
[10.0, 3.0],
[4.0, 5.0],
[4.0, 5.0],
[5.0, 5.0],
[8.5, 3.5],
[8.5, 3.5],
[8.5, 3.0],
[9.0, 1.0],
[8.0, 1.0],
],
dtype=dtype,
device=self.device,
)
Y2 = torch.tensor(
[
[1.0, 9.0],
[10.0, 3.0],
[4.0, 5.0],
[4.0, 5.0],
[5.0, 5.0],
[8.5, 3.5],
[8.5, 3.5],
[8.5, 3.0],
[9.0, 5.0],
[9.0, 4.0],
],
dtype=dtype,
device=self.device,
)
Y = torch.stack([Y1, Y2], dim=0)
ref_point = torch.full((2, 2), 2.0, dtype=dtype, device=self.device)
padded_pareto = _pad_batch_pareto_frontier(
Y=Y, ref_point=ref_point, is_pareto=False
)
expected_nondom_Y1 = torch.tensor(
[[10.0, 3.0], [5.0, 5.0], [8.5, 3.5]],
dtype=dtype,
device=self.device,
)
expected_padded_nondom_Y2 = torch.tensor(
[
[10.0, 3.0],
[9.0, 5.0],
[9.0, 5.0],
],
dtype=dtype,
device=self.device,
)
expected_padded_pareto = torch.stack(
[expected_nondom_Y1, expected_padded_nondom_Y2], dim=0
)
self.assertTrue(torch.equal(padded_pareto, expected_padded_pareto))
# test feasibility mask
feas = (Y >= 9.0).any(dim=-1)
expected_nondom_Y1 = torch.tensor(
[[10.0, 3.0], [10.0, 3.0]],
dtype=dtype,
device=self.device,
)
expected_padded_nondom_Y2 = torch.tensor(
[[10.0, 3.0], [9.0, 5.0]],
dtype=dtype,
device=self.device,
)
expected_padded_pareto = torch.stack(
[expected_nondom_Y1, expected_padded_nondom_Y2], dim=0
)
padded_pareto = _pad_batch_pareto_frontier(
Y=Y, ref_point=ref_point, feasibility_mask=feas, is_pareto=False
)
self.assertTrue(torch.equal(padded_pareto, expected_padded_pareto))
# test is_pareto=True
# one row of Y2 should be dropped because it is not better than the
# reference point
Y1 = torch.tensor(
[[10.0, 3.0], [5.0, 5.0], [8.5, 3.5]],
dtype=dtype,
device=self.device,
)
Y2 = torch.tensor(
[
[1.0, 9.0],
[10.0, 3.0],
[9.0, 5.0],
],
dtype=dtype,
device=self.device,
)
Y = torch.stack([Y1, Y2], dim=0)
expected_padded_pareto = torch.stack(
[
Y1,
torch.cat([Y2[1:], Y2[-1:]], dim=0),
],
dim=0,
)
padded_pareto = _pad_batch_pareto_frontier(
Y=Y, ref_point=ref_point, is_pareto=True
)
self.assertTrue(torch.equal(padded_pareto, expected_padded_pareto))
# test multiple batch dims
with self.assertRaises(UnsupportedError):
_pad_batch_pareto_frontier(
Y=Y.unsqueeze(0), ref_point=ref_point, is_pareto=False
)
def test_compute_hypercell_bounds_2d(self):
ref_point_raw = torch.zeros(2, device=self.device)
arange = torch.arange(3, 9, device=self.device)
pareto_Y_raw = torch.stack([arange, 11 - arange], dim=-1)
inf = float("inf")
for method in (
compute_non_dominated_hypercell_bounds_2d,
compute_dominated_hypercell_bounds_2d,
):
if method == compute_non_dominated_hypercell_bounds_2d:
expected_cell_bounds_raw = torch.tensor(
[
[
[0.0, 8.0],
[3.0, 7.0],
[4.0, 6.0],
[5.0, 5.0],
[6.0, 4.0],
[7.0, 3.0],
[8.0, 0.0],
],
[
[3.0, inf],
[4.0, inf],
[5.0, inf],
[6.0, inf],
[7.0, inf],
[8.0, inf],
[inf, inf],
],
],
device=self.device,
)
else:
expected_cell_bounds_raw = torch.tensor(
[
[
[0.0, 0.0],
[3.0, 0.0],
[4.0, 0.0],
[5.0, 0.0],
[6.0, 0.0],
[7.0, 0.0],
],
[
[3.0, 8.0],
[4.0, 7.0],
[5.0, 6.0],
[6.0, 5.0],
[7.0, 4.0],
[8.0, 3.0],
],
],
device=self.device,
)
for dtype in (torch.float, torch.double):
pareto_Y = pareto_Y_raw.to(dtype=dtype)
ref_point = ref_point_raw.to(dtype=dtype)
expected_cell_bounds = expected_cell_bounds_raw.to(dtype=dtype)
# test non-batch
cell_bounds = method(
pareto_Y_sorted=pareto_Y,
ref_point=ref_point,
)
self.assertTrue(torch.equal(cell_bounds, expected_cell_bounds))
# test batch
pareto_Y_batch = torch.stack(
[pareto_Y, pareto_Y + pareto_Y.max(dim=-2).values], dim=0
)
# filter out points that are not better than ref_point
ref_point = pareto_Y.max(dim=-2).values
pareto_Y_batch = _pad_batch_pareto_frontier(
Y=pareto_Y_batch, ref_point=ref_point, is_pareto=True
)
# sort pareto_Y_batch
pareto_Y_batch = pareto_Y_batch.gather(
index=torch.argsort(pareto_Y_batch[..., :1], dim=-2).expand(
pareto_Y_batch.shape
),
dim=-2,
)
cell_bounds = method(
ref_point=ref_point,
pareto_Y_sorted=pareto_Y_batch,
)
# check hypervolume
max_vals = (pareto_Y + pareto_Y).max(dim=-2).values
if method == compute_non_dominated_hypercell_bounds_2d:
clamped_cell_bounds = torch.min(cell_bounds, max_vals)
total_hv = (max_vals - ref_point).prod()
nondom_hv = (
(clamped_cell_bounds[1] - clamped_cell_bounds[0])
.prod(dim=-1)
.sum(dim=-1)
)
hv = total_hv - nondom_hv
else:
hv = (cell_bounds[1] - cell_bounds[0]).prod(dim=-1).sum(dim=-1)
self.assertEqual(hv[0].item(), 0.0)
self.assertEqual(hv[1].item(), 49.0)
class TestFastPartitioningUtils(BotorchTestCase):
"""
Test on the problem (with the simplifying assumption of general position)
from Table 1 in:
https://www.sciencedirect.com/science/article/pii/S0305054816301538
"""
def setUp(self):
super().setUp()
self.ref_point = -torch.tensor([10.0, 10.0, 10.0], device=self.device)
self.U = -self.ref_point.clone().view(1, -1)
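# U holds the local upper bounds and Z[0, j] the j-th defining point of
# each bound, following the referenced paper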
self.Z = torch.empty(1, 3, 3, device=self.device)
ideal_value = 0.0
for j in range(self.U.shape[-1]):
self.Z[0, j] = torch.full(
(1, self.U.shape[-1]),
ideal_value,
dtype=self.Z.dtype,
device=self.device,
)
self.Z[0, j, j] = self.U[0][j]
self.pareto_Y = -torch.tensor(
[
[3.0, 5.0, 7.0],
[6.0, 2.0, 4.0],
[4.0, 7.0, 3.0],
],
device=self.device,
)
self.expected_U_after_update = torch.tensor(
[
[3.0, 10.0, 10.0],
[6.0, 5.0, 10.0],
[10.0, 2.0, 10.0],
[4.0, 10.0, 7.0],
[6.0, 7.0, 7.0],
[10.0, 7.0, 4.0],
[10.0, 10.0, 3.0],
],
device=self.device,
)
self.expected_Z_after_update = torch.tensor(
[
[[3.0, 5.0, 7.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]],
[[6.0, 2.0, 4.0], [3.0, 5.0, 7.0], [0.0, 0.0, 10.0]],
[[10.0, 0.0, 0.0], [6.0, 2.0, 4.0], [0.0, 0.0, 10.0]],
[[4.0, 7.0, 3.0], [0.0, 10.0, 0.0], [3.0, 5.0, 7.0]],
[[6.0, 2.0, 4.0], [4.0, 7.0, 3.0], [3.0, 5.0, 7.0]],
[[10.0, 0.0, 0.0], [4.0, 7.0, 3.0], [6.0, 2.0, 4.0]],
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [4.0, 7.0, 3.0]],
],
device=self.device,
)
def test_local_upper_bounds_utils(self):
for dtype in (torch.float, torch.double):
U = self.U.to(dtype=dtype)
Z = self.Z.to(dtype=dtype)
pareto_Y = self.pareto_Y.to(dtype=dtype)
expected_U = self.expected_U_after_update.to(dtype=dtype)
expected_Z = self.expected_Z_after_update.to(dtype=dtype)
# test z dominates U
U_new, Z_new = compute_local_upper_bounds(U=U, Z=Z, z=-self.ref_point + 1)
self.assertTrue(torch.equal(U_new, U))
self.assertTrue(torch.equal(Z_new, Z))
# test compute_local_upper_bounds
for i in range(pareto_Y.shape[0]):
U, Z = compute_local_upper_bounds(U=U, Z=Z, z=-pareto_Y[i])
self.assertTrue(torch.equal(U, expected_U))
self.assertTrue(torch.equal(Z, expected_Z))
# test update_local_upper_bounds_incremental
# test that calling update_local_upper_bounds_incremental once with
# the entire Pareto set yields the same result
U2, Z2 = update_local_upper_bounds_incremental(
new_pareto_Y=-pareto_Y,
U=self.U.to(dtype=dtype),
Z=self.Z.to(dtype=dtype),
)
self.assertTrue(torch.equal(U2, expected_U))
self.assertTrue(torch.equal(Z2, expected_Z))
def test_get_partition_bounds(self):
expected_bounds_raw = torch.tensor(
[
[[3.0, 5.0, 7.0], [6.0, 2.0, 7.0], [4.0, 7.0, 3.0], [6.0, 2.0, 4.0]],
[
[10.0, 10.0, 10.0],
[10.0, 5.0, 10.0],
[10.0, 10.0, 7.0],
[10.0, 7.0, 7.0],
],
],
device=self.device,
)
for dtype in (torch.float, torch.double):
final_U = self.expected_U_after_update.to(dtype=dtype)
final_Z = self.expected_Z_after_update.to(dtype=dtype)
bounds = get_partition_bounds(
Z=final_Z, U=final_U, ref_point=-self.ref_point
)
expected_bounds = expected_bounds_raw.to(dtype=dtype)
self.assertTrue(torch.equal(bounds, expected_bounds))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import product
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.utils.multi_objective.box_decompositions.box_decomposition_list import (
BoxDecompositionList,
)
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase
class TestBoxDecompositionList(BotorchTestCase):
def test_box_decomposition_list(self):
ref_point_raw = torch.zeros(3, device=self.device)
pareto_Y_raw = torch.tensor(
[
[1.0, 2.0, 1.0],
[2.0, 0.5, 1.0],
],
device=self.device,
)
for m, dtype in product((2, 3), (torch.float, torch.double)):
ref_point = ref_point_raw[:m].to(dtype=dtype)
pareto_Y = pareto_Y_raw[:, :m].to(dtype=dtype)
pareto_Y_list = [pareto_Y[:0, :m], pareto_Y[:, :m]]
bds = [
FastNondominatedPartitioning(ref_point=ref_point, Y=Y)
for Y in pareto_Y_list
]
bd = BoxDecompositionList(*bds)
# test pareto Y
bd_pareto_Y_list = bd.pareto_Y
pareto_Y1 = pareto_Y_list[1]
expected_pareto_Y1 = (
pareto_Y1[torch.argsort(-pareto_Y1[:, 0])] if m == 2 else pareto_Y1
)
self.assertTrue(torch.equal(bd_pareto_Y_list[0], pareto_Y_list[0]))
self.assertTrue(torch.equal(bd_pareto_Y_list[1], expected_pareto_Y1))
# test ref_point
self.assertTrue(
torch.equal(bd.ref_point, ref_point.unsqueeze(0).expand(2, -1))
)
# test get_hypercell_bounds
cell_bounds = bd.get_hypercell_bounds()
expected_cell_bounds1 = bds[1].get_hypercell_bounds()
self.assertTrue(torch.equal(cell_bounds[:, 1], expected_cell_bounds1))
# the first pareto set in the list is empty so the cell bounds
# should contain one cell that spans the entire area (bounded by the
# ref_point) and then empty cells, bounded from above and below by the
# ref point.
expected_cell_bounds0 = torch.zeros_like(expected_cell_bounds1)
# set the upper bound for the first cell to be inf
expected_cell_bounds0[1, 0, :] = float("inf")
self.assertTrue(torch.equal(cell_bounds[:, 0], expected_cell_bounds0))
# test compute_hypervolume
expected_hv = torch.stack([b.compute_hypervolume() for b in bds], dim=0)
hv = bd.compute_hypervolume()
self.assertTrue(torch.equal(expected_hv, hv))
# test update with batched tensor
new_Y = torch.empty(2, 1, m, dtype=dtype, device=self.device)
new_Y[0] = 1
new_Y[1] = 3
bd.update(new_Y)
bd_pareto_Y_list = bd.pareto_Y
self.assertTrue(torch.equal(bd_pareto_Y_list[0], new_Y[0]))
self.assertTrue(torch.equal(bd_pareto_Y_list[1], new_Y[1]))
# test update with list
bd = BoxDecompositionList(*bds)
bd.update([new_Y[0], new_Y[1]])
bd_pareto_Y_list = bd.pareto_Y
self.assertTrue(torch.equal(bd_pareto_Y_list[0], new_Y[0]))
self.assertTrue(torch.equal(bd_pareto_Y_list[1], new_Y[1]))
# test update with wrong shape
bd = BoxDecompositionList(*bds)
with self.assertRaises(BotorchTensorDimensionError):
bd.update(new_Y.unsqueeze(0))
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import product
import torch
from botorch.exceptions.errors import BotorchError
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase
class TestNonDominatedPartitioning(BotorchTestCase):
def test_non_dominated_partitioning(self):
tkwargs = {"device": self.device}
for dtype, partitioning_class in product(
(torch.float, torch.double),
(NondominatedPartitioning, FastNondominatedPartitioning),
):
tkwargs["dtype"] = dtype
ref_point = torch.zeros(2, **tkwargs)
partitioning = partitioning_class(ref_point=ref_point)
# assert error is raised if pareto_Y has not been computed
with self.assertRaises(BotorchError):
partitioning.pareto_Y
partitioning = partitioning_class(ref_point=ref_point)
# test _reset_pareto_Y
Y = torch.ones(1, 2, **tkwargs)
partitioning.update(Y=Y)
partitioning._neg_Y = -Y
partitioning.batch_shape = torch.Size([])
self.assertFalse(partitioning._reset_pareto_Y())
# test m=2
arange = torch.arange(3, 9, **tkwargs)
pareto_Y = torch.stack([arange, 11 - arange], dim=-1)
Y = torch.cat(
[
pareto_Y,
torch.tensor(
[[8.0, 2.0], [7.0, 1.0]], **tkwargs
), # add some non-pareto elements
],
dim=0,
)
partitioning = partitioning_class(ref_point=ref_point, Y=Y)
sorting = torch.argsort(pareto_Y[:, 0], descending=True)
self.assertTrue(torch.equal(pareto_Y[sorting], partitioning.pareto_Y))
inf = float("inf")
expected_cell_bounds = torch.tensor(
[
[
[8.0, 0.0],
[7.0, 3.0],
[6.0, 4.0],
[5.0, 5.0],
[4.0, 6.0],
[3.0, 7.0],
[0.0, 8.0],
],
[
[inf, inf],
[8.0, inf],
[7.0, inf],
[6.0, inf],
[5.0, inf],
[4.0, inf],
[3.0, inf],
],
],
**tkwargs,
)
cell_bounds = partitioning.get_hypercell_bounds()
# compare whole cells (lower/upper bound pairs); cell order may differ
cells = cell_bounds.movedim(-2, 0)
expected_cells = expected_cell_bounds.movedim(-2, 0)
cell_matches = (
(cells.unsqueeze(0) == expected_cells.unsqueeze(1)).all(dim=-1).all(dim=-1)
)
# every expected cell must appear among the computed cells
self.assertTrue(cell_matches.any(dim=1).all())
# test compute hypervolume
hv = partitioning.compute_hypervolume()
self.assertEqual(hv.item(), 49.0)
# test no pareto points better than the reference point
partitioning = partitioning_class(
ref_point=pareto_Y.max(dim=-2).values + 1, Y=Y
)
self.assertTrue(torch.equal(partitioning.pareto_Y, Y[:0]))
self.assertEqual(partitioning.compute_hypervolume().item(), 0)
Y = torch.rand(3, 10, 2, **tkwargs)
if partitioning_class == NondominatedPartitioning:
# test batched m=2, no pareto points better than the reference point
partitioning = partitioning_class(
ref_point=Y.max(dim=-2).values + 1, Y=Y
)
self.assertTrue(torch.equal(partitioning.pareto_Y, Y[:, :0]))
self.assertTrue(
torch.equal(
partitioning.compute_hypervolume(),
torch.zeros(3, dtype=Y.dtype, device=Y.device),
)
)
# test batched, m=2 basic
partitioning = partitioning_class(ref_point=ref_point, Y=Y)
cell_bounds = partitioning.get_hypercell_bounds()
partitionings = []
for i in range(Y.shape[0]):
partitioning_i = partitioning_class(ref_point=ref_point, Y=Y[i])
partitionings.append(partitioning_i)
# check pareto_Y
pareto_set1 = {tuple(x) for x in partitioning_i.pareto_Y.tolist()}
pareto_set2 = {tuple(x) for x in partitioning.pareto_Y[i].tolist()}
self.assertEqual(pareto_set1, pareto_set2)
expected_cell_bounds_i = partitioning_i.get_hypercell_bounds()
# remove padding
no_padding_cell_bounds_i = cell_bounds[:, i][
:, ((cell_bounds[1, i] - cell_bounds[0, i]) != 0).all(dim=-1)
]
self.assertTrue(
torch.equal(expected_cell_bounds_i, no_padding_cell_bounds_i)
)
# test batch ref point
partitioning = NondominatedPartitioning(
ref_point=ref_point.unsqueeze(0).expand(3, *ref_point.shape), Y=Y
)
cell_bounds2 = partitioning.get_hypercell_bounds()
self.assertTrue(torch.equal(cell_bounds, cell_bounds2))
# test improper Y shape (too many batch dims)
with self.assertRaises(NotImplementedError):
NondominatedPartitioning(ref_point=ref_point, Y=Y.unsqueeze(0))
# test batched compute_hypervolume, m=2
hvs = partitioning.compute_hypervolume()
hvs_non_batch = torch.stack(
[
partitioning_i.compute_hypervolume()
for partitioning_i in partitionings
],
dim=0,
)
self.assertAllClose(hvs, hvs_non_batch)
# test batched m>2
ref_point = torch.zeros(3, **tkwargs)
with self.assertRaises(NotImplementedError):
partitioning_class(
ref_point=ref_point, Y=torch.cat([Y, Y[..., :1]], dim=-1)
)
# test batched, where some batches have pareto points and
# some batches have empty pareto sets
partitioning = partitioning_class(
ref_point=pareto_Y.max(dim=-2).values,
Y=torch.stack(
[pareto_Y, pareto_Y + pareto_Y.max(dim=-2).values], dim=0
),
)
hv = partitioning.compute_hypervolume()
self.assertEqual(hv[0].item(), 0.0)
self.assertEqual(hv[1].item(), 49.0)
cell_bounds = partitioning.get_hypercell_bounds()
self.assertEqual(cell_bounds.shape, torch.Size([2, 2, 7, 2]))
# test m=3
pareto_Y = torch.tensor(
[[1.0, 6.0, 8.0], [2.0, 4.0, 10.0], [3.0, 5.0, 7.0]], **tkwargs
)
ref_point = torch.tensor([-1.0, -2.0, -3.0], **tkwargs)
partitioning = partitioning_class(ref_point=ref_point, Y=pareto_Y)
if partitioning_class == NondominatedPartitioning:
sorting = torch.argsort(pareto_Y[:, 0], descending=True)
self.assertTrue(torch.equal(pareto_Y[sorting], partitioning.pareto_Y))
else:
self.assertTrue(torch.equal(pareto_Y, partitioning.pareto_Y))
cell_bounds = partitioning.get_hypercell_bounds()
if partitioning_class == NondominatedPartitioning:
expected_cell_bounds = torch.tensor(
[
[
[1.0, 4.0, 7.0],
[-1.0, -2.0, 10.0],
[-1.0, 4.0, 8.0],
[1.0, -2.0, 10.0],
[1.0, 4.0, 8.0],
[-1.0, 6.0, -3.0],
[1.0, 5.0, -3.0],
[-1.0, 5.0, 8.0],
[2.0, -2.0, 7.0],
[2.0, 4.0, 7.0],
[3.0, -2.0, -3.0],
[2.0, -2.0, 8.0],
[2.0, 5.0, -3.0],
],
[
[2.0, 5.0, 8.0],
[1.0, 4.0, inf],
[1.0, 5.0, inf],
[2.0, 4.0, inf],
[2.0, 5.0, inf],
[1.0, inf, 8.0],
[2.0, inf, 8.0],
[2.0, inf, inf],
[3.0, 4.0, 8.0],
[3.0, 5.0, 8.0],
[inf, 5.0, 8.0],
[inf, 5.0, inf],
[inf, inf, inf],
],
],
**tkwargs,
)
# cell bounds can have different order
cells = cell_bounds.movedim(-2, 0)
expected_cells = expected_cell_bounds.movedim(-2, 0)
cell_matches = (
(cells.unsqueeze(0) == expected_cells.unsqueeze(1)).all(dim=-1).all(dim=-1)
)
# every expected cell must appear among the computed cells
self.assertTrue(cell_matches.any(dim=1).all())
# test compute hypervolume
hv = partitioning.compute_hypervolume()
self.assertEqual(hv.item(), 358.0)
# test no pareto points better than the reference point, non-batched
partitioning = partitioning_class(
ref_point=pareto_Y.max(dim=-2).values + 1, Y=pareto_Y
)
self.assertTrue(torch.equal(partitioning.pareto_Y, pareto_Y[:0]))
self.assertEqual(
partitioning.get_hypercell_bounds().shape,
torch.Size([2, 1, pareto_Y.shape[-1]]),
)
self.assertEqual(partitioning.compute_hypervolume().item(), 0)
# TODO: test approximate decomposition
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import product
from unittest import mock
import torch
from botorch.exceptions.errors import BotorchError
from botorch.utils.multi_objective.box_decompositions.box_decomposition import (
BoxDecomposition,
FastPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.utils import (
update_local_upper_bounds_incremental,
)
from botorch.utils.testing import BotorchTestCase
class DummyBoxDecomposition(BoxDecomposition):
def _partition_space(self):
pass
def _compute_hypervolume_if_y_has_data(self):
pass
def get_hypercell_bounds(self):
pass
class DummyFastPartitioning(FastPartitioning, DummyBoxDecomposition):
def _get_partitioning(self):
pass
def _get_single_cell(self):
pass
class TestBoxDecomposition(BotorchTestCase):
def setUp(self):
super().setUp()
self.ref_point_raw = torch.zeros(3, device=self.device)
self.Y_raw = torch.tensor(
[
[1.0, 2.0, 1.0],
[1.0, 1.0, 1.0],
[2.0, 0.5, 1.0],
],
device=self.device,
)
self.pareto_Y_raw = torch.tensor(
[
[1.0, 2.0, 1.0],
[2.0, 0.5, 1.0],
],
device=self.device,
)
def test_box_decomposition(self) -> None:
with self.assertRaises(TypeError):
BoxDecomposition()
for dtype, m, sort in product(
(torch.float, torch.double), (2, 3), (True, False)
):
with mock.patch.object(
DummyBoxDecomposition,
"_partition_space_2d" if m == 2 else "_partition_space",
) as mock_partition_space:
ref_point = self.ref_point_raw[:m].to(dtype=dtype)
Y = self.Y_raw[:, :m].to(dtype=dtype)
pareto_Y = self.pareto_Y_raw[:, :m].to(dtype=dtype)
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort)
# test pareto_Y before it is initialized
with self.assertRaises(BotorchError):
bd.pareto_Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort, Y=Y)
mock_partition_space.assert_called_once()
# test attributes
expected_pareto_Y = (
pareto_Y[torch.argsort(-pareto_Y[:, 0])] if sort else pareto_Y
)
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, Y))
self.assertTrue(torch.equal(bd._neg_Y, -Y))
self.assertTrue(torch.equal(bd._neg_pareto_Y, -expected_pareto_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
self.assertTrue(torch.equal(bd._neg_ref_point, -ref_point))
self.assertEqual(bd.num_outcomes, m)
# test empty Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort, Y=Y[:0])
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y[:0]))
# test _update_neg_Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort)
bd._update_neg_Y(Y[:2])
self.assertTrue(torch.equal(bd._neg_Y, -Y[:2]))
bd._update_neg_Y(Y[2:])
self.assertTrue(torch.equal(bd._neg_Y, -Y))
# test batch mode
if m == 2:
batch_Y = torch.stack([Y, Y + 1], dim=0)
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y
)
batch_expected_pareto_Y = torch.stack(
[expected_pareto_Y, expected_pareto_Y + 1], dim=0
)
self.assertTrue(torch.equal(bd.pareto_Y, batch_expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, batch_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
# test batch ref point
batch_ref_point = torch.stack([ref_point, ref_point + 1], dim=0)
bd = DummyBoxDecomposition(
ref_point=batch_ref_point, sort=sort, Y=batch_Y
)
self.assertTrue(torch.equal(bd.ref_point, batch_ref_point))
# test multiple batch dims
with self.assertRaises(NotImplementedError):
DummyBoxDecomposition(
ref_point=ref_point,
sort=sort,
Y=batch_Y.unsqueeze(0),
)
# test empty Y
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y[:, :0]
)
self.assertTrue(
torch.equal(bd.pareto_Y, batch_expected_pareto_Y[:, :0])
)
# test padded pareto frontiers with different numbers of
# points
batch_Y[1, 1] = batch_Y[1, 0] - 1
batch_Y[1, 2] = batch_Y[1, 0] - 2
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y
)
batch_expected_pareto_Y = torch.stack(
[
expected_pareto_Y,
batch_Y[1, :1].expand(expected_pareto_Y.shape),
],
dim=0,
)
self.assertTrue(torch.equal(bd.pareto_Y, batch_expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, batch_Y))
else:
with self.assertRaises(NotImplementedError):
DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=Y.unsqueeze(0)
)
def test_fast_partitioning(self):
with self.assertRaises(TypeError):
FastPartitioning()
for dtype, m in product(
(torch.float, torch.double),
(2, 3),
):
ref_point = self.ref_point_raw[:m].to(dtype=dtype)
Y = self.Y_raw[:, :m].to(dtype=dtype)
pareto_Y = self.pareto_Y_raw[:, :m].to(dtype=dtype)
sort = m == 2
expected_pareto_Y = (
pareto_Y[torch.argsort(-pareto_Y[:, 0])] if sort else pareto_Y
)
bd = DummyFastPartitioning(ref_point=ref_point, Y=Y)
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, Y))
self.assertTrue(torch.equal(bd._neg_Y, -Y))
self.assertTrue(torch.equal(bd._neg_pareto_Y, -expected_pareto_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
self.assertTrue(torch.equal(bd._neg_ref_point, -ref_point))
self.assertEqual(bd.num_outcomes, m)
# test update
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"reset",
wraps=bd.reset,
) as mock_reset:
# with no existing neg_Y
bd.update(Y=Y[:2])
mock_reset.assert_called_once()
# test with existing Y
bd.update(Y=Y[2:])
# check that reset is only called when m=2
if m == 2:
mock_reset.assert_has_calls([mock.call(), mock.call()])
else:
mock_reset.assert_called_once()
# with existing neg_Y, and empty pareto_Y
bd = DummyFastPartitioning(ref_point=ref_point, Y=Y[:0])
with mock.patch.object(
DummyFastPartitioning,
"reset",
wraps=bd.reset,
) as mock_reset:
bd.update(Y=Y[0:])
mock_reset.assert_called_once()
# test empty pareto Y
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"_get_single_cell",
wraps=bd._get_single_cell,
) as mock_get_single_cell:
bd.update(Y=Y[:0])
mock_get_single_cell.assert_called_once()
# test batched empty pareto Y
if m == 2:
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"_get_single_cell",
wraps=bd._get_single_cell,
) as mock_get_single_cell:
bd.update(Y=Y.unsqueeze(0)[:, :0])
mock_get_single_cell.assert_called_once()
# test that update_local_upper_bounds_incremental is called when m>2
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch(
"botorch.utils.multi_objective.box_decompositions.box_decomposition."
"update_local_upper_bounds_incremental",
wraps=update_local_upper_bounds_incremental,
) as mock_update_local_upper_bounds_incremental, mock.patch.object(
DummyFastPartitioning,
"_get_partitioning",
wraps=bd._get_partitioning,
) as mock_get_partitioning, mock.patch.object(
DummyFastPartitioning,
"_partition_space_2d",
):
bd.update(Y=Y)
if m > 2:
mock_update_local_upper_bounds_incremental.assert_called_once()
# check that it is not called if the pareto set does not change
bd.update(Y=Y)
mock_update_local_upper_bounds_incremental.assert_called_once()
mock_get_partitioning.assert_called_once()
else:
self.assertEqual(
len(mock_update_local_upper_bounds_incremental.call_args_list),
0,
)
# test that an exception is raised for an m=2 batched box decomposition,
# which would use _partition_space
if m == 2:
with self.assertRaises(NotImplementedError):
DummyFastPartitioning(ref_point=ref_point, Y=Y.unsqueeze(0))
def test_nan_values(self) -> None:
Y = torch.rand(10, 2)
Y[8:, 1] = float("nan")
ref_pt = torch.rand(2)
# On init.
with self.assertRaisesRegex(ValueError, "with 2 NaN values"):
DummyBoxDecomposition(ref_point=ref_pt, sort=True, Y=Y)
# On update.
bd = DummyBoxDecomposition(ref_point=ref_pt, sort=True)
with self.assertRaisesRegex(ValueError, "with 2 NaN values"):
bd.update(Y=Y)
class TestBoxDecomposition_no_set_up(BotorchTestCase):
def helper_hypervolume(self, Box_Decomp_cls: type) -> None:
"""
This test should be run for each non-abstract subclass of `BoxDecomposition`.
"""
# batching
n_outcomes, batch_dim, n = 2, 3, 4
ref_point = torch.zeros(n_outcomes)
Y = torch.ones(batch_dim, n, n_outcomes)
box_decomp = Box_Decomp_cls(ref_point=ref_point, Y=Y)
hv = box_decomp.compute_hypervolume()
self.assertEqual(hv.shape, (batch_dim,))
self.assertAllClose(hv, torch.ones(batch_dim))
# no batching
Y = torch.ones(n, n_outcomes)
box_decomp = Box_Decomp_cls(ref_point=ref_point, Y=Y)
hv = box_decomp.compute_hypervolume()
self.assertEqual(hv.shape, ())
self.assertAllClose(hv, torch.tensor(1.0))
# cases where there is nothing in Y, either because n=0 or Y is None
n = 0
Y_and_expected_shape = [
(torch.ones(batch_dim, n, n_outcomes), (batch_dim,)),
(torch.ones(n, n_outcomes), ()),
(None, ()),
]
for Y, expected_shape in Y_and_expected_shape:
box_decomp = Box_Decomp_cls(ref_point=ref_point, Y=Y)
hv = box_decomp.compute_hypervolume()
self.assertEqual(hv.shape, expected_shape)
self.assertAllClose(hv, torch.zeros(expected_shape))
def test_hypervolume(self) -> None:
for cl in [
NondominatedPartitioning,
DominatedPartitioning,
FastNondominatedPartitioning,
]:
self.helper_hypervolume(cl)
def test_uninitialized_y(self) -> None:
ref_point = torch.zeros(2)
box_decomp = NondominatedPartitioning(ref_point=ref_point)
with self.assertRaises(BotorchError):
box_decomp.Y
with self.assertRaises(BotorchError):
box_decomp._compute_pareto_Y()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest.mock import patch
import torch
from botorch.acquisition.objective import PosteriorTransform
from botorch.exceptions.errors import InputDataError
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.model import Model, ModelDict, ModelList
from botorch.models.utils import parse_training_data
from botorch.posteriors.deterministic import DeterministicPosterior
from botorch.posteriors.posterior_list import PosteriorList
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class NotSoAbstractBaseModel(Model):
def posterior(self, X, output_indices, observation_noise, **kwargs):
pass
class GenericDeterministicModelWithBatchShape(GenericDeterministicModel):
# mocking torch.nn.Module components is tricky, so we expose a settable
# batch_shape property instead
@property
def batch_shape(self):
return self._batch_shape
class DummyPosteriorTransform(PosteriorTransform):
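# a toy transform: `evaluate` and `forward` both apply the affine map
# Y -> 2 * Y + 1, so transformed posterior means can be compared against
# 2 * model(X) + 1 in the tests below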
def evaluate(self, Y):
return 2 * Y + 1
def forward(self, posterior):
return PosteriorList(
*[DeterministicPosterior(2 * p.mean + 1) for p in posterior.posteriors]
)
class TestBaseModel(BotorchTestCase):
def test_abstract_base_model(self):
with self.assertRaises(TypeError):
Model()
def test_not_so_abstract_base_model(self):
model = NotSoAbstractBaseModel()
with self.assertRaises(NotImplementedError):
model.condition_on_observations(None, None)
with self.assertRaises(NotImplementedError):
model.num_outputs
with self.assertRaises(NotImplementedError):
model.batch_shape
with self.assertRaises(NotImplementedError):
model.subset_output([0])
def test_construct_inputs(self):
with patch.object(
parse_training_data, "parse_training_data", return_value={"a": 1}
):
model = NotSoAbstractBaseModel()
self.assertEqual(model.construct_inputs(None), {"a": 1})
def test_model_list(self):
tkwargs = {"device": self.device, "dtype": torch.double}
m1 = GenericDeterministicModel(lambda X: X[-1:], num_outputs=1)
m2 = GenericDeterministicModel(lambda X: X[-2:], num_outputs=2)
model = ModelList(m1, m2)
self.assertEqual(model.num_outputs, 3)
# test _get_group_subset_indices
gsi = model._get_group_subset_indices(idcs=None)
self.assertEqual(len(gsi), 2)
self.assertIsNone(gsi[0])
self.assertIsNone(gsi[1])
gsi = model._get_group_subset_indices(idcs=[0, 2])
self.assertEqual(len(gsi), 2)
self.assertEqual(gsi[0], [0])
self.assertEqual(gsi[1], [1])
# test subset_model
m_sub = model.subset_output(idcs=[0, 1])
self.assertIsInstance(m_sub, ModelList)
self.assertEqual(m_sub.num_outputs, 2)
m_sub = model.subset_output(idcs=[1, 2])
self.assertIsInstance(m_sub, GenericDeterministicModel)
self.assertEqual(m_sub.num_outputs, 2)
# test posterior
X = torch.rand(2, 2, **tkwargs)
p = model.posterior(X=X)
self.assertIsInstance(p, PosteriorList)
# test batch shape
m1 = GenericDeterministicModelWithBatchShape(lambda X: X[-1:], num_outputs=1)
m2 = GenericDeterministicModelWithBatchShape(lambda X: X[-2:], num_outputs=2)
model = ModelList(m1, m2)
m1._batch_shape = torch.Size([2])
m2._batch_shape = torch.Size([2])
self.assertEqual(model.batch_shape, torch.Size([2]))
m2._batch_shape = torch.Size([3])
with self.assertRaisesRegex(
NotImplementedError,
"is only supported if all constituent models have the same `batch_shape`",
):
model.batch_shape
def test_posterior_transform(self):
tkwargs = {"device": self.device, "dtype": torch.double}
m1 = GenericDeterministicModel(
lambda X: X.sum(dim=-1, keepdims=True), num_outputs=1
)
m2 = GenericDeterministicModel(
lambda X: X.prod(dim=-1, keepdims=True), num_outputs=1
)
model = ModelList(m1, m2)
X = torch.rand(5, 3, **tkwargs)
posterior_tf = model.posterior(X, posterior_transform=DummyPosteriorTransform())
self.assertTrue(
torch.allclose(
posterior_tf.mean, torch.cat((2 * m1(X) + 1, 2 * m2(X) + 1), dim=-1)
)
)
class TestModelDict(BotorchTestCase):
def test_model_dict(self):
models = {"m1": MockModel(MockPosterior()), "m2": MockModel(MockPosterior())}
model_dict = ModelDict(**models)
self.assertIs(model_dict["m1"], models["m1"])
self.assertIs(model_dict["m2"], models["m2"])
with self.assertRaisesRegex(
InputDataError, "Expected all models to be a BoTorch `Model`."
):
ModelDict(m=MockPosterior())
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import torch
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.exceptions.errors import UnsupportedError
from botorch.models.deterministic import (
AffineDeterministicModel,
DeterministicModel,
FixedSingleSampleModel,
GenericDeterministicModel,
PosteriorMeanModel,
)
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.posteriors.ensemble import EnsemblePosterior
from botorch.utils.testing import BotorchTestCase
class DummyDeterministicModel(DeterministicModel):
r"""A dummy deterministic model that uses transforms."""
def __init__(self, outcome_transform, input_transform):
r"""
Args:
outcome_transform: An outcome transform that is applied to the
training data during instantiation and to the posterior during
inference (that is, the `Posterior` obtained by calling
`.posterior` on the model will be on the original scale).
input_transform: An input transform that is applied in the model's
forward pass. Only input transforms are allowed which do not
transform the categorical dimensions. This can be achieved
by using the `indices` argument when constructing the transform.
"""
super().__init__()
self.input_transform = input_transform
self.outcome_transform = outcome_transform
def forward(self, X):
# just a non-linear objective that is sure to break without transforms
return (X - 1.0).pow(2).sum(dim=-1, keepdim=True) - 5.0
class TestDeterministicModels(BotorchTestCase):
def test_abstract_base_model(self):
with self.assertRaises(TypeError):
DeterministicModel()
def test_GenericDeterministicModel(self):
def f(X):
return X.mean(dim=-1, keepdim=True)
model = GenericDeterministicModel(f)
self.assertEqual(model.num_outputs, 1)
X = torch.rand(3, 2)
# basic test
p = model.posterior(X)
self.assertIsInstance(p, EnsemblePosterior)
self.assertEqual(p.ensemble_size, 1)
self.assertTrue(torch.equal(p.mean, f(X)))
# check that observation noise doesn't change things
p_noisy = model.posterior(X, observation_noise=True)
self.assertTrue(torch.equal(p_noisy.mean, f(X)))
# test proper error on explicit observation noise
with self.assertRaises(UnsupportedError):
model.posterior(X, observation_noise=X[..., :-1])
# check output indices
model = GenericDeterministicModel(lambda X: X, num_outputs=2)
self.assertEqual(model.num_outputs, 2)
p = model.posterior(X, output_indices=[0])
self.assertTrue(torch.equal(p.mean, X[..., [0]]))
# test subset output
subset_model = model.subset_output([0])
self.assertIsInstance(subset_model, GenericDeterministicModel)
p_sub = subset_model.posterior(X)
self.assertTrue(torch.equal(p_sub.mean, X[..., [0]]))
def test_AffineDeterministicModel(self):
# test error on bad shape of a
with self.assertRaises(ValueError):
AffineDeterministicModel(torch.rand(2))
# test error on bad shape of b
with self.assertRaises(ValueError):
AffineDeterministicModel(torch.rand(2, 1), torch.rand(2, 1))
# test one-dim output
a = torch.rand(3, 1)
model = AffineDeterministicModel(a)
self.assertEqual(model.num_outputs, 1)
for shape in ((4, 3), (1, 4, 3)):
X = torch.rand(*shape)
p = model.posterior(X)
mean_exp = model.b + (X.unsqueeze(-1) * a).sum(dim=-2)
self.assertAllClose(p.mean, mean_exp)
# test two-dim output
a = torch.rand(3, 2)
model = AffineDeterministicModel(a)
self.assertEqual(model.num_outputs, 2)
for shape in ((4, 3), (1, 4, 3)):
X = torch.rand(*shape)
p = model.posterior(X)
mean_exp = model.b + (X.unsqueeze(-1) * a).sum(dim=-2)
self.assertAllClose(p.mean, mean_exp)
# test subset output
X = torch.rand(4, 3)
subset_model = model.subset_output([0])
self.assertIsInstance(subset_model, AffineDeterministicModel)
p = model.posterior(X)
p_sub = subset_model.posterior(X)
self.assertTrue(torch.equal(p_sub.mean, p.mean[..., [0]]))
def test_with_transforms(self):
dim = 2
bounds = torch.stack([torch.zeros(dim), torch.ones(dim) * 3])
intf = Normalize(d=dim, bounds=bounds)
octf = Standardize(m=1)
# update octf state with dummy data
octf(torch.rand(5, 1) * 7)
octf.eval()
model = DummyDeterministicModel(octf, intf)
# check that the posterior output agrees with the manually transformed one
test_X = torch.rand(3, dim)
expected_Y, _ = octf.untransform(model.forward(intf(test_X)))
with warnings.catch_warnings(record=True) as ws:
posterior = model.posterior(test_X)
msg = "does not have a `train_inputs` attribute"
self.assertTrue(any(msg in str(w.message) for w in ws))
self.assertAllClose(expected_Y, posterior.mean)
# check that model.train/eval works and raises the warning
model.train()
with self.assertWarns(RuntimeWarning):
model.eval()
def test_posterior_transform(self):
def f(X):
return X
model = GenericDeterministicModel(f)
test_X = torch.rand(3, 2)
post_tf = ScalarizedPosteriorTransform(weights=torch.rand(2))
# expect error due to post_tf expecting an MVN
with self.assertRaises(NotImplementedError):
model.posterior(test_X, posterior_transform=post_tf)
def test_PosteriorMeanModel(self):
train_X = torch.rand(2, 3)
train_Y = torch.rand(2, 2)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
mean_model = PosteriorMeanModel(model=model)
test_X = torch.rand(2, 3)
post = model.posterior(test_X)
mean_post = mean_model.posterior(test_X)
self.assertTrue((mean_post.variance == 0).all())
self.assertTrue(torch.equal(post.mean, mean_post.mean))
def test_FixedSingleSampleModel(self):
torch.manual_seed(123)
train_X = torch.rand(2, 3)
train_Y = torch.rand(2, 2)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
fss_model = FixedSingleSampleModel(model=model)
# test without specifying w and dim
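# FixedSingleSampleModel fixes one base sample w at construction and should
# return the deterministic reparameterized draw mean + variance.sqrt() * w;
# the equality check below verifies this against a manual computation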
test_X = torch.rand(2, 3)
w = fss_model.w
post = model.posterior(test_X)
original_output = post.mean + post.variance.sqrt() * w
fss_output = fss_model(test_X)
self.assertTrue(torch.equal(original_output, fss_output))
self.assertTrue(hasattr(fss_model, "num_outputs"))
# test specifying w
w = torch.randn(4)
fss_model = FixedSingleSampleModel(model=model, w=w)
self.assertTrue(fss_model.w.shape == w.shape)
# test dim
dim = 5
fss_model = FixedSingleSampleModel(model=model, w=w, dim=dim)
# dim should be ignored since w is provided
self.assertTrue(fss_model.w.shape == w.shape)
# test dim when no w is provided
fss_model = FixedSingleSampleModel(model=model, dim=dim)
# with no w provided, dim should be used to generate w
self.assertTrue(fss_model.w.shape == torch.Size([dim]))
# check w dtype conversion
train_X_double = torch.rand(2, 3, dtype=torch.double)
train_Y_double = torch.rand(2, 2, dtype=torch.double)
model_double = SingleTaskGP(train_X=train_X_double, train_Y=train_Y_double)
fss_model_double = FixedSingleSampleModel(model=model_double)
test_X_float = torch.rand(2, 3, dtype=torch.float)
# the following line should execute fine
fss_model_double.posterior(test_X_float)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
from typing import Optional
import torch
from botorch import settings
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.exceptions import (
BotorchTensorDimensionError,
BotorchTensorDimensionWarning,
)
from botorch.exceptions.errors import InputDataError
from botorch.fit import fit_gpytorch_mll
from botorch.models.gpytorch import (
BatchedMultiOutputGPyTorchModel,
GPyTorchModel,
ModelListGPyTorchModel,
)
from botorch.models.model import FantasizeMixin
from botorch.models.transforms import Standardize
from botorch.models.transforms.input import ChainedInputTransform, InputTransform
from botorch.models.utils import fantasize
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase
from gpytorch import ExactMarginalLogLikelihood
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.models import ExactGP, IndependentModelList
from torch import Tensor
class SimpleInputTransform(InputTransform, torch.nn.Module):
def __init__(self, transform_on_train: bool) -> None:
r"""
Args:
transform_on_train: A boolean indicating whether to apply the
transform in train() mode.
"""
super().__init__()
self.transform_on_train = transform_on_train
self.transform_on_eval = True
self.transform_on_fantasize = True
# to test the `input_transform.to()` call
self.register_buffer("add_value", torch.ones(1))
def transform(self, X: Tensor) -> Tensor:
return X + self.add_value
class SimpleGPyTorchModel(GPyTorchModel, ExactGP, FantasizeMixin):
last_fantasize_flag: bool = False
def __init__(self, train_X, train_Y, outcome_transform=None, input_transform=None):
r"""
Args:
train_X: A tensor of inputs, passed to self.transform_inputs.
train_Y: Passed to outcome_transform.
outcome_transform: Transform applied to train_Y.
input_transform: A Module that performs the input transformation, passed to
self.transform_inputs.
"""
with torch.no_grad():
transformed_X = self.transform_inputs(
X=train_X, input_transform=input_transform
)
if outcome_transform is not None:
train_Y, _ = outcome_transform(train_Y)
self._validate_tensor_args(transformed_X, train_Y)
train_Y = train_Y.squeeze(-1)
likelihood = GaussianLikelihood()
super().__init__(train_X, train_Y, likelihood)
self.mean_module = ConstantMean()
self.covar_module = ScaleKernel(RBFKernel())
if outcome_transform is not None:
self.outcome_transform = outcome_transform
if input_transform is not None:
self.input_transform = input_transform
self._num_outputs = 1
self.to(train_X)
self.transformed_call_args = []
def forward(self, x):
self.last_fantasize_flag = fantasize.on()
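# record whether the `fantasize` flag was active during this forward pass;
# test_fantasize_flag below asserts on this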
if self.training:
x = self.transform_inputs(x)
self.transformed_call_args.append(x)
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
class SimpleBatchedMultiOutputGPyTorchModel(
BatchedMultiOutputGPyTorchModel, ExactGP, FantasizeMixin
):
_batch_shape: Optional[torch.Size] = None
def __init__(self, train_X, train_Y, outcome_transform=None, input_transform=None):
r"""
Args:
train_X: A tensor of inputs, passed to self.transform_inputs.
train_Y: Passed to outcome_transform.
outcome_transform: Transform applied to train_Y.
input_transform: A Module that performs the input transformation, passed to
self.transform_inputs.
"""
with torch.no_grad():
transformed_X = self.transform_inputs(
X=train_X, input_transform=input_transform
)
if outcome_transform is not None:
train_Y, _ = outcome_transform(train_Y)
self._validate_tensor_args(transformed_X, train_Y)
self._set_dimensions(train_X=train_X, train_Y=train_Y)
train_X, train_Y, _ = self._transform_tensor_args(X=train_X, Y=train_Y)
likelihood = GaussianLikelihood(batch_shape=self._aug_batch_shape)
super().__init__(train_X, train_Y, likelihood)
self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
self.covar_module = ScaleKernel(
RBFKernel(batch_shape=self._aug_batch_shape),
batch_shape=self._aug_batch_shape,
)
if outcome_transform is not None:
self.outcome_transform = outcome_transform
if input_transform is not None:
self.input_transform = input_transform
self.to(train_X)
def forward(self, x):
if self.training:
x = self.transform_inputs(x)
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
@property
def batch_shape(self) -> torch.Size:
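# returning the manually set _batch_shape (when present) lets tests emulate
# SAAS-style models whose batch shape differs from that of the training data;
# otherwise defer to the parent implementation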
if self._batch_shape is not None:
return self._batch_shape
return super().batch_shape
class SimpleModelListGPyTorchModel(IndependentModelList, ModelListGPyTorchModel):
def __init__(self, *gp_models: GPyTorchModel):
r"""
Args:
gp_models: Arbitrary number of GPyTorchModels.
"""
super().__init__(*gp_models)
class TestGPyTorchModel(BotorchTestCase):
def test_gpytorch_model(self):
for dtype, use_octf in itertools.product(
(torch.float, torch.double), (False, True)
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=1) if use_octf else None
train_X = torch.rand(5, 1, **tkwargs)
train_Y = torch.sin(train_X)
# basic test
model = SimpleGPyTorchModel(train_X, train_Y, octf)
self.assertEqual(model.num_outputs, 1)
self.assertEqual(model.batch_shape, torch.Size())
test_X = torch.rand(2, 1, **tkwargs)
posterior = model.posterior(test_X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 1]))
if use_octf:
# ensure un-transformation is applied
tmp_tf = model.outcome_transform
del model.outcome_transform
p_tf = model.posterior(test_X)
model.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(p_tf).variance
self.assertAllClose(posterior.variance, expected_var)
# test observation noise
posterior = model.posterior(test_X, observation_noise=True)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 1]))
posterior = model.posterior(
test_X, observation_noise=torch.rand(2, 1, **tkwargs)
)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 1]))
# test noise shape validation
with self.assertRaises(BotorchTensorDimensionError):
model.posterior(test_X, observation_noise=torch.rand(2, **tkwargs))
# test conditioning on observations
cm = model.condition_on_observations(
torch.rand(2, 1, **tkwargs), torch.rand(2, 1, **tkwargs)
)
self.assertIsInstance(cm, SimpleGPyTorchModel)
self.assertEqual(cm.train_targets.shape, torch.Size([7]))
# test subset_output
with self.assertRaises(NotImplementedError):
model.subset_output([0])
# test fantasize
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
cm = model.fantasize(torch.rand(2, 1, **tkwargs), sampler=sampler)
self.assertIsInstance(cm, SimpleGPyTorchModel)
self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
cm = model.fantasize(
torch.rand(2, 1, **tkwargs), sampler=sampler, observation_noise=True
)
self.assertIsInstance(cm, SimpleGPyTorchModel)
self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
cm = model.fantasize(
torch.rand(2, 1, **tkwargs),
sampler=sampler,
observation_noise=torch.rand(2, 1, **tkwargs),
)
self.assertIsInstance(cm, SimpleGPyTorchModel)
self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
def test_validate_tensor_args(self) -> None:
n, d = 3, 2
for batch_shape, output_dim_shape, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(torch.Size(), torch.Size([1]), torch.Size([2])),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
X = torch.empty(batch_shape + torch.Size([n, d]), **tkwargs)
# test using the same batch_shape as X
Y = torch.empty(batch_shape + torch.Size([n]) + output_dim_shape, **tkwargs)
if len(output_dim_shape) > 0:
# check that no exception is raised
for strict in [False, True]:
GPyTorchModel._validate_tensor_args(X, Y, strict=strict)
else:
expected_message = (
"An explicit output dimension is required for targets."
)
with self.assertRaisesRegex(
BotorchTensorDimensionError, expected_message
):
GPyTorchModel._validate_tensor_args(X, Y)
with self.assertWarnsRegex(
BotorchTensorDimensionWarning,
(
"Non-strict enforcement of botorch tensor conventions. "
"The following error would have been raised with strict "
"enforcement: "
)
+ expected_message,
):
GPyTorchModel._validate_tensor_args(X, Y, strict=False)
# test using different batch_shape
if len(batch_shape) > 0:
expected_message = (
"Expected X and Y to have the same number of dimensions"
)
with self.assertRaisesRegex(
BotorchTensorDimensionError, expected_message
):
GPyTorchModel._validate_tensor_args(X, Y[0])
with settings.debug(True), self.assertWarnsRegex(
BotorchTensorDimensionWarning,
(
"Non-strict enforcement of botorch tensor conventions. "
"The following error would have been raised with strict "
"enforcement: "
)
+ expected_message,
):
GPyTorchModel._validate_tensor_args(X, Y[0], strict=False)
# with Yvar
if len(output_dim_shape) > 0:
Yvar = torch.empty(torch.Size([n]) + output_dim_shape, **tkwargs)
GPyTorchModel._validate_tensor_args(X, Y, Yvar)
Yvar = torch.empty(n, 5, **tkwargs)
for strict in [False, True]:
with self.assertRaisesRegex(
BotorchTensorDimensionError,
"An explicit output dimension is required for "
"observation noise.",
):
GPyTorchModel._validate_tensor_args(X, Y, Yvar, strict=strict)
def test_fantasize_flag(self):
train_X = torch.rand(5, 1)
train_Y = torch.sin(train_X)
model = SimpleGPyTorchModel(train_X, train_Y)
model.eval()
test_X = torch.ones(1, 1)
model(test_X)
self.assertFalse(model.last_fantasize_flag)
model.posterior(test_X)
self.assertFalse(model.last_fantasize_flag)
model.fantasize(test_X, SobolQMCNormalSampler(sample_shape=torch.Size([2])))
self.assertTrue(model.last_fantasize_flag)
model.last_fantasize_flag = False
with fantasize():
model.posterior(test_X)
self.assertTrue(model.last_fantasize_flag)
def test_input_transform(self):
# simple test making sure that the input transforms are applied to both
# train and test inputs
for dtype, transform_on_train in itertools.product(
(torch.float, torch.double), (False, True)
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X = torch.rand(5, 1, **tkwargs)
train_Y = torch.sin(train_X)
intf = SimpleInputTransform(transform_on_train)
model = SimpleGPyTorchModel(train_X, train_Y, input_transform=intf)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll, optimizer_kwargs={"options": {"maxiter": 2}})
test_X = torch.rand(2, 1, **tkwargs)
model.posterior(test_X)
# posterior calls model.forward twice: once with the training inputs only,
# and once with both the train and test inputs
expected_train = intf(train_X) if transform_on_train else train_X
expected_test = intf(test_X)
self.assertTrue(
torch.equal(model.transformed_call_args[-2], expected_train)
)
self.assertTrue(
torch.equal(
model.transformed_call_args[-1],
torch.cat([expected_train, expected_test], dim=0),
)
)
def test_posterior_transform(self):
tkwargs = {"device": self.device, "dtype": torch.double}
train_X = torch.rand(5, 1, **tkwargs)
train_Y = torch.sin(train_X)
model = SimpleGPyTorchModel(train_X, train_Y)
post_tf = ScalarizedPosteriorTransform(weights=torch.zeros(1, **tkwargs))
post = model.posterior(torch.rand(3, 1, **tkwargs), posterior_transform=post_tf)
self.assertTrue(torch.equal(post.mean, torch.zeros(3, 1, **tkwargs)))
def test_float_warning_and_dtype_error(self):
with self.assertWarnsRegex(UserWarning, "double precision"):
SimpleGPyTorchModel(torch.rand(5, 1), torch.randn(5, 1))
with self.assertRaisesRegex(InputDataError, "same dtype"):
SimpleGPyTorchModel(torch.rand(5, 1), torch.randn(5, 1, dtype=torch.double))
class TestBatchedMultiOutputGPyTorchModel(BotorchTestCase):
def test_batched_multi_output_gpytorch_model(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
train_X = torch.rand(5, 1, **tkwargs)
train_Y = torch.cat([torch.sin(train_X), torch.cos(train_X)], dim=-1)
# basic test
model = SimpleBatchedMultiOutputGPyTorchModel(train_X, train_Y)
self.assertEqual(model.num_outputs, 2)
self.assertEqual(model.batch_shape, torch.Size())
test_X = torch.rand(2, 1, **tkwargs)
posterior = model.posterior(test_X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
# test observation noise
posterior = model.posterior(test_X, observation_noise=True)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
posterior = model.posterior(
test_X, observation_noise=torch.rand(2, 2, **tkwargs)
)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
# test subset_output
with self.assertRaises(NotImplementedError):
model.subset_output([0])
# test conditioning on observations
cm = model.condition_on_observations(
torch.rand(2, 1, **tkwargs), torch.rand(2, 2, **tkwargs)
)
self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
# test fantasize
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
cm = model.fantasize(torch.rand(2, 1, **tkwargs), sampler=sampler)
self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
cm = model.fantasize(
torch.rand(2, 1, **tkwargs), sampler=sampler, observation_noise=True
)
self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
cm = model.fantasize(
torch.rand(2, 1, **tkwargs),
sampler=sampler,
observation_noise=torch.rand(2, 2, **tkwargs),
)
self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
# test get_batch_dimensions
get_batch_dims = SimpleBatchedMultiOutputGPyTorchModel.get_batch_dimensions
for input_batch_dim in (0, 3):
for num_outputs in (1, 2):
input_batch_shape, aug_batch_shape = get_batch_dims(
train_X=train_X.unsqueeze(0).expand(3, 5, 1)
if input_batch_dim == 3
else train_X,
train_Y=train_Y[:, 0:1] if num_outputs == 1 else train_Y,
)
expected_input_batch_shape = (
torch.Size([3]) if input_batch_dim == 3 else torch.Size([])
)
self.assertEqual(input_batch_shape, expected_input_batch_shape)
self.assertEqual(
aug_batch_shape,
expected_input_batch_shape + torch.Size([])
if num_outputs == 1
else expected_input_batch_shape + torch.Size([2]),
)
def test_posterior_transform(self):
tkwargs = {"device": self.device, "dtype": torch.double}
train_X = torch.rand(5, 2, **tkwargs)
train_Y = torch.sin(train_X)
model = SimpleBatchedMultiOutputGPyTorchModel(train_X, train_Y)
post_tf = ScalarizedPosteriorTransform(weights=torch.zeros(2, **tkwargs))
post = model.posterior(torch.rand(3, 2, **tkwargs), posterior_transform=post_tf)
self.assertTrue(torch.equal(post.mean, torch.zeros(3, 1, **tkwargs)))
class TestModelListGPyTorchModel(BotorchTestCase):
def test_model_list_gpytorch_model(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
train_X1, train_X2 = (
torch.rand(5, 1, **tkwargs),
torch.rand(5, 1, **tkwargs),
)
train_Y1 = torch.sin(train_X1)
train_Y2 = torch.cos(train_X2)
# test SAAS-type batch shape (manually set _batch_shape)
m1 = SimpleBatchedMultiOutputGPyTorchModel(train_X1, train_Y1)
m2 = SimpleBatchedMultiOutputGPyTorchModel(train_X2, train_Y2)
m1._batch_shape = torch.Size([2])
m2._batch_shape = torch.Size([2])
model = SimpleModelListGPyTorchModel(m1, m2)
self.assertEqual(model.batch_shape, torch.Size([2]))
# test different batch shapes (broadcastable)
m1 = SimpleGPyTorchModel(
train_X1.expand(2, *train_X1.shape), train_Y1.expand(2, *train_Y1.shape)
)
m2 = SimpleGPyTorchModel(train_X2, train_Y2)
model = SimpleModelListGPyTorchModel(m1, m2)
self.assertEqual(model.num_outputs, 2)
with warnings.catch_warnings(record=True) as ws:
self.assertEqual(model.batch_shape, torch.Size([2]))
msg = (
"Component models of SimpleModelListGPyTorchModel have "
"different batch shapes"
)
self.assertTrue(any(msg in str(w.message) for w in ws))
# test different batch shapes (not broadcastable)
m2 = SimpleGPyTorchModel(
train_X2.expand(3, *train_X2.shape), train_Y2.expand(3, *train_Y2.shape)
)
model = SimpleModelListGPyTorchModel(m1, m2)
with self.assertRaises(NotImplementedError):
model.batch_shape
# test same batch shape
m2 = SimpleGPyTorchModel(
train_X2.expand(2, *train_X2.shape), train_Y2.expand(2, *train_Y2.shape)
)
model = SimpleModelListGPyTorchModel(m1, m2)
self.assertEqual(model.num_outputs, 2)
self.assertEqual(model.batch_shape, torch.Size([2]))
# test non-batch
m1 = SimpleGPyTorchModel(train_X1, train_Y1)
m2 = SimpleGPyTorchModel(train_X2, train_Y2)
model = SimpleModelListGPyTorchModel(m1, m2)
self.assertEqual(model.batch_shape, torch.Size([]))
test_X = torch.rand(2, 1, **tkwargs)
posterior = model.posterior(test_X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
# test output indices
for output_indices in ([0], [1], [0, 1]):
posterior_subset = model.posterior(
test_X, output_indices=output_indices
)
self.assertIsInstance(posterior_subset, GPyTorchPosterior)
self.assertEqual(
posterior_subset.mean.shape, torch.Size([2, len(output_indices)])
)
self.assertTrue(
torch.equal(
posterior_subset.mean, posterior.mean[..., output_indices]
)
)
self.assertTrue(
torch.equal(
posterior_subset.variance,
posterior.variance[..., output_indices],
)
)
# test observation noise
posterior = model.posterior(test_X, observation_noise=True)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
posterior = model.posterior(
test_X, observation_noise=torch.rand(2, 2, **tkwargs)
)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
posterior = model.posterior(
test_X,
output_indices=[0],
observation_noise=torch.rand(2, 2, **tkwargs),
)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, torch.Size([2, 1]))
# conditioning is not implemented (see ModelListGP for tests)
with self.assertRaises(NotImplementedError):
model.condition_on_observations(
X=torch.rand(2, 1, **tkwargs), Y=torch.rand(2, 2, **tkwargs)
)
def test_input_transform(self):
# test that the input transforms are applied properly to individual models
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
train_X1, train_X2 = (
torch.rand(5, 1, **tkwargs),
torch.rand(5, 1, **tkwargs),
)
train_Y1 = torch.sin(train_X1)
train_Y2 = torch.cos(train_X2)
# test transform on only one model
m1 = SimpleGPyTorchModel(train_X1, train_Y1)
m2_tf = SimpleInputTransform(True)
m2 = SimpleGPyTorchModel(train_X2, train_Y2, input_transform=m2_tf)
# test `input_transform.to(X)` call
self.assertEqual(m2_tf.add_value.dtype, dtype)
self.assertEqual(m2_tf.add_value.device.type, self.device.type)
# train the models so that the train inputs are preprocessed
for m in [m1, m2]:
mll = ExactMarginalLogLikelihood(m.likelihood, m)
fit_gpytorch_mll(mll, optimizer_kwargs={"options": {"maxiter": 2}})
model = SimpleModelListGPyTorchModel(m1, m2)
test_X = torch.rand(2, 1, **tkwargs)
model.posterior(test_X)
# posterior calls model.forward twice: once with the training inputs only,
# and once with both the train and test inputs
for m, t_X in [[m1, train_X1], [m2, train_X2]]:
expected_train = m.transform_inputs(t_X)
expected_test = m.transform_inputs(test_X)
self.assertTrue(
torch.equal(m.transformed_call_args[-2], expected_train)
)
self.assertTrue(
torch.equal(
m.transformed_call_args[-1],
torch.cat([expected_train, expected_test], dim=0),
)
)
# different transforms on the two models
m1_tf = ChainedInputTransform(
tf1=SimpleInputTransform(False),
tf2=SimpleInputTransform(True),
)
m1 = SimpleGPyTorchModel(train_X1, train_Y1, input_transform=m1_tf)
m2_tf = SimpleInputTransform(False)
m2 = SimpleGPyTorchModel(train_X2, train_Y2, input_transform=m2_tf)
for m in [m1, m2]:
mll = ExactMarginalLogLikelihood(m.likelihood, m)
fit_gpytorch_mll(mll, optimizer_kwargs={"options": {"maxiter": 2}})
model = SimpleModelListGPyTorchModel(m1, m2)
model.posterior(test_X)
for m, t_X in [[m1, train_X1], [m2, train_X2]]:
expected_train = m.input_transform.preprocess_transform(t_X)
expected_test = m.transform_inputs(test_X)
self.assertTrue(
torch.equal(m.transformed_call_args[-2], expected_train)
)
self.assertTrue(
torch.equal(
m.transformed_call_args[-1],
torch.cat([expected_train, expected_test], dim=0),
)
)
def test_posterior_transform(self):
tkwargs = {"device": self.device, "dtype": torch.double}
train_X1, train_X2 = (
torch.rand(5, 1, **tkwargs),
torch.rand(5, 1, **tkwargs),
)
train_Y1 = torch.sin(train_X1)
train_Y2 = torch.cos(train_X2)
# test different batch shapes
m1 = SimpleGPyTorchModel(train_X1, train_Y1)
m2 = SimpleGPyTorchModel(train_X2, train_Y2)
model = SimpleModelListGPyTorchModel(m1, m2)
post_tf = ScalarizedPosteriorTransform(torch.ones(2, **tkwargs))
post = model.posterior(torch.rand(3, 1, **tkwargs), posterior_transform=post_tf)
self.assertEqual(post.mean.shape, torch.Size([3, 1]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
from typing import Optional
import torch
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.exceptions.warnings import OptimizationWarning
from botorch.fit import fit_gpytorch_mll
from botorch.models import ModelListGP
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.models.multitask import MultiTaskGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import ChainedOutcomeTransform, Log, Standardize
from botorch.posteriors import GPyTorchPosterior, PosteriorList, TransformedPosterior
from botorch.sampling.base import MCSampler
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.testing import _get_random_data, BotorchTestCase
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import LikelihoodList
from gpytorch.means import ConstantMean
from gpytorch.mlls import SumMarginalLogLikelihood
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.priors import GammaPrior
from torch import Tensor
def _get_model(
fixed_noise=False, outcome_transform: str = "None", use_intf=False, **tkwargs
) -> ModelListGP:
train_x1, train_y1 = _get_random_data(
batch_shape=torch.Size(), m=1, n=10, **tkwargs
)
train_y1 = torch.exp(train_y1)
train_x2, train_y2 = _get_random_data(
batch_shape=torch.Size(), m=1, n=11, **tkwargs
)
if outcome_transform == "Standardize":
octfs = [Standardize(m=1), Standardize(m=1)]
elif outcome_transform == "Log":
octfs = [Log(), Standardize(m=1)]
elif outcome_transform == "Chained":
octfs = [
ChainedOutcomeTransform(
chained=ChainedOutcomeTransform(log=Log(), standardize=Standardize(m=1))
),
Standardize(m=1),
]
elif outcome_transform == "None":
octfs = [None, None]
else:
raise KeyError( # pragma: no cover
"outcome_transform must be one of 'Standardize', 'Log', 'Chained', or "
"'None'."
)
intfs = [Normalize(d=1), Normalize(d=1)] if use_intf else [None, None]
if fixed_noise:
train_y1_var = 0.1 + 0.1 * torch.rand_like(train_y1, **tkwargs)
train_y2_var = 0.1 + 0.1 * torch.rand_like(train_y2, **tkwargs)
model1 = FixedNoiseGP(
train_X=train_x1,
train_Y=train_y1,
train_Yvar=train_y1_var,
outcome_transform=octfs[0],
input_transform=intfs[0],
)
model2 = FixedNoiseGP(
train_X=train_x2,
train_Y=train_y2,
train_Yvar=train_y2_var,
outcome_transform=octfs[1],
input_transform=intfs[1],
)
else:
model1 = SingleTaskGP(
train_X=train_x1,
train_Y=train_y1,
outcome_transform=octfs[0],
input_transform=intfs[0],
)
model2 = SingleTaskGP(
train_X=train_x2,
train_Y=train_y2,
outcome_transform=octfs[1],
input_transform=intfs[1],
)
model = ModelListGP(model1, model2)
return model.to(**tkwargs)
class TestModelListGP(BotorchTestCase):
def _base_test_ModelListGP(
self, fixed_noise: bool, dtype, outcome_transform: str
) -> ModelListGP:
tkwargs = {"device": self.device, "dtype": dtype}
model = _get_model(
fixed_noise=fixed_noise, outcome_transform=outcome_transform, **tkwargs
)
self.assertIsInstance(model, ModelListGP)
self.assertIsInstance(model.likelihood, LikelihoodList)
self.assertEqual(model.num_outputs, 2)
for m in model.models:
self.assertIsInstance(m.mean_module, ConstantMean)
self.assertIsInstance(m.covar_module, ScaleKernel)
matern_kernel = m.covar_module.base_kernel
self.assertIsInstance(matern_kernel, MaternKernel)
self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
if outcome_transform != "None":
self.assertIsInstance(
m.outcome_transform, (Log, Standardize, ChainedOutcomeTransform)
)
else:
assert not hasattr(m, "outcome_transform")
# test constructing likelihood wrapper
mll = SumMarginalLogLikelihood(model.likelihood, model)
for mll_ in mll.mlls:
self.assertIsInstance(mll_, ExactMarginalLogLikelihood)
# test model fitting (sequential)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
# test model fitting (joint)
mll = fit_gpytorch_mll(
mll,
optimizer_kwargs={"options": {"maxiter": 1}},
max_attempts=1,
sequential=False,
)
# test subset outputs
subset_model = model.subset_output([1])
self.assertIsInstance(subset_model, ModelListGP)
self.assertEqual(len(subset_model.models), 1)
sd_subset = subset_model.models[0].state_dict()
sd = model.models[1].state_dict()
self.assertTrue(set(sd_subset.keys()) == set(sd.keys()))
self.assertTrue(all(torch.equal(v, sd[k]) for k, v in sd_subset.items()))
# test posterior
test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
posterior = model.posterior(test_x)
gpytorch_posterior_expected = outcome_transform in ("None", "Standardize")
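# Log and Chained outcome transforms cannot be un-transformed into a
# multivariate normal, so the posterior is expected to be a PosteriorList of
# TransformedPosteriors rather than a GPyTorchPosterior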
expected_type = (
GPyTorchPosterior if gpytorch_posterior_expected else PosteriorList
)
self.assertIsInstance(posterior, expected_type)
submodel = model.models[0]
p0 = submodel.posterior(test_x)
self.assertAllClose(posterior.mean[:, [0]], p0.mean)
self.assertAllClose(posterior.variance[:, [0]], p0.variance)
if gpytorch_posterior_expected:
self.assertIsInstance(posterior.distribution, MultitaskMultivariateNormal)
if outcome_transform != "None":
# ensure un-transformation is applied
submodel = model.models[0]
p0 = submodel.posterior(test_x)
tmp_tf = submodel.outcome_transform
del submodel.outcome_transform
p0_tf = submodel.posterior(test_x)
submodel.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(p0_tf).variance
self.assertAllClose(p0.variance, expected_var)
# test output_indices
posterior = model.posterior(test_x, output_indices=[0], observation_noise=True)
self.assertIsInstance(posterior, expected_type)
if gpytorch_posterior_expected:
self.assertIsInstance(posterior.distribution, MultivariateNormal)
# test condition_on_observations
f_x = [torch.rand(2, 1, **tkwargs) for _ in range(2)]
f_y = torch.rand(2, 2, **tkwargs)
if fixed_noise:
noise = 0.1 + 0.1 * torch.rand_like(f_y)
cond_kwargs = {"noise": noise}
else:
cond_kwargs = {}
cm = model.condition_on_observations(f_x, f_y, **cond_kwargs)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations batched
f_x = [torch.rand(3, 2, 1, **tkwargs) for _ in range(2)]
f_y = torch.rand(3, 2, 2, **tkwargs)
cm = model.condition_on_observations(f_x, f_y, **cond_kwargs)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations batched (fast fantasies)
f_x = [torch.rand(2, 1, **tkwargs) for _ in range(2)]
f_y = torch.rand(3, 2, 2, **tkwargs)
cm = model.condition_on_observations(f_x, f_y, **cond_kwargs)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations (incorrect input shape error)
with self.assertRaises(BotorchTensorDimensionError):
model.condition_on_observations(
f_x, torch.rand(3, 2, 3, **tkwargs), **cond_kwargs
)
# test X having wrong size
with self.assertRaises(AssertionError):
model.condition_on_observations(f_x[:1], f_y)
# test posterior transform
X = torch.rand(3, 1, **tkwargs)
weights = torch.tensor([1, 2], **tkwargs)
post_tf = ScalarizedPosteriorTransform(weights=weights)
if gpytorch_posterior_expected:
posterior_tf = model.posterior(X, posterior_transform=post_tf)
self.assertTrue(
torch.allclose(
posterior_tf.mean,
model.posterior(X).mean @ weights.unsqueeze(-1),
)
)
return model
def test_ModelListGP(self) -> None:
for dtype, outcome_transform in itertools.product(
(torch.float, torch.double), ("None", "Standardize", "Log", "Chained")
):
model = self._base_test_ModelListGP(
fixed_noise=False, dtype=dtype, outcome_transform=outcome_transform
)
tkwargs = {"device": self.device, "dtype": dtype}
# test observation_noise
test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
posterior = model.posterior(test_x, observation_noise=True)
gpytorch_posterior_expected = outcome_transform in ("None", "Standardize")
expected_type = (
GPyTorchPosterior if gpytorch_posterior_expected else PosteriorList
)
self.assertIsInstance(posterior, expected_type)
if gpytorch_posterior_expected:
self.assertIsInstance(
posterior.distribution, MultitaskMultivariateNormal
)
else:
self.assertIsInstance(posterior.posteriors[0], TransformedPosterior)
# Test tensor valued observation noise.
observation_noise = torch.rand(2, 2, **tkwargs)
with torch.no_grad():
noise_free_variance = model.posterior(test_x).variance
noisy_variance = model.posterior(
test_x, observation_noise=observation_noise
).variance
self.assertEqual(noise_free_variance.shape, noisy_variance.shape)
if outcome_transform == "None":
self.assertAllClose(
noise_free_variance + observation_noise, noisy_variance
)
def test_ModelListGP_fixed_noise(self) -> None:
for dtype, outcome_transform in itertools.product(
(torch.float, torch.double), ("None", "Standardize")
):
model = self._base_test_ModelListGP(
fixed_noise=True, dtype=dtype, outcome_transform=outcome_transform
)
tkwargs = {"device": self.device, "dtype": dtype}
f_x = [torch.rand(2, 1, **tkwargs) for _ in range(2)]
f_y = torch.rand(2, 2, **tkwargs)
# test condition_on_observations (incorrect noise shape error)
with self.assertRaises(BotorchTensorDimensionError):
model.condition_on_observations(
f_x, f_y, noise=torch.rand(2, 3, **tkwargs)
)
def test_ModelListGP_single(self):
tkwargs = {"device": self.device, "dtype": torch.float}
train_x1, train_y1 = _get_random_data(
batch_shape=torch.Size(), m=1, n=10, **tkwargs
)
model1 = SingleTaskGP(train_X=train_x1, train_Y=train_y1)
model = ModelListGP(model1)
model.to(**tkwargs)
test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
posterior = model.posterior(test_x)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertIsInstance(posterior.distribution, MultivariateNormal)
def test_ModelListGP_multi_task(self):
tkwargs = {"device": self.device, "dtype": torch.float}
train_x_raw, train_y = _get_random_data(
batch_shape=torch.Size(), m=1, n=10, **tkwargs
)
task_idx = torch.cat(
[torch.ones(5, 1, **tkwargs), torch.zeros(5, 1, **tkwargs)], dim=0
)
train_x = torch.cat([train_x_raw, task_idx], dim=-1)
model = MultiTaskGP(
train_X=train_x,
train_Y=train_y,
task_feature=-1,
output_tasks=[0],
)
# Wrap a single single-output MTGP.
model_list_gp = ModelListGP(model)
self.assertEqual(model_list_gp.num_outputs, 1)
with torch.no_grad():
model_mean = model.posterior(train_x_raw).mean
model_list_gp_mean = model_list_gp.posterior(train_x_raw).mean
self.assertAllClose(model_mean, model_list_gp_mean)
# Wrap two single-output MTGPs.
model_list_gp = ModelListGP(model, model)
self.assertEqual(model_list_gp.num_outputs, 2)
with torch.no_grad():
model_list_gp_mean = model_list_gp.posterior(train_x_raw).mean
expected_mean = torch.cat([model_mean, model_mean], dim=-1)
self.assertAllClose(expected_mean, model_list_gp_mean)
# Wrap a multi-output MTGP.
model2 = MultiTaskGP(
train_X=train_x,
train_Y=train_y,
task_feature=-1,
)
model_list_gp = ModelListGP(model2)
self.assertEqual(model_list_gp.num_outputs, 2)
with torch.no_grad():
model2_mean = model2.posterior(train_x_raw).mean
model_list_gp_mean = model_list_gp.posterior(train_x_raw).mean
self.assertAllClose(model2_mean, model_list_gp_mean)
# Mix of multi-output and single-output MTGPs.
model_list_gp = ModelListGP(model, model2)
self.assertEqual(model_list_gp.num_outputs, 3)
with torch.no_grad():
model_list_gp_mean = model_list_gp.posterior(train_x_raw).mean
expected_mean = torch.cat([model_mean, model2_mean], dim=-1)
self.assertAllClose(expected_mean, model_list_gp_mean)
def test_transform_revert_train_inputs(self):
tkwargs = {"device": self.device, "dtype": torch.float}
model_list = _get_model(use_intf=True, **tkwargs)
org_inputs = [m.train_inputs[0] for m in model_list.models]
model_list.eval()
for i, m in enumerate(model_list.models):
self.assertTrue(
torch.allclose(
m.train_inputs[0],
m.input_transform.preprocess_transform(org_inputs[i]),
)
)
self.assertTrue(m._has_transformed_inputs)
self.assertTrue(torch.equal(m._original_train_inputs, org_inputs[i]))
model_list.train(mode=True)
for i, m in enumerate(model_list.models):
self.assertTrue(torch.equal(m.train_inputs[0], org_inputs[i]))
self.assertFalse(m._has_transformed_inputs)
model_list.train(mode=False)
for i, m in enumerate(model_list.models):
self.assertTrue(
torch.allclose(
m.train_inputs[0],
m.input_transform.preprocess_transform(org_inputs[i]),
)
)
self.assertTrue(m._has_transformed_inputs)
self.assertTrue(torch.equal(m._original_train_inputs, org_inputs[i]))
def test_fantasize(self):
m1 = SingleTaskGP(torch.rand(5, 2), torch.rand(5, 1)).eval()
m2 = SingleTaskGP(torch.rand(5, 2), torch.rand(5, 1)).eval()
modellist = ModelListGP(m1, m2)
fm = modellist.fantasize(
torch.rand(3, 2), sampler=IIDNormalSampler(sample_shape=torch.Size([2]))
)
self.assertIsInstance(fm, ModelListGP)
for i in range(2):
fm_i = fm.models[i]
self.assertIsInstance(fm_i, SingleTaskGP)
self.assertEqual(fm_i.train_inputs[0].shape, torch.Size([2, 8, 2]))
self.assertEqual(fm_i.train_targets.shape, torch.Size([2, 8]))
# test decoupled
sampler1 = IIDNormalSampler(sample_shape=torch.Size([2]))
sampler2 = IIDNormalSampler(sample_shape=torch.Size([2]))
eval_mask = torch.tensor(
[[1, 0], [0, 1], [1, 0]],
dtype=torch.bool,
)
fm = modellist.fantasize(
torch.rand(3, 2),
sampler=ListSampler(sampler1, sampler2),
evaluation_mask=eval_mask,
)
self.assertIsInstance(fm, ModelListGP)
for i in range(2):
fm_i = fm.models[i]
self.assertIsInstance(fm_i, SingleTaskGP)
num_points = 7 - i
self.assertEqual(fm_i.train_inputs[0].shape, torch.Size([2, num_points, 2]))
self.assertEqual(fm_i.train_targets.shape, torch.Size([2, num_points]))
def test_fantasize_with_outcome_transform(self) -> None:
"""
Check that fantasized posteriors from a `ModelListGP` with transforms
relate in a predictable way to posteriors from a `ModelListGP` when the
outputs have been manually transformed.
We are essentially fitting "Y = 10 * X" with Y standardized.
- In the original space, we should predict a mean of ~5 at 0.5
- In the standardized space, we should predict ~0.
- If we untransform the result in the standardized space, we should recover
the prediction of ~5 we would have gotten in the original space.
"""
for dtype in [torch.float, torch.double]:
with self.subTest(dtype=dtype):
tkwargs = {"device": self.device, "dtype": dtype}
X = torch.linspace(0, 1, 20, **tkwargs)[:, None]
Y1 = 10 * torch.linspace(0, 1, 20, **tkwargs)[:, None]
Y2 = 2 * Y1
Y = torch.cat([Y1, Y2], dim=-1)
target_x = torch.tensor([[0.5]], **tkwargs)
model_with_transform = ModelListGP(
SingleTaskGP(X, Y1, outcome_transform=Standardize(m=1)),
SingleTaskGP(X, Y2, outcome_transform=Standardize(m=1)),
)
outcome_transform = Standardize(m=2)
y_standardized, _ = outcome_transform(Y)
outcome_transform.eval()
model_manually_transformed = ModelListGP(
SingleTaskGP(X, y_standardized[:, :1]),
SingleTaskGP(X, y_standardized[:, 1:]),
)
def _get_fant_mean(
model: ModelListGP,
sampler: MCSampler,
eval_mask: Optional[Tensor] = None,
) -> float:
fant = model.fantasize(
target_x,
sampler=sampler,
evaluation_mask=eval_mask,
)
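# averaging over dims -3 and -2 (fantasy samples and data points) leaves one
# value per model output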
return fant.posterior(target_x).mean.mean(dim=(-2, -3))
# ~0
sampler = IIDNormalSampler(sample_shape=torch.Size([10]), seed=0)
fant_mean_with_manual_transform = _get_fant_mean(
model_manually_transformed, sampler=sampler
)
# Inexact since this is an MC test and we don't want it flaky
self.assertLessEqual(
(fant_mean_with_manual_transform - 0.0).abs().max().item(), 0.1
)
manually_rescaled_mean = outcome_transform.untransform(
fant_mean_with_manual_transform
)[0].view(-1)
fant_mean_with_native_transform = _get_fant_mean(
model_with_transform, sampler=sampler
)
# Inexact since this is an MC test and we don't want it flaky
self.assertLessEqual(
(
fant_mean_with_native_transform
- torch.tensor([5.0, 10.0], **tkwargs)
)
.abs()
.max()
.item(),
0.5,
)
# tighter tolerance here since the models should use the same samples
self.assertAllClose(
manually_rescaled_mean,
fant_mean_with_native_transform,
)
# test decoupled
sampler = ListSampler(
IIDNormalSampler(sample_shape=torch.Size([10]), seed=0),
IIDNormalSampler(sample_shape=torch.Size([10]), seed=0),
)
fant_mean_with_manual_transform = _get_fant_mean(
model_manually_transformed,
sampler=sampler,
eval_mask=torch.tensor(
[[0, 1]], dtype=torch.bool, device=tkwargs["device"]
),
)
# Inexact since this is an MC test and we don't want it flaky
self.assertLessEqual(
(fant_mean_with_manual_transform - 0.0).abs().max().item(), 0.1
)
manually_rescaled_mean = outcome_transform.untransform(
fant_mean_with_manual_transform
)[0].view(-1)
fant_mean_with_native_transform = _get_fant_mean(
model_with_transform,
sampler=sampler,
eval_mask=torch.tensor(
[[0, 1]], dtype=torch.bool, device=tkwargs["device"]
),
)
# Inexact since this is an MC test and we don't want it flaky
self.assertLessEqual(
(
fant_mean_with_native_transform
- torch.tensor([5.0, 10.0], **tkwargs)
)
.abs()
.max()
.item(),
0.5,
)
# tighter tolerance here since the models should use the same samples
self.assertAllClose(
manually_rescaled_mean,
fant_mean_with_native_transform,
)
def test_fantasize_with_outcome_transform_fixed_noise(self) -> None:
"""
Test that 'fantasize' on average recovers the true mean fn.
Loose tolerance to protect against flakiness. The true mean function is
100 at x=0. If transforms are not properly applied, we'll get answers
on the order of ~1. Answers between 99 and 101 are acceptable.
"""
n_fants = torch.Size([20])
y_at_low_x = 100.0
y_at_high_x = -40.0
for dtype in [torch.float, torch.double]:
with self.subTest(dtype=dtype):
tkwargs = {"device": self.device, "dtype": dtype}
X = torch.tensor([[0.0], [1.0]], **tkwargs)
Y = torch.tensor([[y_at_low_x], [y_at_high_x]], **tkwargs)
Y2 = 2 * Y
yvar = torch.full_like(Y, 1e-4)
yvar2 = 2 * yvar
model = ModelListGP(
FixedNoiseGP(X, Y, yvar, outcome_transform=Standardize(m=1)),
FixedNoiseGP(X, Y2, yvar2, outcome_transform=Standardize(m=1)),
)
# test exceptions
eval_mask = torch.zeros(
3, 2, 2, dtype=torch.bool, device=tkwargs["device"]
)
msg = (
f"Expected evaluation_mask of shape `{X.shape[0]} x "
f"{model.num_outputs}`, but got `"
f"{' x '.join(str(i) for i in eval_mask.shape)}`."
)
with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
model.fantasize(
X,
evaluation_mask=eval_mask,
sampler=ListSampler(
IIDNormalSampler(n_fants, seed=0),
IIDNormalSampler(n_fants, seed=0),
),
)
msg = "Decoupled fantasization requires a list of samplers."
with self.assertRaisesRegex(ValueError, msg):
model.fantasize(
X,
evaluation_mask=eval_mask[0],
sampler=IIDNormalSampler(n_fants, seed=0),
)
model.posterior(torch.zeros((1, 1), **tkwargs))
for decoupled in (False, True):
if decoupled:
kwargs = {
"sampler": ListSampler(
IIDNormalSampler(n_fants, seed=0),
IIDNormalSampler(n_fants, seed=0),
),
"evaluation_mask": torch.tensor(
[[0, 1], [1, 0]],
dtype=torch.bool,
device=tkwargs["device"],
),
}
else:
kwargs = {
"sampler": IIDNormalSampler(n_fants, seed=0),
}
fant = model.fantasize(X, **kwargs)
fant_mean = fant.posterior(X).mean.mean(0)
self.assertAlmostEqual(fant_mean[0, 0].item(), y_at_low_x, delta=1)
self.assertAlmostEqual(
fant_mean[0, 1].item(), 2 * y_at_low_x, delta=1
)
# delta=1 is a 1% error (since y_at_low_x = 100)
self.assertAlmostEqual(fant_mean[1, 0].item(), y_at_high_x, delta=1)
self.assertAlmostEqual(
fant_mean[1, 1].item(), 2 * y_at_high_x, delta=1
)
for i, fm_i in enumerate(fant.models):
n_points = 3 if decoupled else 4
self.assertEqual(
fm_i.train_inputs[0].shape, torch.Size([20, n_points, 1])
)
self.assertEqual(
fm_i.train_targets.shape, torch.Size([20, n_points])
)
if decoupled:
self.assertTrue(
torch.equal(fm_i.train_inputs[0][0][-1], X[1 - i])
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import random
import warnings
from typing import Dict, Tuple, Union
import torch
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.exceptions import OptimizationWarning, UnsupportedError
from botorch.exceptions.warnings import _get_single_precision_warning, InputDataWarning
from botorch.fit import fit_gpytorch_mll
from botorch.models.likelihoods.pairwise import (
PairwiseLikelihood,
PairwiseLogitLikelihood,
PairwiseProbitLikelihood,
)
from botorch.models.model import Model
from botorch.models.pairwise_gp import (
_ensure_psd_with_jitter,
PairwiseGP,
PairwiseLaplaceMarginalLogLikelihood,
)
from botorch.models.transforms.input import Normalize
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling.pairwise_samplers import PairwiseSobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.kernels.linear_kernel import LinearKernel
from gpytorch.means import ConstantMean
from gpytorch.priors import GammaPrior, SmoothedBoxPrior
from linear_operator.utils.errors import NotPSDError
from torch import Tensor
class TestPairwiseGP(BotorchTestCase):
def setUp(self, suppress_input_warnings: bool = True) -> None:
super().setUp(suppress_input_warnings)
# single-precision tests are carried out by TestPairwiseGP_float32
self.dtype = torch.float64
def _make_rand_mini_data(
self,
batch_shape,
X_dim=2,
) -> Tuple[Tensor, Tensor]:
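        # Draw two random points per batch and compare them by coordinate sum;
        # the comparison lists the index of the higher-utility point first.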
train_X = torch.rand(
*batch_shape, 2, X_dim, device=self.device, dtype=self.dtype
)
train_Y = train_X.sum(dim=-1, keepdim=True)
train_comp = torch.topk(train_Y, k=2, dim=-2).indices.transpose(-1, -2)
return train_X, train_comp
def _get_model_and_data(
self,
batch_shape,
X_dim=2,
likelihood_cls=None,
) -> Tuple[Model, Dict[str, Union[Tensor, PairwiseLikelihood]]]:
train_X, train_comp = self._make_rand_mini_data(
batch_shape=batch_shape,
X_dim=X_dim,
)
model_kwargs = {
"datapoints": train_X,
"comparisons": train_comp,
"likelihood": None if likelihood_cls is None else likelihood_cls(),
}
model = PairwiseGP(**model_kwargs)
return model, model_kwargs
def test_pairwise_gp(self) -> None:
torch.manual_seed(random.randint(0, 10))
for batch_shape, likelihood_cls in itertools.product(
(torch.Size(), torch.Size([2])),
(PairwiseLogitLikelihood, PairwiseProbitLikelihood),
):
tkwargs = {"device": self.device, "dtype": self.dtype}
X_dim = 2
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape,
X_dim=X_dim,
likelihood_cls=likelihood_cls,
)
train_X = model_kwargs["datapoints"]
train_comp = model_kwargs["comparisons"]
# test training
# regular training
mll = PairwiseLaplaceMarginalLogLikelihood(model.likelihood, model).to(
**tkwargs
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 2}}, max_attempts=1
)
with self.subTest("prior training"):
# prior training
prior_m = PairwiseGP(None, None).to(**tkwargs)
with self.assertRaises(RuntimeError):
prior_m(train_X)
with self.subTest("forward in training mode with non-training data"):
custom_m = PairwiseGP(**model_kwargs)
other_X = torch.rand(batch_shape + torch.Size([3, X_dim]), **tkwargs)
other_comp = train_comp.clone()
with self.assertRaises(RuntimeError):
custom_m(other_X)
custom_mll = PairwiseLaplaceMarginalLogLikelihood(
custom_m.likelihood, custom_m
).to(**tkwargs)
post = custom_m(train_X)
with self.assertRaises(RuntimeError):
custom_mll(post, other_comp)
with self.subTest("init"):
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, ScaleKernel)
self.assertIsInstance(model.covar_module.base_kernel, RBFKernel)
self.assertIsInstance(
model.covar_module.base_kernel.lengthscale_prior, GammaPrior
)
self.assertIsInstance(
model.covar_module.outputscale_prior, SmoothedBoxPrior
)
self.assertEqual(model.num_outputs, 1)
self.assertEqual(model.batch_shape, batch_shape)
# test not using a ScaleKernel
with self.assertRaisesRegex(UnsupportedError, "used with a ScaleKernel"):
PairwiseGP(**model_kwargs, covar_module=LinearKernel())
# test custom models
custom_m = PairwiseGP(
**model_kwargs, covar_module=ScaleKernel(LinearKernel())
)
self.assertIsInstance(custom_m.covar_module, ScaleKernel)
self.assertIsInstance(custom_m.covar_module.base_kernel, LinearKernel)
# prior prediction
prior_m = PairwiseGP(None, None).to(**tkwargs)
prior_m.eval()
post = prior_m.posterior(train_X)
self.assertIsInstance(post, GPyTorchPosterior)
# test initial utility val
util_comp = torch.topk(model.utility, k=2, dim=-1).indices.unsqueeze(-2)
self.assertTrue(torch.all(util_comp == train_comp))
# test posterior
# test non batch evaluation
X = torch.rand(batch_shape + torch.Size([3, X_dim]), **tkwargs)
expected_shape = batch_shape + torch.Size([3, 1])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
self.assertEqual(posterior.variance.shape, expected_shape)
# test posterior transform
post_tf = ScalarizedPosteriorTransform(weights=torch.ones(1))
posterior_tf = model.posterior(X, posterior_transform=post_tf)
self.assertTrue(torch.equal(posterior.mean, posterior_tf.mean))
            # expect an error to be raised when output_indices is not None
with self.assertRaises(RuntimeError):
model.posterior(X, output_indices=[0])
# test re-evaluating utility when it's None
model.utility = None
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
# test batch evaluation
X = torch.rand(2, *batch_shape, 3, X_dim, **tkwargs)
expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, 1])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
# test input_transform
            # the untransformed datapoints should be stored
normalize_tf = Normalize(d=2, bounds=torch.tensor([[0, 0], [0.5, 1.5]]))
model = PairwiseGP(**model_kwargs, input_transform=normalize_tf)
self.assertTrue(torch.equal(model.datapoints, train_X))
# test set_train_data strict mode
model = PairwiseGP(**model_kwargs)
changed_train_X = train_X.unsqueeze(0)
changed_train_comp = train_comp.unsqueeze(0)
            # expect an error when setting the data to something with a different shape
with self.assertRaises(RuntimeError):
model.set_train_data(changed_train_X, changed_train_comp, strict=True)
            # the same datapoints with changed comparisons should also raise an error
with self.assertRaises(RuntimeError):
model.set_train_data(train_X, changed_train_comp, strict=True)
def test_consolidation(self) -> None:
for batch_shape, likelihood_cls in itertools.product(
(torch.Size(), torch.Size([2])),
(PairwiseLogitLikelihood, PairwiseProbitLikelihood),
):
X_dim = 2
_, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape,
X_dim=X_dim,
likelihood_cls=likelihood_cls,
)
train_X = model_kwargs["datapoints"]
train_comp = model_kwargs["comparisons"]
# Test consolidation
i1, i2 = train_X.shape[-2], train_X.shape[-2] + 1
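            # indices i1, i2 point at the duplicated copies of the first two rows
            # appended to dup_X below, so consolidation should map them back to
            # the original rows 0 and 1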
dup_comp = torch.cat(
[
train_comp,
torch.tensor(
[[i1, i2]], dtype=train_comp.dtype, device=train_comp.device
).expand(*batch_shape, 1, 2),
],
dim=-2,
)
dup_X = torch.cat([train_X, train_X[..., :2, :]], dim=-2)
model = PairwiseGP(datapoints=dup_X, comparisons=dup_comp)
self.assertIs(dup_X, model.unconsolidated_datapoints)
self.assertIs(dup_comp, model.unconsolidated_comparisons)
if batch_shape:
self.assertIs(dup_X, model.consolidated_datapoints)
self.assertIs(dup_comp, model.consolidated_comparisons)
self.assertIs(model.utility, model.unconsolidated_utility)
else:
self.assertFalse(torch.equal(dup_X, model.consolidated_datapoints))
self.assertFalse(torch.equal(dup_comp, model.consolidated_comparisons))
self.assertFalse(
torch.equal(model.utility, model.unconsolidated_utility)
)
# calling forward with duplicated datapoints should work after consolidation
mll = PairwiseLaplaceMarginalLogLikelihood(model.likelihood, model)
# make sure model is in training mode
self.assertTrue(model.training)
pred = model(dup_X)
# posterior shape in training should match the consolidated utility
self.assertEqual(pred.shape(), model.utility.shape)
if batch_shape:
# do not perform consolidation in batch mode
# because the block structure cannot be guaranteed
self.assertEqual(pred.shape(), dup_X.shape[:-1])
else:
self.assertEqual(pred.shape(), train_X.shape[:-1])
            # Passing the original comparisons through the mll should work
mll(pred, dup_comp)
def test_condition_on_observations(self) -> None:
for batch_shape, likelihood_cls in itertools.product(
(torch.Size(), torch.Size([2])),
(PairwiseLogitLikelihood, PairwiseProbitLikelihood),
):
tkwargs = {"device": self.device, "dtype": self.dtype}
X_dim = 2
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape,
X_dim=X_dim,
likelihood_cls=likelihood_cls,
)
train_X = model_kwargs["datapoints"]
train_comp = model_kwargs["comparisons"]
# evaluate model
model.posterior(torch.rand(torch.Size([4, X_dim]), **tkwargs))
# test condition_on_observations
# test condition_on_observations with prior mode
prior_m = PairwiseGP(None, None).to(**tkwargs)
cond_m = prior_m.condition_on_observations(train_X, train_comp)
self.assertIs(cond_m.datapoints, train_X)
self.assertIs(cond_m.comparisons, train_comp)
# fantasize at different input points
fant_shape = torch.Size([2])
X_fant, comp_fant = self._make_rand_mini_data(
batch_shape=fant_shape + batch_shape,
X_dim=X_dim,
)
# cannot condition on non-pairwise Ys
with self.assertRaises(RuntimeError):
model.condition_on_observations(X_fant, comp_fant[..., 0])
cm = model.condition_on_observations(X_fant, comp_fant)
# make sure it's a deep copy
self.assertTrue(model is not cm)
# fantasize at same input points (check proper broadcasting)
cm_same_inputs = model.condition_on_observations(X_fant[0], comp_fant)
test_Xs = [
# test broadcasting single input across fantasy and model batches
torch.rand(4, X_dim, **tkwargs),
# separate input for each model batch and broadcast across
# fantasy batches
torch.rand(batch_shape + torch.Size([4, X_dim]), **tkwargs),
# separate input for each model and fantasy batch
torch.rand(
fant_shape + batch_shape + torch.Size([4, X_dim]), **tkwargs
),
]
for test_X in test_Xs:
posterior = cm.posterior(test_X)
self.assertEqual(
posterior.mean.shape, fant_shape + batch_shape + torch.Size([4, 1])
)
posterior_same_inputs = cm_same_inputs.posterior(test_X)
self.assertEqual(
posterior_same_inputs.mean.shape,
fant_shape + batch_shape + torch.Size([4, 1]),
)
# check that fantasies of batched model are correct
if len(batch_shape) > 0 and test_X.dim() == 2:
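                    # strip the batch dimension so the state dict can be loaded
                    # into an equivalent non-batched model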
state_dict_non_batch = {
key: (val[0] if val.numel() > 1 else val)
for key, val in model.state_dict().items()
}
model_kwargs_non_batch = {
"datapoints": model_kwargs["datapoints"][0],
"comparisons": model_kwargs["comparisons"][0],
"likelihood": likelihood_cls(),
}
model_non_batch = model.__class__(**model_kwargs_non_batch)
model_non_batch.load_state_dict(state_dict_non_batch)
model_non_batch.eval()
model_non_batch.posterior(
torch.rand(torch.Size([4, X_dim]), **tkwargs)
)
cm_non_batch = model_non_batch.condition_on_observations(
X_fant[0][0], comp_fant[:, 0, :]
)
non_batch_posterior = cm_non_batch.posterior(test_X)
self.assertAllClose(
posterior_same_inputs.mean[:, 0, ...],
non_batch_posterior.mean,
atol=1e-3,
)
self.assertAllClose(
posterior_same_inputs.distribution.covariance_matrix[
:, 0, :, :
],
non_batch_posterior.distribution.covariance_matrix,
atol=1e-3,
)
def test_fantasize(self) -> None:
for batch_shape, likelihood_cls in itertools.product(
(torch.Size(), torch.Size([2])),
(PairwiseLogitLikelihood, PairwiseProbitLikelihood),
):
tkwargs = {"device": self.device, "dtype": self.dtype}
X_dim = 2
model, _ = self._get_model_and_data(
batch_shape=batch_shape,
X_dim=X_dim,
likelihood_cls=likelihood_cls,
)
# fantasize
X_f = torch.rand(
torch.Size(batch_shape + torch.Size([4, X_dim])), **tkwargs
)
sampler = PairwiseSobolQMCNormalSampler(sample_shape=torch.Size([3]))
fm = model.fantasize(X=X_f, sampler=sampler)
self.assertIsInstance(fm, model.__class__)
fm = model.fantasize(X=X_f, sampler=sampler, observation_noise=False)
self.assertIsInstance(fm, model.__class__)
def test_load_state_dict(self) -> None:
model, _ = self._get_model_and_data(batch_shape=[])
sd = model.state_dict()
with self.assertRaises(UnsupportedError):
model.load_state_dict(sd, strict=True)
# Set instance buffers to None
for buffer_name in model._buffer_names:
model.register_buffer(buffer_name, None)
# Check that instance buffers were not restored
_ = model.load_state_dict(sd)
for buffer_name in model._buffer_names:
self.assertIsNone(model.get_buffer(buffer_name))
def test_helper_functions(self) -> None:
for batch_shape in (torch.Size(), torch.Size([2])):
tkwargs = {"device": self.device, "dtype": self.dtype}
            # M is all ones and hence only borderline PSD (eigenvalues 2 and 0),
            # so an exact Cholesky fails
M = torch.ones((*batch_shape, 2, 2), **tkwargs)
with self.assertRaises(torch._C._LinAlgError):
torch.linalg.cholesky(M)
# This should work fine
_ensure_psd_with_jitter(M)
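            # bad_M has eigenvalues 3 and -1; it is indefinite, so no amount of
            # small diagonal jitter can make it PSD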
bad_M = torch.tensor([[1.0, 2.0], [2.0, 1.0]], **tkwargs).expand(
(*batch_shape, 2, 2)
)
with self.assertRaises(NotPSDError):
_ensure_psd_with_jitter(bad_M)
class TestPairwiseGP_float32(TestPairwiseGP):
"""Runs tests from TestPairwiseGP in single precision."""
def setUp(self, suppress_input_warnings: bool = True) -> None:
super().setUp(suppress_input_warnings)
self.dtype = torch.float32
warnings.filterwarnings(
"ignore",
category=InputDataWarning,
message=_get_single_precision_warning(str(torch.float32)),
)
def test_init_warns_on_single_precision(self) -> None:
with self.assertWarnsRegex(
InputDataWarning,
expected_regex=_get_single_precision_warning(str(torch.float32)),
):
self._get_model_and_data(batch_shape=torch.Size([]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import torch
from botorch.exceptions.warnings import OptimizationWarning
from botorch.fit import fit_gpytorch_mll
from botorch.models.gp_regression import (
FixedNoiseGP,
HeteroskedasticSingleTaskGP,
SingleTaskGP,
)
from botorch.models.transforms import Normalize, Standardize
from botorch.models.transforms.input import InputStandardize
from botorch.models.utils import add_output_dim
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling import SobolQMCNormalSampler
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.sampling import manual_seed
from botorch.utils.testing import _get_random_data, BotorchTestCase
from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel
from gpytorch.likelihoods import (
_GaussianLikelihoodBase,
FixedNoiseGaussianLikelihood,
GaussianLikelihood,
HeteroskedasticNoise,
)
from gpytorch.means import ConstantMean, ZeroMean
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.mlls.noise_model_added_loss_term import NoiseModelAddedLossTerm
from gpytorch.priors import GammaPrior
class TestSingleTaskGP(BotorchTestCase):
def _get_model_and_data(
self,
batch_shape,
m,
outcome_transform=None,
input_transform=None,
extra_model_kwargs=None,
**tkwargs,
):
extra_model_kwargs = extra_model_kwargs or {}
train_X, train_Y = _get_random_data(batch_shape=batch_shape, m=m, **tkwargs)
model_kwargs = {
"train_X": train_X,
"train_Y": train_Y,
"outcome_transform": outcome_transform,
"input_transform": input_transform,
}
model = SingleTaskGP(**model_kwargs, **extra_model_kwargs)
return model, model_kwargs
def _get_extra_model_kwargs(self):
return {
"mean_module": ZeroMean(),
"covar_module": RBFKernel(use_ard=False),
"likelihood": GaussianLikelihood(),
}
def test_gp(self, double_only: bool = False):
bounds = torch.tensor([[-1.0], [1.0]])
for batch_shape, m, dtype, use_octf, use_intf in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.double,) if double_only else (torch.float, torch.double),
(False, True),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
intf = (
Normalize(d=1, bounds=bounds.to(**tkwargs), transform_on_train=True)
if use_intf
else None
)
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape,
m=m,
outcome_transform=octf,
input_transform=intf,
**tkwargs,
)
mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
# test init
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, ScaleKernel)
matern_kernel = model.covar_module.base_kernel
self.assertIsInstance(matern_kernel, MaternKernel)
self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
if use_octf:
self.assertIsInstance(model.outcome_transform, Standardize)
if use_intf:
self.assertIsInstance(model.input_transform, Normalize)
# permute output dim
train_X, train_Y, _ = model._transform_tensor_args(
X=model_kwargs["train_X"], Y=model_kwargs["train_Y"]
)
# check that the train inputs have been transformed and set on the model
self.assertTrue(torch.equal(model.train_inputs[0], intf(train_X)))
# test param sizes
params = dict(model.named_parameters())
for p in params:
self.assertEqual(
params[p].numel(), m * torch.tensor(batch_shape).prod().item()
)
# test posterior
# test non batch evaluation
X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
expected_shape = batch_shape + torch.Size([3, m])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
self.assertEqual(posterior.variance.shape, expected_shape)
# test adding observation noise
posterior_pred = model.posterior(X, observation_noise=True)
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
self.assertEqual(posterior_pred.mean.shape, expected_shape)
self.assertEqual(posterior_pred.variance.shape, expected_shape)
if use_octf:
# ensure un-transformation is applied
tmp_tf = model.outcome_transform
del model.outcome_transform
pp_tf = model.posterior(X, observation_noise=True)
model.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(pp_tf).variance
self.assertAllClose(posterior_pred.variance, expected_var)
else:
pvar = posterior_pred.variance
pvar_exp = _get_pvar_expected(posterior, model, X, m)
self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5)
# Tensor valued observation noise.
obs_noise = torch.rand(X.shape, **tkwargs)
posterior_pred = model.posterior(X, observation_noise=obs_noise)
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
self.assertEqual(posterior_pred.mean.shape, expected_shape)
self.assertEqual(posterior_pred.variance.shape, expected_shape)
if use_octf:
_, obs_noise = model.outcome_transform.untransform(obs_noise, obs_noise)
self.assertAllClose(posterior_pred.variance, posterior.variance + obs_noise)
# test batch evaluation
X = torch.rand(2, *batch_shape, 3, 1, **tkwargs)
expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
# test adding observation noise in batch mode
posterior_pred = model.posterior(X, observation_noise=True)
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
self.assertEqual(posterior_pred.mean.shape, expected_shape)
if use_octf:
# ensure un-transformation is applied
tmp_tf = model.outcome_transform
del model.outcome_transform
pp_tf = model.posterior(X, observation_noise=True)
model.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(pp_tf).variance
self.assertAllClose(posterior_pred.variance, expected_var)
else:
pvar = posterior_pred.variance
pvar_exp = _get_pvar_expected(posterior, model, X, m)
self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5)
def test_custom_init(self):
extra_model_kwargs = self._get_extra_model_kwargs()
for batch_shape, m, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape,
m=m,
extra_model_kwargs=extra_model_kwargs,
**tkwargs,
)
self.assertEqual(model.mean_module, extra_model_kwargs["mean_module"])
self.assertEqual(model.covar_module, extra_model_kwargs["covar_module"])
if "likelihood" in extra_model_kwargs:
self.assertEqual(model.likelihood, extra_model_kwargs["likelihood"])
def test_condition_on_observations(self):
for batch_shape, m, dtype, use_octf in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape, m=m, outcome_transform=octf, **tkwargs
)
# evaluate model
model.posterior(torch.rand(torch.Size([4, 1]), **tkwargs))
# test condition_on_observations
fant_shape = torch.Size([2])
# fantasize at different input points
X_fant, Y_fant = _get_random_data(
batch_shape=fant_shape + batch_shape, m=m, n=3, **tkwargs
)
c_kwargs = (
{"noise": torch.full_like(Y_fant, 0.01)}
if isinstance(model, FixedNoiseGP)
else {}
)
cm = model.condition_on_observations(X_fant, Y_fant, **c_kwargs)
# fantasize at same input points (check proper broadcasting)
c_kwargs_same_inputs = (
{"noise": torch.full_like(Y_fant[0], 0.01)}
if isinstance(model, FixedNoiseGP)
else {}
)
cm_same_inputs = model.condition_on_observations(
X_fant[0], Y_fant, **c_kwargs_same_inputs
)
test_Xs = [
# test broadcasting single input across fantasy and model batches
torch.rand(4, 1, **tkwargs),
# separate input for each model batch and broadcast across
# fantasy batches
torch.rand(batch_shape + torch.Size([4, 1]), **tkwargs),
# separate input for each model and fantasy batch
torch.rand(fant_shape + batch_shape + torch.Size([4, 1]), **tkwargs),
]
for test_X in test_Xs:
posterior = cm.posterior(test_X)
self.assertEqual(
posterior.mean.shape, fant_shape + batch_shape + torch.Size([4, m])
)
posterior_same_inputs = cm_same_inputs.posterior(test_X)
self.assertEqual(
posterior_same_inputs.mean.shape,
fant_shape + batch_shape + torch.Size([4, m]),
)
# check that fantasies of batched model are correct
if len(batch_shape) > 0 and test_X.dim() == 2:
state_dict_non_batch = {
key: (val[0] if val.numel() > 1 else val)
for key, val in model.state_dict().items()
}
model_kwargs_non_batch = {
"train_X": model_kwargs["train_X"][0],
"train_Y": model_kwargs["train_Y"][0],
}
if "train_Yvar" in model_kwargs:
model_kwargs_non_batch["train_Yvar"] = model_kwargs[
"train_Yvar"
][0]
if model_kwargs["outcome_transform"] is not None:
model_kwargs_non_batch["outcome_transform"] = Standardize(m=m)
model_non_batch = type(model)(**model_kwargs_non_batch)
model_non_batch.load_state_dict(state_dict_non_batch)
model_non_batch.eval()
model_non_batch.likelihood.eval()
model_non_batch.posterior(torch.rand(torch.Size([4, 1]), **tkwargs))
c_kwargs = (
{"noise": torch.full_like(Y_fant[0, 0, :], 0.01)}
if isinstance(model, FixedNoiseGP)
else {}
)
cm_non_batch = model_non_batch.condition_on_observations(
X_fant[0][0], Y_fant[:, 0, :], **c_kwargs
)
non_batch_posterior = cm_non_batch.posterior(test_X)
self.assertTrue(
torch.allclose(
posterior_same_inputs.mean[:, 0, ...],
non_batch_posterior.mean,
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
posterior_same_inputs.distribution.covariance_matrix[
:, 0, :, :
],
non_batch_posterior.distribution.covariance_matrix,
atol=1e-3,
)
)
def test_fantasize(self):
for batch_shape, m, dtype, use_octf in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
model, _ = self._get_model_and_data(
batch_shape=batch_shape, m=m, outcome_transform=octf, **tkwargs
)
# fantasize
X_f = torch.rand(torch.Size(batch_shape + torch.Size([4, 1])), **tkwargs)
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([3]))
fm = model.fantasize(X=X_f, sampler=sampler)
self.assertIsInstance(fm, model.__class__)
fm = model.fantasize(X=X_f, sampler=sampler, observation_noise=False)
self.assertIsInstance(fm, model.__class__)
# check that input transforms are applied to X.
tkwargs = {"device": self.device, "dtype": torch.float}
intf = Normalize(d=1, bounds=torch.tensor([[0], [10]], **tkwargs))
model, _ = self._get_model_and_data(
batch_shape=torch.Size(),
m=1,
input_transform=intf,
**tkwargs,
)
X_f = torch.rand(4, 1, **tkwargs)
fm = model.fantasize(
X_f, sampler=SobolQMCNormalSampler(sample_shape=torch.Size([3]))
)
self.assertTrue(
torch.allclose(fm.train_inputs[0][:, -4:], intf(X_f).expand(3, -1, -1))
)
def test_subset_model(self):
for batch_shape, dtype, use_octf in itertools.product(
(torch.Size(), torch.Size([2])), (torch.float, torch.double), (True, False)
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=2, batch_shape=batch_shape) if use_octf else None
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape, m=2, outcome_transform=octf, **tkwargs
)
subset_model = model.subset_output([0])
X = torch.rand(torch.Size(batch_shape + torch.Size([3, 1])), **tkwargs)
p = model.posterior(X)
p_sub = subset_model.posterior(X)
self.assertTrue(
torch.allclose(p_sub.mean, p.mean[..., [0]], atol=1e-4, rtol=1e-4)
)
self.assertTrue(
torch.allclose(
p_sub.variance, p.variance[..., [0]], atol=1e-4, rtol=1e-4
)
)
# test subsetting each of the outputs (follows a different code branch)
subset_all_model = model.subset_output([0, 1])
p_sub_all = subset_all_model.posterior(X)
self.assertAllClose(p_sub_all.mean, p.mean)
# subsetting should still return a copy
self.assertNotEqual(model, subset_all_model)
def test_construct_inputs(self):
for batch_shape, dtype in itertools.product(
(torch.Size(), torch.Size([2])), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape, m=1, **tkwargs
)
X = model_kwargs["train_X"]
Y = model_kwargs["train_Y"]
training_data = SupervisedDataset(X, Y)
data_dict = model.construct_inputs(training_data)
self.assertTrue(X.equal(data_dict["train_X"]))
self.assertTrue(Y.equal(data_dict["train_Y"]))
def test_set_transformed_inputs(self):
        # This is intended to catch https://github.com/pytorch/botorch/issues/1078.
# More general testing of _set_transformed_inputs is done under ModelListGP.
X = torch.rand(5, 2)
Y = X**2
for tf_class in [Normalize, InputStandardize]:
intf = tf_class(d=2)
model = SingleTaskGP(X, Y, input_transform=intf)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll, optimizer_kwargs={"options": {"maxiter": 2}})
tf_X = intf(X)
self.assertEqual(X.shape, tf_X.shape)
class TestFixedNoiseGP(TestSingleTaskGP):
def _get_model_and_data(
self,
batch_shape,
m,
outcome_transform=None,
input_transform=None,
extra_model_kwargs=None,
**tkwargs,
):
extra_model_kwargs = extra_model_kwargs or {}
train_X, train_Y = _get_random_data(batch_shape=batch_shape, m=m, **tkwargs)
model_kwargs = {
"train_X": train_X,
"train_Y": train_Y,
"train_Yvar": torch.full_like(train_Y, 0.01),
"input_transform": input_transform,
"outcome_transform": outcome_transform,
}
model = FixedNoiseGP(**model_kwargs, **extra_model_kwargs)
return model, model_kwargs
def _get_extra_model_kwargs(self):
return {
"mean_module": ZeroMean(),
"covar_module": RBFKernel(use_ard=False),
}
def test_fixed_noise_likelihood(self):
for batch_shape, m, dtype in itertools.product(
(torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape, m=m, **tkwargs
)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
self.assertTrue(
torch.equal(
model.likelihood.noise.contiguous().view(-1),
model_kwargs["train_Yvar"].contiguous().view(-1),
)
)
def test_construct_inputs(self):
for batch_shape, dtype in itertools.product(
(torch.Size(), torch.Size([2])), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
model, model_kwargs = self._get_model_and_data(
batch_shape=batch_shape, m=1, **tkwargs
)
X = model_kwargs["train_X"]
Y = model_kwargs["train_Y"]
Yvar = model_kwargs["train_Yvar"]
training_data = SupervisedDataset(X, Y, Yvar)
data_dict = model.construct_inputs(training_data)
self.assertTrue(X.equal(data_dict["train_X"]))
self.assertTrue(Y.equal(data_dict["train_Y"]))
self.assertTrue(Yvar.equal(data_dict["train_Yvar"]))
class TestHeteroskedasticSingleTaskGP(TestSingleTaskGP):
def _get_model_and_data(
self, batch_shape, m, outcome_transform=None, input_transform=None, **tkwargs
):
with manual_seed(0):
train_X, train_Y = _get_random_data(batch_shape=batch_shape, m=m, **tkwargs)
train_Yvar = (0.1 + 0.1 * torch.rand_like(train_Y)) ** 2
model_kwargs = {
"train_X": train_X,
"train_Y": train_Y,
"train_Yvar": train_Yvar,
"input_transform": input_transform,
"outcome_transform": outcome_transform,
}
model = HeteroskedasticSingleTaskGP(**model_kwargs)
return model, model_kwargs
def test_custom_init(self) -> None:
"""
This test exists because `TestHeteroskedasticSingleTaskGP` inherits from
`TestSingleTaskGP`, which has a `test_custom_init` method that isn't relevant
for `TestHeteroskedasticSingleTaskGP`.
"""
def test_gp(self):
super().test_gp(double_only=True)
def test_fantasize(self) -> None:
"""
This test exists because `TestHeteroskedasticSingleTaskGP` inherits from
        `TestSingleTaskGP`, whose `test_fantasize` method isn't relevant
for `TestHeteroskedasticSingleTaskGP`.
"""
def test_heteroskedastic_likelihood(self):
for batch_shape, m, dtype in itertools.product(
(torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
model, _ = self._get_model_and_data(batch_shape=batch_shape, m=m, **tkwargs)
self.assertIsInstance(model.likelihood, _GaussianLikelihoodBase)
self.assertFalse(isinstance(model.likelihood, GaussianLikelihood))
self.assertIsInstance(model.likelihood.noise_covar, HeteroskedasticNoise)
self.assertIsInstance(
model.likelihood.noise_covar.noise_model, SingleTaskGP
)
self.assertIsInstance(
model._added_loss_terms["noise_added_loss"], NoiseModelAddedLossTerm
)
def test_condition_on_observations(self):
with self.assertRaises(NotImplementedError):
super().test_condition_on_observations()
def test_subset_model(self):
with self.assertRaises(NotImplementedError):
super().test_subset_model()
def _get_pvar_expected(posterior, model, X, m):
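    # Push the model posterior through the likelihood to get the expected
    # predictive variance, handling fixed-noise likelihoods and multi-output
    # models (the latter via add_output_dim and per-output stacking).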
X = model.transform_inputs(X)
lh_kwargs = {}
if isinstance(model.likelihood, FixedNoiseGaussianLikelihood):
lh_kwargs["noise"] = model.likelihood.noise.mean().expand(X.shape[:-1])
if m == 1:
return model.likelihood(
posterior.distribution, X, **lh_kwargs
).variance.unsqueeze(-1)
X_, odi = add_output_dim(X=X, original_batch_shape=model._input_batch_shape)
pvar_exp = model.likelihood(model(X_), X_, **lh_kwargs).variance
return torch.stack([pvar_exp.select(dim=odi, index=i) for i in range(m)], dim=-1)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
from typing import Tuple
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import OptimizationWarning
from botorch.fit import fit_gpytorch_mll
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.gp_regression_fidelity import (
FixedNoiseMultiFidelityGP,
SingleTaskMultiFidelityGP,
)
from botorch.models.transforms import Normalize, Standardize
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling import SobolQMCNormalSampler
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.testing import _get_random_data, BotorchTestCase
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from torch import Tensor
def _get_random_data_with_fidelity(
batch_shape: torch.Size, m: int, n_fidelity: int, d: int = 1, n: int = 10, **tkwargs
) -> Tuple[Tensor, Tensor]:
r"""Construct test data.
    For this test, by convention, the trailing dimensions are the fidelity dimensions.
"""
train_x, train_y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, n=n, **tkwargs
)
s = torch.rand(n, n_fidelity, **tkwargs).repeat(batch_shape + torch.Size([1, 1]))
train_x = torch.cat((train_x, s), dim=-1)
train_y = train_y + (1 - s).pow(2).sum(dim=-1).unsqueeze(-1)
return train_x, train_y
class TestSingleTaskMultiFidelityGP(BotorchTestCase):
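    # (iteration_fidelity, data_fidelities) combinations exercised by the tests
    # below; negative indices address fidelity columns from the end of X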
FIDELITY_TEST_PAIRS = (
(None, [1]),
(1, None),
(None, [-1]),
(-1, None),
(1, [2]),
(1, [2, 3]),
(None, [1, 2]),
(-1, [1, -2]),
)
def _get_model_and_data(
self,
iteration_fidelity,
data_fidelities,
batch_shape,
m,
lin_truncated,
outcome_transform=None,
input_transform=None,
**tkwargs,
):
model_kwargs = {}
n_fidelity = iteration_fidelity is not None
if data_fidelities is not None:
n_fidelity += len(data_fidelities)
model_kwargs["data_fidelities"] = data_fidelities
train_X, train_Y = _get_random_data_with_fidelity(
batch_shape=batch_shape, m=m, n_fidelity=n_fidelity, **tkwargs
)
model_kwargs.update(
{
"train_X": train_X,
"train_Y": train_Y,
"iteration_fidelity": iteration_fidelity,
"linear_truncated": lin_truncated,
}
)
if outcome_transform is not None:
model_kwargs["outcome_transform"] = outcome_transform
if input_transform is not None:
model_kwargs["input_transform"] = input_transform
model = SingleTaskMultiFidelityGP(**model_kwargs)
return model, model_kwargs
def test_init_error(self):
train_X = torch.rand(2, 2, device=self.device)
        train_Y = torch.rand(2, 1, device=self.device)
for lin_truncated in (True, False):
with self.assertRaises(UnsupportedError):
SingleTaskMultiFidelityGP(
train_X, train_Y, linear_truncated=lin_truncated
)
with self.assertRaises(ValueError):
SingleTaskMultiFidelityGP(
train_X, train_Y, data_fidelities=[1], data_fidelity=2
)
with self.assertWarnsRegex(DeprecationWarning, "data_fidelity"):
SingleTaskMultiFidelityGP(
train_X, train_Y, data_fidelity=1, linear_truncated=False
)
def test_gp(self):
for (iteration_fidelity, data_fidelities) in self.FIDELITY_TEST_PAIRS:
num_dim = 1 + (iteration_fidelity is not None)
if data_fidelities is not None:
num_dim += len(data_fidelities)
bounds = torch.zeros(2, num_dim)
bounds[1] = 1
for (
batch_shape,
m,
dtype,
lin_trunc,
use_octf,
use_intf,
) in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
(False, True),
(False, True),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
intf = Normalize(d=num_dim, bounds=bounds) if use_intf else None
model, model_kwargs = self._get_model_and_data(
iteration_fidelity=iteration_fidelity,
data_fidelities=data_fidelities,
batch_shape=batch_shape,
m=m,
lin_truncated=lin_trunc,
outcome_transform=octf,
input_transform=intf,
**tkwargs,
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll.to(**tkwargs)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
fit_gpytorch_mll(
mll,
optimizer_kwargs={"options": {"maxiter": 1}},
sequential=False,
)
# test init
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, ScaleKernel)
if use_octf:
self.assertIsInstance(model.outcome_transform, Standardize)
if use_intf:
self.assertIsInstance(model.input_transform, Normalize)
# permute output dim
train_X, train_Y, _ = model._transform_tensor_args(
X=model_kwargs["train_X"], Y=model_kwargs["train_Y"]
)
# check that the train inputs have been transformed and set on the
# model
self.assertTrue(torch.equal(model.train_inputs[0], intf(train_X)))
# test param sizes
params = dict(model.named_parameters())
if data_fidelities is not None and len(data_fidelities) == 1:
for p in params:
self.assertEqual(
params[p].numel(),
m * torch.tensor(batch_shape).prod().item(),
)
# test posterior
# test non batch evaluation
X = torch.rand(*batch_shape, 3, num_dim, **tkwargs)
expected_shape = batch_shape + torch.Size([3, m])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
self.assertEqual(posterior.variance.shape, expected_shape)
if use_octf:
# ensure un-transformation is applied
tmp_tf = model.outcome_transform
del model.outcome_transform
pp_tf = model.posterior(X)
model.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(pp_tf).variance
self.assertAllClose(posterior.variance, expected_var)
# test batch evaluation
X = torch.rand(2, *batch_shape, 3, num_dim, **tkwargs)
expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
self.assertEqual(posterior.variance.shape, expected_shape)
if use_octf:
# ensure un-transformation is applied
tmp_tf = model.outcome_transform
del model.outcome_transform
pp_tf = model.posterior(X)
model.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(pp_tf).variance
self.assertAllClose(posterior.variance, expected_var)
def test_condition_on_observations(self):
for (iteration_fidelity, data_fidelities) in self.FIDELITY_TEST_PAIRS:
n_fidelity = iteration_fidelity is not None
if data_fidelities is not None:
n_fidelity += len(data_fidelities)
num_dim = 1 + n_fidelity
for batch_shape, m, dtype, lin_trunc in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
model, model_kwargs = self._get_model_and_data(
iteration_fidelity=iteration_fidelity,
data_fidelities=data_fidelities,
batch_shape=batch_shape,
m=m,
lin_truncated=lin_trunc,
**tkwargs,
)
# evaluate model
model.posterior(torch.rand(torch.Size([4, num_dim]), **tkwargs))
# test condition_on_observations
fant_shape = torch.Size([2])
# fantasize at different input points
X_fant, Y_fant = _get_random_data_with_fidelity(
fant_shape + batch_shape, m, n_fidelity=n_fidelity, n=3, **tkwargs
)
c_kwargs = (
{"noise": torch.full_like(Y_fant, 0.01)}
if isinstance(model, FixedNoiseGP)
else {}
)
cm = model.condition_on_observations(X_fant, Y_fant, **c_kwargs)
                # fantasize at the same input points (check proper broadcasting)
c_kwargs_same_inputs = (
{"noise": torch.full_like(Y_fant[0], 0.01)}
if isinstance(model, FixedNoiseGP)
else {}
)
cm_same_inputs = model.condition_on_observations(
X_fant[0], Y_fant, **c_kwargs_same_inputs
)
test_Xs = [
# test broadcasting single input across fantasy and
# model batches
torch.rand(4, num_dim, **tkwargs),
# separate input for each model batch and broadcast across
# fantasy batches
torch.rand(batch_shape + torch.Size([4, num_dim]), **tkwargs),
# separate input for each model and fantasy batch
torch.rand(
fant_shape + batch_shape + torch.Size([4, num_dim]), **tkwargs
),
]
for test_X in test_Xs:
posterior = cm.posterior(test_X)
self.assertEqual(
posterior.mean.shape,
fant_shape + batch_shape + torch.Size([4, m]),
)
posterior_same_inputs = cm_same_inputs.posterior(test_X)
self.assertEqual(
posterior_same_inputs.mean.shape,
fant_shape + batch_shape + torch.Size([4, m]),
)
# check that fantasies of batched model are correct
if len(batch_shape) > 0 and test_X.dim() == 2:
state_dict_non_batch = {
key: (val[0] if val.numel() > 1 else val)
for key, val in model.state_dict().items()
}
model_kwargs_non_batch = {}
for k, v in model_kwargs.items():
if k in (
"iteration_fidelity",
"data_fidelities",
"linear_truncated",
"input_transform",
):
model_kwargs_non_batch[k] = v
else:
model_kwargs_non_batch[k] = v[0]
model_non_batch = type(model)(**model_kwargs_non_batch)
model_non_batch.load_state_dict(state_dict_non_batch)
model_non_batch.eval()
model_non_batch.likelihood.eval()
model_non_batch.posterior(
torch.rand(torch.Size([4, num_dim]), **tkwargs)
)
c_kwargs = (
{"noise": torch.full_like(Y_fant[0, 0, :], 0.01)}
if isinstance(model, FixedNoiseGP)
else {}
)
mnb = model_non_batch
cm_non_batch = mnb.condition_on_observations(
X_fant[0][0], Y_fant[:, 0, :], **c_kwargs
)
non_batch_posterior = cm_non_batch.posterior(test_X)
self.assertTrue(
torch.allclose(
posterior_same_inputs.mean[:, 0, ...],
non_batch_posterior.mean,
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
posterior_same_inputs.distribution.covariance_matrix[
:, 0, :, :
],
non_batch_posterior.distribution.covariance_matrix,
atol=1e-3,
)
)
def test_fantasize(self):
for (iteration_fidelity, data_fidelities) in self.FIDELITY_TEST_PAIRS:
n_fidelity = iteration_fidelity is not None
if data_fidelities is not None:
n_fidelity += len(data_fidelities)
num_dim = 1 + n_fidelity
for batch_shape, m, dtype, lin_trunc in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
model, model_kwargs = self._get_model_and_data(
iteration_fidelity=iteration_fidelity,
data_fidelities=data_fidelities,
batch_shape=batch_shape,
m=m,
lin_truncated=lin_trunc,
**tkwargs,
)
# fantasize
X_f = torch.rand(
torch.Size(batch_shape + torch.Size([4, num_dim])), **tkwargs
)
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([3]))
fm = model.fantasize(X=X_f, sampler=sampler)
self.assertIsInstance(fm, model.__class__)
fm = model.fantasize(X=X_f, sampler=sampler, observation_noise=False)
self.assertIsInstance(fm, model.__class__)
def test_subset_model(self):
for (iteration_fidelity, data_fidelities) in self.FIDELITY_TEST_PAIRS:
num_dim = 1 + (iteration_fidelity is not None)
if data_fidelities is not None:
num_dim += len(data_fidelities)
for batch_shape, dtype, lin_trunc in itertools.product(
(torch.Size(), torch.Size([2])),
(torch.float, torch.double),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
model, _ = self._get_model_and_data(
iteration_fidelity=iteration_fidelity,
data_fidelities=data_fidelities,
batch_shape=batch_shape,
m=2,
lin_truncated=lin_trunc,
outcome_transform=None, # TODO: Subset w/ outcome transform
**tkwargs,
)
subset_model = model.subset_output([0])
X = torch.rand(
torch.Size(batch_shape + torch.Size([3, num_dim])), **tkwargs
)
p = model.posterior(X)
p_sub = subset_model.posterior(X)
self.assertTrue(
torch.allclose(p_sub.mean, p.mean[..., [0]], atol=1e-4, rtol=1e-4)
)
self.assertTrue(
torch.allclose(
p_sub.variance, p.variance[..., [0]], atol=1e-4, rtol=1e-4
)
)
def test_construct_inputs(self):
for (iteration_fidelity, data_fidelities) in self.FIDELITY_TEST_PAIRS:
for batch_shape, dtype, lin_trunc in itertools.product(
(torch.Size(), torch.Size([2])),
(torch.float, torch.double),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
model, kwargs = self._get_model_and_data(
iteration_fidelity=iteration_fidelity,
data_fidelities=data_fidelities,
batch_shape=batch_shape,
m=1,
lin_truncated=lin_trunc,
**tkwargs,
)
training_data = SupervisedDataset(kwargs["train_X"], kwargs["train_Y"])
# missing fidelity features
with self.assertRaisesRegex(TypeError, "argument: 'fidelity_features'"):
model.construct_inputs(training_data)
data_dict = model.construct_inputs(training_data, fidelity_features=[1])
self.assertTrue("data_fidelities" in data_dict)
self.assertEqual(data_dict["data_fidelities"], [1])
self.assertTrue(kwargs["train_X"].equal(data_dict["train_X"]))
self.assertTrue(kwargs["train_Y"].equal(data_dict["train_Y"]))
class TestFixedNoiseMultiFidelityGP(TestSingleTaskMultiFidelityGP):
def _get_model_and_data(
self,
iteration_fidelity,
data_fidelities,
batch_shape,
m,
lin_truncated,
outcome_transform=None,
input_transform=None,
**tkwargs,
):
model_kwargs = {}
n_fidelity = iteration_fidelity is not None
if data_fidelities is not None:
n_fidelity += len(data_fidelities)
model_kwargs["data_fidelities"] = data_fidelities
train_X, train_Y = _get_random_data_with_fidelity(
batch_shape=batch_shape, m=m, n_fidelity=n_fidelity, **tkwargs
)
train_Yvar = torch.full_like(train_Y, 0.01)
model_kwargs.update(
{
"train_X": train_X,
"train_Y": train_Y,
"train_Yvar": train_Yvar,
"iteration_fidelity": iteration_fidelity,
"linear_truncated": lin_truncated,
}
)
if outcome_transform is not None:
model_kwargs["outcome_transform"] = outcome_transform
if input_transform is not None:
model_kwargs["input_transform"] = input_transform
model = FixedNoiseMultiFidelityGP(**model_kwargs)
return model, model_kwargs
def test_init_error(self):
train_X = torch.rand(2, 2, device=self.device)
        train_Y = torch.rand(2, 1, device=self.device)
train_Yvar = torch.full_like(train_Y, 0.01)
for lin_truncated in (True, False):
with self.assertRaises(UnsupportedError):
FixedNoiseMultiFidelityGP(
train_X, train_Y, train_Yvar, linear_truncated=lin_truncated
)
with self.assertRaises(ValueError):
FixedNoiseMultiFidelityGP(
train_X, train_Y, train_Yvar, data_fidelities=[1], data_fidelity=2
)
with self.assertWarnsRegex(DeprecationWarning, "data_fidelity"):
FixedNoiseMultiFidelityGP(
train_X, train_Y, train_Yvar, data_fidelity=1, linear_truncated=False
)
def test_fixed_noise_likelihood(self):
for (iteration_fidelity, data_fidelities) in self.FIDELITY_TEST_PAIRS:
for batch_shape, m, dtype, lin_trunc in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
model, model_kwargs = self._get_model_and_data(
iteration_fidelity=iteration_fidelity,
data_fidelities=data_fidelities,
batch_shape=batch_shape,
m=m,
lin_truncated=lin_trunc,
**tkwargs,
)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
self.assertTrue(
torch.equal(
model.likelihood.noise.contiguous().view(-1),
model_kwargs["train_Yvar"].contiguous().view(-1),
)
)
def test_construct_inputs(self):
for (iteration_fidelity, data_fidelities) in self.FIDELITY_TEST_PAIRS:
for batch_shape, dtype, lin_trunc in itertools.product(
(torch.Size(), torch.Size([2])),
(torch.float, torch.double),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
model, kwargs = self._get_model_and_data(
iteration_fidelity=iteration_fidelity,
data_fidelities=data_fidelities,
batch_shape=batch_shape,
m=1,
lin_truncated=lin_trunc,
**tkwargs,
)
training_data = SupervisedDataset(kwargs["train_X"], kwargs["train_Y"])
data_dict = model.construct_inputs(training_data, fidelity_features=[1])
self.assertTrue("train_Yvar" not in data_dict)
# len(Xs) == len(Ys) == 1
training_data = SupervisedDataset(
X=kwargs["train_X"],
Y=kwargs["train_Y"],
Yvar=torch.full(kwargs["train_Y"].shape[:-1] + (1,), 0.1),
)
# missing fidelity features
with self.assertRaisesRegex(TypeError, "argument: 'fidelity_features'"):
model.construct_inputs(training_data)
data_dict = model.construct_inputs(training_data, fidelity_features=[1])
self.assertTrue("train_Yvar" in data_dict)
self.assertEqual(data_dict.get("data_fidelities", None), [1])
self.assertTrue(kwargs["train_X"].equal(data_dict["train_X"]))
self.assertTrue(kwargs["train_Y"].equal(data_dict["train_Y"]))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import math
import warnings
from typing import List, Optional
import torch
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.exceptions import OptimizationWarning
from botorch.fit import fit_gpytorch_mll
from botorch.models.multitask import (
FixedNoiseMultiTaskGP,
KroneckerMultiTaskGP,
MultiTaskGP,
)
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.posteriors import GPyTorchPosterior
from botorch.posteriors.transformed import TransformedPosterior
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from gpytorch.kernels import (
IndexKernel,
MaternKernel,
MultitaskKernel,
RBFKernel,
ScaleKernel,
)
from gpytorch.likelihoods import (
FixedNoiseGaussianLikelihood,
GaussianLikelihood,
MultitaskGaussianLikelihood,
)
from gpytorch.means import ConstantMean, MultitaskMean
from gpytorch.means.linear_mean import LinearMean
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.priors import GammaPrior, LogNormalPrior, SmoothedBoxPrior
from gpytorch.priors.lkj_prior import LKJCovariancePrior
from gpytorch.settings import max_cholesky_size, max_root_decomposition_size
from torch.nn.functional import pad
def _gen_datasets(yvar: Optional[float] = None, **tkwargs):
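    # Build two single-output datasets (sin and cos of the same inputs) plus the
    # stacked training tensors, with the task index prepended as column 0.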
X = torch.linspace(0, 0.95, 10, **tkwargs) + 0.05 * torch.rand(10, **tkwargs)
X = X.unsqueeze(dim=-1)
Y1 = torch.sin(X * (2 * math.pi)) + torch.randn_like(X) * 0.2
Y2 = torch.cos(X * (2 * math.pi)) + torch.randn_like(X) * 0.2
train_X = torch.cat([pad(X, (1, 0), value=i) for i in range(2)])
train_Y = torch.cat([Y1, Y2])
if yvar is None:
return SupervisedDataset.dict_from_iter(X, (Y1, Y2)), (train_X, train_Y)
Yvar1 = torch.full_like(Y1, yvar)
Yvar2 = torch.full_like(Y2, yvar)
train_Yvar = torch.cat([Yvar1, Yvar2])
datasets = {0: SupervisedDataset(X, Y1, Yvar1), 1: SupervisedDataset(X, Y2, Yvar2)}
return datasets, (train_X, train_Y, train_Yvar)
def _gen_model_and_data(
task_feature: int = 0,
output_tasks: Optional[List[int]] = None,
input_transform=None,
outcome_transform=None,
**tkwargs
):
datasets, (train_X, train_Y) = _gen_datasets(**tkwargs)
model = MultiTaskGP(
train_X,
train_Y,
task_feature=task_feature,
output_tasks=output_tasks,
input_transform=input_transform,
outcome_transform=outcome_transform,
)
return model.to(**tkwargs), datasets, (train_X, train_Y)
def _gen_model_single_output(**tkwargs):
_, (train_X, train_Y) = _gen_datasets(**tkwargs)
model = MultiTaskGP(train_X, train_Y, task_feature=0, output_tasks=[1])
return model.to(**tkwargs)
def _gen_fixed_noise_model_and_data(
task_feature: int = 0,
input_transform=None,
outcome_transform=None,
use_fixed_noise_model_class: bool = False,
**tkwargs
):
datasets, (train_X, train_Y, train_Yvar) = _gen_datasets(yvar=0.05, **tkwargs)
model_class = FixedNoiseMultiTaskGP if use_fixed_noise_model_class else MultiTaskGP
model = model_class(
train_X,
train_Y,
train_Yvar=train_Yvar,
task_feature=task_feature,
input_transform=input_transform,
outcome_transform=outcome_transform,
)
return model.to(**tkwargs), datasets, (train_X, train_Y, train_Yvar)
def _gen_fixed_noise_model_single_output(**tkwargs):
_, (train_X, train_Y, train_Yvar) = _gen_datasets(yvar=0.05, **tkwargs)
model = FixedNoiseMultiTaskGP(
train_X, train_Y, train_Yvar, task_feature=0, output_tasks=[1]
)
return model.to(**tkwargs)
def _gen_fixed_prior_model(**tkwargs):
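    # LKJCovariancePrior expects an sd_prior whose event shape matches the number
    # of tasks; setting _event_shape directly is a shortcut used only in this test.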
_, (train_X, train_Y) = _gen_datasets(**tkwargs)
sd_prior = GammaPrior(2.0, 0.15)
sd_prior._event_shape = torch.Size([2])
model = MultiTaskGP(
train_X,
train_Y,
task_feature=0,
task_covar_prior=LKJCovariancePrior(2, 0.6, sd_prior),
)
return model.to(**tkwargs)
def _gen_given_covar_module_model(**tkwargs):
_, (train_X, train_Y) = _gen_datasets(**tkwargs)
model = MultiTaskGP(
train_X,
train_Y,
task_feature=0,
covar_module=RBFKernel(lengthscale_prior=LogNormalPrior(0.0, 1.0)),
)
return model.to(**tkwargs)
def _gen_fixed_noise_and_prior_model(**tkwargs):
_, (train_X, train_Y, train_Yvar) = _gen_datasets(yvar=0.05, **tkwargs)
sd_prior = GammaPrior(2.0, 0.15)
sd_prior._event_shape = torch.Size([2])
model = FixedNoiseMultiTaskGP(
train_X,
train_Y,
train_Yvar,
task_feature=1,
task_covar_prior=LKJCovariancePrior(2, 0.6, sd_prior),
)
return model.to(**tkwargs)
def _gen_fixed_noise_and_given_covar_module_model(**tkwargs):
_, (train_X, train_Y, train_Yvar) = _gen_datasets(yvar=0.05, **tkwargs)
model = FixedNoiseMultiTaskGP(
train_X,
train_Y,
train_Yvar,
task_feature=1,
covar_module=MaternKernel(nu=1.5, lengthscale_prior=GammaPrior(1.0, 1.0)),
)
return model.to(**tkwargs)
def _gen_random_kronecker_mt_data(batch_shape=None, **tkwargs):
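    # All outputs are observed at every input, matching the block (Kronecker)
    # structure that KroneckerMultiTaskGP assumes.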
batch_shape = batch_shape or torch.Size()
train_X = (
torch.linspace(0, 0.95, 10, **tkwargs).unsqueeze(-1).expand(*batch_shape, 10, 1)
)
train_X = train_X + 0.05 * torch.rand(*batch_shape, 10, 2, **tkwargs)
train_y1 = (
torch.sin(train_X[..., 0] * (2 * math.pi))
+ torch.randn_like(train_X[..., 0]) * 0.2
)
train_y2 = (
torch.cos(train_X[..., 1] * (2 * math.pi))
+ torch.randn_like(train_X[..., 0]) * 0.2
)
train_Y = torch.stack([train_y1, train_y2], dim=-1)
return train_X, train_Y
def _gen_kronecker_model_and_data(model_kwargs=None, batch_shape=None, **tkwargs):
model_kwargs = model_kwargs or {}
train_X, train_Y = _gen_random_kronecker_mt_data(batch_shape=batch_shape, **tkwargs)
model = KroneckerMultiTaskGP(train_X, train_Y, **model_kwargs)
return model.to(**tkwargs), train_X, train_Y
class TestMultiTaskGP(BotorchTestCase):
def test_MultiTaskGP(self):
bounds = torch.tensor([[-1.0, 0.0], [1.0, 1.0]])
for dtype, use_intf, use_octf in itertools.product(
(torch.float, torch.double), (False, True), (False, True)
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=1) if use_octf else None
intf = (
Normalize(d=2, bounds=bounds.to(**tkwargs), transform_on_train=True)
if use_intf
else None
)
model, datasets, (train_X, train_Y) = _gen_model_and_data(
input_transform=intf, outcome_transform=octf, **tkwargs
)
self.assertIsInstance(model, MultiTaskGP)
self.assertEqual(model.num_outputs, 2)
self.assertIsInstance(model.likelihood, GaussianLikelihood)
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, ScaleKernel)
matern_kernel = model.covar_module.base_kernel
self.assertIsInstance(matern_kernel, MaternKernel)
self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
self.assertIsInstance(model.task_covar_module, IndexKernel)
self.assertEqual(model._rank, 2)
self.assertEqual(
model.task_covar_module.covar_factor.shape[-1], model._rank
)
if use_intf:
self.assertIsInstance(model.input_transform, Normalize)
# test model fitting
mll = ExactMarginalLogLikelihood(model.likelihood, model)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
# test posterior
test_x = torch.rand(2, 1, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultitaskMultivariateNormal)
self.assertEqual(posterior_f.mean.shape, torch.Size([2, 2]))
self.assertEqual(posterior_f.variance.shape, torch.Size([2, 2]))
            # check that the train inputs have been transformed and set on the model
if use_intf:
self.assertTrue(
model.train_inputs[0].equal(model.input_transform(train_X))
)
# test that posterior w/ observation noise raises appropriate error
with self.assertRaises(NotImplementedError):
model.posterior(test_x, observation_noise=True)
with self.assertRaises(NotImplementedError):
model.posterior(test_x, observation_noise=torch.rand(2, **tkwargs))
# test posterior w/ single output index
posterior_f = model.posterior(test_x, output_indices=[0])
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultivariateNormal)
self.assertEqual(posterior_f.mean.shape, torch.Size([2, 1]))
self.assertEqual(posterior_f.variance.shape, torch.Size([2, 1]))
# test posterior w/ bad output index
with self.assertRaises(ValueError):
model.posterior(test_x, output_indices=[2])
# test posterior (batch eval)
test_x = torch.rand(3, 2, 1, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultitaskMultivariateNormal)
# test posterior with X including the task features
posterior_expected = model.posterior(test_x, output_indices=[0])
test_x = torch.cat([torch.zeros_like(test_x), test_x], dim=-1)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultivariateNormal)
self.assertAllClose(posterior_f.mean, posterior_expected.mean)
self.assertAllClose(
posterior_f.covariance_matrix, posterior_expected.covariance_matrix
)
            # test that task features in X combined with non-None output_indices raises
with self.assertRaisesRegex(ValueError, "`output_indices` must be None"):
model.posterior(test_x, output_indices=[0, 1])
# test invalid task feature in X.
invalid_x = test_x.clone()
invalid_x[0, 0, 0] = 3
with self.assertRaisesRegex(ValueError, "task features in `X`"):
model.posterior(invalid_x)
# test that unsupported batch shape MTGPs throw correct error
with self.assertRaises(ValueError):
MultiTaskGP(torch.rand(2, 2, 2), torch.rand(2, 2, 1), 0)
# test that bad feature index throws correct error
_, (train_X, train_Y) = _gen_datasets(**tkwargs)
with self.assertRaises(ValueError):
MultiTaskGP(train_X, train_Y, 2)
# test that bad output task throws correct error
with self.assertRaises(RuntimeError):
MultiTaskGP(train_X, train_Y, 0, output_tasks=[2])
# test outcome transform
if use_octf:
# ensure un-transformation is applied
tmp_tf = model.outcome_transform
del model.outcome_transform
p_utf = model.posterior(test_x)
model.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(p_utf).variance
self.assertAllClose(posterior_f.variance, expected_var)
def test_MultiTaskGP_single_output(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
model = _gen_model_single_output(**tkwargs)
self.assertIsInstance(model, MultiTaskGP)
self.assertEqual(model.num_outputs, 1)
self.assertIsInstance(model.likelihood, GaussianLikelihood)
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, ScaleKernel)
matern_kernel = model.covar_module.base_kernel
self.assertIsInstance(matern_kernel, MaternKernel)
self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
self.assertIsInstance(model.task_covar_module, IndexKernel)
self.assertEqual(model._rank, 2)
self.assertEqual(
model.task_covar_module.covar_factor.shape[-1], model._rank
)
# test model fitting
mll = ExactMarginalLogLikelihood(model.likelihood, model)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
# test posterior
test_x = torch.rand(2, 1, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultivariateNormal)
# test posterior (batch eval)
test_x = torch.rand(3, 2, 1, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultivariateNormal)
# test posterior transform
post_tf = ScalarizedPosteriorTransform(weights=torch.ones(1, **tkwargs))
posterior_f_tf = model.posterior(test_x, posterior_transform=post_tf)
self.assertTrue(torch.equal(posterior_f.mean, posterior_f_tf.mean))
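# (With a single output and a unit weight, the scalarized posterior mean equals
# the original posterior mean, which is what the equality check above relies on.)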
def test_MultiTaskGP_fixed_prior(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
model = _gen_fixed_prior_model(**tkwargs)
self.assertIsInstance(model, MultiTaskGP)
self.assertIsInstance(model.task_covar_module, IndexKernel)
self.assertIsInstance(
model.task_covar_module.IndexKernelPrior, LKJCovariancePrior
)
def test_MultiTaskGP_given_covar_module(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
model = _gen_given_covar_module_model(**tkwargs)
self.assertIsInstance(model, MultiTaskGP)
self.assertIsInstance(model.task_covar_module, IndexKernel)
self.assertIsInstance(model.covar_module, RBFKernel)
self.assertIsInstance(model.covar_module.lengthscale_prior, LogNormalPrior)
self.assertAlmostEqual(model.covar_module.lengthscale_prior.loc, 0.0)
self.assertAlmostEqual(model.covar_module.lengthscale_prior.scale, 1.0)
def test_custom_mean_and_likelihood(self):
tkwargs = {"device": self.device, "dtype": torch.double}
_, (train_X, train_Y) = _gen_datasets(**tkwargs)
mean_module = LinearMean(input_size=train_X.shape[-1])
likelihood = GaussianLikelihood(noise_prior=LogNormalPrior(0, 1))
model = MultiTaskGP(
train_X,
train_Y,
task_feature=0,
mean_module=mean_module,
likelihood=likelihood,
)
self.assertIs(model.mean_module, mean_module)
self.assertIs(model.likelihood, likelihood)
class TestFixedNoiseMultiTaskGP(BotorchTestCase):
def test_deprecation_warning(self):
tkwargs = {"device": self.device, "dtype": torch.float}
with self.assertWarnsRegex(DeprecationWarning, "FixedNoise"):
_gen_fixed_noise_model_and_data(use_fixed_noise_model_class=True, **tkwargs)
def test_FixedNoiseMultiTaskGP(self):
bounds = torch.tensor([[-1.0, 0.0], [1.0, 1.0]])
for dtype, use_intf, use_octf in itertools.product(
(torch.float, torch.double), (False, True), (False, True)
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=1) if use_octf else None
intf = (
Normalize(d=2, bounds=bounds.to(**tkwargs), transform_on_train=True)
if use_intf
else None
)
model, _, (train_X, _, _) = _gen_fixed_noise_model_and_data(
input_transform=intf, outcome_transform=octf, **tkwargs
)
self.assertIsInstance(model, MultiTaskGP)
self.assertEqual(model.num_outputs, 2)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, ScaleKernel)
matern_kernel = model.covar_module.base_kernel
self.assertIsInstance(matern_kernel, MaternKernel)
self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
self.assertIsInstance(model.task_covar_module, IndexKernel)
self.assertEqual(model._rank, 2)
self.assertEqual(
model.task_covar_module.covar_factor.shape[-1], model._rank
)
if use_octf:
self.assertIsInstance(model.outcome_transform, Standardize)
if use_intf:
self.assertIsInstance(model.input_transform, Normalize)
# test model fitting
mll = ExactMarginalLogLikelihood(model.likelihood, model)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
# check that the train inputs have been transformed and set on the model
if use_intf:
self.assertTrue(
torch.equal(model.train_inputs[0], model.input_transform(train_X))
)
# test posterior
test_x = torch.rand(2, 1, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultitaskMultivariateNormal)
self.assertEqual(posterior_f.mean.shape, torch.Size([2, 2]))
self.assertEqual(posterior_f.variance.shape, torch.Size([2, 2]))
# check posterior transform is applied
if use_octf:
posterior_pred = model.posterior(test_x)
tmp_tf = model.outcome_transform
del model.outcome_transform
pp_tf = model.posterior(test_x)
model.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(pp_tf).variance
self.assertAllClose(posterior_pred.variance, expected_var)
# test that posterior w/ observation noise raises appropriate error
with self.assertRaises(NotImplementedError):
model.posterior(test_x, observation_noise=True)
with self.assertRaises(NotImplementedError):
model.posterior(test_x, observation_noise=torch.rand(2, **tkwargs))
# test posterior w/ single output index
posterior_f = model.posterior(test_x, output_indices=[0])
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultivariateNormal)
self.assertEqual(posterior_f.mean.shape, torch.Size([2, 1]))
self.assertEqual(posterior_f.variance.shape, torch.Size([2, 1]))
# test posterior w/ bad output index
with self.assertRaises(ValueError):
model.posterior(test_x, output_indices=[2])
# test posterior (batch eval)
test_x = torch.rand(3, 2, 1, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultitaskMultivariateNormal)
# test that MTGPs with an unsupported batch shape raise the correct error
with self.assertRaises(ValueError):
FixedNoiseMultiTaskGP(
torch.rand(2, 2, 2), torch.rand(2, 2, 1), torch.rand(2, 2, 1), 0
)
# test that bad feature index throws correct error
_, (train_X, train_Y) = _gen_datasets(**tkwargs)
train_Yvar = torch.full_like(train_Y, 0.05)
with self.assertRaises(ValueError):
FixedNoiseMultiTaskGP(train_X, train_Y, train_Yvar, 2)
# test that bad output task throws correct error
with self.assertRaises(RuntimeError):
FixedNoiseMultiTaskGP(train_X, train_Y, train_Yvar, 0, output_tasks=[2])
def test_FixedNoiseMultiTaskGP_single_output(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
model = _gen_fixed_noise_model_single_output(**tkwargs)
self.assertIsInstance(model, FixedNoiseMultiTaskGP)
self.assertEqual(model.num_outputs, 1)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, ScaleKernel)
matern_kernel = model.covar_module.base_kernel
self.assertIsInstance(matern_kernel, MaternKernel)
self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
self.assertIsInstance(model.task_covar_module, IndexKernel)
self.assertEqual(model._rank, 2)
self.assertEqual(
model.task_covar_module.covar_factor.shape[-1], model._rank
)
# test model fitting
mll = ExactMarginalLogLikelihood(model.likelihood, model)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
# test posterior
test_x = torch.rand(2, 1, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultivariateNormal)
# test posterior (batch eval)
test_x = torch.rand(3, 2, 1, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultivariateNormal)
def test_FixedNoiseMultiTaskGP_fixed_prior(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
model = _gen_fixed_noise_and_prior_model(**tkwargs)
self.assertIsInstance(model, FixedNoiseMultiTaskGP)
self.assertIsInstance(model.task_covar_module, IndexKernel)
self.assertIsInstance(
model.task_covar_module.IndexKernelPrior, LKJCovariancePrior
)
def test_FixedNoiseMultiTaskGP_given_covar_module(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
model = _gen_fixed_noise_and_given_covar_module_model(**tkwargs)
self.assertIsInstance(model, FixedNoiseMultiTaskGP)
self.assertIsInstance(model.task_covar_module, IndexKernel)
self.assertIsInstance(model.covar_module, MaternKernel)
self.assertAlmostEqual(model.covar_module.nu, 1.5)
self.assertIsInstance(model.covar_module.lengthscale_prior, GammaPrior)
self.assertAlmostEqual(
model.covar_module.lengthscale_prior.concentration, 1.0
)
self.assertAlmostEqual(model.covar_module.lengthscale_prior.rate, 1.0)
def test_MultiTaskGP_construct_inputs(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
task_feature = 0
model, datasets, (train_X, train_Y) = _gen_model_and_data(
task_feature=task_feature, **tkwargs
)
# Validate prior config.
with self.assertRaisesRegex(
ValueError, ".* only config for LKJ prior is supported"
):
data_dict = model.construct_inputs(
datasets,
task_feature=task_feature,
prior_config={"use_LKJ_prior": False},
)
# Validate eta.
with self.assertRaisesRegex(ValueError, "eta must be a real number"):
data_dict = model.construct_inputs(
datasets,
task_feature=task_feature,
prior_config={"use_LKJ_prior": True, "eta": "not_number"},
)
# Test that passing both `task_covar_prior` and `prior_config` at the
# same time raises an error.
with self.assertRaisesRegex(ValueError, "Only one of"):
data_dict = model.construct_inputs(
datasets,
task_feature=task_feature,
task_covar_prior=1,
prior_config={"use_LKJ_prior": True, "eta": "not_number"},
)
data_dict = model.construct_inputs(
datasets,
task_feature=task_feature,
output_tasks=[0],
prior_config={"use_LKJ_prior": True, "eta": 0.6},
)
self.assertEqual(data_dict["output_tasks"], [0])
self.assertEqual(data_dict["task_feature"], task_feature)
self.assertTrue(torch.equal(data_dict["train_X"], train_X))
self.assertTrue(torch.equal(data_dict["train_Y"], train_Y))
self.assertIsInstance(data_dict["task_covar_prior"], LKJCovariancePrior)
def test_FixedNoiseMultiTaskGP_construct_inputs(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
task_feature = 0
(
model,
datasets,
(train_X, train_Y, train_Yvar),
) = _gen_fixed_noise_model_and_data(task_feature=task_feature, **tkwargs)
# Test only one of `task_covar_prior` and `prior_config` can be passed.
with self.assertRaisesRegex(ValueError, "Only one of"):
model.construct_inputs(
datasets,
task_feature=task_feature,
task_covar_prior=1,
prior_config=1,
)
# Validate prior config.
with self.assertRaisesRegex(
ValueError, ".* only config for LKJ prior is supported"
):
data_dict = model.construct_inputs(
datasets,
task_feature=task_feature,
prior_config={"use_LKJ_prior": False},
)
data_dict = model.construct_inputs(
datasets,
task_feature=task_feature,
prior_config={"use_LKJ_prior": True, "eta": 0.6},
)
self.assertTrue(torch.equal(data_dict["train_X"], train_X))
self.assertTrue(torch.equal(data_dict["train_Y"], train_Y))
self.assertAllClose(data_dict["train_Yvar"], train_Yvar)
self.assertEqual(data_dict["task_feature"], task_feature)
self.assertIsInstance(data_dict["task_covar_prior"], LKJCovariancePrior)
class TestKroneckerMultiTaskGP(BotorchTestCase):
def test_KroneckerMultiTaskGP_default(self):
bounds = torch.tensor([[-1.0, 0.0], [1.0, 1.0]])
for batch_shape, dtype, use_intf, use_octf in itertools.product(
(torch.Size(),), # torch.Size([3])), TODO: Fix and test batch mode
(torch.float, torch.double),
(False, True),
(False, True),
):
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=2) if use_octf else None
intf = (
Normalize(d=2, bounds=bounds.to(**tkwargs), transform_on_train=True)
if use_intf
else None
)
# initialization with default settings
model, train_X, _ = _gen_kronecker_model_and_data(
model_kwargs={"outcome_transform": octf, "input_transform": intf},
batch_shape=batch_shape,
**tkwargs,
)
self.assertIsInstance(model, KroneckerMultiTaskGP)
self.assertEqual(model.num_outputs, 2)
self.assertIsInstance(model.likelihood, MultitaskGaussianLikelihood)
self.assertEqual(model.likelihood.rank, 0)
self.assertIsInstance(model.mean_module, MultitaskMean)
self.assertIsInstance(model.covar_module, MultitaskKernel)
base_kernel = model.covar_module
self.assertIsInstance(base_kernel.data_covar_module, MaternKernel)
self.assertIsInstance(base_kernel.task_covar_module, IndexKernel)
task_covar_prior = base_kernel.task_covar_module.IndexKernelPrior
self.assertIsInstance(task_covar_prior, LKJCovariancePrior)
self.assertEqual(task_covar_prior.correlation_prior.eta, 1.5)
self.assertIsInstance(task_covar_prior.sd_prior, SmoothedBoxPrior)
lengthscale_prior = base_kernel.data_covar_module.lengthscale_prior
self.assertIsInstance(lengthscale_prior, GammaPrior)
self.assertEqual(lengthscale_prior.concentration, 3.0)
self.assertEqual(lengthscale_prior.rate, 6.0)
self.assertEqual(base_kernel.task_covar_module.covar_factor.shape[-1], 2)
# test model fitting
mll = ExactMarginalLogLikelihood(model.likelihood, model)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
# test posterior
test_x = torch.rand(2, 2, **tkwargs)
posterior_f = model.posterior(test_x)
if not use_octf:
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(
posterior_f.distribution, MultitaskMultivariateNormal
)
else:
self.assertIsInstance(posterior_f, TransformedPosterior)
self.assertIsInstance(
posterior_f._posterior.distribution, MultitaskMultivariateNormal
)
self.assertEqual(posterior_f.mean.shape, torch.Size([2, 2]))
self.assertEqual(posterior_f.variance.shape, torch.Size([2, 2]))
if use_octf:
# ensure un-transformation is applied
tmp_tf = model.outcome_transform
del model.outcome_transform
p_tf = model.posterior(test_x)
model.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(p_tf).variance
self.assertAllClose(posterior_f.variance, expected_var)
else:
# test observation noise
# TODO: outcome transform + likelihood noise?
posterior_noisy = model.posterior(test_x, observation_noise=True)
self.assertTrue(
torch.allclose(
posterior_noisy.variance,
model.likelihood(posterior_f.distribution).variance,
)
)
# test posterior (batch eval)
test_x = torch.rand(3, 2, 2, **tkwargs)
posterior_f = model.posterior(test_x)
if not use_octf:
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(
posterior_f.distribution, MultitaskMultivariateNormal
)
else:
self.assertIsInstance(posterior_f, TransformedPosterior)
self.assertIsInstance(
posterior_f._posterior.distribution, MultitaskMultivariateNormal
)
self.assertEqual(posterior_f.mean.shape, torch.Size([3, 2, 2]))
self.assertEqual(posterior_f.variance.shape, torch.Size([3, 2, 2]))
# test that using a posterior transform throws error
post_tf = ScalarizedPosteriorTransform(weights=torch.ones(2, **tkwargs))
with self.assertRaises(NotImplementedError):
model.posterior(test_x, posterior_transform=post_tf)
def test_KroneckerMultiTaskGP_custom(self):
for batch_shape, dtype in itertools.product(
(torch.Size(),), # torch.Size([3])), TODO: Fix and test batch mode
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
# initialization with custom settings
likelihood = MultitaskGaussianLikelihood(
num_tasks=2,
rank=1,
batch_shape=batch_shape,
)
data_covar_module = MaternKernel(
nu=1.5,
lengthscale_prior=GammaPrior(2.0, 4.0),
)
task_covar_prior = LKJCovariancePrior(
n=2,
eta=torch.tensor(0.5, **tkwargs),
sd_prior=SmoothedBoxPrior(math.exp(-3), math.exp(2), 0.1),
)
model_kwargs = {
"likelihood": likelihood,
"data_covar_module": data_covar_module,
"task_covar_prior": task_covar_prior,
"rank": 1,
}
model, train_X, _ = _gen_kronecker_model_and_data(
model_kwargs=model_kwargs, batch_shape=batch_shape, **tkwargs
)
self.assertIsInstance(model, KroneckerMultiTaskGP)
self.assertEqual(model.num_outputs, 2)
self.assertIsInstance(model.likelihood, MultitaskGaussianLikelihood)
self.assertEqual(model.likelihood.rank, 1)
self.assertIsInstance(model.mean_module, MultitaskMean)
self.assertIsInstance(model.covar_module, MultitaskKernel)
base_kernel = model.covar_module
self.assertIsInstance(base_kernel.data_covar_module, MaternKernel)
self.assertIsInstance(base_kernel.task_covar_module, IndexKernel)
task_covar_prior = base_kernel.task_covar_module.IndexKernelPrior
self.assertIsInstance(task_covar_prior, LKJCovariancePrior)
self.assertEqual(task_covar_prior.correlation_prior.eta, 0.5)
lengthscale_prior = base_kernel.data_covar_module.lengthscale_prior
self.assertIsInstance(lengthscale_prior, GammaPrior)
self.assertEqual(lengthscale_prior.concentration, 2.0)
self.assertEqual(lengthscale_prior.rate, 4.0)
self.assertEqual(base_kernel.task_covar_module.covar_factor.shape[-1], 1)
# test model fitting
mll = ExactMarginalLogLikelihood(model.likelihood, model)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
# test posterior
max_cholesky_sizes = [1, 800]
for max_cholesky in max_cholesky_sizes:
model.train()
test_x = torch.rand(2, 2, **tkwargs)
# use a small root decomposition to enforce zero padding
with max_cholesky_size(max_cholesky), max_root_decomposition_size(3):
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(
posterior_f.distribution, MultitaskMultivariateNormal
)
self.assertEqual(posterior_f.mean.shape, torch.Size([2, 2]))
self.assertEqual(posterior_f.variance.shape, torch.Size([2, 2]))
# test observation noise
posterior_noisy = model.posterior(test_x, observation_noise=True)
self.assertTrue(
torch.allclose(
posterior_noisy.variance,
model.likelihood(posterior_f.distribution).variance,
)
)
# test posterior (batch eval)
test_x = torch.rand(3, 2, 2, **tkwargs)
posterior_f = model.posterior(test_x)
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultitaskMultivariateNormal)
self.assertEqual(posterior_f.mean.shape, torch.Size([3, 2, 2]))
self.assertEqual(posterior_f.variance.shape, torch.Size([3, 2, 2]))
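# Hedged note on the data layout assumed throughout this class: unlike
# MultiTaskGP, KroneckerMultiTaskGP uses a block design in which every task is
# observed at every input, so `train_Y` is `n x num_tasks` rather than carrying
# a task-feature column in `train_X`, e.g.
#     model = KroneckerMultiTaskGP(torch.rand(8, 2), torch.randn(8, 2))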
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from unittest import mock
import pyro
import torch
from botorch import fit_fully_bayesian_model_nuts
from botorch.acquisition.analytic import (
ExpectedImprovement,
PosteriorMean,
ProbabilityOfImprovement,
UpperConfidenceBound,
)
from botorch.acquisition.logei import (
qLogExpectedImprovement,
qLogNoisyExpectedImprovement,
)
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
)
from botorch.acquisition.multi_objective import (
prune_inferior_points_multi_objective,
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.acquisition.utils import prune_inferior_points
from botorch.models import ModelList, ModelListGP
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.fully_bayesian import (
MCMC_DIM,
MIN_INFERRED_NOISE_LEVEL,
PyroModel,
SaasFullyBayesianSingleTaskGP,
SaasPyroModel,
)
from botorch.models.transforms import Normalize, Standardize
from botorch.posteriors.fully_bayesian import batched_bisect, FullyBayesianPosterior
from botorch.sampling.get_sampler import get_sampler
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood, GaussianLikelihood
from gpytorch.means import ConstantMean
from linear_operator.operators import to_linear_operator
from pyro.ops.integrator import (
_EXCEPTION_HANDLERS,
potential_grad,
register_exception_handler,
)
EXPECTED_KEYS = [
"mean_module.raw_constant",
"covar_module.raw_outputscale",
"covar_module.base_kernel.raw_lengthscale",
"covar_module.base_kernel.raw_lengthscale_constraint.lower_bound",
"covar_module.base_kernel.raw_lengthscale_constraint.upper_bound",
"covar_module.raw_outputscale_constraint.lower_bound",
"covar_module.raw_outputscale_constraint.upper_bound",
]
EXPECTED_KEYS_NOISE = EXPECTED_KEYS + [
"likelihood.noise_covar.raw_noise",
"likelihood.noise_covar.raw_noise_constraint.lower_bound",
"likelihood.noise_covar.raw_noise_constraint.upper_bound",
]
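# These key lists mirror the state dict of a fitted SaasFullyBayesianSingleTaskGP;
# the `_NOISE` variant adds the raw-noise entries that only exist when the noise
# level is inferred rather than passed in via `train_Yvar` (see test_fit_model).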
class CustomPyroModel(PyroModel):
def sample(self) -> None:
pass
def postprocess_mcmc_samples(self, mcmc_samples, **kwargs):
pass
def load_mcmc_samples(self, mcmc_samples):
pass
class TestFullyBayesianSingleTaskGP(BotorchTestCase):
def _get_data_and_model(self, infer_noise: bool, **tkwargs):
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.rand(10, 4, **tkwargs)
train_Y = torch.sin(train_X[:, :1])
train_Yvar = (
None
if infer_noise
else torch.arange(0.1, 1.1, 0.1, **tkwargs).unsqueeze(-1)
)
model = SaasFullyBayesianSingleTaskGP(
train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar
)
return train_X, train_Y, train_Yvar, model
def _get_unnormalized_data(self, infer_noise: bool, **tkwargs):
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = 5 + 5 * torch.rand(10, 4, **tkwargs)
train_Y = 10 + torch.sin(train_X[:, :1])
test_X = 5 + 5 * torch.rand(5, 4, **tkwargs)
train_Yvar = (
None if infer_noise else 0.1 * torch.arange(10, **tkwargs).unsqueeze(-1)
)
return train_X, train_Y, train_Yvar, test_X
def _get_mcmc_samples(
self, num_samples: int, dim: int, infer_noise: bool, **tkwargs
):
mcmc_samples = {
"lengthscale": torch.rand(num_samples, 1, dim, **tkwargs),
"outputscale": torch.rand(num_samples, **tkwargs),
"mean": torch.randn(num_samples, **tkwargs),
}
if infer_noise:
mcmc_samples["noise"] = torch.rand(num_samples, 1, **tkwargs)
return mcmc_samples
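# Shape convention spelled out for readability: every entry is batched over the
# `num_samples` MCMC draws, e.g. `lengthscale` is `num_samples x 1 x dim`,
# matching what `load_mcmc_samples` consumes in test_load_samples below.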
def test_raises(self):
tkwargs = {"device": self.device, "dtype": torch.double}
with self.assertRaisesRegex(
ValueError,
"Expected train_X to have shape n x d and train_Y to have shape n x 1",
):
SaasFullyBayesianSingleTaskGP(
train_X=torch.rand(10, 4, **tkwargs), train_Y=torch.randn(10, **tkwargs)
)
with self.assertRaisesRegex(
ValueError,
"Expected train_X to have shape n x d and train_Y to have shape n x 1",
):
SaasFullyBayesianSingleTaskGP(
train_X=torch.rand(10, 4, **tkwargs),
train_Y=torch.randn(12, 1, **tkwargs),
)
with self.assertRaisesRegex(
ValueError,
"Expected train_X to have shape n x d and train_Y to have shape n x 1",
):
SaasFullyBayesianSingleTaskGP(
train_X=torch.rand(10, **tkwargs),
train_Y=torch.randn(10, 1, **tkwargs),
)
with self.assertRaisesRegex(
ValueError,
"Expected train_Yvar to be None or have the same shape as train_Y",
):
SaasFullyBayesianSingleTaskGP(
train_X=torch.rand(10, 4, **tkwargs),
train_Y=torch.randn(10, 1, **tkwargs),
train_Yvar=torch.rand(10, **tkwargs),
)
train_X, train_Y, train_Yvar, model = self._get_data_and_model(
infer_noise=True, **tkwargs
)
# Make sure an exception is raised if the model has not been fitted
not_fitted_error_msg = (
"Model has not been fitted. You need to call "
"`fit_fully_bayesian_model_nuts` to fit the model."
)
with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
model.num_mcmc_samples
with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
model.median_lengthscale
with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
model.forward(torch.rand(1, 4, **tkwargs))
with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
model.posterior(torch.rand(1, 4, **tkwargs))
def test_fit_model(self):
for infer_noise, dtype in itertools.product(
[True, False], [torch.float, torch.double]
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y, train_Yvar, model = self._get_data_and_model(
infer_noise=infer_noise, **tkwargs
)
n, d = train_X.shape
# Test init
self.assertIsNone(model.mean_module)
self.assertIsNone(model.covar_module)
self.assertIsNone(model.likelihood)
self.assertIsInstance(model.pyro_model, SaasPyroModel)
self.assertAllClose(train_X, model.pyro_model.train_X)
self.assertAllClose(train_Y, model.pyro_model.train_Y)
if infer_noise:
self.assertIsNone(model.pyro_model.train_Yvar)
else:
self.assertAllClose(
train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL),
model.pyro_model.train_Yvar,
)
# Fit a model and check that the hyperparameters have the correct shape
fit_fully_bayesian_model_nuts(
model, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
)
self.assertEqual(model.batch_shape, torch.Size([3]))
self.assertEqual(model._aug_batch_shape, torch.Size([3]))
# Using mock here since multi-output is currently not supported.
with mock.patch.object(model, "_num_outputs", 2):
self.assertEqual(model._aug_batch_shape, torch.Size([3, 2]))
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertEqual(model.mean_module.raw_constant.shape, model.batch_shape)
self.assertIsInstance(model.covar_module, ScaleKernel)
self.assertEqual(model.covar_module.outputscale.shape, model.batch_shape)
self.assertIsInstance(model.covar_module.base_kernel, MaternKernel)
self.assertEqual(
model.covar_module.base_kernel.lengthscale.shape, torch.Size([3, 1, d])
)
self.assertIsInstance(
model.likelihood,
GaussianLikelihood if infer_noise else FixedNoiseGaussianLikelihood,
)
if infer_noise:
self.assertEqual(model.likelihood.noise.shape, torch.Size([3, 1]))
else:
self.assertEqual(model.likelihood.noise.shape, torch.Size([3, n]))
self.assertAllClose(
train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL).squeeze(-1).repeat(3, 1),
model.likelihood.noise,
)
# Predict on some test points
for batch_shape in [[5], [6, 5, 2]]:
test_X = torch.rand(*batch_shape, d, **tkwargs)
posterior = model.posterior(test_X)
self.assertIsInstance(posterior, FullyBayesianPosterior)
# Mean/variance
expected_shape = (
*batch_shape[: MCMC_DIM + 2],
*model.batch_shape,
*batch_shape[MCMC_DIM + 2 :],
1,
)
expected_shape = torch.Size(expected_shape)
mean, var = posterior.mean, posterior.variance
self.assertEqual(mean.shape, expected_shape)
self.assertEqual(var.shape, expected_shape)
# Mixture mean/variance/median/quantiles
mixture_mean = posterior.mixture_mean
mixture_variance = posterior.mixture_variance
quantile1 = posterior.quantile(value=torch.tensor(0.01))
quantile2 = posterior.quantile(value=torch.tensor(0.99))
self.assertEqual(mixture_mean.shape, torch.Size(batch_shape + [1]))
self.assertEqual(mixture_variance.shape, torch.Size(batch_shape + [1]))
self.assertTrue(mixture_variance.min() > 0.0)
self.assertEqual(quantile1.shape, torch.Size(batch_shape + [1]))
self.assertEqual(quantile2.shape, torch.Size(batch_shape + [1]))
self.assertTrue((quantile2 > quantile1).all())
quantile12 = posterior.quantile(value=torch.tensor([0.01, 0.99]))
self.assertAllClose(
quantile12, torch.stack([quantile1, quantile2], dim=0)
)
dist = torch.distributions.Normal(
loc=posterior.mean, scale=posterior.variance.sqrt()
)
self.assertAllClose(
dist.cdf(quantile1.unsqueeze(MCMC_DIM)).mean(dim=MCMC_DIM),
torch.full(batch_shape + [1], 0.01, **tkwargs),
atol=1e-6,
)
self.assertAllClose(
dist.cdf(quantile2.unsqueeze(MCMC_DIM)).mean(dim=MCMC_DIM),
torch.full(batch_shape + [1], 0.99, **tkwargs),
atol=1e-6,
)
# Invalid quantile should raise
for q in [-1.0, 0.0, 1.0, 1.3333]:
with self.assertRaisesRegex(
ValueError, "value is expected to be in the range"
):
posterior.quantile(value=torch.tensor(q))
# Test model lists with fully Bayesian models and mixed modeling
deterministic = GenericDeterministicModel(f=lambda x: x[..., :1])
for ModelListClass, model2 in zip(
[ModelList, ModelListGP], [deterministic, model]
):
expected_shape = (
*batch_shape[: MCMC_DIM + 2],
*model.batch_shape,
*batch_shape[MCMC_DIM + 2 :],
2,
)
expected_shape = torch.Size(expected_shape)
model_list = ModelListClass(model, model2)
posterior = model_list.posterior(test_X)
mean, var = posterior.mean, posterior.variance
self.assertEqual(mean.shape, expected_shape)
self.assertEqual(var.shape, expected_shape)
# This check is only for ModelListGP.
self.assertEqual(model_list.batch_shape, model.batch_shape)
# Mixing fully Bayesian models with different batch shapes isn't supported
_, _, _, model2 = self._get_data_and_model(
infer_noise=infer_noise, **tkwargs
)
fit_fully_bayesian_model_nuts(
model2, warmup_steps=1, num_samples=1, thinning=1, disable_progbar=True
)
with self.assertRaisesRegex(
NotImplementedError, "All MCMC batch dimensions"
):
ModelList(model, model2).posterior(test_X)._extended_shape()
with self.assertRaisesRegex(
NotImplementedError,
"All MCMC batch dimensions must have the same size, got",
):
ModelList(model, model2).posterior(test_X).mean
# Check properties
median_lengthscale = model.median_lengthscale
self.assertEqual(median_lengthscale.shape, torch.Size([4]))
self.assertEqual(model.num_mcmc_samples, 3)
# Check the keys in the state dict
true_keys = EXPECTED_KEYS_NOISE if infer_noise else EXPECTED_KEYS
self.assertEqual(set(model.state_dict().keys()), set(true_keys))
for i in range(2): # Test loading via state dict
m = model if i == 0 else ModelList(model, deterministic)
state_dict = m.state_dict()
_, _, _, m_new = self._get_data_and_model(
infer_noise=infer_noise, **tkwargs
)
m_new = m_new if i == 0 else ModelList(m_new, deterministic)
if i == 0:
self.assertEqual(m_new.state_dict(), {})
m_new.load_state_dict(state_dict)
self.assertEqual(m.state_dict().keys(), m_new.state_dict().keys())
for k in m.state_dict().keys():
self.assertTrue((m.state_dict()[k] == m_new.state_dict()[k]).all())
preds1, preds2 = m.posterior(test_X), m_new.posterior(test_X)
self.assertTrue(torch.equal(preds1.mean, preds2.mean))
self.assertTrue(torch.equal(preds1.variance, preds2.variance))
# Make sure the model shapes are set correctly
self.assertEqual(model.pyro_model.train_X.shape, torch.Size([n, d]))
self.assertAllClose(model.pyro_model.train_X, train_X)
model.train() # Put the model in train mode
self.assertAllClose(train_X, model.pyro_model.train_X)
self.assertIsNone(model.mean_module)
self.assertIsNone(model.covar_module)
self.assertIsNone(model.likelihood)
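# Minimal end-to-end sketch of the workflow exercised above (illustrative only;
# the NUTS settings here are placeholders, not tuned recommendations):
#     gp = SaasFullyBayesianSingleTaskGP(train_X=X, train_Y=Y)
#     fit_fully_bayesian_model_nuts(
#         gp, warmup_steps=256, num_samples=128, thinning=16, disable_progbar=True
#     )
#     posterior = gp.posterior(test_X)  # batched over MCMC draws along MCMC_DIM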
def test_transforms(self):
for infer_noise in [True, False]:
tkwargs = {"device": self.device, "dtype": torch.double}
train_X, train_Y, train_Yvar, test_X = self._get_unnormalized_data(
infer_noise=infer_noise, **tkwargs
)
n, d = train_X.shape
lb, ub = train_X.min(dim=0).values, train_X.max(dim=0).values
mu, sigma = train_Y.mean(), train_Y.std()
# Fit without transforms
with torch.random.fork_rng():
torch.manual_seed(0)
gp1 = SaasFullyBayesianSingleTaskGP(
train_X=(train_X - lb) / (ub - lb),
train_Y=(train_Y - mu) / sigma,
train_Yvar=train_Yvar / sigma**2
if train_Yvar is not None
else train_Yvar,
)
fit_fully_bayesian_model_nuts(
gp1, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
)
posterior1 = gp1.posterior((test_X - lb) / (ub - lb))
pred_mean1 = mu + sigma * posterior1.mean
pred_var1 = (sigma**2) * posterior1.variance
# Fit with transforms
with torch.random.fork_rng():
torch.manual_seed(0)
gp2 = SaasFullyBayesianSingleTaskGP(
train_X=train_X,
train_Y=train_Y,
train_Yvar=train_Yvar,
input_transform=Normalize(d=train_X.shape[-1]),
outcome_transform=Standardize(m=1),
)
fit_fully_bayesian_model_nuts(
gp2, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
)
posterior2 = gp2.posterior(test_X)
pred_mean2, pred_var2 = posterior2.mean, posterior2.variance
self.assertAllClose(pred_mean1, pred_mean2)
self.assertAllClose(pred_var1, pred_var2)
def test_acquisition_functions(self):
tkwargs = {"device": self.device, "dtype": torch.double}
train_X, train_Y, train_Yvar, model = self._get_data_and_model(
infer_noise=True, **tkwargs
)
fit_fully_bayesian_model_nuts(
model, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
)
deterministic = GenericDeterministicModel(f=lambda x: x[..., :1])
# due to the ModelList type, cache_root=False is set for all noisy EI variants
list_gp = ModelListGP(model, model)
mixed_list = ModelList(deterministic, model)
simple_sampler = get_sampler(
posterior=model.posterior(train_X), sample_shape=torch.Size([2])
)
list_gp_sampler = get_sampler(
posterior=list_gp.posterior(train_X), sample_shape=torch.Size([2])
)
mixed_list_sampler = get_sampler(
posterior=mixed_list.posterior(train_X), sample_shape=torch.Size([2])
)
acquisition_functions = [
ExpectedImprovement(model=model, best_f=train_Y.max()),
ProbabilityOfImprovement(model=model, best_f=train_Y.max()),
PosteriorMean(model=model),
UpperConfidenceBound(model=model, beta=4),
qLogExpectedImprovement(
model=model, best_f=train_Y.max(), sampler=simple_sampler
),
qExpectedImprovement(
model=model, best_f=train_Y.max(), sampler=simple_sampler
),
qLogNoisyExpectedImprovement(
model=model,
X_baseline=train_X,
sampler=simple_sampler,
cache_root=False,
),
qNoisyExpectedImprovement(
model=model,
X_baseline=train_X,
sampler=simple_sampler,
cache_root=False,
),
qProbabilityOfImprovement(
model=model, best_f=train_Y.max(), sampler=simple_sampler
),
qSimpleRegret(model=model, sampler=simple_sampler),
qUpperConfidenceBound(model=model, beta=4, sampler=simple_sampler),
qNoisyExpectedHypervolumeImprovement(
model=list_gp,
X_baseline=train_X,
ref_point=torch.zeros(2, **tkwargs),
sampler=list_gp_sampler,
cache_root=False,
),
qExpectedHypervolumeImprovement(
model=list_gp,
ref_point=torch.zeros(2, **tkwargs),
sampler=list_gp_sampler,
partitioning=NondominatedPartitioning(
ref_point=torch.zeros(2, **tkwargs), Y=train_Y.repeat([1, 2])
),
),
# qEHVI/qNEHVI with mixed models
qNoisyExpectedHypervolumeImprovement(
model=mixed_list,
X_baseline=train_X,
ref_point=torch.zeros(2, **tkwargs),
sampler=mixed_list_sampler,
cache_root=False,
),
qExpectedHypervolumeImprovement(
model=mixed_list,
ref_point=torch.zeros(2, **tkwargs),
sampler=mixed_list_sampler,
partitioning=NondominatedPartitioning(
ref_point=torch.zeros(2, **tkwargs), Y=train_Y.repeat([1, 2])
),
),
]
for acqf in acquisition_functions:
for batch_shape in [[5], [6, 5, 2]]:
test_X = torch.rand(*batch_shape, 1, 4, **tkwargs)
self.assertEqual(acqf(test_X).shape, torch.Size(batch_shape))
# Test prune_inferior_points
X_pruned = prune_inferior_points(model=model, X=train_X)
self.assertTrue(X_pruned.ndim == 2 and X_pruned.shape[-1] == 4)
# Test prune_inferior_points_multi_objective
for model_list in [ModelListGP(model, model), ModelList(deterministic, model)]:
X_pruned = prune_inferior_points_multi_objective(
model=model_list,
X=train_X,
ref_point=torch.zeros(2, **tkwargs),
)
self.assertTrue(X_pruned.ndim == 2 and X_pruned.shape[-1] == 4)
def test_load_samples(self):
for infer_noise, dtype in itertools.product(
[True, False], [torch.float, torch.double]
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y, train_Yvar, model = self._get_data_and_model(
infer_noise=infer_noise, **tkwargs
)
n, d = train_X.shape
mcmc_samples = self._get_mcmc_samples(
num_samples=3, dim=train_X.shape[-1], infer_noise=infer_noise, **tkwargs
)
model.load_mcmc_samples(mcmc_samples)
self.assertAllClose(
model.covar_module.base_kernel.lengthscale,
mcmc_samples["lengthscale"],
)
self.assertAllClose(
model.covar_module.outputscale,
mcmc_samples["outputscale"],
)
self.assertAllClose(
model.mean_module.raw_constant.data,
mcmc_samples["mean"],
)
if infer_noise:
self.assertAllClose(
model.likelihood.noise_covar.noise, mcmc_samples["noise"]
)
else:
self.assertAllClose(
model.likelihood.noise_covar.noise,
train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL).squeeze(-1).repeat(3, 1),
)
def test_construct_inputs(self):
for infer_noise, dtype in itertools.product(
(True, False), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
X, Y, Yvar, model = self._get_data_and_model(
infer_noise=infer_noise, **tkwargs
)
training_data = SupervisedDataset(X, Y, Yvar)
data_dict = model.construct_inputs(training_data)
self.assertTrue(X.equal(data_dict["train_X"]))
self.assertTrue(Y.equal(data_dict["train_Y"]))
if infer_noise:
self.assertTrue("train_Yvar" not in data_dict)
else:
self.assertTrue(Yvar.equal(data_dict["train_Yvar"]))
def test_custom_pyro_model(self):
for infer_noise, dtype in itertools.product(
(True, False), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y, train_Yvar, _ = self._get_unnormalized_data(
infer_noise=infer_noise, **tkwargs
)
model = SaasFullyBayesianSingleTaskGP(
train_X=train_X,
train_Y=train_Y,
train_Yvar=train_Yvar,
pyro_model=CustomPyroModel(),
)
with self.assertRaisesRegex(
NotImplementedError, "load_state_dict only works for SaasPyroModel"
):
model.load_state_dict({})
self.assertIsInstance(model.pyro_model, CustomPyroModel)
self.assertAllClose(model.pyro_model.train_X, train_X)
self.assertAllClose(model.pyro_model.train_Y, train_Y)
if infer_noise:
self.assertIsNone(model.pyro_model.train_Yvar)
else:
self.assertAllClose(
model.pyro_model.train_Yvar,
train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL),
)
# Use transforms
model = SaasFullyBayesianSingleTaskGP(
train_X=train_X,
train_Y=train_Y,
train_Yvar=train_Yvar,
input_transform=Normalize(d=train_X.shape[-1]),
outcome_transform=Standardize(m=1),
pyro_model=CustomPyroModel(),
)
self.assertIsInstance(model.pyro_model, CustomPyroModel)
lb, ub = train_X.min(dim=0).values, train_X.max(dim=0).values
self.assertAllClose(model.pyro_model.train_X, (train_X - lb) / (ub - lb))
mu, sigma = train_Y.mean(dim=0), train_Y.std(dim=0)
self.assertAllClose(model.pyro_model.train_Y, (train_Y - mu) / sigma)
if not infer_noise:
self.assertAllClose(
model.pyro_model.train_Yvar,
train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL) / (sigma**2),
atol=5e-4,
)
def test_bisect(self):
def f(x):
return 1 + x
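# f is affine and strictly increasing, so bisection for f(x) = target has the
# unique solution x = target - 1; the tolerance assertions below rely on this.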
for dtype, batch_shape in itertools.product(
(torch.float, torch.double), ([5], [6, 5, 2])
):
tkwargs = {"device": self.device, "dtype": dtype}
bounds = torch.stack(
(
torch.zeros(batch_shape, **tkwargs),
torch.ones(batch_shape, **tkwargs),
)
)
for target, tol in itertools.product([1.01, 1.5, 1.99], [1e-3, 1e-6]):
x = batched_bisect(f=f, target=target, bounds=bounds, tol=tol)
self.assertAllClose(
f(x), torch.full(batch_shape, target, **tkwargs), atol=tol
)
# Do one step and make sure we didn't converge in this case
x = batched_bisect(f=f, target=1.71, bounds=bounds, max_steps=1)
self.assertAllClose(x, torch.full(batch_shape, 0.75, **tkwargs), atol=tol)
# Target outside the bounds should raise
with self.assertRaisesRegex(
ValueError,
"The target is not contained in the interval specified by the bounds",
):
batched_bisect(f=f, target=2.1, bounds=bounds)
# Test analytic solution when there is only one MCMC sample
mean = torch.randn(1, 5, **tkwargs)
variance = torch.rand(1, 5, **tkwargs)
covar = torch.diag_embed(variance)
mvn = MultivariateNormal(mean, to_linear_operator(covar))
posterior = FullyBayesianPosterior(distribution=mvn)
dist = torch.distributions.Normal(
loc=mean.unsqueeze(-1), scale=variance.unsqueeze(-1).sqrt()
)
for q in [0.1, 0.5, 0.9]:
x = posterior.quantile(value=torch.tensor(q))
self.assertAllClose(
dist.cdf(x), q * torch.ones(1, 5, 1, **tkwargs), atol=1e-4
)
class TestPyroCatchNumericalErrors(BotorchTestCase):
def tearDown(self):
super().tearDown()
# Remove the exception handler so it doesn't affect the tests on rerun
# TODO: Add functionality to pyro to clear the handlers so this
# does not require touching the internals.
del _EXCEPTION_HANDLERS["foo_runtime"]
def test_pyro_catch_error(self):
def potential_fn(z):
mvn = pyro.distributions.MultivariateNormal(
loc=torch.zeros(2),
covariance_matrix=z["K"],
)
return mvn.log_prob(torch.zeros(2))
# Test base case where everything is fine
z = {"K": torch.eye(2)}
grads, val = potential_grad(potential_fn, z)
self.assertAllClose(grads["K"], -0.5 * torch.eye(2))
norm_mvn = torch.distributions.Normal(0, 1)
self.assertAllClose(val, 2 * norm_mvn.log_prob(torch.tensor(0.0)))
# Default behavior should catch the ValueError when trying to instantiate
# the MVN and return NaN instead
z = {"K": torch.ones(2, 2)}
_, val = potential_grad(potential_fn, z)
self.assertTrue(torch.isnan(val))
# Default behavior should catch the LinAlgError when performing a
# Cholesky decomposition and return NaN instead
def potential_fn_chol(z):
return torch.linalg.cholesky(z["K"])
_, val = potential_grad(potential_fn_chol, z)
self.assertTrue(torch.isnan(val))
# Default behavior should not catch other errors
def potential_fn_rterr_foo(z):
raise RuntimeError("foo")
with self.assertRaisesRegex(RuntimeError, "foo"):
potential_grad(potential_fn_rterr_foo, z)
# But once we register a handler for this specific error, it should be caught
def catch_runtime_error(e):
return type(e) is RuntimeError and "foo" in str(e)
register_exception_handler("foo_runtime", catch_runtime_error)
_, val = potential_grad(potential_fn_rterr_foo, z)
self.assertTrue(torch.isnan(val))
# Unless the error message is different
def potential_fn_rterr_bar(z):
raise RuntimeError("bar")
with self.assertRaisesRegex(RuntimeError, "bar"):
potential_grad(potential_fn_rterr_bar, z)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models.contextual import LCEAGP, SACGP
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.kernels.contextual_lcea import LCEAKernel
from botorch.models.kernels.contextual_sac import SACKernel
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions.multivariate_normal import MultivariateNormal
from gpytorch.means import ConstantMean
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
class TestContextualGP(BotorchTestCase):
def test_SACGP(self):
for dtype in (torch.float, torch.double):
train_X = torch.tensor(
[[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]],
device=self.device,
dtype=dtype,
)
train_Y = torch.tensor(
[[1.0], [2.0], [3.0]], device=self.device, dtype=dtype
)
train_Yvar = 0.01 * torch.ones(3, 1, device=self.device, dtype=dtype)
self.decomposition = {"1": [0, 3], "2": [1, 2]}
model = SACGP(train_X, train_Y, train_Yvar, self.decomposition)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll, optimizer_kwargs={"options": {"maxiter": 1}})
self.assertIsInstance(model, FixedNoiseGP)
self.assertDictEqual(model.decomposition, self.decomposition)
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, SACKernel)
# test number of named parameters
num_of_mean = 0
num_of_lengthscales = 0
num_of_outputscales = 0
for param_name, param in model.named_parameters():
if param_name == "mean_module.raw_constant":
num_of_mean += param.data.shape.numel()
elif "raw_lengthscale" in param_name:
num_of_lengthscales += param.data.shape.numel()
elif "raw_outputscale" in param_name:
num_of_outputscales += param.data.shape.numel()
self.assertEqual(num_of_mean, 1)
self.assertEqual(num_of_lengthscales, 2)
self.assertEqual(num_of_outputscales, 2)
test_x = torch.rand(5, 4, device=self.device, dtype=dtype)
posterior = model(test_x)
self.assertIsInstance(posterior, MultivariateNormal)
def test_LCEAGP(self):
for dtype in (torch.float, torch.double):
train_X = torch.tensor(
[[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]],
device=self.device,
dtype=dtype,
)
train_Y = torch.tensor(
[[1.0], [2.0], [3.0]], device=self.device, dtype=dtype
)
train_Yvar = 0.01 * torch.ones(3, 1, device=self.device, dtype=dtype)
# Test setting attributes
decomposition = {"1": [0, 1], "2": [2, 3]}
# test model instantiation
model = LCEAGP(
train_X=train_X,
train_Y=train_Y,
train_Yvar=train_Yvar,
decomposition=decomposition,
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll, optimizer_kwargs={"options": {"maxiter": 1}})
self.assertIsInstance(model, LCEAGP)
self.assertIsInstance(model.covar_module, LCEAKernel)
self.assertDictEqual(model.decomposition, decomposition)
test_x = torch.rand(5, 4, device=self.device, dtype=dtype)
posterior = model(test_x)
self.assertIsInstance(posterior, MultivariateNormal)
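# Hedged note on the `decomposition` argument used in both tests: it maps each
# context name to the feature indices belonging to that context, e.g.
# {"1": [0, 3], "2": [1, 2]} splits the 4d inputs into two 2d contexts, which
# lines up with the two lengthscales and two outputscales counted in test_SACGP.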
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from unittest import mock
import torch
from botorch.acquisition.objective import PosteriorTransform
from botorch.models import HigherOrderGP
from botorch.models.higher_order_gp import FlattenedStandardize
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.optim.fit import fit_gpytorch_mll_torch
from botorch.posteriors import GPyTorchPosterior, TransformedPosterior
from botorch.sampling import IIDNormalSampler
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import RBFKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.settings import max_cholesky_size, skip_posterior_variances
class DummyPosteriorTransform(PosteriorTransform):
def evaluate(self, Y):
return Y
def forward(self, posterior):
return posterior
class TestHigherOrderGP(BotorchTestCase):
def setUp(self):
super().setUp()
torch.random.manual_seed(0)
train_x = torch.rand(2, 10, 1, device=self.device)
train_y = torch.randn(2, 10, 3, 5, device=self.device)
self.model = HigherOrderGP(train_x, train_y)
# check that we can assign different kernels and likelihoods
model_2 = HigherOrderGP(
train_X=train_x,
train_Y=train_y,
covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
likelihood=GaussianLikelihood(),
)
model_3 = HigherOrderGP(
train_X=train_x,
train_Y=train_y,
covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
likelihood=GaussianLikelihood(),
latent_init="gp",
)
for m in [self.model, model_2, model_3]:
mll = ExactMarginalLogLikelihood(m.likelihood, m)
fit_gpytorch_mll_torch(mll, step_limit=1)
def test_num_output_dims(self):
for dtype in [torch.float, torch.double]:
train_x = torch.rand(2, 10, 1, device=self.device, dtype=dtype)
train_y = torch.randn(2, 10, 3, 5, device=self.device, dtype=dtype)
model = HigherOrderGP(train_x, train_y)
# check that it correctly inferred that this is a batched model
self.assertEqual(model._num_outputs, 2)
train_x = torch.rand(10, 1, device=self.device, dtype=dtype)
train_y = torch.randn(10, 3, 5, 2, device=self.device, dtype=dtype)
model = HigherOrderGP(train_x, train_y)
# non-batched case
self.assertEqual(model._num_outputs, 1)
train_x = torch.rand(3, 2, 10, 1, device=self.device, dtype=dtype)
train_y = torch.randn(3, 2, 10, 3, 5, device=self.device, dtype=dtype)
# check the error when using multi-dim batch_shape
with self.assertRaises(NotImplementedError):
model = HigherOrderGP(train_x, train_y)
def test_posterior(self):
for dtype in [torch.float, torch.double]:
for mcs in [800, 10]:
torch.random.manual_seed(0)
with max_cholesky_size(mcs):
test_x = torch.rand(2, 12, 1).to(device=self.device, dtype=dtype)
self.model.to(dtype)
# clear caches
self.model.train()
self.model.eval()
# test the posterior works
posterior = self.model.posterior(test_x)
self.assertIsInstance(posterior, GPyTorchPosterior)
# test that a posterior transform raises an error
with self.assertRaises(NotImplementedError):
self.model.posterior(
test_x, posterior_transform=DummyPosteriorTransform()
)
# test the posterior works with observation noise
posterior = self.model.posterior(test_x, observation_noise=True)
self.assertIsInstance(posterior, GPyTorchPosterior)
# test the posterior works with no variances
# due to some funkiness in MVN registration, the variance is non-zero rather than exactly zero.
with skip_posterior_variances():
posterior = self.model.posterior(test_x)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertLessEqual(posterior.variance.max(), 1e-6)
def test_transforms(self):
for dtype in [torch.float, torch.double]:
train_x = torch.rand(10, 3, device=self.device, dtype=dtype)
train_y = torch.randn(10, 4, 5, device=self.device, dtype=dtype)
# test handling of Standardize
with self.assertWarns(RuntimeWarning):
model = HigherOrderGP(
train_X=train_x, train_Y=train_y, outcome_transform=Standardize(m=5)
)
self.assertIsInstance(model.outcome_transform, FlattenedStandardize)
self.assertEqual(model.outcome_transform.output_shape, train_y.shape[1:])
self.assertEqual(model.outcome_transform.batch_shape, torch.Size())
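# Note on the behavior asserted above: a plain Standardize is converted (with a
# RuntimeWarning) to a FlattenedStandardize whose output shape matches the
# trailing dimensions `train_y.shape[1:]` of the higher-order outcomes.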
model = HigherOrderGP(
train_X=train_x,
train_Y=train_y,
input_transform=Normalize(d=3),
outcome_transform=FlattenedStandardize(train_y.shape[1:]),
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll_torch(mll, step_limit=1)
test_x = torch.rand(2, 5, 3, device=self.device, dtype=dtype)
test_y = torch.randn(2, 5, 4, 5, device=self.device, dtype=dtype)
with mock.patch.object(
HigherOrderGP, "transform_inputs", wraps=model.transform_inputs
) as mock_intf:
posterior = model.posterior(test_x)
mock_intf.assert_called_once()
self.assertIsInstance(posterior, TransformedPosterior)
conditioned_model = model.condition_on_observations(test_x, test_y)
self.assertIsInstance(conditioned_model, HigherOrderGP)
self.check_transform_forward(model, dtype)
self.check_transform_untransform(model, dtype)
def check_transform_forward(self, model, dtype):
train_y = torch.randn(2, 10, 4, 5, device=self.device, dtype=dtype)
train_y_var = torch.rand(2, 10, 4, 5, device=self.device, dtype=dtype)
output, output_var = model.outcome_transform.forward(train_y)
self.assertEqual(output.shape, torch.Size((2, 10, 4, 5)))
self.assertEqual(output_var, None)
output, output_var = model.outcome_transform.forward(train_y, train_y_var)
self.assertEqual(output.shape, torch.Size((2, 10, 4, 5)))
self.assertEqual(output_var.shape, torch.Size((2, 10, 4, 5)))
def check_transform_untransform(self, model, dtype):
output, output_var = model.outcome_transform.untransform(
torch.randn(2, 2, 4, 5, device=self.device, dtype=dtype)
)
self.assertEqual(output.shape, torch.Size((2, 2, 4, 5)))
self.assertEqual(output_var, None)
output, output_var = model.outcome_transform.untransform(
torch.randn(2, 2, 4, 5, device=self.device, dtype=dtype),
torch.rand(2, 2, 4, 5, device=self.device, dtype=dtype),
)
self.assertEqual(output.shape, torch.Size((2, 2, 4, 5)))
self.assertEqual(output_var.shape, torch.Size((2, 2, 4, 5)))
def test_condition_on_observations(self):
for dtype in [torch.float, torch.double]:
torch.random.manual_seed(0)
test_x = torch.rand(2, 5, 1, device=self.device, dtype=dtype)
test_y = torch.randn(2, 5, 3, 5, device=self.device, dtype=dtype)
self.model.to(dtype)
if dtype == torch.double:
# need to clear float caches
self.model.train()
self.model.eval()
# dummy call to ensure caches have been computed
_ = self.model.posterior(test_x)
conditioned_model = self.model.condition_on_observations(test_x, test_y)
self.assertIsInstance(conditioned_model, HigherOrderGP)
def test_fantasize(self):
for dtype in [torch.float, torch.double]:
torch.random.manual_seed(0)
test_x = torch.rand(2, 5, 1, device=self.device, dtype=dtype)
sampler = IIDNormalSampler(sample_shape=torch.Size([32]))
self.model.to(dtype)
if dtype == torch.double:
# need to clear float caches
self.model.train()
self.model.eval()
_ = self.model.posterior(test_x)
fantasy_model = self.model.fantasize(test_x, sampler=sampler)
self.assertIsInstance(fantasy_model, HigherOrderGP)
self.assertEqual(
fantasy_model.train_inputs[0].shape[:2], torch.Size((32, 2))
)
def test_initialize_latents(self):
for dtype in [torch.float, torch.double]:
torch.random.manual_seed(0)
train_x = torch.rand(10, 1, device=self.device, dtype=dtype)
train_y = torch.randn(10, 3, 5, device=self.device, dtype=dtype)
for latent_dim_sizes, latent_init in itertools.product(
[[1, 1], [2, 3]],
["gp", "default"],
):
self.model = HigherOrderGP(
train_x,
train_y,
num_latent_dims=latent_dim_sizes,
latent_init=latent_init,
)
self.assertEqual(
self.model.latent_parameters[0].shape,
torch.Size((3, latent_dim_sizes[0])),
)
self.assertEqual(
self.model.latent_parameters[1].shape,
torch.Size((5, latent_dim_sizes[1])),
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import torch
from botorch.exceptions.warnings import InputDataWarning, OptimizationWarning
from botorch.fit import fit_gpytorch_mll
from botorch.models.converter import batched_to_model_list
from botorch.models.gp_regression_mixed import MixedSingleTaskGP
from botorch.models.kernels.categorical import CategoricalKernel
from botorch.models.transforms import Normalize
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling import SobolQMCNormalSampler
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.testing import _get_random_data, BotorchTestCase
from gpytorch.kernels.kernel import AdditiveKernel, ProductKernel
from gpytorch.kernels.matern_kernel import MaternKernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.means import ConstantMean
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from .test_gp_regression import _get_pvar_expected
class TestMixedSingleTaskGP(BotorchTestCase):
def test_gp(self):
d = 3
bounds = torch.tensor([[-1.0] * d, [1.0] * d])
for batch_shape, m, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(0, 1, 3),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
ord_dims = sorted(set(range(d)) - set(cat_dims))
# test correct indices
if (ncat < 3) and (ncat > 0):
MixedSingleTaskGP(
train_X,
train_Y,
cat_dims=cat_dims,
input_transform=Normalize(
d=d,
bounds=bounds.to(**tkwargs),
transform_on_train=True,
indices=ord_dims,
),
)
if len(cat_dims) == 0:
with self.assertRaises(ValueError):
MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
continue
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
self.assertEqual(model._ignore_X_dims_scaling_check, cat_dims)
mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
fit_gpytorch_mll(
mll, optimizer_kwargs={"options": {"maxiter": 1}}, max_attempts=1
)
# test init
self.assertIsInstance(model.mean_module, ConstantMean)
if ncat < 3:
self.assertIsInstance(model.covar_module, AdditiveKernel)
sum_kernel, prod_kernel = model.covar_module.kernels
self.assertIsInstance(sum_kernel, ScaleKernel)
self.assertIsInstance(sum_kernel.base_kernel, AdditiveKernel)
self.assertIsInstance(prod_kernel, ScaleKernel)
self.assertIsInstance(prod_kernel.base_kernel, ProductKernel)
sum_cont_kernel, sum_cat_kernel = sum_kernel.base_kernel.kernels
prod_cont_kernel, prod_cat_kernel = prod_kernel.base_kernel.kernels
self.assertIsInstance(sum_cont_kernel, MaternKernel)
self.assertIsInstance(sum_cat_kernel, ScaleKernel)
self.assertIsInstance(sum_cat_kernel.base_kernel, CategoricalKernel)
self.assertIsInstance(prod_cont_kernel, MaternKernel)
self.assertIsInstance(prod_cat_kernel, CategoricalKernel)
else:
self.assertIsInstance(model.covar_module, ScaleKernel)
self.assertIsInstance(model.covar_module.base_kernel, CategoricalKernel)
# test posterior
# test non batch evaluation
X = torch.rand(batch_shape + torch.Size([4, d]), **tkwargs)
expected_shape = batch_shape + torch.Size([4, m])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
self.assertEqual(posterior.variance.shape, expected_shape)
# test adding observation noise
posterior_pred = model.posterior(X, observation_noise=True)
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
self.assertEqual(posterior_pred.mean.shape, expected_shape)
self.assertEqual(posterior_pred.variance.shape, expected_shape)
pvar = posterior_pred.variance
pvar_exp = _get_pvar_expected(posterior, model, X, m)
self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5)
# test batch evaluation
X = torch.rand(2, *batch_shape, 3, d, **tkwargs)
expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
# test adding observation noise in batch mode
posterior_pred = model.posterior(X, observation_noise=True)
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
self.assertEqual(posterior_pred.mean.shape, expected_shape)
pvar = posterior_pred.variance
pvar_exp = _get_pvar_expected(posterior, model, X, m)
self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5)
# test that model converter throws an exception
with self.assertRaisesRegex(NotImplementedError, "not supported"):
batched_to_model_list(model)
def test_condition_on_observations(self):
d = 3
for batch_shape, m, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(1, 2),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
# evaluate model
model.posterior(torch.rand(torch.Size([4, d]), **tkwargs))
# test condition_on_observations
fant_shape = torch.Size([2])
# fantasize at different input points
X_fant, Y_fant = _get_random_data(
fant_shape + batch_shape, m=m, d=d, n=3, **tkwargs
)
cm = model.condition_on_observations(X_fant, Y_fant)
# fantasize at same input points (check proper broadcasting)
cm_same_inputs = model.condition_on_observations(
X_fant[0],
Y_fant,
)
test_Xs = [
# test broadcasting single input across fantasy and model batches
torch.rand(4, d, **tkwargs),
# separate input for each model batch and broadcast across
# fantasy batches
torch.rand(batch_shape + torch.Size([4, d]), **tkwargs),
# separate input for each model and fantasy batch
torch.rand(fant_shape + batch_shape + torch.Size([4, d]), **tkwargs),
]
for test_X in test_Xs:
posterior = cm.posterior(test_X)
self.assertEqual(
posterior.mean.shape, fant_shape + batch_shape + torch.Size([4, m])
)
posterior_same_inputs = cm_same_inputs.posterior(test_X)
self.assertEqual(
posterior_same_inputs.mean.shape,
fant_shape + batch_shape + torch.Size([4, m]),
)
# check that fantasies of batched model are correct
if len(batch_shape) > 0 and test_X.dim() == 2:
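                    # rebuild a non-batched model from the first batch's
                    # parameters; its fantasies should match the first batch
                    # of the batched model's fantasies up to numerical noise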
state_dict_non_batch = {
key: (val[0] if val.ndim > 1 else val)
for key, val in model.state_dict().items()
}
model_kwargs_non_batch = {
"train_X": train_X[0],
"train_Y": train_Y[0],
"cat_dims": cat_dims,
}
model_non_batch = type(model)(**model_kwargs_non_batch)
model_non_batch.load_state_dict(state_dict_non_batch)
model_non_batch.eval()
model_non_batch.likelihood.eval()
model_non_batch.posterior(torch.rand(torch.Size([4, d]), **tkwargs))
cm_non_batch = model_non_batch.condition_on_observations(
X_fant[0][0],
Y_fant[:, 0, :],
)
non_batch_posterior = cm_non_batch.posterior(test_X)
self.assertTrue(
torch.allclose(
posterior_same_inputs.mean[:, 0, ...],
non_batch_posterior.mean,
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
posterior_same_inputs.distribution.covariance_matrix[
:, 0, :, :
],
non_batch_posterior.distribution.covariance_matrix,
atol=1e-3,
)
)
def test_fantasize(self):
d = 3
for batch_shape, m, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(1, 2),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
# fantasize
X_f = torch.rand(torch.Size(batch_shape + torch.Size([4, d])), **tkwargs)
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([3]))
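            # fantasizing should return a model of the same class with an
            # added fantasy batch dimension of size 3 (the sample shape)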
fm = model.fantasize(X=X_f, sampler=sampler)
self.assertIsInstance(fm, model.__class__)
fm = model.fantasize(X=X_f, sampler=sampler, observation_noise=False)
self.assertIsInstance(fm, model.__class__)
def test_subset_model(self):
d, m = 3, 2
for batch_shape, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
with self.assertRaises(NotImplementedError):
model.subset_output([0])
# TODO: Support subsetting MixedSingleTaskGP models
# X = torch.rand(torch.Size(batch_shape + torch.Size([3, d])), **tkwargs)
# p = model.posterior(X)
# p_sub = subset_model.posterior(X)
# self.assertTrue(
# torch.allclose(p_sub.mean, p.mean[..., [0]], atol=1e-4, rtol=1e-4)
# )
# self.assertTrue(
# torch.allclose(
# p_sub.variance, p.variance[..., [0]], atol=1e-4, rtol=1e-4
# )
# )
def test_construct_inputs(self):
d = 3
for batch_shape, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
X, Y = _get_random_data(batch_shape=batch_shape, m=1, d=d, **tkwargs)
cat_dims = list(range(ncat))
training_data = SupervisedDataset(X, Y)
model_kwargs = MixedSingleTaskGP.construct_inputs(
training_data, categorical_features=cat_dims
)
self.assertTrue(X.equal(model_kwargs["train_X"]))
self.assertTrue(Y.equal(model_kwargs["train_Y"]))
self.assertEqual(model_kwargs["cat_dims"], cat_dims)
self.assertIsNone(model_kwargs["likelihood"])
# With train_Yvar.
training_data = SupervisedDataset(X, Y, Y)
with self.assertWarnsRegex(InputDataWarning, "train_Yvar"):
model_kwargs = MixedSingleTaskGP.construct_inputs(
training_data, categorical_features=cat_dims
)
self.assertTrue(X.equal(model_kwargs["train_X"]))
self.assertTrue(Y.equal(model_kwargs["train_Y"]))
self.assertNotIn("train_Yvar", model_kwargs)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models.contextual_multioutput import FixedNoiseLCEMGP, LCEMGP
from botorch.models.multitask import MultiTaskGP
from botorch.posteriors import GPyTorchPosterior
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from linear_operator.operators import LinearOperator
from torch import Tensor
class ContextualMultiOutputTest(BotorchTestCase):
def testLCEMGP(self):
d = 1
for dtype, fixed_noise in ((torch.float, True), (torch.double, False)):
            # set up non-batched training data
train_x = torch.rand(10, d, device=self.device, dtype=dtype)
train_y = torch.cos(train_x)
# 2 contexts here
task_indices = torch.tensor(
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
device=self.device,
dtype=dtype,
)
            train_x = torch.cat([train_x, task_indices.unsqueeze(-1)], dim=1)
if fixed_noise:
train_yvar = torch.ones(10, 1, device=self.device, dtype=dtype) * 0.01
model = LCEMGP(
train_X=train_x,
train_Y=train_y,
task_feature=d,
train_Yvar=train_yvar,
)
else:
model = LCEMGP(train_X=train_x, train_Y=train_y, task_feature=d)
self.assertIsInstance(model, LCEMGP)
self.assertIsInstance(model, MultiTaskGP)
self.assertIsNone(model.context_emb_feature)
self.assertIsInstance(model.context_cat_feature, Tensor)
self.assertEqual(model.context_cat_feature.shape, torch.Size([2, 1]))
self.assertEqual(len(model.emb_layers), 1)
self.assertEqual(model.emb_dims, [(2, 1)])
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll, optimizer_kwargs={"options": {"maxiter": 1}})
context_covar = model._eval_context_covar()
self.assertIsInstance(context_covar, LinearOperator)
self.assertEqual(context_covar.shape, torch.Size([2, 2]))
embeddings = model._task_embeddings()
self.assertIsInstance(embeddings, Tensor)
self.assertEqual(embeddings.shape, torch.Size([2, 1]))
test_x = torch.rand(5, d, device=self.device, dtype=dtype)
task_indices = torch.tensor(
[0.0, 0.0, 0.0, 0.0, 0.0], device=self.device, dtype=dtype
)
            test_x = torch.cat([test_x, task_indices.unsqueeze(-1)], dim=1)
self.assertIsInstance(model(test_x), MultivariateNormal)
# test posterior
posterior_f = model.posterior(test_x[:, :d])
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultitaskMultivariateNormal)
# test posterior w/ single output index
posterior_f = model.posterior(test_x[:, :d], output_indices=[0])
self.assertIsInstance(posterior_f, GPyTorchPosterior)
self.assertIsInstance(posterior_f.distribution, MultivariateNormal)
# test input embs_dim_list (one categorical feature)
# test input pre-trained emb context_emb_feature
model2 = LCEMGP(
train_X=train_x,
train_Y=train_y,
task_feature=d,
embs_dim_list=[2], # increase dim from 1 to 2
context_emb_feature=torch.Tensor([[0.2], [0.3]]),
)
self.assertIsInstance(model2, LCEMGP)
self.assertIsInstance(model2, MultiTaskGP)
self.assertIsNotNone(model2.context_emb_feature)
self.assertIsInstance(model2.context_cat_feature, Tensor)
self.assertEqual(model2.context_cat_feature.shape, torch.Size([2, 1]))
self.assertEqual(len(model2.emb_layers), 1)
self.assertEqual(model2.emb_dims, [(2, 2)])
embeddings2 = model2._task_embeddings()
self.assertIsInstance(embeddings2, Tensor)
self.assertEqual(embeddings2.shape, torch.Size([2, 3]))
def testFixedNoiseLCEMGP(self):
d = 1
for dtype in (torch.float, torch.double):
train_x = torch.rand(10, d, device=self.device, dtype=dtype)
train_y = torch.cos(train_x)
task_indices = torch.tensor(
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0], device=self.device
)
            train_x = torch.cat([train_x, task_indices.unsqueeze(-1)], dim=1)
train_yvar = torch.ones(10, 1, device=self.device, dtype=dtype) * 0.01
model = FixedNoiseLCEMGP(
train_X=train_x, train_Y=train_y, train_Yvar=train_yvar, task_feature=d
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll, optimizer_kwargs={"options": {"maxiter": 1}})
self.assertIsInstance(model, FixedNoiseLCEMGP)
test_x = torch.rand(5, d, device=self.device, dtype=dtype)
task_indices = torch.tensor(
[0.0, 0.0, 0.0, 0.0, 0.0], device=self.device, dtype=dtype
)
            test_x = torch.cat([test_x, task_indices.unsqueeze(-1)], dim=1)
self.assertIsInstance(model(test_x), MultivariateNormal)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models.approximate_gp import (
_SingleTaskVariationalGP,
ApproximateGPyTorchModel,
SingleTaskVariationalGP,
)
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Log
from botorch.models.utils.inducing_point_allocators import (
GreedyImprovementReduction,
GreedyVarianceReduction,
)
from botorch.posteriors import GPyTorchPosterior, TransformedPosterior
from botorch.utils.testing import BotorchTestCase
from gpytorch.likelihoods import GaussianLikelihood, MultitaskGaussianLikelihood
from gpytorch.mlls import VariationalELBO
from gpytorch.variational import (
IndependentMultitaskVariationalStrategy,
VariationalStrategy,
)
class TestApproximateGP(BotorchTestCase):
def setUp(self):
super().setUp()
self.train_X = torch.rand(10, 1, device=self.device)
self.train_Y = torch.sin(self.train_X) + torch.randn_like(self.train_X) * 0.2
def test_initialization(self):
# test non batch case
model = ApproximateGPyTorchModel(train_X=self.train_X, train_Y=self.train_Y)
self.assertIsInstance(model.model, _SingleTaskVariationalGP)
self.assertIsInstance(model.likelihood, GaussianLikelihood)
self.assertIsInstance(model.model.variational_strategy, VariationalStrategy)
self.assertEqual(model.num_outputs, 1)
# test batch case
stacked_y = torch.cat((self.train_Y, self.train_Y), dim=-1)
model = ApproximateGPyTorchModel(
train_X=self.train_X, train_Y=stacked_y, num_outputs=2
)
self.assertIsInstance(model.model, _SingleTaskVariationalGP)
self.assertIsInstance(model.likelihood, MultitaskGaussianLikelihood)
self.assertIsInstance(
model.model.variational_strategy, IndependentMultitaskVariationalStrategy
)
self.assertEqual(model.num_outputs, 2)
class TestSingleTaskVariationalGP(BotorchTestCase):
def setUp(self):
super().setUp()
train_X = torch.rand(10, 1, device=self.device)
train_y = torch.sin(train_X) + torch.randn_like(train_X) * 0.2
self.model = SingleTaskVariationalGP(
train_X=train_X, likelihood=GaussianLikelihood()
).to(self.device)
mll = VariationalELBO(self.model.likelihood, self.model.model, num_data=10)
loss = -mll(self.model.likelihood(self.model(train_X)), train_y).sum()
loss.backward()
def test_posterior(self):
# basic test of checking that the posterior works as intended
test_x = torch.rand(30, 1, device=self.device)
posterior = self.model.posterior(test_x)
self.assertIsInstance(posterior, GPyTorchPosterior)
posterior = self.model.posterior(test_x, observation_noise=True)
self.assertIsInstance(posterior, GPyTorchPosterior)
# now loop through all possibilities
train_X = torch.rand(3, 10, 1, device=self.device)
train_Y = torch.randn(3, 10, 2, device=self.device)
test_X = torch.rand(3, 5, 1, device=self.device)
all_tests = {
"non_batched": [train_X[0], train_Y[0, :, :1], test_X[0]],
"non_batched_mo": [train_X[0], train_Y[0], test_X[0]],
"batched": [train_X, train_Y[..., :1], test_X],
# batched multi-output is not supported at this time
# "batched_mo": [train_X, train_Y, test_X],
"non_batched_to_batched": [train_X[0], train_Y[0], test_X],
}
for test_name, [tx, ty, test] in all_tests.items():
with self.subTest(test_name=test_name):
model = SingleTaskVariationalGP(tx, ty, inducing_points=tx)
posterior = model.posterior(test)
self.assertIsInstance(posterior, GPyTorchPosterior)
# test batch_shape property
self.assertEqual(model.batch_shape, tx.shape[:-2])
def test_variational_setUp(self):
for dtype in [torch.float, torch.double]:
train_X = torch.rand(10, 1, device=self.device, dtype=dtype)
train_y = torch.randn(10, 3, device=self.device, dtype=dtype)
for ty, num_out in [[train_y, 3], [train_y, 1], [None, 3]]:
batched_model = SingleTaskVariationalGP(
train_X,
train_Y=ty,
num_outputs=num_out,
learn_inducing_points=False,
).to(self.device)
mll = VariationalELBO(
batched_model.likelihood, batched_model.model, num_data=10
)
with torch.enable_grad():
loss = -mll(
batched_model.likelihood(batched_model(train_X)), train_y
).sum()
loss.backward()
# ensure that inducing points do not require grad
model_var_strat = batched_model.model.variational_strategy
self.assertEqual(
model_var_strat.base_variational_strategy.inducing_points.grad,
None,
)
                # but the covariance module's outputscale should have one
self.assertIsNotNone(
batched_model.model.covar_module.raw_outputscale.grad
)
# check that we always have three outputs
self.assertEqual(batched_model._num_outputs, 3)
self.assertIsInstance(
batched_model.likelihood, MultitaskGaussianLikelihood
)
def test_likelihood(self):
self.assertIsInstance(self.model.likelihood, GaussianLikelihood)
        self.assertTrue(self.model._is_custom_likelihood)
def test_initializations(self):
train_X = torch.rand(15, 1, device=self.device)
train_Y = torch.rand(15, 1, device=self.device)
stacked_train_X = torch.cat((train_X, train_X), dim=0)
for X, num_ind in [[train_X, 5], [stacked_train_X, 20], [stacked_train_X, 5]]:
model = SingleTaskVariationalGP(train_X=X, inducing_points=num_ind)
if num_ind == 5:
self.assertLessEqual(
model.model.variational_strategy.inducing_points.shape,
torch.Size((5, 1)),
)
else:
                # the allocator cannot return 20 inducing points when the
                # stacked inputs contain only 15 distinct training points
self.assertLess(
model.model.variational_strategy.inducing_points.shape[-2], num_ind
)
test_X = torch.rand(5, 1, device=self.device)
# test transforms
for inp_trans, out_trans in itertools.product(
[None, Normalize(d=1)], [None, Log()]
):
model = SingleTaskVariationalGP(
train_X=train_X,
train_Y=train_Y,
outcome_transform=out_trans,
input_transform=inp_trans,
)
if inp_trans is not None:
self.assertIsInstance(model.input_transform, Normalize)
else:
self.assertFalse(hasattr(model, "input_transform"))
if out_trans is not None:
self.assertIsInstance(model.outcome_transform, Log)
posterior = model.posterior(test_X)
self.assertIsInstance(posterior, TransformedPosterior)
else:
self.assertFalse(hasattr(model, "outcome_transform"))
# test default inducing point allocator
self.assertIsInstance(model._inducing_point_allocator, GreedyVarianceReduction)
# test that can specify an inducing point allocator
for ipa in [
GreedyVarianceReduction(),
GreedyImprovementReduction(model, maximize=True),
]:
model = SingleTaskVariationalGP(train_X, inducing_point_allocator=ipa)
            self.assertIs(type(model._inducing_point_allocator), type(ipa))
# test warning when learning on and custom IPA provided
with self.assertWarnsRegex(
UserWarning, r"set `learn_inducing_points` to False"
):
SingleTaskVariationalGP(
train_X,
learn_inducing_points=True,
inducing_point_allocator=GreedyVarianceReduction(),
)
def test_inducing_point_init(self):
train_X_1 = torch.rand(15, 1, device=self.device)
train_X_2 = torch.rand(15, 1, device=self.device)
# single-task
model_1 = SingleTaskVariationalGP(train_X=train_X_1, inducing_points=5)
model_1.init_inducing_points(train_X_2)
model_1_inducing = model_1.model.variational_strategy.inducing_points
model_2 = SingleTaskVariationalGP(train_X=train_X_2, inducing_points=5)
model_2_inducing = model_2.model.variational_strategy.inducing_points
self.assertEqual(model_1_inducing.shape, (5, 1))
self.assertEqual(model_2_inducing.shape, (5, 1))
self.assertAllClose(model_1_inducing, model_2_inducing)
# multi-task
model_1 = SingleTaskVariationalGP(
train_X=train_X_1, inducing_points=5, num_outputs=2
)
model_1.init_inducing_points(train_X_2)
model_1_inducing = (
model_1.model.variational_strategy.base_variational_strategy.inducing_points
)
model_2 = SingleTaskVariationalGP(
train_X=train_X_2, inducing_points=5, num_outputs=2
)
model_2_inducing = (
model_2.model.variational_strategy.base_variational_strategy.inducing_points
)
self.assertEqual(model_1_inducing.shape, (5, 1))
self.assertEqual(model_2_inducing.shape, (5, 1))
self.assertAllClose(model_1_inducing, model_2_inducing)
# batched inputs
train_X_1 = torch.rand(2, 15, 1, device=self.device)
train_X_2 = torch.rand(2, 15, 1, device=self.device)
train_Y = torch.rand(2, 15, 1, device=self.device)
model_1 = SingleTaskVariationalGP(
train_X=train_X_1, train_Y=train_Y, inducing_points=5
)
model_1.init_inducing_points(train_X_2)
model_1_inducing = model_1.model.variational_strategy.inducing_points
model_2 = SingleTaskVariationalGP(
train_X=train_X_2, train_Y=train_Y, inducing_points=5
)
model_2_inducing = model_2.model.variational_strategy.inducing_points
self.assertEqual(model_1_inducing.shape, (2, 5, 1))
self.assertEqual(model_2_inducing.shape, (2, 5, 1))
self.assertAllClose(model_1_inducing, model_2_inducing)
def test_custom_inducing_point_init(self):
train_X_0 = torch.rand(15, 1, device=self.device)
train_X_1 = torch.rand(15, 1, device=self.device)
train_X_2 = torch.rand(15, 1, device=self.device)
train_X_3 = torch.rand(15, 1, device=self.device)
model_from_previous_step = SingleTaskVariationalGP(
train_X=train_X_0, inducing_points=5
)
model_1 = SingleTaskVariationalGP(
train_X=train_X_1,
inducing_points=5,
inducing_point_allocator=GreedyImprovementReduction(
model_from_previous_step, maximize=True
),
)
model_1.init_inducing_points(train_X_2)
model_1_inducing = model_1.model.variational_strategy.inducing_points
model_2 = SingleTaskVariationalGP(
train_X=train_X_2,
inducing_points=5,
inducing_point_allocator=GreedyImprovementReduction(
model_from_previous_step, maximize=True
),
)
model_2_inducing = model_2.model.variational_strategy.inducing_points
model_3 = SingleTaskVariationalGP(
train_X=train_X_3,
inducing_points=5,
inducing_point_allocator=GreedyImprovementReduction(
model_from_previous_step, maximize=False
),
)
model_3.init_inducing_points(train_X_2)
model_3_inducing = model_3.model.variational_strategy.inducing_points
self.assertEqual(model_1_inducing.shape, (5, 1))
self.assertEqual(model_2_inducing.shape, (5, 1))
self.assertAllClose(model_1_inducing, model_2_inducing)
self.assertFalse(model_1_inducing[0, 0] == model_3_inducing[0, 0])
def test_input_transform(self) -> None:
train_X = torch.linspace(1, 3, 10, dtype=torch.double)[:, None]
y = -3 * train_X + 5
for input_transform in [None, Normalize(1)]:
with self.subTest(input_transform=input_transform):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Input data is not contained"
)
model = SingleTaskVariationalGP(
train_X=train_X, train_Y=y, input_transform=input_transform
)
mll = VariationalELBO(
model.likelihood, model.model, num_data=train_X.shape[-2]
)
fit_gpytorch_mll(mll)
post = model.posterior(torch.tensor([train_X.mean()]))
self.assertAllClose(post.mean[0][0], y.mean(), atol=1e-3, rtol=1e-3)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions import UnsupportedError
from botorch.models import (
FixedNoiseGP,
HeteroskedasticSingleTaskGP,
ModelListGP,
SingleTaskGP,
SingleTaskMultiFidelityGP,
)
from botorch.models.converter import (
batched_multi_output_to_single_output,
batched_to_model_list,
model_list_to_batched,
)
from botorch.models.transforms.input import AppendFeatures, Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import RBFKernel
from gpytorch.likelihoods import GaussianLikelihood
from .test_gpytorch import SimpleGPyTorchModel
class TestConverters(BotorchTestCase):
def test_batched_to_model_list(self):
for dtype in (torch.float, torch.double):
# test SingleTaskGP
train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
train_Y1 = train_X.sum(dim=-1)
train_Y2 = train_X[:, 0] - train_X[:, 1]
train_Y = torch.stack([train_Y1, train_Y2], dim=-1)
batch_gp = SingleTaskGP(train_X, train_Y)
list_gp = batched_to_model_list(batch_gp)
self.assertIsInstance(list_gp, ModelListGP)
# test FixedNoiseGP
batch_gp = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
list_gp = batched_to_model_list(batch_gp)
self.assertIsInstance(list_gp, ModelListGP)
# test SingleTaskMultiFidelityGP
for lin_trunc in (False, True):
batch_gp = SingleTaskMultiFidelityGP(
train_X, train_Y, iteration_fidelity=1, linear_truncated=lin_trunc
)
list_gp = batched_to_model_list(batch_gp)
self.assertIsInstance(list_gp, ModelListGP)
# test HeteroskedasticSingleTaskGP
batch_gp = HeteroskedasticSingleTaskGP(
train_X, train_Y, torch.rand_like(train_Y)
)
with self.assertRaises(NotImplementedError):
batched_to_model_list(batch_gp)
# test with transforms
input_tf = Normalize(
d=2,
bounds=torch.tensor(
[[0.0, 0.0], [1.0, 1.0]], device=self.device, dtype=dtype
),
)
octf = Standardize(m=2)
batch_gp = SingleTaskGP(
train_X, train_Y, outcome_transform=octf, input_transform=input_tf
)
list_gp = batched_to_model_list(batch_gp)
for i, m in enumerate(list_gp.models):
self.assertIsInstance(m.input_transform, Normalize)
self.assertTrue(torch.equal(m.input_transform.bounds, input_tf.bounds))
self.assertIsInstance(m.outcome_transform, Standardize)
self.assertEqual(m.outcome_transform._m, 1)
expected_octf = octf.subset_output(idcs=[i])
for attr_name in ["means", "stdvs", "_stdvs_sq"]:
self.assertTrue(
torch.equal(
m.outcome_transform.__getattr__(attr_name),
expected_octf.__getattr__(attr_name),
)
)
# test with AppendFeatures
input_tf = AppendFeatures(
feature_set=torch.rand(2, 1, device=self.device, dtype=dtype)
)
batch_gp = SingleTaskGP(
train_X, train_Y, outcome_transform=octf, input_transform=input_tf
).eval()
list_gp = batched_to_model_list(batch_gp)
self.assertIsInstance(list_gp, ModelListGP)
self.assertIsInstance(list_gp.models[0].input_transform, AppendFeatures)
def test_model_list_to_batched(self):
for dtype in (torch.float, torch.double):
# basic test
train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
train_Y1 = train_X.sum(dim=-1, keepdim=True)
train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
gp1 = SingleTaskGP(train_X, train_Y1)
gp2 = SingleTaskGP(train_X, train_Y2)
list_gp = ModelListGP(gp1, gp2)
batch_gp = model_list_to_batched(list_gp)
self.assertIsInstance(batch_gp, SingleTaskGP)
# test degenerate (single model)
batch_gp = model_list_to_batched(ModelListGP(gp1))
self.assertEqual(batch_gp._num_outputs, 1)
# test different model classes
gp2 = FixedNoiseGP(train_X, train_Y1, torch.ones_like(train_Y1))
with self.assertRaises(UnsupportedError):
model_list_to_batched(ModelListGP(gp1, gp2))
# test non-batched models
gp1_ = SimpleGPyTorchModel(train_X, train_Y1)
gp2_ = SimpleGPyTorchModel(train_X, train_Y2)
with self.assertRaises(UnsupportedError):
model_list_to_batched(ModelListGP(gp1_, gp2_))
# test list of multi-output models
train_Y = torch.cat([train_Y1, train_Y2], dim=-1)
gp2 = SingleTaskGP(train_X, train_Y)
with self.assertRaises(UnsupportedError):
model_list_to_batched(ModelListGP(gp1, gp2))
# test different training inputs
gp2 = SingleTaskGP(2 * train_X, train_Y2)
with self.assertRaises(UnsupportedError):
model_list_to_batched(ModelListGP(gp1, gp2))
# check scalar agreement
gp2 = SingleTaskGP(train_X, train_Y2)
gp2.likelihood.noise_covar.noise_prior.rate.fill_(1.0)
with self.assertRaises(UnsupportedError):
model_list_to_batched(ModelListGP(gp1, gp2))
# check tensor shape agreement
gp2 = SingleTaskGP(train_X, train_Y2)
gp2.covar_module.raw_outputscale = torch.nn.Parameter(
torch.tensor([0.0], device=self.device, dtype=dtype)
)
with self.assertRaises(UnsupportedError):
model_list_to_batched(ModelListGP(gp1, gp2))
# test HeteroskedasticSingleTaskGP
gp2 = HeteroskedasticSingleTaskGP(
train_X, train_Y1, torch.ones_like(train_Y1)
)
with self.assertRaises(NotImplementedError):
model_list_to_batched(ModelListGP(gp2))
# test custom likelihood
gp2 = SingleTaskGP(train_X, train_Y2, likelihood=GaussianLikelihood())
with self.assertRaises(NotImplementedError):
model_list_to_batched(ModelListGP(gp2))
# test non-default kernel
gp1 = SingleTaskGP(train_X, train_Y1, covar_module=RBFKernel())
gp2 = SingleTaskGP(train_X, train_Y2, covar_module=RBFKernel())
list_gp = ModelListGP(gp1, gp2)
batch_gp = model_list_to_batched(list_gp)
self.assertEqual(type(batch_gp.covar_module), RBFKernel)
# test error when component GPs have different kernel types
gp1 = SingleTaskGP(train_X, train_Y1, covar_module=RBFKernel())
gp2 = SingleTaskGP(train_X, train_Y2)
list_gp = ModelListGP(gp1, gp2)
with self.assertRaises(UnsupportedError):
model_list_to_batched(list_gp)
# test FixedNoiseGP
train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
train_Y1 = train_X.sum(dim=-1, keepdim=True)
train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
gp1_ = FixedNoiseGP(train_X, train_Y1, torch.rand_like(train_Y1))
gp2_ = FixedNoiseGP(train_X, train_Y2, torch.rand_like(train_Y2))
list_gp = ModelListGP(gp1_, gp2_)
batch_gp = model_list_to_batched(list_gp)
# test SingleTaskMultiFidelityGP
gp1_ = SingleTaskMultiFidelityGP(train_X, train_Y1, iteration_fidelity=1)
gp2_ = SingleTaskMultiFidelityGP(train_X, train_Y2, iteration_fidelity=1)
list_gp = ModelListGP(gp1_, gp2_)
batch_gp = model_list_to_batched(list_gp)
gp2_ = SingleTaskMultiFidelityGP(train_X, train_Y2, iteration_fidelity=2)
list_gp = ModelListGP(gp1_, gp2_)
with self.assertRaises(UnsupportedError):
model_list_to_batched(list_gp)
# test input transform
input_tf = Normalize(
d=2,
bounds=torch.tensor(
[[0.0, 0.0], [1.0, 1.0]], device=self.device, dtype=dtype
),
)
gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf)
gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf)
list_gp = ModelListGP(gp1_, gp2_)
batch_gp = model_list_to_batched(list_gp)
self.assertIsInstance(batch_gp.input_transform, Normalize)
self.assertTrue(
torch.equal(batch_gp.input_transform.bounds, input_tf.bounds)
)
# test with AppendFeatures
input_tf3 = AppendFeatures(
feature_set=torch.rand(2, 1, device=self.device, dtype=dtype)
)
gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf3)
gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf3)
list_gp = ModelListGP(gp1_, gp2_).eval()
batch_gp = model_list_to_batched(list_gp)
self.assertIsInstance(batch_gp, SingleTaskGP)
self.assertIsInstance(batch_gp.input_transform, AppendFeatures)
# test different input transforms
input_tf2 = Normalize(
d=2,
bounds=torch.tensor(
[[-1.0, -1.0], [1.0, 1.0]], device=self.device, dtype=dtype
),
)
gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf)
gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf2)
list_gp = ModelListGP(gp1_, gp2_)
with self.assertRaisesRegex(UnsupportedError, "have the same"):
model_list_to_batched(list_gp)
# test batched input transform
input_tf2 = Normalize(
d=2,
bounds=torch.tensor(
[[-1.0, -1.0], [1.0, 1.0]], device=self.device, dtype=dtype
),
batch_shape=torch.Size([3]),
)
gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf2)
gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf2)
list_gp = ModelListGP(gp1_, gp2_)
with self.assertRaises(UnsupportedError):
model_list_to_batched(list_gp)
# test outcome transform
octf = Standardize(m=1)
gp1_ = SingleTaskGP(train_X, train_Y1, outcome_transform=octf)
gp2_ = SingleTaskGP(train_X, train_Y2, outcome_transform=octf)
list_gp = ModelListGP(gp1_, gp2_)
with self.assertRaises(UnsupportedError):
model_list_to_batched(list_gp)
def test_roundtrip(self):
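        # converting batched -> model list -> batched should be lossless:
        # the recovered state dict must match the original exactly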
for dtype in (torch.float, torch.double):
train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
train_Y1 = train_X.sum(dim=-1)
train_Y2 = train_X[:, 0] - train_X[:, 1]
train_Y = torch.stack([train_Y1, train_Y2], dim=-1)
# SingleTaskGP
batch_gp = SingleTaskGP(train_X, train_Y)
list_gp = batched_to_model_list(batch_gp)
batch_gp_recov = model_list_to_batched(list_gp)
sd_orig = batch_gp.state_dict()
sd_recov = batch_gp_recov.state_dict()
self.assertTrue(set(sd_orig) == set(sd_recov))
self.assertTrue(all(torch.equal(sd_orig[k], sd_recov[k]) for k in sd_orig))
# FixedNoiseGP
batch_gp = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
list_gp = batched_to_model_list(batch_gp)
batch_gp_recov = model_list_to_batched(list_gp)
sd_orig = batch_gp.state_dict()
sd_recov = batch_gp_recov.state_dict()
self.assertTrue(set(sd_orig) == set(sd_recov))
self.assertTrue(all(torch.equal(sd_orig[k], sd_recov[k]) for k in sd_orig))
# SingleTaskMultiFidelityGP
for lin_trunc in (False, True):
batch_gp = SingleTaskMultiFidelityGP(
train_X, train_Y, iteration_fidelity=1, linear_truncated=lin_trunc
)
list_gp = batched_to_model_list(batch_gp)
batch_gp_recov = model_list_to_batched(list_gp)
sd_orig = batch_gp.state_dict()
sd_recov = batch_gp_recov.state_dict()
self.assertTrue(set(sd_orig) == set(sd_recov))
self.assertTrue(
all(torch.equal(sd_orig[k], sd_recov[k]) for k in sd_orig)
)
def test_batched_multi_output_to_single_output(self):
for dtype in (torch.float, torch.double):
# basic test
train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
train_Y = torch.stack(
[
train_X.sum(dim=-1),
(train_X[:, 0] - train_X[:, 1]),
],
dim=1,
)
batched_mo_model = SingleTaskGP(train_X, train_Y)
batched_so_model = batched_multi_output_to_single_output(batched_mo_model)
self.assertIsInstance(batched_so_model, SingleTaskGP)
self.assertEqual(batched_so_model.num_outputs, 1)
# test non-batched models
non_batch_model = SimpleGPyTorchModel(train_X, train_Y[:, :1])
with self.assertRaises(UnsupportedError):
batched_multi_output_to_single_output(non_batch_model)
gp2 = HeteroskedasticSingleTaskGP(
train_X, train_Y, torch.ones_like(train_Y)
)
with self.assertRaises(NotImplementedError):
batched_multi_output_to_single_output(gp2)
# test custom likelihood
gp2 = SingleTaskGP(train_X, train_Y, likelihood=GaussianLikelihood())
with self.assertRaises(NotImplementedError):
batched_multi_output_to_single_output(gp2)
# test FixedNoiseGP
train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
batched_mo_model = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
batched_so_model = batched_multi_output_to_single_output(batched_mo_model)
self.assertIsInstance(batched_so_model, FixedNoiseGP)
self.assertEqual(batched_so_model.num_outputs, 1)
# test SingleTaskMultiFidelityGP
batched_mo_model = SingleTaskMultiFidelityGP(
train_X, train_Y, iteration_fidelity=1
)
batched_so_model = batched_multi_output_to_single_output(batched_mo_model)
self.assertIsInstance(batched_so_model, SingleTaskMultiFidelityGP)
self.assertEqual(batched_so_model.num_outputs, 1)
# test input transform
input_tf = Normalize(
d=2,
bounds=torch.tensor(
[[0.0, 0.0], [1.0, 1.0]], device=self.device, dtype=dtype
),
)
batched_mo_model = SingleTaskGP(train_X, train_Y, input_transform=input_tf)
batch_so_model = batched_multi_output_to_single_output(batched_mo_model)
self.assertIsInstance(batch_so_model.input_transform, Normalize)
self.assertTrue(
torch.equal(batch_so_model.input_transform.bounds, input_tf.bounds)
)
# test with AppendFeatures
input_tf = AppendFeatures(
feature_set=torch.rand(2, 1, device=self.device, dtype=dtype)
)
batched_mo_model = SingleTaskGP(
train_X, train_Y, input_transform=input_tf
).eval()
batch_so_model = batched_multi_output_to_single_output(batched_mo_model)
self.assertIsInstance(batch_so_model.input_transform, AppendFeatures)
# test batched input transform
input_tf = Normalize(
d=2,
bounds=torch.tensor(
[[-1.0, -1.0], [1.0, 1.0]], device=self.device, dtype=dtype
),
batch_shape=torch.Size([2]),
)
batched_mo_model = SingleTaskGP(train_X, train_Y, input_transform=input_tf)
batch_so_model = batched_multi_output_to_single_output(batched_mo_model)
self.assertIsInstance(batch_so_model.input_transform, Normalize)
self.assertTrue(
torch.equal(batch_so_model.input_transform.bounds, input_tf.bounds)
)
# test outcome transform
batched_mo_model = SingleTaskGP(
train_X, train_Y, outcome_transform=Standardize(m=2)
)
with self.assertRaises(NotImplementedError):
batched_multi_output_to_single_output(batched_mo_model)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.models.ensemble import EnsembleModel
from botorch.utils.testing import BotorchTestCase
class DummyEnsembleModel(EnsembleModel):
r"""A dummy ensemble model."""
def __init__(self):
r"""Init model."""
super().__init__()
self._num_outputs = 2
self.a = torch.rand(4, 3, 2)
def forward(self, X):
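        # each ensemble member i applies its own random linear map a[i] to
        # X (shape ... x n x d); stacking along dim=-3 produces a
        # ... x ensemble_size x n x m tensor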
return torch.stack(
[torch.einsum("...d,dm", X, self.a[i]) for i in range(4)], dim=-3
)
class TestEnsembleModels(BotorchTestCase):
def test_abstract_base_model(self):
with self.assertRaises(TypeError):
EnsembleModel()
def test_DummyEnsembleModel(self):
for shape in [(10, 3), (5, 10, 3)]:
e = DummyEnsembleModel()
X = torch.randn(*shape)
p = e.posterior(X)
self.assertEqual(p.ensemble_size, 4)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from typing import List, Optional
import torch
from botorch import fit_fully_bayesian_model_nuts
from botorch.acquisition.analytic import (
ExpectedImprovement,
PosteriorMean,
ProbabilityOfImprovement,
UpperConfidenceBound,
)
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
)
from botorch.acquisition.multi_objective import (
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.models import ModelList, ModelListGP
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.fully_bayesian import MCMC_DIM, MIN_INFERRED_NOISE_LEVEL
from botorch.models.fully_bayesian_multitask import (
MultitaskSaasPyroModel,
SaasFullyBayesianMultiTaskGP,
)
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.posteriors import FullyBayesianPosterior
from botorch.sampling.get_sampler import get_sampler
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood
from gpytorch.likelihoods.gaussian_likelihood import GaussianLikelihood
from gpytorch.means import ConstantMean
from .test_multitask import _gen_datasets
EXPECTED_KEYS = [
"latent_features",
"mean_module.raw_constant",
"covar_module.raw_outputscale",
"covar_module.base_kernel.raw_lengthscale",
"covar_module.base_kernel.raw_lengthscale_constraint.lower_bound",
"covar_module.base_kernel.raw_lengthscale_constraint.upper_bound",
"covar_module.raw_outputscale_constraint.lower_bound",
"covar_module.raw_outputscale_constraint.upper_bound",
"task_covar_module.raw_lengthscale",
"task_covar_module.raw_lengthscale_constraint.lower_bound",
"task_covar_module.raw_lengthscale_constraint.upper_bound",
]
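# When the noise level is inferred (train_Yvar=None), the sampled noise
# parameter and its constraint bounds also show up in the state dict: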
EXPECTED_KEYS_NOISE = EXPECTED_KEYS + [
"likelihood.noise_covar.raw_noise",
"likelihood.noise_covar.raw_noise_constraint.lower_bound",
"likelihood.noise_covar.raw_noise_constraint.upper_bound",
]
class TestFullyBayesianMultiTaskGP(BotorchTestCase):
def _get_data_and_model(
self,
task_rank: Optional[int] = 1,
output_tasks: Optional[List[int]] = None,
infer_noise: bool = False,
**tkwargs
):
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.rand(10, 4, **tkwargs)
task_indices = torch.cat(
[torch.zeros(5, 1, **tkwargs), torch.ones(5, 1, **tkwargs)], dim=0
)
self.num_tasks = 2
train_X = torch.cat([train_X, task_indices], dim=1)
train_Y = torch.sin(train_X[:, :1])
train_Yvar = 0.5 * torch.arange(10, **tkwargs).unsqueeze(-1)
model = SaasFullyBayesianMultiTaskGP(
train_X=train_X,
train_Y=train_Y,
train_Yvar=None if infer_noise else train_Yvar,
task_feature=4,
output_tasks=output_tasks,
rank=task_rank,
)
return train_X, train_Y, train_Yvar, model
def _get_unnormalized_data(self, **tkwargs):
with torch.random.fork_rng():
torch.manual_seed(0)
train_X = torch.rand(10, 4, **tkwargs)
train_Y = torch.sin(train_X[:, :1])
task_indices = torch.cat(
[torch.zeros(5, 1, **tkwargs), torch.ones(5, 1, **tkwargs)], dim=0
)
train_X = torch.cat([5 + 5 * train_X, task_indices], dim=1)
test_X = 5 + 5 * torch.rand(5, 4, **tkwargs)
train_Yvar = 0.1 * torch.arange(10, **tkwargs).unsqueeze(-1)
return train_X, train_Y, train_Yvar, test_X
def _get_mcmc_samples(self, num_samples: int, dim: int, task_rank: int, **tkwargs):
mcmc_samples = {
"lengthscale": torch.rand(num_samples, 1, dim, **tkwargs),
"outputscale": torch.rand(num_samples, **tkwargs),
"mean": torch.randn(num_samples, **tkwargs),
"noise": torch.rand(num_samples, 1, **tkwargs),
"task_lengthscale": torch.rand(num_samples, 1, task_rank, **tkwargs),
"latent_features": torch.rand(
num_samples, self.num_tasks, task_rank, **tkwargs
),
}
return mcmc_samples
def test_raises(self):
tkwargs = {"device": self.device, "dtype": torch.double}
with self.assertRaisesRegex(
ValueError,
"Expected train_X to have shape n x d and train_Y to have shape n x 1",
):
SaasFullyBayesianMultiTaskGP(
train_X=torch.rand(10, 4, **tkwargs),
train_Y=torch.randn(10, **tkwargs),
train_Yvar=torch.rand(10, 1, **tkwargs),
task_feature=4,
)
with self.assertRaisesRegex(
ValueError,
"Expected train_X to have shape n x d and train_Y to have shape n x 1",
):
SaasFullyBayesianMultiTaskGP(
train_X=torch.rand(10, 4, **tkwargs),
train_Y=torch.randn(12, 1, **tkwargs),
train_Yvar=torch.rand(12, 1, **tkwargs),
task_feature=4,
)
with self.assertRaisesRegex(
ValueError,
"Expected train_X to have shape n x d and train_Y to have shape n x 1",
):
SaasFullyBayesianMultiTaskGP(
train_X=torch.rand(10, **tkwargs),
train_Y=torch.randn(10, 1, **tkwargs),
train_Yvar=torch.rand(10, 1, **tkwargs),
task_feature=4,
)
with self.assertRaisesRegex(
ValueError,
"Expected train_Yvar to be None or have the same shape as train_Y",
):
SaasFullyBayesianMultiTaskGP(
train_X=torch.rand(10, 4, **tkwargs),
train_Y=torch.randn(10, 1, **tkwargs),
train_Yvar=torch.rand(10, **tkwargs),
task_feature=4,
)
train_X, train_Y, train_Yvar, model = self._get_data_and_model(**tkwargs)
sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
with self.assertRaisesRegex(
NotImplementedError, "Fantasize is not implemented!"
):
model.fantasize(
X=torch.cat(
[torch.rand(5, 4, **tkwargs), torch.ones(5, 1, **tkwargs)], dim=1
),
sampler=sampler,
)
# Make sure an exception is raised if the model has not been fitted
not_fitted_error_msg = (
"Model has not been fitted. You need to call "
"`fit_fully_bayesian_model_nuts` to fit the model."
)
with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
model.num_mcmc_samples
with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
model.median_lengthscale
with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
model.forward(torch.rand(1, 4, **tkwargs))
with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
model.posterior(torch.rand(1, 4, **tkwargs))
def test_fit_model(
self,
dtype: torch.dtype = torch.double,
infer_noise: bool = False,
task_rank: int = 1,
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y, train_Yvar, model = self._get_data_and_model(
infer_noise=infer_noise, task_rank=task_rank, **tkwargs
)
n = train_X.shape[0]
d = train_X.shape[1] - 1
# Test init
self.assertIsNone(model.mean_module)
self.assertIsNone(model.covar_module)
self.assertIsNone(model.likelihood)
self.assertIsInstance(model.pyro_model, MultitaskSaasPyroModel)
self.assertAllClose(train_X, model.pyro_model.train_X)
self.assertAllClose(train_Y, model.pyro_model.train_Y)
if infer_noise:
self.assertIsNone(model.pyro_model.train_Yvar)
else:
self.assertAllClose(
train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL),
model.pyro_model.train_Yvar,
)
# Fit a model and check that the hyperparameters have the correct shape
fit_fully_bayesian_model_nuts(
model, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
)
self.assertEqual(model.batch_shape, torch.Size([3]))
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertEqual(model.mean_module.raw_constant.shape, model.batch_shape)
self.assertIsInstance(model.covar_module, ScaleKernel)
self.assertEqual(model.covar_module.outputscale.shape, model.batch_shape)
self.assertIsInstance(model.covar_module.base_kernel, MaternKernel)
self.assertEqual(
model.covar_module.base_kernel.lengthscale.shape, torch.Size([3, 1, d])
)
if infer_noise:
self.assertIsInstance(model.likelihood, GaussianLikelihood)
self.assertEqual(
model.likelihood.noise_covar.noise.shape, torch.Size([3, 1])
)
else:
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
self.assertIsInstance(model.task_covar_module, MaternKernel)
self.assertEqual(
model.task_covar_module.lengthscale.shape, torch.Size([3, 1, task_rank])
)
self.assertEqual(
model.latent_features.shape, torch.Size([3, self.num_tasks, task_rank])
)
# Predict on some test points
for batch_shape in [[5], [5, 2], [5, 2, 6]]:
test_X = torch.rand(*batch_shape, d, **tkwargs)
posterior = model.posterior(test_X)
            self.assertIsInstance(posterior, FullyBayesianPosterior)
# Mean/variance
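            # the MCMC dimension (num_mcmc_samples = 3 here) is inserted into
            # the test batch shape at position MCMC_DIM, adding one batch
            # dimension to the posterior mean and variance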
expected_shape = (
*batch_shape[: MCMC_DIM + 2],
*model.batch_shape,
*batch_shape[MCMC_DIM + 2 :],
self.num_tasks,
)
expected_shape = torch.Size(expected_shape)
mean, var = posterior.mean, posterior.variance
self.assertEqual(mean.shape, expected_shape)
self.assertEqual(var.shape, expected_shape)
# Mixture mean/variance/median/quantiles
mixture_mean = posterior.mixture_mean
mixture_variance = posterior.mixture_variance
quantile1 = posterior.quantile(value=torch.tensor(0.01))
quantile2 = posterior.quantile(value=torch.tensor(0.99))
# Marginalized mean/variance
self.assertEqual(
mixture_mean.shape, torch.Size(batch_shape + [self.num_tasks])
)
self.assertEqual(
mixture_variance.shape, torch.Size(batch_shape + [self.num_tasks])
)
self.assertTrue(mixture_variance.min() > 0.0)
self.assertEqual(
quantile1.shape, torch.Size(batch_shape + [self.num_tasks])
)
self.assertEqual(
quantile2.shape, torch.Size(batch_shape + [self.num_tasks])
)
self.assertTrue((quantile2 > quantile1).all())
            dist = torch.distributions.Normal(
                loc=posterior.mean, scale=posterior.variance.sqrt()
            )
            # The mixture CDF is the average of the per-sample CDFs over the
            # MCMC dimension, so evaluating it at the mixture quantiles
            # should approximately recover the quantile levels.
            self.assertTrue(
                torch.allclose(
                    dist.cdf(quantile1.unsqueeze(MCMC_DIM)).mean(dim=MCMC_DIM),
                    0.01 * torch.ones(batch_shape + [1], **tkwargs),
                    atol=1e-3,
                )
            )
            self.assertTrue(
                torch.allclose(
                    dist.cdf(quantile2.unsqueeze(MCMC_DIM)).mean(dim=MCMC_DIM),
                    0.99 * torch.ones(batch_shape + [1], **tkwargs),
                    atol=1e-3,
                )
            )
# Invalid quantile should raise
for q in [-1.0, 0.0, 1.0, 1.3333]:
with self.assertRaisesRegex(
ValueError, "value is expected to be in the range"
):
posterior.quantile(value=torch.tensor(q))
# Test model lists with fully Bayesian models and mixed modeling
deterministic = GenericDeterministicModel(f=lambda x: x[..., :1])
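            # the deterministic model adds one output to the two-task MTGP
            # (1 + 2 = 3 outputs); two copies of the MTGP give 2 + 2 = 4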
for ModelListClass, models, expected_outputs in zip(
[ModelList, ModelListGP],
[[deterministic, model], [model, model]],
[3, 4],
):
expected_shape = (
*batch_shape[: MCMC_DIM + 2],
*model.batch_shape,
*batch_shape[MCMC_DIM + 2 :],
expected_outputs,
)
expected_shape = torch.Size(expected_shape)
model_list = ModelListClass(*models)
posterior = model_list.posterior(test_X)
mean, var = posterior.mean, posterior.variance
self.assertEqual(mean.shape, expected_shape)
self.assertEqual(var.shape, expected_shape)
# Check properties
median_lengthscale = model.median_lengthscale
self.assertEqual(median_lengthscale.shape, torch.Size([d]))
self.assertEqual(model.num_mcmc_samples, 3)
# Check the keys in the state dict
true_keys = EXPECTED_KEYS_NOISE if infer_noise else EXPECTED_KEYS
self.assertEqual(set(model.state_dict().keys()), set(true_keys))
# Check that we can load the state dict.
state_dict = model.state_dict()
_, _, _, m_new = self._get_data_and_model(
infer_noise=infer_noise, task_rank=task_rank, **tkwargs
)
self.assertEqual(m_new.state_dict(), {})
m_new.load_state_dict(state_dict)
self.assertEqual(model.state_dict().keys(), m_new.state_dict().keys())
for k in model.state_dict().keys():
self.assertTrue((model.state_dict()[k] == m_new.state_dict()[k]).all())
preds1, preds2 = model.posterior(test_X), m_new.posterior(test_X)
self.assertTrue(torch.equal(preds1.mean, preds2.mean))
self.assertTrue(torch.equal(preds1.variance, preds2.variance))
# Make sure the model shapes are set correctly
self.assertEqual(model.pyro_model.train_X.shape, torch.Size([n, d + 1]))
self.assertAllClose(model.pyro_model.train_X, train_X)
model.train() # Put the model in train mode
self.assertAllClose(train_X, model.pyro_model.train_X)
self.assertIsNone(model.mean_module)
self.assertIsNone(model.covar_module)
self.assertIsNone(model.likelihood)
self.assertIsNone(model.task_covar_module)
def test_fit_model_float(self):
self.test_fit_model(dtype=torch.float)
def test_fit_model_infer_noise(self):
self.test_fit_model(infer_noise=True, task_rank=4)
def test_transforms(self, infer_noise: bool = False):
tkwargs = {"device": self.device, "dtype": torch.double}
train_X, train_Y, train_Yvar, test_X = self._get_unnormalized_data(**tkwargs)
n, d = train_X.shape
normalize_indices = torch.tensor(
            list(range(train_X.shape[-1] - 1)), device=self.device
)
lb, ub = (
train_X[:, normalize_indices].min(dim=0).values,
train_X[:, normalize_indices].max(dim=0).values,
)
train_X_new = train_X.clone()
train_X_new[..., normalize_indices] = (train_X[..., normalize_indices] - lb) / (
ub - lb
)
# TODO: add testing of stratified standardization
mu, sigma = train_Y.mean(), train_Y.std()
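        # fitting on manually normalized / standardized data (with a fixed
        # seed) should match a model that applies the Normalize/Standardize
        # transforms internally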
# Fit without transforms
with torch.random.fork_rng():
torch.manual_seed(0)
gp1 = SaasFullyBayesianMultiTaskGP(
train_X=train_X_new,
train_Y=(train_Y - mu) / sigma,
train_Yvar=None if infer_noise else train_Yvar / sigma**2,
task_feature=d - 1,
)
fit_fully_bayesian_model_nuts(
gp1, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
)
posterior1 = gp1.posterior((test_X - lb) / (ub - lb), output_indices=[0])
pred_mean1 = mu + sigma * posterior1.mean
pred_var1 = (sigma**2) * posterior1.variance
# Fit with transforms
with torch.random.fork_rng():
torch.manual_seed(0)
gp2 = SaasFullyBayesianMultiTaskGP(
train_X=train_X,
train_Y=train_Y,
train_Yvar=None if infer_noise else train_Yvar,
task_feature=d - 1,
input_transform=Normalize(
d=train_X.shape[-1], indices=normalize_indices
),
outcome_transform=Standardize(m=1),
)
fit_fully_bayesian_model_nuts(
gp2, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
)
posterior2 = gp2.posterior(X=test_X, output_indices=[0])
pred_mean2, pred_var2 = posterior2.mean, posterior2.variance
self.assertAllClose(pred_mean1, pred_mean2)
self.assertAllClose(pred_var1, pred_var2)
def test_transforms_infer_noise(self):
self.test_transforms(infer_noise=True)
def test_acquisition_functions(self):
tkwargs = {"device": self.device, "dtype": torch.double}
# Using a single output model here since we test with single objective acqfs.
train_X, train_Y, train_Yvar, model = self._get_data_and_model(
task_rank=1, output_tasks=[0], **tkwargs
)
fit_fully_bayesian_model_nuts(
model, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
)
for include_task_feature in [True, False]:
if not include_task_feature:
test_X = train_X[..., :-1]
else:
test_X = train_X
deterministic = GenericDeterministicModel(f=lambda x: x[..., :1])
list_gp = ModelListGP(model, model)
mixed_list = ModelList(deterministic, model)
simple_sampler = get_sampler(
posterior=model.posterior(test_X),
sample_shape=torch.Size([2]),
)
list_gp_sampler = get_sampler(
posterior=list_gp.posterior(test_X), sample_shape=torch.Size([2])
)
mixed_list_sampler = get_sampler(
posterior=mixed_list.posterior(test_X), sample_shape=torch.Size([2])
)
acquisition_functions = [
ExpectedImprovement(model=model, best_f=train_Y.max()),
ProbabilityOfImprovement(model=model, best_f=train_Y.max()),
PosteriorMean(model=model),
UpperConfidenceBound(model=model, beta=4),
qExpectedImprovement(
model=model, best_f=train_Y.max(), sampler=simple_sampler
),
qNoisyExpectedImprovement(
model=model, X_baseline=test_X, sampler=simple_sampler
),
qProbabilityOfImprovement(
model=model, best_f=train_Y.max(), sampler=simple_sampler
),
qSimpleRegret(model=model, sampler=simple_sampler),
qUpperConfidenceBound(model=model, beta=4, sampler=simple_sampler),
qNoisyExpectedHypervolumeImprovement(
model=list_gp,
X_baseline=test_X,
ref_point=torch.zeros(2, **tkwargs),
sampler=list_gp_sampler,
),
qExpectedHypervolumeImprovement(
model=list_gp,
ref_point=torch.zeros(2, **tkwargs),
sampler=list_gp_sampler,
partitioning=NondominatedPartitioning(
ref_point=torch.zeros(2, **tkwargs), Y=train_Y.repeat([1, 2])
),
),
# qEHVI/qNEHVI with mixed models
qNoisyExpectedHypervolumeImprovement(
model=mixed_list,
X_baseline=test_X,
ref_point=torch.zeros(2, **tkwargs),
sampler=mixed_list_sampler,
),
qExpectedHypervolumeImprovement(
model=mixed_list,
ref_point=torch.zeros(2, **tkwargs),
sampler=mixed_list_sampler,
partitioning=NondominatedPartitioning(
ref_point=torch.zeros(2, **tkwargs), Y=train_Y.repeat([1, 2])
),
),
]
for acqf in acquisition_functions:
for batch_shape in [[2], [6, 2], [5, 6, 2]]:
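                    # the MCMC sample dimension is marginalized inside the
                    # acquisition functions, so the output shape should match
                    # the t-batch shape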
test_X = torch.rand(*batch_shape, 1, 4, **tkwargs)
if include_task_feature:
test_X = torch.cat(
[test_X, torch.zeros_like(test_X[..., :1])], dim=-1
)
self.assertEqual(acqf(test_X).shape, torch.Size(batch_shape))
def test_load_samples(self):
for task_rank, dtype in itertools.product([1, 2], [torch.float, torch.double]):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y, train_Yvar, model = self._get_data_and_model(
task_rank=task_rank, **tkwargs
)
d = train_X.shape[1] - 1
mcmc_samples = self._get_mcmc_samples(
num_samples=3, dim=d, task_rank=task_rank, **tkwargs
)
model.load_mcmc_samples(mcmc_samples)
self.assertTrue(
torch.allclose(
model.covar_module.base_kernel.lengthscale,
mcmc_samples["lengthscale"],
)
)
self.assertTrue(
torch.allclose(
model.covar_module.outputscale,
mcmc_samples["outputscale"],
)
)
self.assertTrue(
torch.allclose(
model.mean_module.raw_constant.data,
mcmc_samples["mean"],
)
)
self.assertTrue(
torch.allclose(
model.pyro_model.train_Yvar,
train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL),
)
)
self.assertTrue(
torch.allclose(
model.task_covar_module.lengthscale,
mcmc_samples["task_lengthscale"],
)
)
self.assertTrue(
torch.allclose(
model.latent_features,
mcmc_samples["latent_features"],
)
)
def test_construct_inputs(self):
for dtype, infer_noise in [(torch.float, False), (torch.double, True)]:
tkwargs = {"device": self.device, "dtype": dtype}
task_feature = 0
if infer_noise:
datasets, (train_X, train_Y) = _gen_datasets(yvar=None, **tkwargs)
train_Yvar = None
else:
datasets, (train_X, train_Y, train_Yvar) = _gen_datasets(
yvar=0.05, **tkwargs
)
model = SaasFullyBayesianMultiTaskGP(
train_X=train_X,
train_Y=train_Y,
train_Yvar=train_Yvar,
task_feature=task_feature,
)
data_dict = model.construct_inputs(
datasets,
task_feature=task_feature,
rank=1,
)
self.assertTrue(torch.equal(data_dict["train_X"], train_X))
self.assertTrue(torch.equal(data_dict["train_Y"], train_Y))
self.assertAllClose(data_dict["train_Yvar"], train_Yvar)
self.assertEqual(data_dict["task_feature"], task_feature)
self.assertEqual(data_dict["rank"], 1)
self.assertTrue("task_covar_prior" not in data_dict)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from random import random
import torch
from botorch.models.cost import AffineFidelityCostModel
from botorch.utils.testing import BotorchTestCase
class TestCostModels(BotorchTestCase):
def test_affine_fidelity_cost_model(self):
for dtype in (torch.float, torch.double):
for batch_shape in ([], [2]):
X = torch.rand(*batch_shape, 3, 4, device=self.device, dtype=dtype)
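                # the affine model computes
                # cost = fixed_cost + sum_i weight_i * X[..., i]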
# test default parameters
model = AffineFidelityCostModel()
self.assertEqual(model.num_outputs, 1)
self.assertEqual(model.fidelity_dims, [-1])
self.assertEqual(model.fixed_cost, 0.01)
cost = model(X)
cost_exp = 0.01 + X[..., -1:]
self.assertAllClose(cost, cost_exp)
# test custom parameters
fw = {2: 2.0, 0: 1.0}
fc = random()
model = AffineFidelityCostModel(fidelity_weights=fw, fixed_cost=fc)
self.assertEqual(model.fidelity_dims, [0, 2])
self.assertEqual(model.fixed_cost, fc)
cost = model(X)
cost_exp = fc + sum(v * X[..., i : i + 1] for i, v in fw.items())
self.assertAllClose(cost, cost_exp)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import torch
from botorch.exceptions import UnsupportedError
from botorch.models.kernels.linear_truncated_fidelity import (
LinearTruncatedFidelityKernel,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels.matern_kernel import MaternKernel
from gpytorch.kernels.rbf_kernel import RBFKernel
from gpytorch.priors.torch_priors import GammaPrior, NormalPrior
from gpytorch.test.base_kernel_test_case import BaseKernelTestCase
class TestLinearTruncatedFidelityKernel(BotorchTestCase, BaseKernelTestCase):
def create_kernel_no_ard(self, **kwargs):
return LinearTruncatedFidelityKernel(
fidelity_dims=[1, 2], dimension=3, **kwargs
)
def create_data_no_batch(self):
return torch.rand(50, 10)
def create_data_single_batch(self):
return torch.rand(2, 50, 3)
def create_data_double_batch(self):
return torch.rand(3, 2, 50, 3)
def test_compute_linear_truncated_kernel_no_batch(self):
x1 = torch.tensor([[1, 0.1, 0.2], [2, 0.3, 0.4]])
x2 = torch.tensor([[3, 0.5, 0.6], [4, 0.7, 0.8]])
t_1 = torch.tensor([[0.3584, 0.1856], [0.2976, 0.1584]])
for nu, fidelity_dims in itertools.product({0.5, 1.5, 2.5}, ([2], [1, 2])):
kernel = LinearTruncatedFidelityKernel(
fidelity_dims=fidelity_dims, dimension=3, nu=nu
)
kernel.power = 1
n_fid = len(fidelity_dims)
if n_fid > 1:
active_dimsM = [0]
t_2 = torch.tensor([[0.4725, 0.2889], [0.4025, 0.2541]])
t_3 = torch.tensor([[0.1685, 0.0531], [0.1168, 0.0386]])
t = 1 + t_1 + t_2 + t_3
else:
active_dimsM = [0, 1]
t = 1 + t_1
matern_ker = MaternKernel(nu=nu, active_dims=active_dimsM)
matern_term = matern_ker(x1, x2).to_dense()
actual = t * matern_term
res = kernel(x1, x2).to_dense()
self.assertLess(torch.linalg.norm(res - actual), 1e-4)
# test diagonal mode
res_diag = kernel(x1, x2, diag=True)
self.assertLess(torch.linalg.norm(res_diag - actual.diag()), 1e-4)
# make sure that we error out if last_dim_is_batch=True
with self.assertRaises(NotImplementedError):
kernel(x1, x2, diag=True, last_dim_is_batch=True)
def test_compute_linear_truncated_kernel_with_batch(self):
x1 = torch.tensor(
[[[1.0, 0.1, 0.2], [3.0, 0.3, 0.4]], [[5.0, 0.5, 0.6], [7.0, 0.7, 0.8]]]
)
x2 = torch.tensor(
[[[2.0, 0.8, 0.7], [4.0, 0.6, 0.5]], [[6.0, 0.4, 0.3], [8.0, 0.2, 0.1]]]
)
t_1 = torch.tensor(
[[[0.2736, 0.4400], [0.2304, 0.3600]], [[0.3304, 0.3816], [0.1736, 0.1944]]]
)
batch_shape = torch.Size([2])
for nu, fidelity_dims in itertools.product({0.5, 1.5, 2.5}, ([2], [1, 2])):
kernel = LinearTruncatedFidelityKernel(
fidelity_dims=fidelity_dims, dimension=3, nu=nu, batch_shape=batch_shape
)
kernel.power = 1
if len(fidelity_dims) > 1:
active_dimsM = [0]
t_2 = torch.tensor(
[
[[0.0527, 0.1670], [0.0383, 0.1159]],
[[0.1159, 0.1670], [0.0383, 0.0527]],
]
)
t_3 = torch.tensor(
[
[[0.1944, 0.3816], [0.1736, 0.3304]],
[[0.3600, 0.4400], [0.2304, 0.2736]],
]
)
t = 1 + t_1 + t_2 + t_3
else:
active_dimsM = [0, 1]
t = 1 + t_1
matern_ker = MaternKernel(
nu=nu, active_dims=active_dimsM, batch_shape=batch_shape
)
matern_term = matern_ker(x1, x2).to_dense()
actual = t * matern_term
res = kernel(x1, x2).to_dense()
self.assertLess(torch.linalg.norm(res - actual), 1e-4)
# test diagonal mode
res_diag = kernel(x1, x2, diag=True)
self.assertLess(
torch.linalg.norm(res_diag - torch.diagonal(actual, dim1=-1, dim2=-2)),
1e-4,
)
# make sure that we error out if last_dim_is_batch=True
with self.assertRaises(NotImplementedError):
kernel(x1, x2, diag=True, last_dim_is_batch=True)
def test_initialize_lengthscale_prior(self):
kernel = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3)
self.assertTrue(
isinstance(kernel.covar_module_unbiased.lengthscale_prior, GammaPrior)
)
self.assertTrue(
isinstance(kernel.covar_module_biased.lengthscale_prior, GammaPrior)
)
kernel2 = LinearTruncatedFidelityKernel(
fidelity_dims=[1, 2],
dimension=3,
lengthscale_prior_unbiased=NormalPrior(1, 1),
)
self.assertTrue(
isinstance(kernel2.covar_module_unbiased.lengthscale_prior, NormalPrior)
)
kernel2 = LinearTruncatedFidelityKernel(
fidelity_dims=[1, 2],
dimension=3,
lengthscale_prior_biased=NormalPrior(1, 1),
)
self.assertTrue(
isinstance(kernel2.covar_module_biased.lengthscale_prior, NormalPrior)
)
def test_initialize_power_prior(self):
kernel = LinearTruncatedFidelityKernel(
fidelity_dims=[1, 2], dimension=3, power_prior=NormalPrior(1, 1)
)
self.assertTrue(isinstance(kernel.power_prior, NormalPrior))
def test_initialize_power(self):
kernel = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3)
kernel.initialize(power=1)
actual_value = torch.tensor(1, dtype=torch.float).view_as(kernel.power)
self.assertLess(torch.linalg.norm(kernel.power - actual_value), 1e-5)
def test_initialize_power_batch(self):
kernel = LinearTruncatedFidelityKernel(
fidelity_dims=[1, 2], dimension=3, batch_shape=torch.Size([2])
)
power_init = torch.tensor([1, 2], dtype=torch.float)
kernel.initialize(power=power_init)
actual_value = power_init.view_as(kernel.power)
self.assertLess(torch.linalg.norm(kernel.power - actual_value), 1e-5)
def test_raise_init_errors(self):
with self.assertRaises(UnsupportedError):
LinearTruncatedFidelityKernel(fidelity_dims=[2])
with self.assertRaises(UnsupportedError):
LinearTruncatedFidelityKernel(fidelity_dims=[0, 1, 2], dimension=3)
with self.assertRaises(ValueError):
LinearTruncatedFidelityKernel(fidelity_dims=[2, 2], dimension=3)
with self.assertRaises(ValueError):
LinearTruncatedFidelityKernel(fidelity_dims=[2], dimension=2, nu=1)
def test_active_dims_list(self):
kernel = LinearTruncatedFidelityKernel(
fidelity_dims=[1, 2], dimension=10, active_dims=[0, 2, 4, 6]
)
x = self.create_data_no_batch()
covar_mat = kernel(x).evaluate_kernel().to_dense()
kernel_basic = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=4)
covar_mat_actual = kernel_basic(x[:, [0, 2, 4, 6]]).evaluate_kernel().to_dense()
self.assertLess(
torch.linalg.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm(),
1e-4,
)
def test_active_dims_range(self):
active_dims = list(range(3, 9))
kernel = LinearTruncatedFidelityKernel(
fidelity_dims=[1, 2], dimension=10, active_dims=active_dims
)
x = self.create_data_no_batch()
covar_mat = kernel(x).evaluate_kernel().to_dense()
kernel_basic = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=6)
covar_mat_actual = kernel_basic(x[:, active_dims]).evaluate_kernel().to_dense()
self.assertLess(
torch.linalg.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm(),
1e-4,
)
def test_error_on_fidelity_only(self):
x1 = torch.tensor([[0.1], [0.3]])
x2 = torch.tensor([[0.5], [0.7]])
kernel = LinearTruncatedFidelityKernel(fidelity_dims=[0], dimension=1, nu=2.5)
with self.assertRaises(RuntimeError):
kernel(x1, x2).to_dense()
def test_initialize_covar_module(self):
kernel = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3)
self.assertTrue(isinstance(kernel.covar_module_unbiased, MaternKernel))
self.assertTrue(isinstance(kernel.covar_module_biased, MaternKernel))
kernel.covar_module_unbiased = RBFKernel()
kernel.covar_module_biased = RBFKernel()
self.assertTrue(isinstance(kernel.covar_module_unbiased, RBFKernel))
self.assertTrue(isinstance(kernel.covar_module_biased, RBFKernel))
kernel2 = LinearTruncatedFidelityKernel(
fidelity_dims=[1, 2],
dimension=3,
covar_module_unbiased=RBFKernel(),
covar_module_biased=RBFKernel(),
)
self.assertTrue(isinstance(kernel2.covar_module_unbiased, RBFKernel))
self.assertTrue(isinstance(kernel2.covar_module_biased, RBFKernel))
def test_kernel_pickle_unpickle(self):
# This kernel uses priors by default, which cause this test to fail
pass
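if __name__ == "__main__":
    # Illustrative sketch (editor's addition, not part of the test suite):
    # evaluate the kernel on random 3-column inputs, treating columns 1 and 2
    # as fidelity parameters, as in the tests above.
    demo_kernel = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3)
    demo_x = torch.rand(4, 3)
    print(demo_kernel(demo_x).to_dense().shape)  # expected: torch.Size([4, 4])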
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.models.kernels.categorical import CategoricalKernel
from botorch.utils.testing import BotorchTestCase
from gpytorch.test.base_kernel_test_case import BaseKernelTestCase
class TestCategoricalKernel(BotorchTestCase, BaseKernelTestCase):
def create_kernel_no_ard(self, **kwargs):
return CategoricalKernel(**kwargs)
def create_data_no_batch(self):
return torch.randint(3, size=(5, 10)).to(dtype=torch.float)
def create_data_single_batch(self):
return torch.randint(3, size=(2, 5, 3)).to(dtype=torch.float)
def create_data_double_batch(self):
return torch.randint(3, size=(3, 2, 5, 3)).to(dtype=torch.float)
def test_initialize_lengthscale(self):
kernel = CategoricalKernel()
kernel.initialize(lengthscale=1)
actual_value = torch.tensor(1.0).view_as(kernel.lengthscale)
self.assertLess(torch.linalg.norm(kernel.lengthscale - actual_value), 1e-5)
def test_initialize_lengthscale_batch(self):
kernel = CategoricalKernel(batch_shape=torch.Size([2]))
ls_init = torch.tensor([1.0, 2.0])
kernel.initialize(lengthscale=ls_init)
actual_value = ls_init.view_as(kernel.lengthscale)
self.assertLess(torch.linalg.norm(kernel.lengthscale - actual_value), 1e-5)
def test_forward(self):
x1 = torch.tensor([[4, 2], [3, 1], [8, 5], [7, 6]], dtype=torch.float)
x2 = torch.tensor([[4, 2], [3, 0], [4, 4]], dtype=torch.float)
lengthscale = 2
kernel = CategoricalKernel().initialize(lengthscale=lengthscale)
kernel.eval()
sc_dists = (x1.unsqueeze(-2) != x2.unsqueeze(-3)) / lengthscale
actual = torch.exp(-sc_dists.mean(-1))
res = kernel(x1, x2).to_dense()
self.assertAllClose(res, actual)
def test_active_dims(self):
x1 = torch.tensor([[4, 2], [3, 1], [8, 5], [7, 6]], dtype=torch.float)
x2 = torch.tensor([[4, 2], [3, 0], [4, 4]], dtype=torch.float)
lengthscale = 2
kernel = CategoricalKernel(active_dims=[0]).initialize(lengthscale=lengthscale)
kernel.eval()
dists = x1[:, :1].unsqueeze(-2) != x2[:, :1].unsqueeze(-3)
sc_dists = dists / lengthscale
actual = torch.exp(-sc_dists.mean(-1))
res = kernel(x1, x2).to_dense()
self.assertAllClose(res, actual)
def test_ard(self):
x1 = torch.tensor([[4, 2], [3, 1], [8, 5]], dtype=torch.float)
x2 = torch.tensor([[4, 2], [3, 0], [4, 4]], dtype=torch.float)
lengthscales = torch.tensor([1, 2], dtype=torch.float).view(1, 1, 2)
kernel = CategoricalKernel(ard_num_dims=2)
kernel.initialize(lengthscale=lengthscales)
kernel.eval()
sc_dists = x1.unsqueeze(-2) != x2.unsqueeze(-3)
sc_dists = sc_dists / lengthscales
actual = torch.exp(-sc_dists.mean(-1))
res = kernel(x1, x2).to_dense()
self.assertAllClose(res, actual)
# diag
res = kernel(x1, x2).diag()
actual = torch.diagonal(actual, dim1=-1, dim2=-2)
self.assertAllClose(res, actual)
# batch_dims
actual = torch.exp(-sc_dists).transpose(-1, -3)
res = kernel(x1, x2, last_dim_is_batch=True).to_dense()
self.assertAllClose(res, actual)
# batch_dims + diag
res = kernel(x1, x2, last_dim_is_batch=True).diag()
self.assertAllClose(res, torch.diagonal(actual, dim1=-1, dim2=-2))
def test_ard_batch(self):
x1 = torch.tensor(
[
[[4, 2, 1], [3, 1, 5]],
[[3, 2, 3], [6, 1, 7]],
],
dtype=torch.float,
)
x2 = torch.tensor([[[4, 2, 1], [6, 0, 0]]], dtype=torch.float)
lengthscales = torch.tensor([[[1, 2, 1]]], dtype=torch.float)
kernel = CategoricalKernel(batch_shape=torch.Size([2]), ard_num_dims=3)
kernel.initialize(lengthscale=lengthscales)
kernel.eval()
sc_dists = x1.unsqueeze(-2) != x2.unsqueeze(-3)
sc_dists = sc_dists / lengthscales.unsqueeze(-2)
actual = torch.exp(-sc_dists.mean(-1))
res = kernel(x1, x2).to_dense()
self.assertAllClose(res, actual)
def test_ard_separate_batch(self):
x1 = torch.tensor(
[
[[4, 2, 1], [3, 1, 5]],
[[3, 2, 3], [6, 1, 7]],
],
dtype=torch.float,
)
x2 = torch.tensor([[[4, 2, 1], [6, 0, 0]]], dtype=torch.float)
lengthscales = torch.tensor([[[1, 2, 1]], [[2, 1, 0.5]]], dtype=torch.float)
kernel = CategoricalKernel(batch_shape=torch.Size([2]), ard_num_dims=3)
kernel.initialize(lengthscale=lengthscales)
kernel.eval()
sc_dists = x1.unsqueeze(-2) != x2.unsqueeze(-3)
sc_dists = sc_dists / lengthscales.unsqueeze(-2)
actual = torch.exp(-sc_dists.mean(-1))
res = kernel(x1, x2).to_dense()
self.assertAllClose(res, actual)
# diag
res = kernel(x1, x2).diag()
actual = torch.diagonal(actual, dim1=-1, dim2=-2)
self.assertAllClose(res, actual)
# batch_dims
actual = torch.exp(-sc_dists).transpose(-1, -3)
res = kernel(x1, x2, last_dim_is_batch=True).to_dense()
self.assertAllClose(res, actual)
# batch_dims + diag
res = kernel(x1, x2, last_dim_is_batch=True).diag()
self.assertAllClose(res, torch.diagonal(actual, dim1=-1, dim2=-2))
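if __name__ == "__main__":
    # Illustrative sketch (editor's addition, not part of the test suite):
    # the kernel is exp(-mean_d 1[x_d != z_d] / lengthscale), i.e. it decays
    # with the (scaled) Hamming distance between categorical inputs.
    demo_k = CategoricalKernel().initialize(lengthscale=1.0)
    demo_k.eval()
    xa = torch.tensor([[0.0, 1.0]])
    xb = torch.tensor([[0.0, 2.0]])
    print(demo_k(xa, xb).to_dense())  # expected: exp(-0.5) ~= tensor([[0.6065]])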
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.models.kernels.exponential_decay import ExponentialDecayKernel
from botorch.utils.testing import BotorchTestCase
from gpytorch.priors.torch_priors import GammaPrior, NormalPrior
from gpytorch.test.base_kernel_test_case import BaseKernelTestCase
class TestExponentialDecayKernel(BotorchTestCase, BaseKernelTestCase):
def create_kernel_no_ard(self, **kwargs):
return ExponentialDecayKernel(**kwargs)
def test_subset_active_compute_exponential_decay_function(self):
a = torch.tensor([1.0, 2.0]).view(2, 1)
a_p = torch.tensor([3.0, 4.0]).view(2, 1)
a = torch.cat((a, a_p), 1)
b = torch.tensor([2.0, 4.0]).view(2, 1)
lengthscale = 1
power = 1
offset = 1
kernel = ExponentialDecayKernel(active_dims=[0])
kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
kernel.eval()
diff = torch.tensor([[4.0, 6.0], [5.0, 7.0]])
actual = offset + diff.pow(-power)
res = kernel(a, b).to_dense()
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_computes_exponential_decay_function(self):
a = torch.tensor([1.0, 2.0]).view(2, 1)
b = torch.tensor([2.0, 4.0]).view(2, 1)
lengthscale = 1
power = 1
offset = 1
kernel = ExponentialDecayKernel()
kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
kernel.eval()
diff = torch.tensor([[4.0, 6.0], [5.0, 7.0]])
actual = offset + torch.tensor([1.0]).div(diff.pow(power))
res = kernel(a, b).to_dense()
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_subset_active_exponential_decay_function_batch(self):
a = torch.tensor([[1.0, 0.0], [2.0, 0.0], [3.0, 0.0], [4.0, 0.0]]).view(2, 2, 2)
b = torch.tensor([[5.0, 6.0], [7.0, 8.0]]).view(2, 2, 1)
lengthscale = 1
power = 1
offset = 1
kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]), active_dims=[0])
kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
kernel.eval()
actual = torch.zeros(2, 2, 2)
diff = torch.tensor([[7.0, 8.0], [8.0, 9.0]])
actual[0, :, :] = offset + torch.tensor([1.0]).div(diff.pow(power))
diff = torch.tensor([[11.0, 12.0], [12.0, 13.0]])
actual[1, :, :] = offset + torch.tensor([1.0]).div(diff.pow(power))
res = kernel(a, b).to_dense()
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_computes_exponential_decay_function_batch(self):
a = torch.tensor([[1.0, 2.0], [3.0, 4.0]]).view(2, 2, 1)
b = torch.tensor([[5.0, 6.0], [7.0, 8.0]]).view(2, 2, 1)
lengthscale = 1
power = 1
offset = 1
kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
kernel.eval()
actual = torch.zeros(2, 2, 2)
diff = torch.tensor([[7.0, 8.0], [8.0, 9.0]])
actual[0, :, :] = offset + diff.pow(-power)
diff = torch.tensor([[11.0, 12.0], [12.0, 13.0]])
actual[1, :, :] = offset + diff.pow(-power)
res = kernel(a, b).to_dense()
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_initialize_lengthscale(self):
kernel = ExponentialDecayKernel()
kernel.initialize(lengthscale=1)
actual_value = torch.tensor(1.0).view_as(kernel.lengthscale)
self.assertLess(torch.linalg.norm(kernel.lengthscale - actual_value), 1e-5)
def test_initialize_lengthscale_batch(self):
kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
ls_init = torch.tensor([1.0, 2.0])
kernel.initialize(lengthscale=ls_init)
actual_value = ls_init.view_as(kernel.lengthscale)
self.assertLess(torch.linalg.norm(kernel.lengthscale - actual_value), 1e-5)
def test_initialize_offset(self):
kernel = ExponentialDecayKernel()
kernel.initialize(offset=1)
actual_value = torch.tensor(1.0).view_as(kernel.offset)
self.assertLess(torch.linalg.norm(kernel.offset - actual_value), 1e-5)
def test_initialize_offset_batch(self):
kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
off_init = torch.tensor([1.0, 2.0])
kernel.initialize(offset=off_init)
actual_value = off_init.view_as(kernel.offset)
self.assertLess(torch.linalg.norm(kernel.offset - actual_value), 1e-5)
def test_initialize_power(self):
kernel = ExponentialDecayKernel()
kernel.initialize(power=1)
actual_value = torch.tensor(1.0).view_as(kernel.power)
self.assertLess(torch.linalg.norm(kernel.power - actual_value), 1e-5)
def test_initialize_power_batch(self):
kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
power_init = torch.tensor([1.0, 2.0])
kernel.initialize(power=power_init)
actual_value = power_init.view_as(kernel.power)
self.assertLess(torch.linalg.norm(kernel.power - actual_value), 1e-5)
def test_initialize_power_prior(self):
kernel = ExponentialDecayKernel()
kernel.power_prior = NormalPrior(1, 1)
self.assertTrue(isinstance(kernel.power_prior, NormalPrior))
kernel2 = ExponentialDecayKernel(power_prior=GammaPrior(1, 1))
self.assertTrue(isinstance(kernel2.power_prior, GammaPrior))
def test_initialize_offset_prior(self):
kernel = ExponentialDecayKernel()
kernel.offset_prior = NormalPrior(1, 1)
self.assertTrue(isinstance(kernel.offset_prior, NormalPrior))
kernel2 = ExponentialDecayKernel(offset_prior=GammaPrior(1, 1))
self.assertTrue(isinstance(kernel2.offset_prior, GammaPrior))
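if __name__ == "__main__":
    # Illustrative sketch (editor's addition, not part of the test suite):
    # with unit lengthscale, power, and offset the kernel reduces to
    # offset + 1 / (x + z + 1), consistent with the expected values above.
    demo_k = ExponentialDecayKernel()
    demo_k.initialize(lengthscale=1.0, power=1.0, offset=1.0)
    demo_k.eval()
    print(demo_k(torch.tensor([[1.0]]), torch.tensor([[2.0]])).to_dense())
    # expected: tensor([[1.2500]]) since 1 + 1 / (1 + 2 + 1) = 1.25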
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.models.kernels.contextual_lcea import (
get_order,
get_permutation,
is_contiguous,
LCEAKernel,
)
from botorch.models.kernels.contextual_sac import SACKernel
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels.matern_kernel import MaternKernel
from torch import Tensor
from torch.nn import ModuleDict
class ContextualKernelTest(BotorchTestCase):
def test_SACKernel(self):
decomposition = {"1": [0, 3], "2": [1, 2]}
kernel = SACKernel(decomposition=decomposition, batch_shape=torch.Size([]))
self.assertIsInstance(kernel.kernel_dict, ModuleDict)
self.assertIsInstance(kernel.base_kernel, MaternKernel)
self.assertDictEqual(kernel.decomposition, decomposition)
# test diag works well for lazy tensor
x1 = torch.rand(5, 4)
x2 = torch.rand(5, 4)
res = kernel(x1, x2).to_dense()
res_diag = kernel(x1, x2, diag=True)
self.assertLess(torch.linalg.norm(res_diag - res.diag()), 1e-4)
# test raise of ValueError
with self.assertRaises(ValueError):
SACKernel(decomposition={"1": [0, 3], "2": [1]}, batch_shape=torch.Size([]))
    def test_LCEAKernel(self):
decomposition = {"1": [0, 3], "2": [1, 2]}
num_contexts = len(decomposition)
kernel = LCEAKernel(decomposition=decomposition, batch_shape=torch.Size([]))
# test init
self.assertListEqual(kernel.context_list, ["1", "2"])
self.assertIsInstance(kernel.base_kernel, MaternKernel)
self.assertIsInstance(kernel.task_covar_module, MaternKernel)
self.assertEqual(kernel.permutation, [0, 3, 1, 2])
# test raise of ValueError
with self.assertRaisesRegex(
ValueError, "The number of parameters needs to be same across all contexts."
):
LCEAKernel(
decomposition={"1": [0, 1], "2": [2]}, batch_shape=torch.Size([])
)
# test set_outputscale_list
kernel.initialize(outputscale_list=[0.5, 0.5])
actual_value = torch.tensor([0.5, 0.5]).view_as(kernel.outputscale_list)
self.assertLess(torch.linalg.norm(kernel.outputscale_list - actual_value), 1e-5)
self.assertTrue(kernel.train_embedding)
self.assertEqual(kernel.num_contexts, num_contexts)
self.assertEqual(kernel.n_embs, 1)
self.assertIsNone(kernel.context_emb_feature)
self.assertIsInstance(kernel.context_cat_feature, Tensor)
self.assertEqual(len(kernel.emb_layers), 1)
self.assertListEqual(kernel.emb_dims, [(num_contexts, 1)])
context_covar = kernel._eval_context_covar()
self.assertIsInstance(context_covar, Tensor)
self.assertEqual(context_covar.shape, torch.Size([num_contexts, num_contexts]))
embeddings = kernel._task_embeddings()
self.assertIsInstance(embeddings, Tensor)
self.assertEqual(embeddings.shape, torch.Size([num_contexts, 1]))
self.assertIsInstance(kernel.outputscale_list, Tensor)
self.assertEqual(kernel.outputscale_list.shape, torch.Size([num_contexts]))
# test diag works well for lazy tensor
num_obs, num_contexts, input_dim = 5, 2, 2
x1 = torch.rand(num_obs, num_contexts * input_dim)
x2 = torch.rand(num_obs, num_contexts * input_dim)
res = kernel(x1, x2).to_dense()
res_diag = kernel(x1, x2, diag=True)
self.assertAllClose(res_diag, res.diag(), atol=1e-4)
# test batch evaluation
batch_dim = 3
x1 = torch.rand(batch_dim, num_obs, num_contexts * input_dim)
x2 = torch.rand(batch_dim, num_obs, num_contexts * input_dim)
res = kernel(x1, x2).to_dense()
self.assertEqual(res.shape, torch.Size([batch_dim, num_obs, num_obs]))
# testing efficient `einsum` with naive `sum` implementation
context_covar = kernel._eval_context_covar()
if x1.dim() > context_covar.dim():
context_covar = context_covar.expand(
x1.shape[:-1] + torch.Size([x2.shape[-2]]) + context_covar.shape
)
base_covar_perm = kernel._eval_base_covar_perm(x1, x2)
expected_res = (context_covar * base_covar_perm).sum(dim=-2).sum(dim=-1)
self.assertAllClose(expected_res, res)
# diagonal batch evaluation
res_diag = kernel(x1, x2, diag=True).to_dense()
expected_res_diag = torch.diagonal(expected_res, dim1=-1, dim2=-2)
self.assertAllClose(expected_res_diag, res_diag)
# test input context_weight,
# test input embs_dim_list (one categorical feature)
# test input context_cat_feature
embs_dim_list = [2]
kernel2 = LCEAKernel(
decomposition=decomposition,
context_weight_dict={"1": 0.5, "2": 0.8},
cat_feature_dict={"1": [0], "2": [1]},
embs_dim_list=embs_dim_list, # increase dim from 1 to 2
batch_shape=torch.Size([]),
)
self.assertEqual(kernel2.num_contexts, num_contexts)
self.assertEqual(kernel2.n_embs, 2)
self.assertIsNone(kernel2.context_emb_feature)
self.assertIsInstance(kernel2.context_cat_feature, Tensor)
self.assertEqual(
kernel2.context_cat_feature.shape, torch.Size([num_contexts, 1])
)
self.assertEqual(len(kernel2.emb_layers), 1)
self.assertListEqual(kernel2.emb_dims, [(num_contexts, embs_dim_list[0])])
context_covar2 = kernel2._eval_context_covar()
self.assertIsInstance(context_covar2, Tensor)
self.assertEqual(context_covar2.shape, torch.Size([num_contexts, num_contexts]))
# test input pre-trained embedding
kernel3 = LCEAKernel(
decomposition=decomposition,
embs_feature_dict={"1": [0.2], "2": [0.5]},
batch_shape=torch.Size([]),
)
self.assertEqual(kernel3.num_contexts, num_contexts)
self.assertEqual(kernel3.n_embs, 2)
self.assertIsNotNone(kernel3.context_emb_feature)
self.assertIsInstance(kernel3.context_emb_feature, Tensor)
self.assertIsInstance(kernel3.context_cat_feature, Tensor)
self.assertEqual(
kernel3.context_cat_feature.shape, torch.Size([num_contexts, 1])
)
self.assertListEqual(kernel3.emb_dims, [(num_contexts, 1)])
embeddings3 = kernel3._task_embeddings()
self.assertEqual(embeddings3.shape, torch.Size([num_contexts, 2]))
# test only use pre-trained embedding
kernel4 = LCEAKernel(
decomposition=decomposition,
train_embedding=False,
embs_feature_dict={"1": [0.2], "2": [0.5]},
batch_shape=torch.Size([]),
)
self.assertEqual(kernel4.n_embs, 1)
self.assertIsNotNone(kernel4.context_emb_feature)
self.assertIsInstance(kernel4.context_emb_feature, Tensor)
self.assertIsInstance(kernel4.context_cat_feature, Tensor)
embeddings4 = kernel4._task_embeddings()
self.assertEqual(embeddings4.shape, torch.Size([num_contexts, 1]))
# test batch
kernel5 = LCEAKernel(decomposition=decomposition, batch_shape=torch.Size([3]))
self.assertEqual(kernel5.n_embs, 1) # one dim cat
self.assertListEqual(kernel5.emb_dims, [(num_contexts, 1)])
embeddings_batch = kernel5._task_embeddings_batch()
self.assertIsInstance(embeddings_batch, Tensor)
self.assertEqual(embeddings_batch.shape, torch.Size([3, num_contexts, 1]))
context_covar5 = kernel5._eval_context_covar()
self.assertIsInstance(context_covar5, Tensor)
self.assertEqual(
context_covar5.shape, torch.Size([3, num_contexts, num_contexts])
)
# test batch with pre-trained features
kernel6 = LCEAKernel(
decomposition=decomposition,
batch_shape=torch.Size([3]),
embs_feature_dict={"1": [0.2], "2": [0.5]},
)
self.assertEqual(kernel6.n_embs, 2) # one dim cat + one dim pre-train
self.assertListEqual(kernel6.emb_dims, [(num_contexts, 1)]) # one dim for cat
embeddings_batch = kernel6._task_embeddings_batch()
self.assertIsInstance(embeddings_batch, Tensor)
self.assertEqual(
            embeddings_batch.shape, torch.Size([3, num_contexts, kernel6.n_embs])
)
context_covar6 = kernel6._eval_context_covar()
self.assertIsInstance(context_covar6, Tensor)
self.assertEqual(
context_covar6.shape, torch.Size([3, num_contexts, num_contexts])
)
def test_get_permutation(self):
decomp = {"a": [0, 1], "b": [2, 3]}
permutation = get_permutation(decomp)
self.assertIsNone(permutation)
# order mismatch
decomp = {"a": [1, 0], "b": [2, 3]}
permutation = get_permutation(decomp)
self.assertEqual(permutation, [0, 1, 2, 3])
# non-contiguous
decomp = {"a": [0, 2], "b": [1, 3]}
permutation = get_permutation(decomp)
self.assertEqual(permutation, [0, 2, 1, 3])
def test_is_contiguous(self):
self.assertFalse(is_contiguous([0, 2]))
self.assertTrue(is_contiguous([0, 1]))
def test_get_order(self):
self.assertEqual(get_order([1, 10, 3]), [1, 1, 0])
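if __name__ == "__main__":
    # Illustrative sketch (editor's addition, not part of the test suite):
    # get_permutation returns None for an in-order contiguous decomposition
    # and a flattened index order otherwise, mirroring test_get_permutation.
    print(get_permutation({"a": [0, 1], "b": [2, 3]}))  # None
    print(get_permutation({"a": [0, 2], "b": [1, 3]}))  # [0, 2, 1, 3]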
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.models.kernels.orthogonal_additive_kernel import OrthogonalAdditiveKernel
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel
from gpytorch.lazy import LazyEvaluatedKernelTensor
from torch import nn, Tensor
class TestOrthogonalAdditiveKernel(BotorchTestCase):
def test_kernel(self):
n, d = 3, 5
dtypes = [torch.float, torch.double]
batch_shapes = [(), (2,), (7, 2)]
for dtype in dtypes:
tkwargs = {"dtype": dtype, "device": self.device}
for batch_shape in batch_shapes:
X = torch.rand(*batch_shape, n, d, **tkwargs)
base_kernel = MaternKernel().to(device=self.device)
oak = OrthogonalAdditiveKernel(
base_kernel,
dim=d,
second_order=False,
batch_shape=batch_shape,
**tkwargs,
)
KL = oak(X)
self.assertIsInstance(KL, LazyEvaluatedKernelTensor)
KM = KL.to_dense()
self.assertIsInstance(KM, Tensor)
self.assertEqual(KM.shape, (*batch_shape, n, n))
self.assertEqual(KM.dtype, dtype)
self.assertEqual(KM.device.type, self.device.type)
# symmetry
self.assertTrue(torch.allclose(KM, KM.transpose(-2, -1)))
# positivity
self.assertTrue(isposdef(KM))
# testing differentiability
X.requires_grad = True
oak(X).to_dense().sum().backward()
self.assertFalse(X.grad.isnan().any())
self.assertFalse(X.grad.isinf().any())
X_out_of_hypercube = torch.rand(n, d, **tkwargs) + 1
with self.assertRaisesRegex(ValueError, r"x1.*hypercube"):
oak(X_out_of_hypercube, X).to_dense()
with self.assertRaisesRegex(ValueError, r"x2.*hypercube"):
oak(X, X_out_of_hypercube).to_dense()
with self.assertRaisesRegex(UnsupportedError, "does not support"):
oak.forward(x1=X, x2=X, last_dim_is_batch=True)
oak_2nd = OrthogonalAdditiveKernel(
base_kernel,
dim=d,
second_order=True,
batch_shape=batch_shape,
**tkwargs,
)
KL2 = oak_2nd(X)
self.assertIsInstance(KL2, LazyEvaluatedKernelTensor)
KM2 = KL2.to_dense()
self.assertIsInstance(KM2, Tensor)
self.assertEqual(KM2.shape, (*batch_shape, n, n))
# symmetry
self.assertTrue(torch.allclose(KM2, KM2.transpose(-2, -1)))
# positivity
self.assertTrue(isposdef(KM2))
self.assertEqual(KM2.dtype, dtype)
self.assertEqual(KM2.device.type, self.device.type)
# testing second order coefficient matrices are upper-triangular
# and contain the transformed values in oak_2nd.raw_coeffs_2
oak_2nd.raw_coeffs_2 = nn.Parameter(
torch.randn_like(oak_2nd.raw_coeffs_2)
)
C2 = oak_2nd.coeffs_2
self.assertTrue(C2.shape == (*batch_shape, d, d))
self.assertTrue((C2.tril() == 0).all())
c2 = oak_2nd.coeff_constraint.transform(oak_2nd.raw_coeffs_2)
i, j = torch.triu_indices(d, d, offset=1)
self.assertTrue(torch.allclose(C2[..., i, j], c2))
# second order effects change the correlation structure
self.assertFalse(torch.allclose(KM, KM2))
# check orthogonality of base kernels
n_test = 7
# inputs on which to evaluate orthogonality
X_ortho = torch.rand(n_test, d, **tkwargs)
# d x quad_deg x quad_deg
K_ortho = oak._orthogonal_base_kernels(X_ortho, oak.z)
# NOTE: at each random test input x_i and for each dimension d,
# sum_j k_d(x_i, z_j) * w_j = 0.
# Note that this implies the GP mean will be orthogonal as well:
# mean(x) = sum_j k(x, x_j) alpha_j
# so
# sum_i mean(z_i) w_i
# = sum_j alpha_j (sum_i k(z_i, x_j) w_i) // exchanging summations order
# = sum_j alpha_j (0) // due to symmetry
# = 0
tol = 1e-5
self.assertTrue(((K_ortho @ oak.w).squeeze(-1) < tol).all())
def isposdef(A: Tensor) -> bool:
"""Determines whether A is positive definite or not, by attempting a Cholesky
decomposition. Expects batches of square matrices. Throws a RuntimeError otherwise.
"""
_, info = torch.linalg.cholesky_ex(A)
return not torch.any(info)
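if __name__ == "__main__":
    # Illustrative sketch (editor's addition, not part of the test suite):
    # isposdef flags positive definiteness via an attempted Cholesky
    # factorization.
    print(isposdef(torch.eye(3)))  # True
    print(isposdef(torch.tensor([[1.0, 2.0], [2.0, 1.0]])))  # False: eigenvalues 3, -1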
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.models.kernels.downsampling import DownsamplingKernel
from botorch.utils.testing import BotorchTestCase
from gpytorch.priors.torch_priors import GammaPrior, NormalPrior
from gpytorch.test.base_kernel_test_case import BaseKernelTestCase
class TestDownsamplingKernel(BotorchTestCase, BaseKernelTestCase):
def create_kernel_no_ard(self, **kwargs):
return DownsamplingKernel(**kwargs)
def create_data_no_batch(self):
return torch.rand(50, 1)
def create_data_single_batch(self):
return torch.rand(2, 3, 1)
def create_data_double_batch(self):
return torch.rand(3, 2, 50, 1)
def test_active_dims_list(self):
# this makes no sense for this kernel since d=1
pass
def test_active_dims_range(self):
# this makes no sense for this kernel since d=1
pass
def test_subset_active_compute_downsampling_function(self):
a = torch.tensor([0.1, 0.2]).view(2, 1)
a_p = torch.tensor([0.3, 0.4]).view(2, 1)
a = torch.cat((a, a_p), 1)
b = torch.tensor([0.2, 0.4]).view(2, 1)
power = 1
offset = 1
kernel = DownsamplingKernel(active_dims=[0])
kernel.initialize(power=power, offset=offset)
kernel.eval()
diff = torch.tensor([[0.72, 0.54], [0.64, 0.48]])
actual = offset + diff.pow(1 + power)
res = kernel(a, b).to_dense()
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_computes_downsampling_function(self):
a = torch.tensor([0.1, 0.2]).view(2, 1)
b = torch.tensor([0.2, 0.4]).view(2, 1)
power = 1
offset = 1
kernel = DownsamplingKernel()
kernel.initialize(power=power, offset=offset)
kernel.eval()
diff = torch.tensor([[0.72, 0.54], [0.64, 0.48]])
actual = offset + diff.pow(1 + power)
res = kernel(a, b).to_dense()
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_subset_computes_active_downsampling_function_batch(self):
a = torch.tensor([[0.1, 0.2, 0.2], [0.3, 0.4, 0.2], [0.5, 0.5, 0.5]]).view(
3, 3, 1
)
a_p = torch.tensor([[0.1, 0.2, 0.2], [0.3, 0.4, 0.2], [0.5, 0.5, 0.5]]).view(
3, 3, 1
)
a = torch.cat((a, a_p), 2)
b = torch.tensor([[0.5, 0.6, 0.1], [0.7, 0.8, 0.2], [0.6, 0.6, 0.5]]).view(
3, 3, 1
)
power = 1
offset = 1
kernel = DownsamplingKernel(batch_shape=torch.Size([3]), active_dims=[0])
kernel.initialize(power=power, offset=offset)
kernel.eval()
res = kernel(a, b).to_dense()
actual = torch.zeros(3, 3, 3)
diff = torch.tensor([[0.45, 0.36, 0.81], [0.4, 0.32, 0.72], [0.4, 0.32, 0.72]])
actual[0, :, :] = offset + diff.pow(1 + power)
diff = torch.tensor(
[[0.21, 0.14, 0.56], [0.18, 0.12, 0.48], [0.24, 0.16, 0.64]]
)
actual[1, :, :] = offset + diff.pow(1 + power)
diff = torch.tensor([[0.2, 0.2, 0.25], [0.2, 0.2, 0.25], [0.2, 0.2, 0.25]])
actual[2, :, :] = offset + diff.pow(1 + power)
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_computes_downsampling_function_batch(self):
a = torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.5]]).view(3, 2, 1)
b = torch.tensor([[0.5, 0.6], [0.7, 0.8], [0.6, 0.6]]).view(3, 2, 1)
power = 1
offset = 1
kernel = DownsamplingKernel(batch_shape=torch.Size([3]))
kernel.initialize(power=power, offset=offset)
kernel.eval()
res = kernel(a, b).to_dense()
actual = torch.zeros(3, 2, 2)
diff = torch.tensor([[0.45, 0.36], [0.4, 0.32]])
actual[0, :, :] = offset + diff.pow(1 + power)
diff = torch.tensor([[0.21, 0.14], [0.18, 0.12]])
actual[1, :, :] = offset + diff.pow(1 + power)
diff = torch.tensor([[0.2, 0.2], [0.2, 0.2]])
actual[2, :, :] = offset + diff.pow(1 + power)
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_initialize_offset(self):
kernel = DownsamplingKernel()
kernel.initialize(offset=1)
actual_value = torch.tensor(1.0).view_as(kernel.offset)
self.assertLess(torch.linalg.norm(kernel.offset - actual_value), 1e-5)
def test_initialize_offset_batch(self):
kernel = DownsamplingKernel(batch_shape=torch.Size([2]))
off_init = torch.tensor([1.0, 2.0])
kernel.initialize(offset=off_init)
actual_value = off_init.view_as(kernel.offset)
self.assertLess(torch.linalg.norm(kernel.offset - actual_value), 1e-5)
def test_initialize_power(self):
kernel = DownsamplingKernel()
kernel.initialize(power=1)
actual_value = torch.tensor(1.0).view_as(kernel.power)
self.assertLess(torch.linalg.norm(kernel.power - actual_value), 1e-5)
def test_initialize_power_batch(self):
kernel = DownsamplingKernel(batch_shape=torch.Size([2]))
power_init = torch.tensor([1.0, 2.0])
kernel.initialize(power=power_init)
actual_value = power_init.view_as(kernel.power)
self.assertLess(torch.linalg.norm(kernel.power - actual_value), 1e-5)
def test_last_dim_is_batch(self):
a = (
torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.5]])
.view(3, 2)
.transpose(-1, -2)
)
b = (
torch.tensor([[0.5, 0.6], [0.7, 0.8], [0.6, 0.6]])
.view(3, 2)
.transpose(-1, -2)
)
power = 1
offset = 1
kernel = DownsamplingKernel()
kernel.initialize(power=power, offset=offset)
kernel.eval()
res = kernel(a, b, last_dim_is_batch=True).to_dense()
actual = torch.zeros(3, 2, 2)
diff = torch.tensor([[0.45, 0.36], [0.4, 0.32]])
actual[0, :, :] = offset + diff.pow(1 + power)
diff = torch.tensor([[0.21, 0.14], [0.18, 0.12]])
actual[1, :, :] = offset + diff.pow(1 + power)
diff = torch.tensor([[0.2, 0.2], [0.2, 0.2]])
actual[2, :, :] = offset + diff.pow(1 + power)
self.assertLess(torch.linalg.norm(res - actual), 1e-5)
def test_diag_calculation(self):
a = torch.tensor([0.1, 0.2]).view(2, 1)
b = torch.tensor([0.2, 0.4]).view(2, 1)
power = 1
offset = 1
kernel = DownsamplingKernel()
kernel.initialize(power=power, offset=offset)
kernel.eval()
diff = torch.tensor([[0.72, 0.54], [0.64, 0.48]])
actual = offset + diff.pow(1 + power)
res = kernel(a, b, diag=True)
self.assertLess(torch.linalg.norm(res - torch.diag(actual)), 1e-5)
def test_initialize_power_prior(self):
kernel = DownsamplingKernel()
kernel.power_prior = NormalPrior(1, 1)
self.assertTrue(isinstance(kernel.power_prior, NormalPrior))
kernel2 = DownsamplingKernel(power_prior=GammaPrior(1, 1))
self.assertTrue(isinstance(kernel2.power_prior, GammaPrior))
def test_initialize_offset_prior(self):
kernel = DownsamplingKernel()
kernel.offset_prior = NormalPrior(1, 1)
self.assertTrue(isinstance(kernel.offset_prior, NormalPrior))
kernel2 = DownsamplingKernel(offset_prior=GammaPrior(1, 1))
self.assertTrue(isinstance(kernel2.offset_prior, GammaPrior))
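if __name__ == "__main__":
    # Illustrative sketch (editor's addition, not part of the test suite):
    # with these hyperparameters the kernel evaluates to
    # offset + ((1 - x) * (1 - z)) ** (power + 1), consistent with the
    # expected values in the tests above.
    demo_k = DownsamplingKernel()
    demo_k.initialize(power=1.0, offset=1.0)
    demo_k.eval()
    print(demo_k(torch.tensor([[0.1]]), torch.tensor([[0.2]])).to_dense())
    # expected: tensor([[1.5184]]) since 1 + (0.9 * 0.8) ** 2 = 1.5184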
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions import UnsupportedError
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.model import Model
from botorch.models.multitask import MultiTaskGP
from botorch.models.pairwise_gp import PairwiseGP
from botorch.models.utils.parse_training_data import parse_training_data
from botorch.utils.containers import SliceContainer
from botorch.utils.datasets import FixedNoiseDataset, RankingDataset, SupervisedDataset
from botorch.utils.testing import BotorchTestCase
from torch import cat, long, rand, Size, tensor
class TestParseTrainingData(BotorchTestCase):
def test_supervised(self):
with self.assertRaisesRegex(NotImplementedError, "Could not find signature"):
parse_training_data(Model, None)
dataset = SupervisedDataset(X=rand(3, 2), Y=rand(3, 1))
with self.assertRaisesRegex(NotImplementedError, "Could not find signature"):
parse_training_data(None, dataset)
parse = parse_training_data(Model, dataset)
self.assertIsInstance(parse, dict)
self.assertTrue(torch.equal(dataset.X, parse["train_X"]))
self.assertTrue(torch.equal(dataset.Y, parse["train_Y"]))
def test_fixedNoise(self):
# Test passing a `SupervisedDataset`
dataset = SupervisedDataset(X=rand(3, 2), Y=rand(3, 1))
parse = parse_training_data(FixedNoiseGP, dataset)
self.assertTrue("train_Yvar" not in parse)
self.assertTrue(torch.equal(dataset.X, parse["train_X"]))
self.assertTrue(torch.equal(dataset.Y, parse["train_Y"]))
# Test passing a `FixedNoiseDataset`
dataset = FixedNoiseDataset(X=rand(3, 2), Y=rand(3, 1), Yvar=rand(3, 1))
parse = parse_training_data(FixedNoiseGP, dataset)
self.assertTrue(torch.equal(dataset.X, parse["train_X"]))
self.assertTrue(torch.equal(dataset.Y, parse["train_Y"]))
self.assertTrue(torch.equal(dataset.Yvar, parse["train_Yvar"]))
def test_pairwiseGP_ranking(self):
# Test parsing Ranking Dataset for PairwiseGP
datapoints = rand(3, 2)
indices = tensor([[0, 1], [1, 2]], dtype=long)
event_shape = Size([2 * datapoints.shape[-1]])
dataset_X = SliceContainer(datapoints, indices, event_shape=event_shape)
dataset_Y = tensor([[0, 1], [1, 0]]).expand(indices.shape)
dataset = RankingDataset(X=dataset_X, Y=dataset_Y)
parse = parse_training_data(PairwiseGP, dataset)
self.assertTrue(dataset._X.values.equal(parse["datapoints"]))
comparisons = tensor([[0, 1], [2, 1]], dtype=long)
self.assertTrue(comparisons.equal(parse["comparisons"]))
def test_dict(self):
n = 3
m = 2
datasets = {i: SupervisedDataset(X=rand(n, 2), Y=rand(n, 1)) for i in range(m)}
parse_training_data(Model, {0: datasets[0]})
with self.assertRaisesRegex(UnsupportedError, "multiple datasets to single"):
parse_training_data(Model, datasets)
_datasets = datasets.copy()
_datasets[m] = SupervisedDataset(rand(n, 2), rand(n, 1), rand(n, 1))
with self.assertRaisesRegex(UnsupportedError, "Cannot combine .* hetero"):
parse_training_data(MultiTaskGP, _datasets)
with self.assertRaisesRegex(ValueError, "Missing required term"):
parse_training_data(MultiTaskGP, datasets, task_feature_container="foo")
with self.assertRaisesRegex(ValueError, "out-of-bounds"):
parse_training_data(MultiTaskGP, datasets, task_feature=-m - 2)
with self.assertRaisesRegex(ValueError, "out-of-bounds"):
parse_training_data(MultiTaskGP, datasets, task_feature=m + 1)
X = cat([dataset.X for dataset in datasets.values()])
Y = cat([dataset.Y for dataset in datasets.values()])
for i in (0, 1, 2):
parse = parse_training_data(MultiTaskGP, datasets, task_feature=i)
self.assertTrue(torch.equal(Y, parse["train_Y"]))
X2 = cat([parse["train_X"][..., :i], parse["train_X"][..., i + 1 :]], -1)
self.assertTrue(X.equal(X2))
for j, task_features in enumerate(parse["train_X"][..., i].split(n)):
self.assertTrue(task_features.eq(j).all())
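if __name__ == "__main__":
    # Illustrative sketch (editor's addition, not part of the test suite):
    # parse_training_data maps a dataset onto constructor keyword arguments
    # for the given model class. The `demo_*` names are hypothetical.
    demo_dataset = SupervisedDataset(X=rand(5, 2), Y=rand(5, 1))
    demo_parse = parse_training_data(Model, demo_dataset)
    print(sorted(demo_parse))  # expected to include 'train_X' and 'train_Y'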
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.models.approximate_gp import SingleTaskVariationalGP
from botorch.models.utils.inducing_point_allocators import (
_pivoted_cholesky_init,
ExpectedImprovementQualityFunction,
GreedyImprovementReduction,
GreedyVarianceReduction,
UnitQualityFunction,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import VariationalELBO
class TestUnitQualityFunction(BotorchTestCase):
def setUp(self):
super().setUp()
self.quality_function = UnitQualityFunction()
def test_returns_ones_and_correct_shape(self):
train_X = torch.rand(15, 1, device=self.device)
scores = self.quality_function(train_X)
self.assertTrue(torch.equal(scores, torch.ones([15], device=self.device)))
class TestExpectedImprovementQualityFunction(BotorchTestCase):
def setUp(self):
super().setUp()
train_X = torch.rand(10, 1, device=self.device)
train_y = torch.sin(train_X) + torch.randn_like(train_X) * 0.2
self.previous_model = SingleTaskVariationalGP(
train_X=train_X, likelihood=GaussianLikelihood()
).to(self.device)
mll = VariationalELBO(
self.previous_model.likelihood, self.previous_model.model, num_data=10
)
loss = -mll(
self.previous_model.likelihood(self.previous_model(train_X)), train_y
).sum()
loss.backward()
def test_returns_correct_shape(self):
train_X = torch.rand(15, 1, device=self.device)
for maximize in [True, False]:
quality_function = ExpectedImprovementQualityFunction(
self.previous_model, maximize=maximize
)
scores = quality_function(train_X)
self.assertEqual(scores.shape, torch.Size([15]))
def test_raises_for_multi_output_model(self):
train_X = torch.rand(15, 1, device=self.device)
mo_model = SingleTaskVariationalGP(
train_X=train_X, likelihood=GaussianLikelihood(), num_outputs=5
).to(self.device)
with self.assertRaises(NotImplementedError):
ExpectedImprovementQualityFunction(mo_model, maximize=True)
def test_different_for_maximize_and_minimize(self):
train_X = torch.rand(15, 1, device=self.device)
quality_function_for_max = ExpectedImprovementQualityFunction(
self.previous_model, maximize=True
)
scores_for_max = quality_function_for_max(train_X)
quality_function_for_min = ExpectedImprovementQualityFunction(
self.previous_model, maximize=False
)
scores_for_min = quality_function_for_min(train_X)
self.assertFalse(torch.equal(scores_for_min, scores_for_max))
def test_ei_calc_via_monte_carlo(self):
for maximize in [True, False]:
train_X = torch.rand(10, 1, device=self.device)
posterior = self.previous_model.posterior(train_X)
mean = posterior.mean.squeeze(-2).squeeze(-1)
sigma = posterior.variance.sqrt().view(mean.shape)
normal = torch.distributions.Normal(mean, sigma)
samples = normal.sample([1_000_000])
if maximize:
baseline = torch.min(mean)
                ei = torch.clamp(samples - baseline, min=0.0).mean(dim=0)
else:
baseline = torch.max(mean)
                ei = torch.clamp(baseline - samples, min=0.0).mean(dim=0)
quality_function = ExpectedImprovementQualityFunction(
self.previous_model, maximize
)
self.assertAllClose(ei, quality_function(train_X), atol=0.01, rtol=0.01)
class TestGreedyVarianceReduction(BotorchTestCase):
def setUp(self):
super().setUp()
self.ipa = GreedyVarianceReduction()
def test_initialization(self):
self.assertIsInstance(self.ipa, GreedyVarianceReduction)
def test_allocate_inducing_points_doesnt_leak(self) -> None:
"""
Run 'allocate_inducing_points' and check that all tensors allocated
        in that function are garbage-collected.
"""
def _get_n_tensors_tracked_by_gc() -> int:
gc.collect()
return sum(1 for elt in gc.get_objects() if isinstance(elt, torch.Tensor))
def f() -> None:
"""Construct and use a GreedyVarianceReduction allocator."""
x = torch.rand(7, 3).to(self.device)
kernel = ScaleKernel(MaternKernel())
allocator = GreedyVarianceReduction()
allocator.allocate_inducing_points(x, kernel, 4, x.shape[:-2])
n_tensors_before = _get_n_tensors_tracked_by_gc()
f()
n_tensors_after = _get_n_tensors_tracked_by_gc()
self.assertEqual(n_tensors_before, n_tensors_after)
def test_inducing_points_shape_and_repeatability(self):
for train_X in [
torch.rand(15, 1, device=self.device), # single task
torch.rand(2, 15, 1, device=self.device), # batched inputs
]:
inducing_points_1 = self.ipa.allocate_inducing_points(
inputs=train_X,
covar_module=MaternKernel(),
num_inducing=5,
input_batch_shape=torch.Size([]),
)
inducing_points_2 = self.ipa.allocate_inducing_points(
inputs=train_X,
covar_module=MaternKernel(),
num_inducing=5,
input_batch_shape=torch.Size([]),
)
            if train_X.ndim == 3:  # batched inputs
self.assertEqual(inducing_points_1.shape, (2, 5, 1))
self.assertEqual(inducing_points_2.shape, (2, 5, 1))
else:
self.assertEqual(inducing_points_1.shape, (5, 1))
self.assertEqual(inducing_points_2.shape, (5, 1))
self.assertAllClose(inducing_points_1, inducing_points_2)
def test_that_we_dont_get_redundant_inducing_points(self):
train_X = torch.rand(15, 1, device=self.device)
stacked_train_X = torch.cat((train_X, train_X), dim=0)
num_inducing = 20
inducing_points_1 = self.ipa.allocate_inducing_points(
inputs=stacked_train_X,
covar_module=MaternKernel(),
num_inducing=num_inducing,
input_batch_shape=torch.Size([]),
)
        # should not have 20 inducing points when only 15 unique points
        # are passed
self.assertLess(inducing_points_1.shape[-2], num_inducing)
class TestGreedyImprovementReduction(BotorchTestCase):
def setUp(self):
super().setUp()
train_X = torch.rand(10, 1, device=self.device)
train_y = torch.sin(train_X) + torch.randn_like(train_X) * 0.2
self.previous_model = SingleTaskVariationalGP(
train_X=train_X, likelihood=GaussianLikelihood()
).to(self.device)
mll = VariationalELBO(
self.previous_model.likelihood, self.previous_model.model, num_data=10
)
loss = -mll(
self.previous_model.likelihood(self.previous_model(train_X)), train_y
).sum()
loss.backward()
self.ipa = GreedyImprovementReduction(self.previous_model, maximize=True)
def test_initialization(self):
self.assertIsInstance(self.ipa, GreedyImprovementReduction)
self.assertIsInstance(self.ipa._model, SingleTaskVariationalGP)
self.assertEqual(self.ipa._maximize, True)
def test_raises_for_multi_output_model(self):
train_X = torch.rand(10, 1, device=self.device)
model = SingleTaskVariationalGP(
train_X=train_X, likelihood=GaussianLikelihood(), num_outputs=5
).to(self.device)
ipa = GreedyImprovementReduction(model, maximize=True)
with self.assertRaises(NotImplementedError):
ipa.allocate_inducing_points(
inputs=train_X,
covar_module=MaternKernel(),
num_inducing=5,
input_batch_shape=torch.Size([]),
)
def test_inducing_points_shape_and_repeatability(self):
        for train_X in [
torch.rand(15, 1, device=self.device), # single task
torch.rand(2, 15, 1, device=self.device), # batched inputs
]:
inducing_points_1 = self.ipa.allocate_inducing_points(
inputs=train_X,
covar_module=MaternKernel(),
num_inducing=5,
input_batch_shape=torch.Size([]),
)
inducing_points_2 = self.ipa.allocate_inducing_points(
inputs=train_X,
covar_module=MaternKernel(),
num_inducing=5,
input_batch_shape=torch.Size([]),
)
            if train_X.ndim == 3:  # batched inputs
self.assertEqual(inducing_points_1.shape, (2, 5, 1))
self.assertEqual(inducing_points_2.shape, (2, 5, 1))
else:
self.assertEqual(inducing_points_1.shape, (5, 1))
self.assertEqual(inducing_points_2.shape, (5, 1))
self.assertAllClose(inducing_points_1, inducing_points_2)
def test_that_we_dont_get_redundant_inducing_points(self):
train_X = torch.rand(15, 1, device=self.device)
stacked_train_X = torch.cat((train_X, train_X), dim=0)
num_inducing = 20
inducing_points_1 = self.ipa.allocate_inducing_points(
inputs=stacked_train_X,
covar_module=MaternKernel(),
num_inducing=num_inducing,
input_batch_shape=torch.Size([]),
)
        # should not have 20 inducing points when only 15 unique points
        # are passed
self.assertLess(inducing_points_1.shape[-2], num_inducing)
def test_inducing_points_different_when_minimizing(self):
ipa_for_max = GreedyImprovementReduction(self.previous_model, maximize=True)
ipa_for_min = GreedyImprovementReduction(self.previous_model, maximize=False)
train_X = torch.rand(15, 1, device=self.device)
inducing_points_for_max = ipa_for_max.allocate_inducing_points(
inputs=train_X,
covar_module=MaternKernel(),
num_inducing=10,
input_batch_shape=torch.Size([]),
)
inducing_points_for_min = ipa_for_min.allocate_inducing_points(
inputs=train_X,
covar_module=MaternKernel(),
num_inducing=10,
input_batch_shape=torch.Size([]),
)
self.assertFalse(torch.equal(inducing_points_for_min, inducing_points_for_max))
class TestPivotedCholeskyInit(BotorchTestCase):
def test_raises_for_quality_function_with_invalid_shape(self):
inputs = torch.rand(15, 1, device=self.device)
with torch.no_grad():
train_train_kernel = (
MaternKernel().to(self.device)(inputs).evaluate_kernel()
)
quality_scores = torch.ones([10, 1], device=self.device)
with self.assertRaisesRegex(ValueError, ".*requires a quality score"):
_pivoted_cholesky_init(
train_inputs=inputs,
kernel_matrix=train_train_kernel,
max_length=10,
quality_scores=quality_scores,
)
def test_raises_for_kernel_with_grad(self) -> None:
inputs = torch.rand(15, 1, device=self.device)
train_train_kernel = MaternKernel().to(self.device)(inputs).evaluate_kernel()
quality_scores = torch.ones(15, device=self.device)
with self.assertRaisesRegex(
UnsupportedError,
"`_pivoted_cholesky_init` does not support using a `kernel_matrix` "
"with `requires_grad=True`.",
):
_pivoted_cholesky_init(
train_inputs=inputs,
kernel_matrix=train_train_kernel,
max_length=10,
quality_scores=quality_scores,
)
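if __name__ == "__main__":
    # Illustrative sketch (editor's addition, not part of the test suite):
    # pick 4 inducing points out of 20 random 2-d inputs via greedy variance
    # reduction, using the same call signature as the tests above.
    demo_X = torch.rand(20, 2)
    demo_points = GreedyVarianceReduction().allocate_inducing_points(
        demo_X, ScaleKernel(MaternKernel()), 4, demo_X.shape[:-2]
    )
    print(demo_points.shape)  # expected: torch.Size([4, 2])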
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import torch
from botorch import settings
from botorch.exceptions import InputDataError, InputDataWarning
from botorch.models.utils import (
add_output_dim,
check_min_max_scaling,
check_no_nans,
check_standardization,
fantasize,
gpt_posterior_settings,
multioutput_to_batch_mode_transform,
validate_input_scaling,
)
from botorch.models.utils.assorted import consolidate_duplicates, detect_duplicates
from botorch.utils.testing import BotorchTestCase
from gpytorch import settings as gpt_settings
class TestMultiOutputToBatchModeTransform(BotorchTestCase):
def test_multioutput_to_batch_mode_transform(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
n = 3
num_outputs = 2
train_X = torch.rand(n, 1, **tkwargs)
train_Y = torch.rand(n, num_outputs, **tkwargs)
train_Yvar = torch.rand(n, num_outputs, **tkwargs)
X_out, Y_out, Yvar_out = multioutput_to_batch_mode_transform(
train_X=train_X,
train_Y=train_Y,
num_outputs=num_outputs,
train_Yvar=train_Yvar,
)
expected_X_out = train_X.unsqueeze(0).expand(num_outputs, -1, 1)
self.assertTrue(torch.equal(X_out, expected_X_out))
self.assertTrue(torch.equal(Y_out, train_Y.transpose(0, 1)))
self.assertTrue(torch.equal(Yvar_out, train_Yvar.transpose(0, 1)))
class TestAddOutputDim(BotorchTestCase):
def test_add_output_dim(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
original_batch_shape = torch.Size([2])
# check exception is raised when trailing batch dims do not line up
X = torch.rand(2, 3, 2, 1, **tkwargs)
with self.assertRaises(RuntimeError):
add_output_dim(X=X, original_batch_shape=original_batch_shape)
# test no new batch dims
X = torch.rand(2, 2, 1, **tkwargs)
X_out, output_dim_idx = add_output_dim(
X=X, original_batch_shape=original_batch_shape
)
self.assertTrue(torch.equal(X_out, X.unsqueeze(1)))
self.assertEqual(output_dim_idx, 1)
# test new batch dims
X = torch.rand(3, 2, 2, 1, **tkwargs)
X_out, output_dim_idx = add_output_dim(
X=X, original_batch_shape=original_batch_shape
)
self.assertTrue(torch.equal(X_out, X.unsqueeze(2)))
self.assertEqual(output_dim_idx, 2)
class TestInputDataChecks(BotorchTestCase):
def setUp(self) -> None:
# The super class usually disables input data warnings in unit tests.
# Don't do that here.
super().setUp(suppress_input_warnings=False)
def test_check_no_nans(self):
check_no_nans(torch.tensor([1.0, 2.0]))
with self.assertRaises(InputDataError):
check_no_nans(torch.tensor([1.0, float("nan")]))
def test_check_min_max_scaling(self):
with settings.debug(True):
# check unscaled input in unit cube
X = 0.1 + 0.8 * torch.rand(4, 2, 3)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
check_min_max_scaling(X=X, raise_on_fail=True)
with self.assertWarnsRegex(
expected_warning=InputDataWarning, expected_regex="not scaled"
):
check_min_max_scaling(X=X, strict=True)
with self.assertRaises(InputDataError):
check_min_max_scaling(X=X, strict=True, raise_on_fail=True)
# check proper input
Xmin, Xmax = X.min(dim=-1, keepdim=True)[0], X.max(dim=-1, keepdim=True)[0]
Xstd = (X - Xmin) / (Xmax - Xmin)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=Xstd)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
check_min_max_scaling(X=Xstd, raise_on_fail=True)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=Xstd, strict=True)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
check_min_max_scaling(X=Xstd, strict=True, raise_on_fail=True)
# check violation
X[0, 0, 0] = 2
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X)
self.assertTrue(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
self.assertTrue(any("not contained" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_min_max_scaling(X=X, raise_on_fail=True)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X, strict=True)
self.assertTrue(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
self.assertTrue(any("not contained" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_min_max_scaling(X=X, strict=True, raise_on_fail=True)
# check ignore_dims
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X, ignore_dims=[0])
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
def test_check_standardization(self):
# Ensure that it is not filtered out.
warnings.filterwarnings("always", category=InputDataWarning)
Y = torch.randn(3, 4, 2)
# check standardized input
Yst = (Y - Y.mean(dim=-2, keepdim=True)) / Y.std(dim=-2, keepdim=True)
with warnings.catch_warnings(record=True) as ws:
check_standardization(Y=Yst)
self.assertFalse(any(issubclass(w.category, InputDataWarning) for w in ws))
check_standardization(Y=Yst, raise_on_fail=True)
# check nonzero mean
with warnings.catch_warnings(record=True) as ws:
check_standardization(Y=Yst + 1)
self.assertTrue(any(issubclass(w.category, InputDataWarning) for w in ws))
self.assertTrue(any("not standardized" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_standardization(Y=Yst + 1, raise_on_fail=True)
# check non-unit variance
with warnings.catch_warnings(record=True) as ws:
check_standardization(Y=Yst * 2)
self.assertTrue(any(issubclass(w.category, InputDataWarning) for w in ws))
self.assertTrue(any("not standardized" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_standardization(Y=Yst * 2, raise_on_fail=True)
def test_validate_input_scaling(self):
train_X = 2 + torch.rand(3, 4, 3)
train_Y = torch.randn(3, 4, 2)
# check that nothing is being checked
with settings.validate_input_scaling(False), settings.debug(True):
with warnings.catch_warnings(record=True) as ws:
validate_input_scaling(train_X=train_X, train_Y=train_Y)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
# check that warnings are being issued
with settings.debug(True), warnings.catch_warnings(record=True) as ws:
validate_input_scaling(train_X=train_X, train_Y=train_Y)
self.assertTrue(any(issubclass(w.category, InputDataWarning) for w in ws))
# check that errors are raised when requested
with settings.debug(True):
with self.assertRaises(InputDataError):
validate_input_scaling(
train_X=train_X, train_Y=train_Y, raise_on_fail=True
)
# check that no errors are being raised if everything is standardized
train_X_min = train_X.min(dim=-1, keepdim=True)[0]
train_X_max = train_X.max(dim=-1, keepdim=True)[0]
train_X_std = (train_X - train_X_min) / (train_X_max - train_X_min)
train_Y_std = (train_Y - train_Y.mean(dim=-2, keepdim=True)) / train_Y.std(
dim=-2, keepdim=True
)
with settings.debug(True), warnings.catch_warnings(record=True) as ws:
validate_input_scaling(train_X=train_X_std, train_Y=train_Y_std)
self.assertFalse(any(issubclass(w.category, InputDataWarning) for w in ws))
# test that negative variances raise an error
train_Yvar = torch.rand_like(train_Y_std)
train_Yvar[0, 0, 1] = -0.5
with settings.debug(True):
with self.assertRaises(InputDataError):
validate_input_scaling(
train_X=train_X_std, train_Y=train_Y_std, train_Yvar=train_Yvar
)
# check that NaNs raise errors
train_X_std[0, 0, 0] = float("nan")
with settings.debug(True):
with self.assertRaises(InputDataError):
validate_input_scaling(train_X=train_X_std, train_Y=train_Y_std)
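

# A usage sketch added for illustration (not part of the original test suite):
# the intended call pattern for the scaling checks exercised above. Assumes
# check_min_max_scaling, check_standardization, and validate_input_scaling are
# in scope, as they are via this module's imports.
def _example_input_scaling_checks() -> None:
    X = torch.rand(10, 3)  # inputs already lie in the unit cube [0, 1]^3
    Y = torch.randn(10, 2)
    # standardize each outcome to zero mean and unit variance
    Y_std = (Y - Y.mean(dim=-2, keepdim=True)) / Y.std(dim=-2, keepdim=True)
    check_min_max_scaling(X=X, raise_on_fail=True)  # passes: X is in [0, 1]^3
    check_standardization(Y=Y_std, raise_on_fail=True)  # passes
    # the combined check, as typically invoked from a model constructor
    validate_input_scaling(train_X=X, train_Y=Y_std, raise_on_fail=True)
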
class TestGPTPosteriorSettings(BotorchTestCase):
def test_gpt_posterior_settings(self):
for propagate_grads in (False, True):
with settings.propagate_grads(propagate_grads):
with gpt_posterior_settings():
self.assertTrue(gpt_settings.debug.off())
self.assertTrue(gpt_settings.fast_pred_var.on())
if settings.propagate_grads.off():
self.assertTrue(gpt_settings.detach_test_caches.on())
else:
self.assertTrue(gpt_settings.detach_test_caches.off())
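

# A hedged sketch (illustration, not BoTorch's internal implementation): a
# model's posterior computation is expected to run inside gpt_posterior_settings
# so that the GPyTorch flags asserted above are active. SingleTaskGP is used
# here only as a convenient concrete model.
def _example_gpt_posterior_settings() -> None:
    from botorch.models import SingleTaskGP

    train_X = torch.rand(8, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    model.eval()
    with gpt_posterior_settings():
        # evaluated with gpytorch debug off and fast predictive variances on
        mvn = model(torch.rand(4, 2, dtype=torch.double))
    assert mvn.mean.shape == torch.Size([4])
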
class TestFantasize(BotorchTestCase):
def test_fantasize(self):
self.assertFalse(fantasize.on())
self.assertTrue(fantasize.off())
with fantasize():
self.assertTrue(fantasize.on())
self.assertFalse(fantasize.off())
with fantasize(False):
self.assertFalse(fantasize.on())
self.assertTrue(fantasize.off())
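

# A minimal sketch mirroring the assertions above: fantasize is a global flag
# context manager that downstream code can branch on when constructing fantasy
# models. Illustrative only.
def _example_fantasize_flag() -> bool:
    assert fantasize.off()  # default state
    with fantasize():
        in_context = fantasize.on()  # True inside the context
    assert fantasize.off()  # state is restored on exit
    return in_context
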
class TestConsolidation(BotorchTestCase):
def test_consolidation(self):
X = torch.tensor(
[
[1.0, 2.0, 3.0],
[2.0, 3.0, 4.0],
[1.0, 2.0, 3.0],
[3.0, 4.0, 5.0],
]
)
Y = torch.tensor([[0, 1], [2, 3]])
expected_X = torch.tensor(
[
[1.0, 2.0, 3.0],
[2.0, 3.0, 4.0],
[3.0, 4.0, 5.0],
]
)
expected_Y = torch.tensor([[0, 1], [0, 2]])
expected_new_indices = torch.tensor([0, 1, 0, 2])
# deduped case
consolidated_X, consolidated_Y, new_indices = consolidate_duplicates(X=X, Y=Y)
self.assertTrue(torch.equal(consolidated_X, expected_X))
self.assertTrue(torch.equal(consolidated_Y, expected_Y))
self.assertTrue(torch.equal(new_indices, expected_new_indices))
# test rtol
big_X = torch.tensor(
[
[10000.0, 20000.0, 30000.0],
[20000.0, 30000.0, 40000.0],
[10000.0, 20000.0, 30001.0],
[30000.0, 40000.0, 50000.0],
]
)
expected_big_X = torch.tensor(
[
[10000.0, 20000.0, 30000.0],
[20000.0, 30000.0, 40000.0],
[30000.0, 40000.0, 50000.0],
]
)
        # rtol is not used by default, so the near-duplicate row is not merged
consolidated_X, consolidated_Y, new_indices = consolidate_duplicates(
X=big_X, Y=Y
)
self.assertTrue(torch.equal(consolidated_X, big_X))
self.assertTrue(torch.equal(consolidated_Y, Y))
self.assertTrue(torch.equal(new_indices, torch.tensor([0, 1, 2, 3])))
# when rtol is used
consolidated_X, consolidated_Y, new_indices = consolidate_duplicates(
X=big_X, Y=Y, rtol=1e-4, atol=0
)
self.assertTrue(torch.equal(consolidated_X, expected_big_X))
self.assertTrue(torch.equal(consolidated_Y, expected_Y))
self.assertTrue(torch.equal(new_indices, expected_new_indices))
# not deduped case
no_dup_X = torch.tensor(
[
[1.0, 2.0, 3.0],
[2.0, 3.0, 4.0],
[3.0, 4.0, 5.0],
[4.0, 5.0, 6.0],
]
)
consolidated_X, consolidated_Y, new_indices = consolidate_duplicates(
X=no_dup_X, Y=Y
)
self.assertTrue(torch.equal(consolidated_X, no_dup_X))
self.assertTrue(torch.equal(consolidated_Y, Y))
self.assertTrue(torch.equal(new_indices, torch.tensor([0, 1, 2, 3])))
        # test that batched inputs raise an error
with self.assertRaises(ValueError):
consolidate_duplicates(X=X.repeat(2, 1, 1), Y=Y.repeat(2, 1, 1))
with self.assertRaises(ValueError):
detect_duplicates(X=X.repeat(2, 1, 1))
        # test chain-link edge case: consecutive rows are within atol of each
        # other, but the chain endpoints are not, so nothing is consolidated
close_X = torch.tensor(
[
[1.0, 2.0, 3.0],
[1.0, 2.0, 3.4],
[1.0, 2.0, 3.8],
[1.0, 2.0, 4.2],
]
)
consolidated_X, consolidated_Y, new_indices = consolidate_duplicates(
X=close_X, Y=Y, rtol=0, atol=0.5
)
self.assertTrue(torch.equal(consolidated_X, close_X))
self.assertTrue(torch.equal(consolidated_Y, Y))
self.assertTrue(torch.equal(new_indices, torch.tensor([0, 1, 2, 3])))
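

# A usage sketch grounded in the expectations above: consolidate duplicate rows
# of X (e.g. repeated designs in pairwise-comparison data) and remap the
# comparison indices in Y accordingly.
def _example_consolidate_duplicates() -> None:
    X = torch.tensor(
        [
            [1.0, 2.0, 3.0],
            [2.0, 3.0, 4.0],
            [1.0, 2.0, 3.0],  # duplicate of row 0
            [3.0, 4.0, 5.0],
        ]
    )
    Y = torch.tensor([[0, 1], [2, 3]])  # comparisons indexing rows of X
    new_X, new_Y, new_indices = consolidate_duplicates(X=X, Y=Y)
    assert new_X.shape == torch.Size([3, 3])  # one duplicate row removed
    # old row -> consolidated row: rows 0 and 2 map to the same entry
    assert torch.equal(new_indices, torch.tensor([0, 1, 0, 2]))
    assert torch.equal(new_Y, torch.tensor([[0, 1], [0, 2]]))
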
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.models.utils.gpytorch_modules import (
get_gaussian_likelihood_with_gamma_prior,
get_matern_kernel_with_gamma_prior,
MIN_INFERRED_NOISE_LEVEL,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch.constraints.constraints import GreaterThan
from gpytorch.kernels.matern_kernel import MaternKernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.likelihoods.gaussian_likelihood import GaussianLikelihood
from gpytorch.priors.torch_priors import GammaPrior


class TestGPyTorchModules(BotorchTestCase):
def test_get_matern_kernel_with_gamma_prior(self):
for batch_shape in (None, torch.Size([2])):
kernel = get_matern_kernel_with_gamma_prior(
ard_num_dims=2, batch_shape=batch_shape
)
self.assertIsInstance(kernel, ScaleKernel)
self.assertEqual(kernel.batch_shape, batch_shape or torch.Size([]))
prior = kernel.outputscale_prior
self.assertIsInstance(prior, GammaPrior)
self.assertAllClose(prior.concentration.item(), 2.0)
self.assertAllClose(prior.rate.item(), 0.15)
base_kernel = kernel.base_kernel
self.assertIsInstance(base_kernel, MaternKernel)
self.assertEqual(base_kernel.batch_shape, batch_shape or torch.Size([]))
self.assertEqual(base_kernel.ard_num_dims, 2)
prior = base_kernel.lengthscale_prior
self.assertIsInstance(prior, GammaPrior)
self.assertAllClose(prior.concentration.item(), 3.0)
self.assertAllClose(prior.rate.item(), 6.0)

    def test_get_gaussian_likelihood_with_gamma_prior(self):
for batch_shape in (None, torch.Size([2])):
likelihood = get_gaussian_likelihood_with_gamma_prior(
batch_shape=batch_shape
)
self.assertIsInstance(likelihood, GaussianLikelihood)
expected_shape = (batch_shape or torch.Size([])) + (1,)
self.assertEqual(likelihood.raw_noise.shape, expected_shape)
prior = likelihood.noise_covar.noise_prior
self.assertIsInstance(prior, GammaPrior)
self.assertAllClose(prior.concentration.item(), 1.1)
self.assertAllClose(prior.rate.item(), 0.05)
constraint = likelihood.noise_covar.raw_noise_constraint
self.assertIsInstance(constraint, GreaterThan)
self.assertAllClose(constraint.lower_bound.item(), MIN_INFERRED_NOISE_LEVEL)
self.assertIsNone(constraint._transform)
self.assertAllClose(constraint.initial_value.item(), 2.0)
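

# A hedged sketch of wiring the two helpers into a model. That SingleTaskGP
# accepts covar_module and likelihood keyword arguments is an assumption about
# the BoTorch API, not something asserted by the tests above.
def _example_model_with_gamma_priors() -> None:
    from botorch.models import SingleTaskGP

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(
        train_X,
        train_Y,
        covar_module=get_matern_kernel_with_gamma_prior(ard_num_dims=2),
        likelihood=get_gaussian_likelihood_with_gamma_prior(),
    )
    assert isinstance(model.covar_module, ScaleKernel)
    assert isinstance(model.likelihood, GaussianLikelihood)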