#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import argparse
import re
def patch_config(
config_file: str, base_url: str | None = None, disable_algolia: bool = True
) -> None:
config = open(config_file, "r").read()
if base_url is not None:
config = re.sub("baseUrl = '/';", "baseUrl = '{}';".format(base_url), config)
if disable_algolia is True:
config = re.sub(
"const includeAlgolia = true;", "const includeAlgolia = false;", config
)
with open(config_file, "w") as outfile:
outfile.write(config)
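# Example (illustrative; the path and base url below are hypothetical):
# >>> patch_config("website/siteConfig.js", base_url="/botorch/", disable_algolia=True)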
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Path Docusaurus siteConfig.js file when building site."
)
parser.add_argument(
"-f",
"--config_file",
metavar="path",
required=True,
help="Path to configuration file.",
)
parser.add_argument(
"-b",
"--base_url",
type=str,
required=False,
help="Value for baseUrl.",
default=None,
)
parser.add_argument(
"--disable_algolia",
required=False,
action="store_true",
help="Disable algolia.",
)
args = parser.parse_args()
patch_config(args.config_file, args.base_url, args.disable_algolia)
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import argparse
import os
import pkgutil
import re
from typing import Set
# Paths are relative to top-level botorch directory (passed as arg below)
SPHINX_RST_PATH = os.path.join("sphinx", "source")
BOTORCH_LIBRARY_PATH = "botorch"
# Regex for automodule directive used in Sphinx docs
AUTOMODULE_REGEX = re.compile(r"\.\. automodule:: ([\.\w]*)")
# The top-level modules in botorch not to be validated
EXCLUDED_MODULES = {"version"}
def parse_rst(rst_filename: str) -> Set[str]:
"""Extract automodule directives from rst."""
ret = set()
with open(rst_filename, "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
name = AUTOMODULE_REGEX.findall(line)
if name:
ret.add(name[0])
return ret
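# Example (illustrative): an rst file containing the line
# ".. automodule:: botorch.acquisition.analytic"
# would yield {"botorch.acquisition.analytic"} from this function.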
def validate_complete_sphinx(path_to_botorch: str) -> None:
"""Validate that Sphinx-based API documentation is complete.
- Every top-level module (e.g., acquisition, models, etc.) should have a
corresponding .rst Sphinx source file in sphinx/source.
- Every single non-package (i.e. py file) module should be included in an
.rst file `automodule::` directive. Sphinx will then automatically
include all members from the module in the documentation.
Note: this function does not validate any documentation, only its presence.
Args:
path_to_botorch: the path to the top-level botorch directory (directory
that includes botorch library, sphinx, website, etc.).
"""
# Load top-level modules used in botorch (e.g., acquisition, models)
# Exclude auxiliary packages
modules = {
modname
for importer, modname, ispkg in pkgutil.walk_packages(
path=[BOTORCH_LIBRARY_PATH], onerror=lambda x: None
)
if modname not in EXCLUDED_MODULES
}
# Load all rst files (these contain the documentation for Sphinx)
rstpath = os.path.join(path_to_botorch, SPHINX_RST_PATH)
rsts = {f.replace(".rst", "") for f in os.listdir(rstpath) if f.endswith(".rst")}
# Verify that all top-level modules have a corresponding rst
missing_rsts = modules.difference(rsts)
if not len(missing_rsts) == 0:
raise RuntimeError(
f"""Not all modules have corresponding rst:
{missing_rsts}
Please add them to the appropriate rst file in {SPHINX_RST_PATH}.
"""
)
# Track all modules that are not in docs (so can print all)
modules_not_in_docs = []
# Iterate over top-level modules
for module in modules.intersection(rsts):
# Parse rst & extract all modules listed in automodule directives
modules_in_rst = parse_rst(os.path.join(rstpath, module + ".rst"))
# Extract all non-package modules
for _importer, modname, ispkg in pkgutil.walk_packages(
path=[
os.path.join(BOTORCH_LIBRARY_PATH, module)
], # botorch.__path__[0], module),
prefix="botorch." + module + ".",
onerror=lambda x: None,
):
if not ispkg and ".tests" not in modname and modname not in modules_in_rst:
modules_not_in_docs.append(modname)
if not len(modules_not_in_docs) == 0:
raise RuntimeError(f"Not all modules are documented: {modules_not_in_docs}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Validate that Sphinx documentation is complete."
)
parser.add_argument(
"-p",
"--path",
metavar="path",
required=True,
help="Path to the top-level botorch directory.",
)
args = parser.parse_args()
validate_complete_sphinx(args.path)
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
LOG_LEVEL_DEFAULT = logging.CRITICAL
def _get_logger(
name: str = "botorch", level: int = LOG_LEVEL_DEFAULT
) -> logging.Logger:
"""Gets a default botorch logger
Logging level can be tuned via botorch.setting.log_level
Args:
name: Name for logger instance
level: Logging threshhold for the given logger. Logs of greater or
equal severity will be printed to STDERR
"""
logger = logging.getLogger(name)
logger.setLevel(level)
# Add timestamps to log messages.
console = logging.StreamHandler()
formatter = logging.Formatter(
fmt="[%(levelname)s %(asctime)s] %(name)s: %(message)s",
datefmt="%m-%d %H:%M:%S",
)
console.setFormatter(formatter)
logger.addHandler(console)
logger.propagate = False
return logger
def shape_to_str(shape: torch.Size) -> str:
return f"`{' x '.join(str(i) for i in shape)}`"
logger = _get_logger()
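# Example (illustrative): modules can reuse this shared logger instead of
# configuring their own handlers.
# >>> from botorch.logging import logger
# >>> logger.info("Fitted model in %.2f seconds.", 1.23)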
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Model fitting routines."""
from __future__ import annotations
import logging
from contextlib import nullcontext
from functools import partial
from itertools import filterfalse
from typing import Any, Callable, Dict, Iterable, Optional, Sequence, Tuple, Type, Union
from warnings import catch_warnings, simplefilter, warn, warn_explicit, WarningMessage
from botorch.exceptions.errors import ModelFittingError, UnsupportedError
from botorch.exceptions.warnings import OptimizationWarning
from botorch.models.approximate_gp import ApproximateGPyTorchModel
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.fully_bayesian_multitask import SaasFullyBayesianMultiTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.optim.closures import get_loss_closure_with_grads
from botorch.optim.core import _LBFGSB_MAXITER_MAXFUN_REGEX
from botorch.optim.fit import fit_gpytorch_mll_scipy, fit_gpytorch_mll_torch
from botorch.optim.utils import (
_warning_handler_template,
get_parameters,
sample_all_priors,
)
from botorch.settings import debug
from botorch.utils.context_managers import (
module_rollback_ctx,
parameter_rollback_ctx,
requires_grad_ctx,
TensorCheckpoint,
)
from botorch.utils.dispatcher import Dispatcher, type_bypassing_encoder
from gpytorch.likelihoods import Likelihood
from gpytorch.mlls._approximate_mll import _ApproximateMarginalLogLikelihood
from gpytorch.mlls.marginal_log_likelihood import MarginalLogLikelihood
from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood
from linear_operator.utils.errors import NotPSDError
from pyro.infer.mcmc import MCMC, NUTS
from torch import device, Tensor
from torch.nn import Parameter
from torch.utils.data import DataLoader
def _debug_warn(w: WarningMessage) -> bool:
if _LBFGSB_MAXITER_MAXFUN_REGEX.search(str(w.message)):
return True
# TODO: Better handle cases where warning handling logic
# affects both debug and rethrow functions.
return False
def _rethrow_warn(w: WarningMessage) -> bool:
if not issubclass(w.category, OptimizationWarning):
return True
if "Optimization timed out after" in str(w.message):
return True
return False
DEFAULT_WARNING_HANDLER = partial(
_warning_handler_template,
debug=_debug_warn,
rethrow=_rethrow_warn,
)
FitGPyTorchMLL = Dispatcher("fit_gpytorch_mll", encoder=type_bypassing_encoder)
def fit_gpytorch_mll(
mll: MarginalLogLikelihood,
closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None,
optimizer: Optional[Callable] = None,
closure_kwargs: Optional[Dict[str, Any]] = None,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> MarginalLogLikelihood:
r"""Clearing house for fitting models passed as GPyTorch MarginalLogLikelihoods.
Args:
mll: A GPyTorch MarginalLogLikelihood instance.
closure: Forward-backward closure for obtaining objective values and gradients.
Responsible for setting parameters' `grad` attributes. If no closure is
provided, one will be obtained by calling `get_loss_closure_with_grads`.
optimizer: User specified optimization algorithm. When `optimizer is None`,
this keyword argument is omitted when calling the dispatcher.
closure_kwargs: Keyword arguments passed when calling `closure`.
optimizer_kwargs: A dictionary of keyword arguments passed when
calling `optimizer`.
**kwargs: Keyword arguments passed down through the dispatcher to
fit subroutines. Unexpected keywords are ignored.
Returns:
The `mll` instance. If fitting succeeded, then `mll` will be in evaluation mode,
i.e. `mll.training == False`. Otherwise, `mll` will be in training mode.
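Example:
>>> # Illustrative sketch; assumes `SingleTaskGP` (botorch.models) and
>>> # `ExactMarginalLogLikelihood` (gpytorch.mlls) are imported.
>>> train_X = torch.rand(20, 2)
>>> train_Y = torch.sin(train_X).sum(dim=-1, keepdim=True)
>>> gp = SingleTaskGP(train_X, train_Y)
>>> mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
>>> mll = fit_gpytorch_mll(mll)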
"""
if optimizer is not None: # defer to per-method defaults
kwargs["optimizer"] = optimizer
return FitGPyTorchMLL(
mll,
type(mll.likelihood),
type(mll.model),
closure=closure,
closure_kwargs=closure_kwargs,
optimizer_kwargs=optimizer_kwargs,
**kwargs,
)
def fit_gpytorch_model(
mll: MarginalLogLikelihood,
optimizer: Optional[Callable] = None,
optimizer_kwargs: Optional[dict] = None,
exclude: Optional[Iterable[str]] = None,
max_retries: Optional[int] = None,
**kwargs: Any,
) -> MarginalLogLikelihood:
r"""Convenience method for fitting GPyTorch models using legacy API. For more
details, see `fit_gpytorch_mll`.
Args:
mll: A GPyTorch MarginalLogLikelihood instance.
optimizer: User specified optimization algorithm. When `optimizer is None`,
this keyword argument is omitted when calling the dispatcher from inside
`fit_gpytorch_mll`.
optimizer_kwargs: Keyword arguments passed to `optimizer`.
exclude: Legacy argument for specifying parameters `x` that should be held fixed
during optimization. Internally, used to temporarily set `x.requires_grad`
to False.
max_retries: Legacy name for `max_attempts`. When `max_retries is None`,
this keyword argument is omitted when calling `fit_gpytorch_mll`.
"""
warn(
"`fit_gpytorch_model` is marked for deprecation, consider using "
"`fit_gpytorch_mll` instead.",
DeprecationWarning,
)
if max_retries is not None:
kwargs["max_attempts"] = max_retries
optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
for key in ("bounds", "options"):
if key not in kwargs:
continue
val = kwargs.pop(key)
if key in optimizer_kwargs and val is not optimizer_kwargs[key]:
raise SyntaxError(f"keyword argument repeated: {key}")
optimizer_kwargs[key] = val
with (
nullcontext()
if exclude is None
else requires_grad_ctx(mll, assignments={name: False for name in exclude})
):
try:
mll = fit_gpytorch_mll(
mll,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
**kwargs,
)
except ModelFittingError as err:
warn(str(err), RuntimeWarning)
return mll
@FitGPyTorchMLL.register(MarginalLogLikelihood, object, object)
def _fit_fallback(
mll: MarginalLogLikelihood,
_: Type[object],
__: Type[object],
*,
closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None,
optimizer: Optional[Callable] = fit_gpytorch_mll_scipy,
closure_kwargs: Optional[Dict[str, Any]] = None,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
max_attempts: int = 5,
warning_handler: Callable[[WarningMessage], bool] = DEFAULT_WARNING_HANDLER,
caught_exception_types: Tuple[Type[BaseException], ...] = (NotPSDError,),
**ignore: Any,
) -> MarginalLogLikelihood:
r"""Generic fallback method for fitting Gaussian processes.
Attempts to fit a model using the provided optimizer, then determines whether or
not to retry by evaluating a given policy on emitted warning messages. The first
attempt is run using the initialized parameter values; subsequent attempts begin
by resampling tunable parameters.
Args:
closure: Forward-backward closure for obtaining objective values and gradients.
Responsible for setting parameters' `grad` attributes. If no closure is
provided, one will be obtained by calling `get_loss_closure_with_grads`.
optimizer: The underlying optimization algorithm to run.
closure_kwargs: Keyword arguments passed to `closure`.
optimizer_kwargs: Keyword arguments passed to `optimizer`.
max_attempts: The maximum number of fit attempts allowed. The attempt budget
is NOT shared between calls to this method.
warning_handler: A function used to filter warnings produced when calling
`optimizer`. Any unfiltered warnings (those for which `warning_handler`
returns `False`) will be rethrown and trigger a model fitting retry.
caught_exception_types: A tuple of exception types whose instances should
be redirected to `logging.DEBUG`.
**ignore: This function ignores unrecognized keyword arguments.
Returns:
The `mll` instance. If fitting succeeded, then `mll` will be in evaluation mode,
i.e. `mll.training == False`. Otherwise, `mll` will be in training mode.
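Example:
>>> # Illustrative: keyword arguments reach this routine through the
>>> # `fit_gpytorch_mll` dispatcher, e.g. to allow additional retries.
>>> mll = fit_gpytorch_mll(mll, max_attempts=10)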
"""
# Setup
optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
params_nograd: Dict[str, Parameter] = None # pyre-ignore [9]
ckpt_nograd: Dict[str, TensorCheckpoint] = None # pyre-ignore [9]
ckpt: Dict[str, TensorCheckpoint] = None # pyre-ignore [9]
# Build closure
mll.train()
if closure is None:
closure = get_loss_closure_with_grads(
mll, parameters=get_parameters(mll, requires_grad=True)
)
if closure_kwargs is not None:
closure = partial(closure, **closure_kwargs)
# Attempt to fit the model
for attempt in range(1, 1 + max_attempts):
# Wrap with rollback contextmanager so that each loop iteration reloads the
# original state_dict upon exiting (unless we clear `ckpt`).
with module_rollback_ctx(mll, checkpoint=ckpt, device=device("cpu")) as ckpt:
if attempt > 1: # resample free parameters
if params_nograd is None:
params_nograd = get_parameters(mll, requires_grad=False)
if ckpt_nograd is None: # reuse primary checkpoint
ckpt_nograd = {name: ckpt[name] for name in params_nograd}
with parameter_rollback_ctx(params_nograd, checkpoint=ckpt_nograd):
sample_all_priors(mll.model)
try:
# Fit the model
with catch_warnings(record=True) as warning_list, debug(True):
simplefilter("always", category=OptimizationWarning)
optimizer(mll, closure=closure, **optimizer_kwargs)
# Resolve warnings and determine whether or not to retry
done = True
for w in filterfalse(warning_handler, warning_list):
warn_explicit(str(w.message), w.category, w.filename, w.lineno)
done = False
if done:
ckpt.clear() # do not rollback upon exiting
return mll.eval()
# Ensure mll is in the right mode if fitting failed
mll = mll if mll.training else mll.train()
logging.log(
logging.DEBUG,
f"Fit attempt #{attempt} of {max_attempts} triggered retry policy"
f"{'.' if attempt == max_attempts else '; retrying...'}",
)
except caught_exception_types as err:
logging.log(
logging.DEBUG,
f"Fit attempt #{attempt} of {max_attempts} failed with exception: "
f"{err}",
)
msg = "All attempts to fit the model have failed."
if debug.off():
msg = msg + " For more information, try enabling botorch.settings.debug mode."
raise ModelFittingError(msg)
@FitGPyTorchMLL.register(SumMarginalLogLikelihood, object, ModelListGP)
def _fit_list(
mll: SumMarginalLogLikelihood,
_: Type[Likelihood],
__: Type[ModelListGP],
**kwargs: Any,
) -> SumMarginalLogLikelihood:
r"""Fitting routine for lists of independent Gaussian processes.
Args:
**kwargs: Passed to each of `mll.mlls`.
Returns:
The `mll` instance. If fitting succeeded for all of `mll.mlls`, then `mll` will
be in evaluation mode, i.e. `mll.training == False`. Otherwise, `mll` will be in
training mode.
"""
mll.train()
for sub_mll in mll.mlls:
fit_gpytorch_mll(sub_mll, **kwargs)
return mll.eval() if not any(sub_mll.training for sub_mll in mll.mlls) else mll
@FitGPyTorchMLL.register(_ApproximateMarginalLogLikelihood, object, object)
def _fit_fallback_approximate(
mll: _ApproximateMarginalLogLikelihood,
_: Type[Likelihood],
__: Type[ApproximateGPyTorchModel],
*,
closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None,
data_loader: Optional[DataLoader] = None,
optimizer: Optional[Callable] = None,
full_batch_limit: int = 1024,
**kwargs: Any,
) -> _ApproximateMarginalLogLikelihood:
r"""Fallback method for fitting approximate Gaussian processes.
Args:
closure: Forward-backward closure for obtaining objective values and gradients.
Responsible for setting parameters' `grad` attributes. If no closure is
provided, one will be obtained by calling `get_loss_closure_with_grads`.
optimizer: The underlying optimization algorithm to run. Defaults to
`fit_gpytorch_mll_scipy` when `closure=None` and the model's internal
training set has no more than `full_batch_limit` observations; otherwise,
defaults to `fit_gpytorch_mll_torch`.
data_loader: An optional DataLoader to pass to `get_loss_closure_with_grads`.
May only be provided when `closure=None`.
full_batch_limit: Threshold for determining the default choice of `optimizer`
when `closure=None`.
**kwargs: Keyword arguments passed to `_fit_fallback`.
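Example:
>>> # Illustrative sketch; `train_loader` is an assumed `DataLoader` yielding
>>> # mini-batches compatible with `mll`. Dispatched via `fit_gpytorch_mll`.
>>> mll = fit_gpytorch_mll(mll, data_loader=train_loader)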
"""
if data_loader is not None:
if closure is not None:
raise UnsupportedError(
"Only one of `data_loader` or `closure` may be passed."
)
closure = get_loss_closure_with_grads(
mll=mll,
data_loader=data_loader,
parameters=get_parameters(mll, requires_grad=True),
)
if optimizer is None:
optimizer = (
fit_gpytorch_mll_scipy
if closure is None and len(mll.model.train_targets) <= full_batch_limit
else fit_gpytorch_mll_torch
)
return _fit_fallback(mll, _, __, closure=closure, optimizer=optimizer, **kwargs)
def fit_fully_bayesian_model_nuts(
model: Union[SaasFullyBayesianSingleTaskGP, SaasFullyBayesianMultiTaskGP],
max_tree_depth: int = 6,
warmup_steps: int = 512,
num_samples: int = 256,
thinning: int = 16,
disable_progbar: bool = False,
jit_compile: bool = False,
) -> None:
r"""Fit a fully Bayesian model using the No-U-Turn-Sampler (NUTS)
Args:
model: The fully Bayesian model to be fitted, e.g. a SaasFullyBayesianSingleTaskGP.
max_tree_depth: Maximum tree depth for NUTS.
warmup_steps: The number of burn-in steps for NUTS.
num_samples: The number of MCMC samples. Note that with thinning,
num_samples / thinning samples are retained.
thinning: The amount of thinning. Every nth sample is retained.
disable_progbar: A boolean indicating whether to print the progress
bar and diagnostics during MCMC.
jit_compile: Whether to use jit. Using jit may be ~2X faster (rough estimate),
but it will also increase the memory usage and sometimes result in runtime
errors, e.g., https://github.com/pyro-ppl/pyro/issues/3136.
Example:
>>> gp = SaasFullyBayesianSingleTaskGP(train_X, train_Y)
>>> fit_fully_bayesian_model_nuts(gp)
"""
model.train()
# Do inference with NUTS
nuts = NUTS(
model.pyro_model.sample,
jit_compile=jit_compile,
full_mass=True,
ignore_jit_warnings=True,
max_tree_depth=max_tree_depth,
)
mcmc = MCMC(
nuts,
warmup_steps=warmup_steps,
num_samples=num_samples,
disable_progbar=disable_progbar,
)
mcmc.run()
# Get final MCMC samples from the Pyro model
mcmc_samples = model.pyro_model.postprocess_mcmc_samples(
mcmc_samples=mcmc.get_samples()
)
for k, v in mcmc_samples.items():
mcmc_samples[k] = v[::thinning]
# Load the MCMC samples back into the BoTorch model
model.load_mcmc_samples(mcmc_samples)
model.eval()
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gpytorch.settings as gp_settings
import linear_operator.settings as linop_settings
from botorch import (
acquisition,
exceptions,
models,
optim,
posteriors,
settings,
test_functions,
)
from botorch.cross_validation import batch_cross_validation
from botorch.fit import (
fit_fully_bayesian_model_nuts,
fit_gpytorch_mll,
fit_gpytorch_model,
)
from botorch.generation.gen import (
gen_candidates_scipy,
gen_candidates_torch,
get_best_candidates,
)
from botorch.logging import logger
from botorch.utils import manual_seed
try:
from botorch.version import version as __version__
except Exception: # pragma: no cover
__version__ = "Unknown" # pragma: no cover
logger.info(
"Turning off `fast_computations` in linear operator and increasing "
"`max_cholesky_size` and `max_eager_kernel_size` to 4096, and "
"`cholesky_max_tries` to 6. The approximate computations available in "
"GPyTorch aim to speed up GP training and inference in large data "
"regime but they are generally not robust enough to be used in a BO-loop. "
"See gpytorch.settings & linear_operator.settings for more details."
)
linop_settings._fast_covar_root_decomposition._default = False
linop_settings._fast_log_prob._default = False
linop_settings._fast_solves._default = False
linop_settings.cholesky_max_tries._global_value = 6
linop_settings.max_cholesky_size._global_value = 4096
gp_settings.max_eager_kernel_size._global_value = 4096
__all__ = [
"acquisition",
"batch_cross_validation",
"exceptions",
"fit_fully_bayesian_model_nuts",
"fit_gpytorch_mll",
"fit_gpytorch_model",
"gen_candidates_scipy",
"gen_candidates_torch",
"get_best_candidates",
"manual_seed",
"models",
"optim",
"posteriors",
"settings",
"test_functions",
]
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
BoTorch settings.
"""
from __future__ import annotations
from botorch.logging import LOG_LEVEL_DEFAULT, logger
class _Flag:
r"""Base class for context managers for a binary setting."""
_state: bool = False
@classmethod
def on(cls) -> bool:
return cls._state
@classmethod
def off(cls) -> bool:
return not cls._state
@classmethod
def _set_state(cls, state: bool) -> None:
cls._state = state
def __init__(self, state: bool = True) -> None:
self.prev = self.__class__.on()
self.state = state
def __enter__(self) -> None:
self.__class__._set_state(self.state)
def __exit__(self, *args) -> None:
self.__class__._set_state(self.prev)
class propagate_grads(_Flag):
r"""Flag for propagating gradients to model training inputs / training data.
When set to `True`, gradients will be propagated to the training inputs.
This is useful in particular for propagating gradients through fantasy models.
"""
_state: bool = False
class debug(_Flag):
r"""Flag for printing verbose warnings.
To make sure a warning is only raised in debug mode:
>>> if debug.on():
>>> warnings.warn(<some warning>)
"""
_state: bool = False
@classmethod
def _set_state(cls, state: bool) -> None:
cls._state = state
class validate_input_scaling(_Flag):
r"""Flag for validating input normalization/standardization.
When set to `True`, standard botorch models will validate (up to reasonable
tolerance) that
(i) none of the inputs contain NaN values
(ii) the training data (`train_X`) is normalized to the unit cube
(iii) the training targets (`train_Y`) are standardized (zero mean, unit var)
No checks (other than the NaN check) are performed for observed variances
(`train_Y_var`) at this point.
"""
_state: bool = True
class log_level:
r"""Flag for printing verbose logging statements.
Applies the given level to logging.getLogger('botorch') calls. For
instance, when set to logging.INFO, all logger calls of level INFO or
above will be printed to STDERR
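Example:
>>> # Illustrative: temporarily log INFO-level messages (assumes `import logging`).
>>> with log_level(logging.INFO):
...     pass  # botorch calls in this block log INFO and above to STDERR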
"""
level: int = LOG_LEVEL_DEFAULT
@classmethod
def _set_level(cls, level: int) -> None:
cls.level = level
logger.setLevel(level)
def __init__(self, level: int = LOG_LEVEL_DEFAULT) -> None:
r"""
Args:
level: The log level. Defaults to LOG_LEVEL_DEFAULT.
"""
self.prev = self.__class__.level
self.level = level
def __enter__(self) -> None:
self.__class__._set_level(self.level)
def __exit__(self, *args) -> None:
self.__class__._set_level(self.prev)
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Cross-validation utilities using batch evaluation mode.
"""
from __future__ import annotations
from typing import Any, Dict, NamedTuple, Optional, Type
import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models.gpytorch import GPyTorchModel
from botorch.optim.utils import _filter_kwargs
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch.mlls.marginal_log_likelihood import MarginalLogLikelihood
from torch import Tensor
class CVFolds(NamedTuple):
train_X: Tensor
test_X: Tensor
train_Y: Tensor
test_Y: Tensor
train_Yvar: Optional[Tensor] = None
test_Yvar: Optional[Tensor] = None
class CVResults(NamedTuple):
model: GPyTorchModel
posterior: GPyTorchPosterior
observed_Y: Tensor
observed_Yvar: Optional[Tensor] = None
def gen_loo_cv_folds(
train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor] = None
) -> CVFolds:
r"""Generate LOO CV folds w.r.t. to `n`.
Args:
train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of training
features.
train_Y: A `n x (m)` or `batch_shape x n x (m)` (batch mode) tensor of
training observations.
train_Yvar: A `n x (m)` or `batch_shape x n x (m)` (batch mode) tensor
of observed measurement noise.
Returns:
CVFolds tuple with the following fields
- train_X: A `n x (n-1) x d` or `batch_shape x n x (n-1) x d` tensor of
training features.
- test_X: A `n x 1 x d` or `batch_shape x n x 1 x d` tensor of test features.
- train_Y: A `n x (n-1) x m` or `batch_shape x n x (n-1) x m` tensor of
training observations.
- test_Y: A `n x 1 x m` or `batch_shape x n x 1 x m` tensor of test
observations.
- train_Yvar: A `n x (n-1) x m` or `batch_shape x n x (n-1) x m` tensor
of observed measurement noise.
- test_Yvar: A `n x 1 x m` or `batch_shape x n x 1 x m` tensor of observed
measurement noise.
Example:
>>> train_X = torch.rand(10, 1)
>>> train_Y = torch.sin(6 * train_X) + 0.2 * torch.rand_like(train_X)
>>> cv_folds = gen_loo_cv_folds(train_X, train_Y)
"""
masks = torch.eye(train_X.shape[-2], dtype=torch.uint8, device=train_X.device)
masks = masks.to(dtype=torch.bool)
if train_Y.dim() < train_X.dim():
# add output dimension
train_Y = train_Y.unsqueeze(-1)
if train_Yvar is not None:
train_Yvar = train_Yvar.unsqueeze(-1)
train_X_cv = torch.cat(
[train_X[..., ~m, :].unsqueeze(dim=-3) for m in masks], dim=-3
)
test_X_cv = torch.cat([train_X[..., m, :].unsqueeze(dim=-3) for m in masks], dim=-3)
train_Y_cv = torch.cat(
[train_Y[..., ~m, :].unsqueeze(dim=-3) for m in masks], dim=-3
)
test_Y_cv = torch.cat([train_Y[..., m, :].unsqueeze(dim=-3) for m in masks], dim=-3)
if train_Yvar is None:
train_Yvar_cv = None
test_Yvar_cv = None
else:
train_Yvar_cv = torch.cat(
[train_Yvar[..., ~m, :].unsqueeze(dim=-3) for m in masks], dim=-3
)
test_Yvar_cv = torch.cat(
[train_Yvar[..., m, :].unsqueeze(dim=-3) for m in masks], dim=-3
)
return CVFolds(
train_X=train_X_cv,
test_X=test_X_cv,
train_Y=train_Y_cv,
test_Y=test_Y_cv,
train_Yvar=train_Yvar_cv,
test_Yvar=test_Yvar_cv,
)
def batch_cross_validation(
model_cls: Type[GPyTorchModel],
mll_cls: Type[MarginalLogLikelihood],
cv_folds: CVFolds,
fit_args: Optional[Dict[str, Any]] = None,
observation_noise: bool = False,
) -> CVResults:
r"""Perform cross validation by using gpytorch batch mode.
Args:
model_cls: A GPyTorchModel class. This class must initialize the likelihood
internally. Note: Multi-task GPs are not currently supported.
mll_cls: A MarginalLogLikelihood class.
cv_folds: A CVFolds tuple.
fit_args: Arguments passed along to fit_gpytorch_mll.
observation_noise: Whether to evaluate the posterior with observation noise.
Returns:
A CVResults tuple with the following fields
- model: GPyTorchModel for batched cross validation
- posterior: GPyTorchPosterior where the mean has shape `n x 1 x m` or
`batch_shape x n x 1 x m`
- observed_Y: A `n x 1 x m` or `batch_shape x n x 1 x m` tensor of observations.
- observed_Yvar: A `n x 1 x m` or `batch_shape x n x 1 x m` tensor of observed
measurement noise.
Example:
>>> train_X = torch.rand(10, 1)
>>> train_Y = torch.sin(6 * train_X) + 0.2 * torch.rand_like(train_X)
>>> cv_folds = gen_loo_cv_folds(train_X, train_Y)
>>> cv_results = batch_cross_validation(
>>> SingleTaskGP,
>>> ExactMarginalLogLikelihood,
>>> cv_folds,
>>> )
WARNING: This function is currently very memory inefficient; use it only
for problems of small size.
"""
fit_args = fit_args or {}
kwargs = {
"train_X": cv_folds.train_X,
"train_Y": cv_folds.train_Y,
"train_Yvar": cv_folds.train_Yvar,
}
model_cv = model_cls(**_filter_kwargs(model_cls, **kwargs))
mll_cv = mll_cls(model_cv.likelihood, model_cv)
mll_cv.to(cv_folds.train_X)
mll_cv = fit_gpytorch_mll(mll_cv, **fit_args)
# Evaluate on the hold-out set in batch mode
with torch.no_grad():
posterior = model_cv.posterior(
cv_folds.test_X, observation_noise=observation_noise
)
return CVResults(
model=model_cv,
posterior=posterior,
observed_Y=cv_folds.test_Y,
observed_Yvar=cv_folds.test_Yvar,
)
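# Example (illustrative sketch): a summary cross-validation error can be computed
# from the returned CVResults, e.g. the mean-squared LOO error:
# >>> mse = ((cv_results.posterior.mean - cv_results.observed_Y) ** 2).mean()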
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Synthetic functions for optimization benchmarks.
Most test functions (if not indicated otherwise) are taken from
[Bingham2013virtual]_.
References:
.. [Bingham2013virtual]
D. Bingham, S. Surjanovic. Virtual Library of Simulation Experiments.
https://www.sfu.ca/~ssurjano/optimization.html
.. [CoelloCoello2002constraint]
C. A. Coello Coello and E. Mezura Montes. Constraint-handling in genetic
algorithms through the use of dominance-based tournament selection.
Advanced Engineering Informatics, 16(3):193–203, 2002.
.. [Hedar2006derivfree]
A.-R. Hedar and M. Fukushima. Derivative-free filter simulated annealing
method for constrained continuous global optimization. Journal of Global
Optimization, 35(4):521–549, 2006.
.. [Lemonge2010constrained]
A. C. C. Lemonge, H. J. C. Barbosa, C. C. H. Borges, and F. B. dos Santos
Silva. Constrained optimization problems in mechanical engineering design
using a real-coded steady-state genetic algorithm. Mecánica Computacional,
XXIX:9287–9303, 2010.
"""
from __future__ import annotations
import math
from typing import List, Optional, Tuple
import torch
from botorch.test_functions.base import BaseTestProblem, ConstrainedBaseTestProblem
from botorch.test_functions.utils import round_nearest
from torch import Tensor
class SyntheticTestFunction(BaseTestProblem):
r"""Base class for synthetic test functions."""
_optimal_value: float
_optimizers: Optional[List[Tuple[float, ...]]] = None
num_objectives: int = 1
def __init__(
self,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
if bounds is not None:
self._bounds = bounds
super().__init__(noise_std=noise_std, negate=negate)
if self._optimizers is not None:
if bounds is not None:
# Ensure at least one optimizer lies within the custom bounds
def in_bounds(
optimizer: Tuple[float, ...], bounds: List[Tuple[float, float]]
) -> bool:
for i, xopt in enumerate(optimizer):
lower, upper = bounds[i]
if xopt < lower or xopt > upper:
return False
return True
if not any(
in_bounds(optimizer=optimizer, bounds=bounds)
for optimizer in self._optimizers
):
raise ValueError(
"No global optimum found within custom bounds. Please specify "
"bounds which include at least one point in "
f"`{self.__class__.__name__}._optimizers`."
)
self.register_buffer(
"optimizers", torch.tensor(self._optimizers, dtype=torch.float)
)
@property
def optimal_value(self) -> float:
r"""The global minimum (maximum if negate=True) of the function."""
return -self._optimal_value if self.negate else self._optimal_value
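# Example (illustrative): test functions are evaluated by calling them; observation
# noise is added when `noise_std` is set, and `negate=True` flips the sign for
# maximization-style benchmarks.
# >>> ackley = Ackley(dim=4, negate=True)
# >>> X = torch.rand(8, 4) * 2 * 32.768 - 32.768  # sample within the default bounds
# >>> Y = ackley(X)  # shape `8`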
class Ackley(SyntheticTestFunction):
r"""Ackley test function.
d-dimensional function (usually evaluated on `[-32.768, 32.768]^d`):
f(x) = -A exp(-B sqrt(1/d sum_{i=1}^d x_i^2)) -
exp(1/d sum_{i=1}^d cos(c x_i)) + A + exp(1)
f has one minimizer for its global minimum at `z_1 = (0, 0, ..., 0)` with
`f(z_1) = 0`.
"""
_optimal_value = 0.0
_check_grad_at_opt: bool = False
def __init__(
self,
dim: int = 2,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.dim = dim
if bounds is None:
bounds = [(-32.768, 32.768) for _ in range(self.dim)]
self._optimizers = [tuple(0.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
self.a = 20
self.b = 0.2
self.c = 2 * math.pi
def evaluate_true(self, X: Tensor) -> Tensor:
a, b, c = self.a, self.b, self.c
part1 = -a * torch.exp(-b / math.sqrt(self.dim) * torch.linalg.norm(X, dim=-1))
part2 = -(torch.exp(torch.mean(torch.cos(c * X), dim=-1)))
return part1 + part2 + a + math.e
class Beale(SyntheticTestFunction):
dim = 2
_optimal_value = 0.0
_bounds = [(-4.5, 4.5), (-4.5, 4.5)]
_optimizers = [(3.0, 0.5)]
def evaluate_true(self, X: Tensor) -> Tensor:
x1, x2 = X[..., 0], X[..., 1]
part1 = (1.5 - x1 + x1 * x2) ** 2
part2 = (2.25 - x1 + x1 * x2**2) ** 2
part3 = (2.625 - x1 + x1 * x2**3) ** 2
return part1 + part2 + part3
class Branin(SyntheticTestFunction):
r"""Branin test function.
Two-dimensional function (usually evaluated on `[-5, 10] x [0, 15]`):
B(x) = (x_2 - b x_1^2 + c x_1 - r)^2 + 10 (1-t) cos(x_1) + 10
Here `b`, `c`, `r` and `t` are constants where `b = 5.1 / (4 * math.pi ** 2)`
`c = 5 / math.pi`, `r = 6`, `t = 1 / (8 * math.pi)`
B has 3 minimizers for its global minimum at `z_1 = (-pi, 12.275)`,
`z_2 = (pi, 2.275)`, `z_3 = (9.42478, 2.475)` with `B(z_i) = 0.397887`.
"""
dim = 2
_bounds = [(-5.0, 10.0), (0.0, 15.0)]
_optimal_value = 0.397887
_optimizers = [(-math.pi, 12.275), (math.pi, 2.275), (9.42478, 2.475)]
def evaluate_true(self, X: Tensor) -> Tensor:
t1 = (
X[..., 1]
- 5.1 / (4 * math.pi**2) * X[..., 0] ** 2
+ 5 / math.pi * X[..., 0]
- 6
)
t2 = 10 * (1 - 1 / (8 * math.pi)) * torch.cos(X[..., 0])
return t1**2 + t2 + 10
class Bukin(SyntheticTestFunction):
dim = 2
_bounds = [(-15.0, -5.0), (-3.0, 3.0)]
_optimal_value = 0.0
_optimizers = [(-10.0, 1.0)]
_check_grad_at_opt: bool = False
def evaluate_true(self, X: Tensor) -> Tensor:
part1 = 100.0 * torch.sqrt(torch.abs(X[..., 1] - 0.01 * X[..., 0] ** 2))
part2 = 0.01 * torch.abs(X[..., 0] + 10.0)
return part1 + part2
class Cosine8(SyntheticTestFunction):
r"""Cosine Mixture test function.
8-dimensional function (usually evaluated on `[-1, 1]^8`):
f(x) = 0.1 sum_{i=1}^8 cos(5 pi x_i) - sum_{i=1}^8 x_i^2
f has one maximizer for its global maximum at `z_1 = (0, 0, ..., 0)` with
`f(z_1) = 0.8`
"""
dim = 8
_bounds = [(-1.0, 1.0) for _ in range(8)]
_optimal_value = 0.8
_optimizers = [tuple(0.0 for _ in range(8))]
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.sum(0.1 * torch.cos(5 * math.pi * X) - X**2, dim=-1)
class DropWave(SyntheticTestFunction):
dim = 2
_bounds = [(-5.12, 5.12), (-5.12, 5.12)]
_optimal_value = -1.0
_optimizers = [(0.0, 0.0)]
_check_grad_at_opt = False
def evaluate_true(self, X: Tensor) -> Tensor:
norm = torch.linalg.norm(X, dim=-1)
part1 = 1.0 + torch.cos(12.0 * norm)
part2 = 0.5 * norm.pow(2) + 2.0
return -part1 / part2
class DixonPrice(SyntheticTestFunction):
_optimal_value = 0.0
def __init__(
self,
dim=2,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
self.dim = dim
if bounds is None:
bounds = [(-10.0, 10.0) for _ in range(self.dim)]
self._optimizers = [
tuple(
math.pow(2.0, -(1.0 - 2.0 ** (-(i - 1))))
for i in range(1, self.dim + 1)
)
]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
def evaluate_true(self, X: Tensor) -> Tensor:
d = self.dim
part1 = (X[..., 0] - 1) ** 2
i = X.new(range(2, d + 1))
part2 = torch.sum(i * (2.0 * X[..., 1:] ** 2 - X[..., :-1]) ** 2, dim=-1)
return part1 + part2
class EggHolder(SyntheticTestFunction):
r"""Eggholder test function.
Two-dimensional function (usually evaluated on `[-512, 512]^2`):
E(x) = (x_2 + 47) sin(R1(x)) - x_1 * sin(R2(x))
where `R1(x) = sqrt(|x_2 + x_1 / 2 + 47|)`, `R2(x) = sqrt(|x_1 - (x_2 + 47)|)`.
"""
dim = 2
_bounds = [(-512.0, 512.0), (-512.0, 512.0)]
_optimal_value = -959.6407
_optimizers = [(512.0, 404.2319)]
_check_grad_at_opt: bool = False
def evaluate_true(self, X: Tensor) -> Tensor:
x1, x2 = X[..., 0], X[..., 1]
part1 = -(x2 + 47.0) * torch.sin(torch.sqrt(torch.abs(x2 + x1 / 2.0 + 47.0)))
part2 = -x1 * torch.sin(torch.sqrt(torch.abs(x1 - (x2 + 47.0))))
return part1 + part2
class Griewank(SyntheticTestFunction):
_optimal_value = 0.0
def __init__(
self,
dim=2,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.dim = dim
if bounds is None:
bounds = [(-600.0, 600.0) for _ in range(self.dim)]
self._optimizers = [tuple(0.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
def evaluate_true(self, X: Tensor) -> Tensor:
part1 = torch.sum(X**2 / 4000.0, dim=-1)
d = X.shape[-1]
part2 = -(torch.prod(torch.cos(X / torch.sqrt(X.new(range(1, d + 1)))), dim=-1))
return part1 + part2 + 1.0
class Hartmann(SyntheticTestFunction):
r"""Hartmann synthetic test function.
Most commonly used is the six-dimensional version (typically evaluated on
`[0, 1]^6`):
H(x) = - sum_{i=1}^4 ALPHA_i exp( - sum_{j=1}^6 A_ij (x_j - P_ij)**2 )
H has 6 local minima and a global minimum at
z = (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573)
with `H(z) = -3.32237`.
"""
def __init__(
self,
dim=6,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
if dim not in (3, 4, 6):
raise ValueError(f"Hartmann with dim {dim} not defined")
self.dim = dim
if bounds is None:
bounds = [(0.0, 1.0) for _ in range(self.dim)]
# optimizers and optimal values for dim=4 not implemented
optvals = {3: -3.86278, 6: -3.32237}
optimizers = {
3: [(0.114614, 0.555649, 0.852547)],
6: [(0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573)],
}
self._optimal_value = optvals.get(self.dim)
self._optimizers = optimizers.get(self.dim)
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
self.register_buffer("ALPHA", torch.tensor([1.0, 1.2, 3.0, 3.2]))
if dim == 3:
A = [[3.0, 10, 30], [0.1, 10, 35], [3.0, 10, 30], [0.1, 10, 35]]
P = [
[3689, 1170, 2673],
[4699, 4387, 7470],
[1091, 8732, 5547],
[381, 5743, 8828],
]
elif dim == 4:
A = [
[10, 3, 17, 3.5],
[0.05, 10, 17, 0.1],
[3, 3.5, 1.7, 10],
[17, 8, 0.05, 10],
]
P = [
[1312, 1696, 5569, 124],
[2329, 4135, 8307, 3736],
[2348, 1451, 3522, 2883],
[4047, 8828, 8732, 5743],
]
elif dim == 6:
A = [
[10, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14],
]
P = [
[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381],
]
self.register_buffer("A", torch.tensor(A, dtype=torch.float))
self.register_buffer("P", torch.tensor(P, dtype=torch.float))
@property
def optimal_value(self) -> float:
if self.dim == 4:
raise NotImplementedError()
return super().optimal_value
@property
def optimizers(self) -> Tensor:
if self.dim == 4:
raise NotImplementedError()
return super().optimizers
def evaluate_true(self, X: Tensor) -> Tensor:
self.to(device=X.device, dtype=X.dtype)
inner_sum = torch.sum(self.A * (X.unsqueeze(-2) - 0.0001 * self.P) ** 2, dim=-1)
H = -(torch.sum(self.ALPHA * torch.exp(-inner_sum), dim=-1))
if self.dim == 4:
H = (1.1 + H) / 0.839
return H
class HolderTable(SyntheticTestFunction):
r"""Holder Table synthetic test function.
Two-dimensional function (typically evaluated on `[-10, 10] x [-10, 10]`):
`H(x) = - | sin(x_1) * cos(x_2) * exp(| 1 - ||x|| / pi | ) |`
H has 4 global minima with `H(z_i) = -19.2085` at
z_1 = ( 8.05502, 9.66459)
z_2 = (-8.05502, -9.66459)
z_3 = (-8.05502, 9.66459)
z_4 = ( 8.05502, -9.66459)
"""
dim = 2
_bounds = [(-10.0, 10.0), (-10.0, 10.0)]
_optimal_value = -19.2085
_optimizers = [
(8.05502, 9.66459),
(-8.05502, -9.66459),
(-8.05502, 9.66459),
(8.05502, -9.66459),
]
def evaluate_true(self, X: Tensor) -> Tensor:
term = torch.abs(1 - torch.linalg.norm(X, dim=-1) / math.pi)
return -(
torch.abs(torch.sin(X[..., 0]) * torch.cos(X[..., 1]) * torch.exp(term))
)
class Levy(SyntheticTestFunction):
r"""Levy synthetic test function.
d-dimensional function (usually evaluated on `[-10, 10]^d`):
f(x) = sin^2(pi w_1) +
sum_{i=1}^{d-1} (w_i-1)^2 (1 + 10 sin^2(pi w_i + 1)) +
(w_d - 1)^2 (1 + sin^2(2 pi w_d))
where `w_i = 1 + (x_i - 1) / 4` for all `i`.
f has one minimizer for its global minimum at `z_1 = (1, 1, ..., 1)` with
`f(z_1) = 0`.
"""
_optimal_value = 0.0
def __init__(
self,
dim=2,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.dim = dim
if bounds is None:
bounds = [(-10.0, 10.0) for _ in range(self.dim)]
self._optimizers = [tuple(1.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
def evaluate_true(self, X: Tensor) -> Tensor:
w = 1.0 + (X - 1.0) / 4.0
part1 = torch.sin(math.pi * w[..., 0]) ** 2
part2 = torch.sum(
(w[..., :-1] - 1.0) ** 2
* (1.0 + 10.0 * torch.sin(math.pi * w[..., :-1] + 1.0) ** 2),
dim=-1,
)
part3 = (w[..., -1] - 1.0) ** 2 * (
1.0 + torch.sin(2.0 * math.pi * w[..., -1]) ** 2
)
return part1 + part2 + part3
class Michalewicz(SyntheticTestFunction):
r"""Michalewicz synthetic test function.
d-dim function (usually evaluated on hypercube [0, pi]^d):
M(x) = sum_{i=1}^d sin(x_i) (sin(i x_i^2 / pi)^20)
"""
def __init__(
self,
dim=2,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.dim = dim
if bounds is None:
bounds = [(0.0, math.pi) for _ in range(self.dim)]
optvals = {2: -1.80130341, 5: -4.687658, 10: -9.66015}
optimizers = {2: [(2.20290552, 1.57079633)]}
self._optimal_value = optvals.get(self.dim)
self._optimizers = optimizers.get(self.dim)
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
self.register_buffer(
"i", torch.tensor(tuple(range(1, self.dim + 1)), dtype=torch.float)
)
@property
def optimizers(self) -> Tensor:
if self.dim in (5, 10):
raise NotImplementedError()
return super().optimizers
def evaluate_true(self, X: Tensor) -> Tensor:
self.to(device=X.device, dtype=X.dtype)
m = 10
return -(
torch.sum(
torch.sin(X) * torch.sin(self.i * X**2 / math.pi) ** (2 * m), dim=-1
)
)
class Powell(SyntheticTestFunction):
_optimal_value = 0.0
def __init__(
self,
dim=4,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.dim = dim
if bounds is None:
bounds = [(-4.0, 5.0) for _ in range(self.dim)]
self._optimizers = [tuple(0.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
def evaluate_true(self, X: Tensor) -> Tensor:
result = torch.zeros_like(X[..., 0])
for i in range(self.dim // 4):
i_ = i + 1
part1 = (X[..., 4 * i_ - 4] + 10.0 * X[..., 4 * i_ - 3]) ** 2
part2 = 5.0 * (X[..., 4 * i_ - 2] - X[..., 4 * i_ - 1]) ** 2
part3 = (X[..., 4 * i_ - 3] - 2.0 * X[..., 4 * i_ - 2]) ** 4
part4 = 10.0 * (X[..., 4 * i_ - 4] - X[..., 4 * i_ - 1]) ** 4
result += part1 + part2 + part3 + part4
return result
class Rastrigin(SyntheticTestFunction):
_optimal_value = 0.0
def __init__(
self,
dim=2,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.dim = dim
if bounds is None:
bounds = [(-5.12, 5.12) for _ in range(self.dim)]
self._optimizers = [tuple(0.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
def evaluate_true(self, X: Tensor) -> Tensor:
return 10.0 * self.dim + torch.sum(
X**2 - 10.0 * torch.cos(2.0 * math.pi * X), dim=-1
)
class Rosenbrock(SyntheticTestFunction):
r"""Rosenbrock synthetic test function.
d-dimensional function (usually evaluated on `[-5, 10]^d`):
f(x) = sum_{i=1}^{d-1} (100 (x_{i+1} - x_i^2)^2 + (x_i - 1)^2)
f has one minimizer for its global minimum at `z_1 = (1, 1, ..., 1)` with
`f(z_i) = 0.0`.
"""
_optimal_value = 0.0
def __init__(
self,
dim=2,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.dim = dim
if bounds is None:
bounds = [(-5.0, 10.0) for _ in range(self.dim)]
self._optimizers = [tuple(1.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.sum(
100.0 * (X[..., 1:] - X[..., :-1] ** 2) ** 2 + (X[..., :-1] - 1) ** 2,
dim=-1,
)
class Shekel(SyntheticTestFunction):
r"""Shekel synthtetic test function.
4-dimensional function (usually evaluated on `[0, 10]^4`):
f(x) = -sum_{i=1}^10 (sum_{j=1}^4 (x_j - A_{ji})^2 + C_i)^{-1}
f has one minimizer for its global minimum at `z_1 = (4, 4, 4, 4)` with
`f(z_1) = -10.5363`.
"""
dim = 4
_bounds = [(0.0, 10.0), (0.0, 10.0), (0.0, 10.0), (0.0, 10.0)]
_optimizers = [(4.000747, 3.99951, 4.00075, 3.99951)]
def __init__(
self,
m: int = 10,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
m: The number of terms in the outer sum. Defaults to 10.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.m = m
optvals = {5: -10.1532, 7: -10.4029, 10: -10.536443}
self._optimal_value = optvals[self.m]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
self.register_buffer(
"beta", torch.tensor([1, 2, 2, 4, 4, 6, 3, 7, 5, 5], dtype=torch.float)
)
C_t = torch.tensor(
[
[4, 1, 8, 6, 3, 2, 5, 8, 6, 7],
[4, 1, 8, 6, 7, 9, 3, 1, 2, 3.6],
[4, 1, 8, 6, 3, 2, 5, 8, 6, 7],
[4, 1, 8, 6, 7, 9, 3, 1, 2, 3.6],
],
dtype=torch.float,
)
self.register_buffer("C", C_t.transpose(-1, -2))
def evaluate_true(self, X: Tensor) -> Tensor:
self.to(device=X.device, dtype=X.dtype)
beta = self.beta / 10.0
result = -sum(
1 / (torch.sum((X - self.C[i]) ** 2, dim=-1) + beta[i])
for i in range(self.m)
)
return result
class SixHumpCamel(SyntheticTestFunction):
dim = 2
_bounds = [(-3.0, 3.0), (-2.0, 2.0)]
_optimal_value = -1.0316
_optimizers = [(0.0898, -0.7126), (-0.0898, 0.7126)]
def evaluate_true(self, X: Tensor) -> Tensor:
x1, x2 = X[..., 0], X[..., 1]
return (
(4 - 2.1 * x1**2 + x1**4 / 3) * x1**2
+ x1 * x2
+ (4 * x2**2 - 4) * x2**2
)
class StyblinskiTang(SyntheticTestFunction):
r"""Styblinski-Tang synthtetic test function.
d-dimensional function (usually evaluated on the hypercube `[-5, 5]^d`):
H(x) = 0.5 * sum_{i=1}^d (x_i^4 - 16 * x_i^2 + 5 * x_i)
H has a single global minimum `H(z) = -39.166166 * d` at `z = [-2.903534]^d`
"""
def __init__(
self,
dim=2,
noise_std: Optional[float] = None,
negate: bool = False,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
bounds: Custom bounds for the function specified as (lower, upper) pairs.
"""
self.dim = dim
if bounds is None:
bounds = [(-5.0, 5.0) for _ in range(self.dim)]
self._optimal_value = -39.166166 * self.dim
self._optimizers = [tuple(-2.903534 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate, bounds=bounds)
def evaluate_true(self, X: Tensor) -> Tensor:
return 0.5 * (X**4 - 16 * X**2 + 5 * X).sum(dim=-1)
class ThreeHumpCamel(SyntheticTestFunction):
dim = 2
_bounds = [(-5.0, 5.0), (-5.0, 5.0)]
_optimal_value = 0.0
_optimizers = [(0.0, 0.0)]
def evaluate_true(self, X: Tensor) -> Tensor:
x1, x2 = X[..., 0], X[..., 1]
return 2.0 * x1**2 - 1.05 * x1**4 + x1**6 / 6.0 + x1 * x2 + x2**2
# ------------ Constrained synthetic test functions ----------- #
class PressureVessel(SyntheticTestFunction, ConstrainedBaseTestProblem):
r"""Pressure vessel design problem with constraints.
The four-dimensional pressure vessel design problem with four black-box
constraints from [CoelloCoello2002constraint]_.
"""
dim = 4
num_constraints = 4
_bounds = [(0.0, 10.0), (0.0, 10.0), (10.0, 50.0), (150.0, 200.0)]
def evaluate_true(self, X: Tensor) -> Tensor:
x1, x2, x3, x4 = X.unbind(-1)
x1 = round_nearest(x1, increment=0.0625, bounds=self._bounds[0])
x2 = round_nearest(x2, increment=0.0625, bounds=self._bounds[1])
return (
0.6224 * x1 * x3 * x4
+ 1.7781 * x2 * (x3**2)
+ 3.1661 * (x1**2) * x4
+ 19.84 * (x1**2) * x3
)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
x1, x2, x3, x4 = X.unbind(-1)
return -torch.stack(
[
-x1 + 0.0193 * x3,
-x2 + 0.00954 * x3,
-math.pi * (x3**2) * x4 - (4 / 3) * math.pi * (x3**3) + 1296000.0,
x4 - 240.0,
],
dim=-1,
)
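# Example (illustrative sketch): constrained problems expose both the objective and
# the constraint slacks; `unnormalize` (botorch.utils.transforms) is an assumed
# import here.
# >>> problem = PressureVessel()
# >>> X = unnormalize(torch.rand(16, problem.dim), problem.bounds)
# >>> obj = problem(X)
# >>> feasible = problem.is_feasible(X)  # True where all constraint slacks are >= 0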
class WeldedBeamSO(SyntheticTestFunction, ConstrainedBaseTestProblem):
r"""Welded beam design problem with constraints (single-outcome).
The four-dimensional welded beam design problem with six
black-box constraints from [CoelloCoello2002constraint]_.
For a (somewhat modified) multi-objective version, see
`botorch.test_functions.multi_objective.WeldedBeam`.
"""
dim = 4
num_constraints = 6
_bounds = [(0.125, 10.0), (0.1, 10.0), (0.1, 10.0), (0.1, 10.0)]
def evaluate_true(self, X: Tensor) -> Tensor:
x1, x2, x3, x4 = X.unbind(-1)
return 1.10471 * (x1**2) * x2 + 0.04811 * x3 * x4 * (14.0 + x2)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
x1, x2, x3, x4 = X.unbind(-1)
P = 6000.0
L = 14.0
E = 30e6
G = 12e6
t_max = 13600.0
s_max = 30000.0
d_max = 0.25
M = P * (L + x2 / 2)
R = torch.sqrt(0.25 * (x2**2 + (x1 + x3) ** 2))
J = 2 * math.sqrt(2) * x1 * x2 * (x2**2 / 12 + 0.25 * (x1 + x3) ** 2)
P_c = (
4.013
* E
* x3
* (x4**3)
* 6
/ (L**2)
* (1 - 0.25 * x3 * math.sqrt(E / G) / L)
)
t1 = P / (math.sqrt(2) * x1 * x2)
t2 = M * R / J
t = torch.sqrt(t1**2 + t1 * t2 * x2 / R + t2**2)
s = 6 * P * L / (x4 * x3**2)
d = 4 * P * L**3 / (E * x3**3 * x4)
g1 = t - t_max
g2 = s - s_max
g3 = x1 - x4
g4 = 0.10471 * x1**2 + 0.04811 * x3 * x4 * (14.0 + x2) - 5.0
g5 = d - d_max
g6 = P - P_c
return -torch.stack([g1, g2, g3, g4, g5, g6], dim=-1)
class TensionCompressionString(SyntheticTestFunction, ConstrainedBaseTestProblem):
r"""Tension compression string optimization problem with constraints.
The three-dimensional tension compression string optimization problem with
four black-box constraints from [Hedar2006derivfree]_.
"""
dim = 3
num_constraints = 4
_bounds = [(0.01, 1.0), (0.01, 1.0), (0.01, 20.0)]
def evaluate_true(self, X: Tensor) -> Tensor:
x1, x2, x3 = X.unbind(-1)
return (x1**2) * x2 * (x3 + 2)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
x1, x2, x3 = X.unbind(-1)
constraints = torch.stack(
[
1 - (x2**3) * x3 / (71785 * (x1**4)),
(4 * (x2**2) - x1 * x2) / (12566 * (x1**3) * (x2 - x1))
+ 1 / (5108 * (x1**2))
- 1,
1 - 140.45 * x1 / (x3 * (x2**2)),
(x1 + x2) / 1.5 - 1,
],
dim=-1,
)
return -constraints.clamp_max(100)
class SpeedReducer(SyntheticTestFunction, ConstrainedBaseTestProblem):
r"""Speed Reducer design problem with constraints.
The seven-dimensional speed reducer design problem with eleven black-box
constraints from [Lemonge2010constrained]_.
"""
dim = 7
num_constraints = 11
_bounds = [
(2.6, 3.6),
(0.7, 0.8),
(17.0, 28.0),
(7.3, 8.3),
(7.8, 8.3),
(2.9, 3.9),
(5.0, 5.5),
]
def evaluate_true(self, X: Tensor) -> Tensor:
x1, x2, x3, x4, x5, x6, x7 = X.unbind(-1)
return (
0.7854 * x1 * (x2**2) * (3.3333 * (x3**2) + 14.9334 * x3 - 43.0934)
+ -1.508 * x1 * (x6**2 + x7**2)
+ 7.4777 * (x6**3 + x7**3)
+ 0.7854 * (x4 * (x6**2) + x5 * (x7**2))
)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
x1, x2, x3, x4, x5, x6, x7 = X.unbind(-1)
return -torch.stack(
[
27.0 * (1 / x1) * (1 / (x2**2)) * (1 / x3) - 1,
397.5 * (1 / x1) * (1 / (x2**2)) * (1 / (x3**2)) - 1,
1.93 * (1 / x2) * (1 / x3) * (x4**3) * (1 / (x6**4)) - 1,
1.93 * (1 / x2) * (1 / x3) * (x5**3) * (1 / (x7**4)) - 1,
1
/ (0.1 * (x6**3))
* torch.sqrt((745 * x4 / (x2 * x3)) ** 2 + 16.9 * 1e6)
- 1100,
1
/ (0.1 * (x7**3))
* torch.sqrt((745 * x5 / (x2 * x3)) ** 2 + 157.5 * 1e6)
- 850,
x2 * x3 - 40,
5 - x1 / x2,
x1 / x2 - 12,
(1.5 * x6 + 1.9) / x4 - 1,
(1.1 * x7 + 1.9) / x5 - 1,
],
dim=-1,
)
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Multi-objective optimization benchmark problems.
References:
.. [Daulton2022]
S. Daulton, S. Cakmak, M. Balandat, M. A. Osborne, E. Zhou, and E. Bakshy.
Robust Multi-Objective Bayesian Optimization Under Input Noise.
Proceedings of the 39th International Conference on Machine Learning, 2022.
.. [Deb2005dtlz]
K. Deb, L. Thiele, M. Laumanns, E. Zitzler, A. Abraham, L. Jain, and
R. Goldberg. Scalable test problems for evolutionary multi-objective
optimization. Evolutionary Multiobjective Optimization, Springer-Verlag,
pp. 105-145, 2005.
.. [Deb2005robust]
K. Deb and H. Gupta. Searching for Robust Pareto-Optimal Solutions in
Multi-objective Optimization. Evolutionary Multi-Criterion Optimization,
Springer-Berlin, pp. 150-164, 2005.
.. [Frohlich2020]
L. Frohlich, E. Klenske, J. Vinogradska, C. Daniel, and M. Zeilinger.
Noisy-Input Entropy Search for Efficient Robust Bayesian Optimization.
Proceedings of the Twenty Third International Conference on Artificial
Intelligence and Statistics, PMLR 108:2262-2272, 2020.
.. [GarridoMerchan2020]
E. C. Garrido-Merchán and D. Hernández-Lobato. Parallel Predictive Entropy
Search for Multi-objective Bayesian Optimization with Constraints.
arXiv e-prints, arXiv:2004.00601, Apr. 2020.
.. [Gelbart2014]
Michael A. Gelbart, Jasper Snoek, and Ryan P. Adams. 2014. Bayesian
optimization with unknown constraints. In Proceedings of the Thirtieth
Conference on Uncertainty in Artificial Intelligence (UAI’14).
AUAI Press, Arlington, Virginia, USA, 250–259.
.. [Liang2021]
Q. Liang and L. Lai, Scalable Bayesian Optimization Accelerates Process
Optimization of Penicillin Production. NeurIPS 2021 AI for Science Workshop, 2021.
.. [Ma2019]
Z. Ma and Y. Wang. Evolutionary Constrained Multiobjective Optimization:
Test Suite Construction and Performance Comparisons. IEEE Transactions
on Evolutionary Computation, 23(6):972–986, December 2019.
.. [Oszycka1995]
A. Osyczka and S. Kundu. A new method to solve generalized
multicriteria optimization problems using the simple genetic algorithm.
In Structural Optimization 10. 94–99, 1995.
.. [Tanabe2020]
Ryoji Tanabe and Hisao Ishibuchi. An easy-to-use real-world multi-objective
    optimization problem suite, Applied Soft Computing, Volume 89, 2020.
.. [Yang2019a]
K. Yang, M. Emmerich, A. Deutz, and T. Bäck. Multi-Objective Bayesian
Global Optimization using expected hypervolume improvement gradient.
Swarm and evolutionary computation 44, pp. 945--956, 2019.
.. [Zitzler2000]
E. Zitzler, K. Deb, and L. Thiele. Comparison of multiobjective
evolutionary algorithms: Empirical results. Evolutionary Computation, vol.
    8, no. 2, pp. 173–195, 2000.
"""
from __future__ import annotations
import math
from abc import ABC, abstractmethod
from math import pi
from typing import Optional
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.test_functions.base import (
ConstrainedBaseTestProblem,
MultiObjectiveTestProblem,
)
from botorch.test_functions.synthetic import Branin, Levy
from botorch.utils.sampling import sample_hypersphere, sample_simplex
from botorch.utils.transforms import unnormalize
from scipy.special import gamma
from torch import Tensor
from torch.distributions import MultivariateNormal
class BraninCurrin(MultiObjectiveTestProblem):
r"""Two objective problem composed of the Branin and Currin functions.
Branin (rescaled):
        f(x) = (
        15 * x_1 - 5.1 * (15 * x_0 - 5) ** 2 / (4 * pi ** 2) + 5 * (15 * x_0 - 5)
        / pi - 6
        ) ** 2 + (10 - 10 / (8 * pi)) * cos(15 * x_0 - 5) + 10
Currin:
        f(x) = (1 - exp(-1 / (2 * x_1))) * (
        2300 * x_0 ** 3 + 1900 * x_0 ** 2 + 2092 * x_0 + 60
        ) / (100 * x_0 ** 3 + 500 * x_0 ** 2 + 4 * x_0 + 20)
"""
dim = 2
num_objectives = 2
_bounds = [(0.0, 1.0), (0.0, 1.0)]
_ref_point = [18.0, 6.0]
_max_hv = 59.36011874867746 # this is approximated using NSGA-II
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the objectives.
"""
super().__init__(noise_std=noise_std, negate=negate)
self._branin = Branin()
def _rescaled_branin(self, X: Tensor) -> Tensor:
# return to Branin bounds
x_0 = 15 * X[..., 0] - 5
x_1 = 15 * X[..., 1]
return self._branin(torch.stack([x_0, x_1], dim=-1))
@staticmethod
def _currin(X: Tensor) -> Tensor:
x_0 = X[..., 0]
x_1 = X[..., 1]
factor1 = 1 - torch.exp(-1 / (2 * x_1))
numer = 2300 * x_0.pow(3) + 1900 * x_0.pow(2) + 2092 * x_0 + 60
denom = 100 * x_0.pow(3) + 500 * x_0.pow(2) + 4 * x_0 + 20
return factor1 * numer / denom
def evaluate_true(self, X: Tensor) -> Tensor:
        # Branin rescaled so that the inputs lie in [0, 1]^2
branin = self._rescaled_branin(X=X)
currin = self._currin(X=X)
return torch.stack([branin, currin], dim=-1)
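# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# evaluates the two objectives above on a batch of points in the unit square.
def _demo_branin_currin() -> None:
    problem = BraninCurrin(negate=True)  # negate to cast minimization as maximization
    X = torch.rand(8, problem.dim)
    Y = problem(X)  # `8 x 2` tensor of (negated) Branin and Currin values
    assert Y.shape == (8, 2)
    # the reference point is negated along with the objectives
    assert torch.equal(problem.ref_point, -torch.tensor([18.0, 6.0]))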
class DH(MultiObjectiveTestProblem, ABC):
r"""Base class for DH problems for robust multi-objective optimization.
In their paper, [Deb2005robust]_ consider these problems under a mean-robustness
setting, and use uniformly distributed input perturbations from the box with
edge lengths `delta_0 = delta`, `delta_i = 2 * delta, i > 0`, with `delta` ranging
up to `0.01` for DH1 and DH2, and `delta = 0.03` for DH3 and DH4.
These are d-dimensional problems with two objectives:
f_0(x) = x_0
f_1(x) = h(x) + g(x) * S(x) for DH1 and DH2
f_1(x) = h(x) * (g(x) + S(x)) for DH3 and DH4
The goal is to minimize both objectives. See [Deb2005robust]_ for more details
on DH. The reference points were set using `infer_reference_point`.
"""
num_objectives = 2
    _ref_point = [1.1, 1.1]
_x_1_lb: float
_area_under_curve: float
_min_dim: int
def __init__(
self,
dim: int,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
if dim < self._min_dim:
raise ValueError(f"dim must be >= {self._min_dim}, but got dim={dim}!")
self.dim = dim
self._bounds = [(0.0, 1.0), (self._x_1_lb, 1.0)] + [
(-1.0, 1.0) for _ in range(dim - 2)
]
        # max_hv is the area of the box minus the area under the curve formed by the PF.
self._max_hv = self._ref_point[0] * self._ref_point[1] - self._area_under_curve
super().__init__(noise_std=noise_std, negate=negate)
@abstractmethod
def _h(self, X: Tensor) -> Tensor:
pass # pragma: no cover
@abstractmethod
def _g(self, X: Tensor) -> Tensor:
pass # pragma: no cover
@abstractmethod
def _S(self, X: Tensor) -> Tensor:
pass # pragma: no cover
class DH1(DH):
r"""DH1 test problem.
d-dimensional problem evaluated on `[0, 1] x [-1, 1]^{d-1}`:
f_0(x) = x_0
f_1(x) = h(x_0) + g(x) * S(x_0)
h(x_0) = 1 - x_0^2
g(x) = \sum_{i=1}^{d-1} (10 + x_i^2 - 10 * cos(4 * pi * x_i))
S(x_0) = alpha / (0.2 + x_0) + beta * x_0^2
where alpha = 1 and beta = 1.
The Pareto front corresponds to the equation `f_1 = 1 - f_0^2`, and it is found at
`x_i = 0` for `i > 0` and any value of `x_0` in `(0, 1]`.
"""
alpha = 1.0
beta = 1.0
_x_1_lb = -1.0
_area_under_curve = 2.0 / 3.0
_min_dim = 2
def _h(self, X: Tensor) -> Tensor:
return 1 - X[..., 0].pow(2)
def _g(self, X: Tensor) -> Tensor:
x_1_to = X[..., 1:]
return torch.sum(
10 + x_1_to.pow(2) - 10 * torch.cos(4 * math.pi * x_1_to),
dim=-1,
)
def _S(self, X: Tensor) -> Tensor:
x_0 = X[..., 0]
return self.alpha / (0.2 + x_0) + self.beta * x_0.pow(2)
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
# This may encounter 0 / 0, which we set to 0.
f_1 = self._h(X) + torch.nan_to_num(self._g(X) * self._S(X))
return torch.stack([f_0, f_1], dim=-1)
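# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# on the Pareto set of DH1 (`x_i = 0` for `i > 0`) the objectives satisfy
# `f_1 = 1 - f_0^2`, as stated in the docstring above.
def _demo_dh1() -> None:
    problem = DH1(dim=3)
    x_0 = torch.linspace(0.01, 1.0, 5)
    X = torch.stack([x_0, torch.zeros_like(x_0), torch.zeros_like(x_0)], dim=-1)
    Y = problem(X)
    assert torch.allclose(Y[..., 1], 1 - Y[..., 0].pow(2))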
class DH2(DH1):
r"""DH2 test problem.
This is identical to DH1 except for having `beta = 10.0`.
"""
beta = 10.0
class DH3(DH):
r"""DH3 test problem.
d-dimensional problem evaluated on `[0, 1]^2 x [-1, 1]^{d-2}`:
f_0(x) = x_0
f_1(x) = h(x_1) * (g(x) + S(x_0))
h(x_1) = 2 - 0.8 * exp(-((x_1 - 0.35) / 0.25)^2) - exp(-((x_1 - 0.85) / 0.03)^2)
g(x) = \sum_{i=2}^{d-1} (50 * x_i^2)
S(x_0) = 1 - sqrt(x_0)
The Pareto front is found at `x_i = 0` for `i > 1`. There's a local and a global
Pareto front, which are found at `x_1 = 0.35` and `x_1 = 0.85`, respectively.
The approximate relationships between the objectives at local and global Pareto
fronts are given by `f_1 = 1.2 (1 - sqrt(f_0))` and `f_1 = 1 - f_0`, respectively.
The specific values on the Pareto fronts can be found by varying `x_0`.
"""
_x_1_lb = 0.0
_area_under_curve = 0.328449169794718
_min_dim = 3
@staticmethod
def _exp_args(x: Tensor) -> Tensor:
exp_arg_1 = -((x - 0.35) / 0.25).pow(2)
exp_arg_2 = -((x - 0.85) / 0.03).pow(2)
return exp_arg_1, exp_arg_2
def _h(self, X: Tensor) -> Tensor:
exp_arg_1, exp_arg_2 = self._exp_args(X[..., 1])
return 2 - 0.8 * torch.exp(exp_arg_1) - torch.exp(exp_arg_2)
def _g(self, X: Tensor) -> Tensor:
return 50 * X[..., 2:].pow(2).sum(dim=-1)
def _S(self, X: Tensor) -> Tensor:
return 1 - X[..., 0].sqrt()
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
f_1 = self._h(X) * (self._g(X) + self._S(X))
return torch.stack([f_0, f_1], dim=-1)
class DH4(DH3):
r"""DH4 test problem.
This is similar to DH3 except that it is evaluated on
`[0, 1] x [-0.15, 1] x [-1, 1]^{d-2}` and:
h(x_0, x_1) = 2 - x_0 - 0.8 * exp(-((x_0 + x_1 - 0.35) / 0.25)^2)
- exp(-((x_0 + x_1 - 0.85) / 0.03)^2)
The Pareto front is found at `x_i = 0` for `i > 2`, with the local one being
near `x_0 + x_1 = 0.35` and the global one near `x_0 + x_1 = 0.85`.
"""
_x_1_lb = -0.15
_area_under_curve = 0.22845
def _h(self, X: Tensor) -> Tensor:
exp_arg_1, exp_arg_2 = self._exp_args(X[..., :2].sum(dim=-1))
return 2 - X[..., 0] - 0.8 * torch.exp(exp_arg_1) - torch.exp(exp_arg_2)
class DTLZ(MultiObjectiveTestProblem):
r"""Base class for DTLZ problems.
See [Deb2005dtlz]_ for more details on DTLZ.
"""
def __init__(
self,
dim: int,
num_objectives: int = 2,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
r"""
Args:
dim: The (input) dimension of the function.
num_objectives: Must be less than dim.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
if dim <= num_objectives:
raise ValueError(
f"dim must be > num_objectives, but got {dim} and {num_objectives}."
)
self.num_objectives = num_objectives
self.dim = dim
self.k = self.dim - self.num_objectives + 1
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
self._ref_point = [self._ref_val for _ in range(num_objectives)]
super().__init__(noise_std=noise_std, negate=negate)
class DTLZ1(DTLZ):
r"""DLTZ1 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = 0.5 * x_0 * (1 + g(x))
f_1(x) = 0.5 * (1 - x_0) * (1 + g(x))
        g(x) = 100 * (k + \sum_{i=m}^{d-1} (
        (x_i - 0.5)^2 - cos(20 * pi * (x_i - 0.5))
        ))
        where k = d - m + 1.
The pareto front is given by the line (or hyperplane) \sum_i f_i(x) = 0.5.
    The goal is to minimize both objectives. The reference point comes from [Yang2019a]_.
"""
_ref_val = 400.0
@property
def _max_hv(self) -> float:
return self._ref_val**self.num_objectives - 1 / 2**self.num_objectives
def evaluate_true(self, X: Tensor) -> Tensor:
X_m = X[..., -self.k :]
X_m_minus_half = X_m - 0.5
sum_term = (
X_m_minus_half.pow(2) - torch.cos(20 * math.pi * X_m_minus_half)
).sum(dim=-1)
g_X_m = 100 * (self.k + sum_term)
g_X_m_term = 0.5 * (1 + g_X_m)
fs = []
for i in range(self.num_objectives):
idx = self.num_objectives - 1 - i
f_i = g_X_m_term * X[..., :idx].prod(dim=-1)
if i > 0:
f_i *= 1 - X[..., idx]
fs.append(f_i)
return torch.stack(fs, dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
r"""Generate `n` pareto optimal points.
        The pareto points are randomly sampled from the hyperplane `\sum_i f_i(x) = 0.5`.
"""
f_X = 0.5 * sample_simplex(
n=n,
d=self.num_objectives,
qmc=True,
dtype=self.ref_point.dtype,
device=self.ref_point.device,
)
if self.negate:
f_X *= -1
return f_X
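# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# points returned by `gen_pareto_front` lie on the hyperplane `sum_i f_i = 0.5`.
def _demo_dtlz1_front() -> None:
    problem = DTLZ1(dim=5, num_objectives=2)
    front = problem.gen_pareto_front(n=16)  # `16 x 2` tensor of objective values
    assert torch.allclose(front.sum(dim=-1), torch.full_like(front[..., 0], 0.5))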
class DTLZ2(DTLZ):
r"""DLTZ2 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = (1 + g(x)) * cos(x_0 * pi / 2)
f_1(x) = (1 + g(x)) * sin(x_0 * pi / 2)
g(x) = \sum_{i=m}^{d-1} (x_i - 0.5)^2
    The pareto front is given by the unit hypersphere \sum_i f_i^2 = 1.
Note: the pareto front is completely concave. The goal is to minimize
both objectives.
"""
_ref_val = 1.1
@property
def _max_hv(self) -> float:
# hypercube - volume of hypersphere in R^d such that all coordinates are
# positive
hypercube_vol = self._ref_val**self.num_objectives
pos_hypersphere_vol = (
math.pi ** (self.num_objectives / 2)
/ gamma(self.num_objectives / 2 + 1)
/ 2**self.num_objectives
)
return hypercube_vol - pos_hypersphere_vol
def evaluate_true(self, X: Tensor) -> Tensor:
X_m = X[..., -self.k :]
g_X = (X_m - 0.5).pow(2).sum(dim=-1)
g_X_plus1 = 1 + g_X
fs = []
pi_over_2 = math.pi / 2
for i in range(self.num_objectives):
idx = self.num_objectives - 1 - i
f_i = g_X_plus1.clone()
f_i *= torch.cos(X[..., :idx] * pi_over_2).prod(dim=-1)
if i > 0:
f_i *= torch.sin(X[..., idx] * pi_over_2)
fs.append(f_i)
return torch.stack(fs, dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
r"""Generate `n` pareto optimal points.
The pareto points are randomly sampled from the hypersphere's
positive section.
"""
f_X = sample_hypersphere(
n=n,
d=self.num_objectives,
dtype=self.ref_point.dtype,
device=self.ref_point.device,
qmc=True,
).abs()
if self.negate:
f_X *= -1
return f_X
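# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# DTLZ2 Pareto-optimal points lie on the positive section of the unit hypersphere,
# i.e. `sum_i f_i^2 = 1`.
def _demo_dtlz2_front() -> None:
    problem = DTLZ2(dim=6, num_objectives=3)
    front = problem.gen_pareto_front(n=32)  # `32 x 3` tensor
    assert torch.all(front >= 0)
    assert torch.allclose(front.pow(2).sum(dim=-1), torch.ones(32), atol=1e-5)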
class DTLZ3(DTLZ2):
r"""DTLZ3 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = (1 + g(x)) * cos(x_0 * pi / 2)
f_1(x) = (1 + g(x)) * sin(x_0 * pi / 2)
g(x) = 100 * [k + \sum_{i=m}^{n-1} (x_i - 0.5)^2 - cos(20 * pi * (x_i - 0.5))]
    `g(x)` introduces `3^k - 1` local Pareto fronts that are parallel to
the one global Pareto-optimal front.
The global Pareto-optimal front corresponds to x_i = 0.5 for x_i in X_m.
"""
_ref_val = 10000.0
def evaluate_true(self, X: Tensor) -> Tensor:
X_m = X[..., -self.k :]
g_X = 100 * (
X_m.shape[-1]
+ ((X_m - 0.5).pow(2) - torch.cos(20 * math.pi * (X_m - 0.5))).sum(dim=-1)
)
g_X_plus1 = 1 + g_X
fs = []
pi_over_2 = math.pi / 2
for i in range(self.num_objectives):
idx = self.num_objectives - 1 - i
f_i = g_X_plus1.clone()
f_i *= torch.cos(X[..., :idx] * pi_over_2).prod(dim=-1)
if i > 0:
f_i *= torch.sin(X[..., idx] * pi_over_2)
fs.append(f_i)
return torch.stack(fs, dim=-1)
class DTLZ4(DTLZ2):
r"""DTLZ4 test problem.
This is the same as DTLZ2, but with alpha=100 as the exponent,
resulting in dense solutions near the f_M-f_1 plane.
The global Pareto-optimal front corresponds to x_i = 0.5 for x_i in X_m.
"""
_alpha = 100.0
class DTLZ5(DTLZ):
r"""DTLZ5 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = (1 + g(x)) * cos(theta_0 * pi / 2)
f_1(x) = (1 + g(x)) * sin(theta_0 * pi / 2)
        theta_i = pi / (4 * (1 + g(X_m))) * (1 + 2 * g(X_m) * x_i) for i = 1, ... , M-2
g(x) = \sum_{i=m}^{d-1} (x_i - 0.5)^2
The global Pareto-optimal front corresponds to x_i = 0.5 for x_i in X_m.
"""
_ref_val = 10.0
def evaluate_true(self, X: Tensor) -> Tensor:
X_m = X[..., -self.k :]
X_ = X[..., : -self.k]
g_X = (X_m - 0.5).pow(2).sum(dim=-1)
theta = 1 / (2 * (1 + g_X.unsqueeze(-1))) * (1 + 2 * g_X.unsqueeze(-1) * X_)
theta = torch.cat([X[..., :1], theta[..., 1:]], dim=-1)
fs = []
pi_over_2 = math.pi / 2
g_X_plus1 = g_X + 1
for i in range(self.num_objectives):
f_i = g_X_plus1.clone()
f_i *= torch.cos(theta[..., : theta.shape[-1] - i] * pi_over_2).prod(dim=-1)
if i > 0:
f_i *= torch.sin(theta[..., theta.shape[-1] - i] * pi_over_2)
fs.append(f_i)
return torch.stack(fs, dim=-1)
class DTLZ7(DTLZ):
r"""DTLZ7 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = x_1
...
f_{M-1}(x) = (1 + g(X_m)) * h(f_0, f_1, ..., f_{M-2}, g, x)
h(f_0, f_1, ..., f_{M-2}, g, x) =
M - sum_{i=0}^{M-2} f_i(x)/(1+g(x)) * (1 + sin(3 * pi * f_i(x)))
    This test problem has `2^{M-1}` disconnected Pareto-optimal regions in the search space.
The pareto frontier corresponds to X_m = 0.
"""
_ref_val = 15.0
def evaluate_true(self, X):
f = []
for i in range(0, self.num_objectives - 1):
f.append(X[..., i])
f = torch.stack(f, dim=-1)
g_X = 1 + 9 / self.k * torch.sum(X[..., -self.k :], dim=-1)
h = self.num_objectives - torch.sum(
f / (1 + g_X.unsqueeze(-1)) * (1 + torch.sin(3 * math.pi * f)), dim=-1
)
return torch.cat([f, ((1 + g_X) * h).unsqueeze(-1)], dim=-1)
class GMM(MultiObjectiveTestProblem):
r"""A test problem where each objective is a Gaussian mixture model.
This implementation is adapted from the single objective version (proposed by
[Frohlich2020]_) at
https://github.com/boschresearch/NoisyInputEntropySearch/blob/master/
core/util/objectives.py.
See [Daulton2022]_ for details on this multi-objective problem.
"""
dim = 2
_bounds = [(0.0, 1.0), (0.0, 1.0)]
def __init__(
self,
noise_std: Optional[float] = None,
negate: bool = False,
num_objectives: int = 2,
) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the objectives.
num_objectives: The number of objectives.
"""
if num_objectives not in (2, 3, 4):
raise UnsupportedError("GMM only currently supports 2 to 4 objectives.")
self._ref_point = [-0.2338, -0.2211]
if num_objectives > 2:
self._ref_point.append(-0.5180)
if num_objectives > 3:
self._ref_point.append(-0.1866)
self.num_objectives = num_objectives
super().__init__(noise_std=noise_std, negate=negate)
gmm_pos = torch.tensor(
[
[[0.2, 0.2], [0.8, 0.2], [0.5, 0.7]],
[[0.07, 0.2], [0.4, 0.8], [0.85, 0.1]],
]
)
gmm_var = torch.tensor([[0.20, 0.10, 0.10], [0.2, 0.1, 0.05]]).pow(2)
gmm_norm = 2 * pi * gmm_var * torch.tensor([0.5, 0.7, 0.7])
if num_objectives > 2:
gmm_pos = torch.cat(
[gmm_pos, torch.tensor([[[0.08, 0.21], [0.45, 0.75], [0.86, 0.11]]])],
dim=0,
)
gmm_var = torch.cat(
[gmm_var, torch.tensor([[0.2, 0.1, 0.07]]).pow(2)], dim=0
)
gmm_norm = torch.cat(
[
gmm_norm,
2 * pi * gmm_var[2] * torch.tensor([[0.5, 0.7, 0.9]]),
],
dim=0,
)
if num_objectives > 3:
gmm_pos = torch.cat(
[gmm_pos, torch.tensor([[[0.09, 0.19], [0.44, 0.72], [0.89, 0.13]]])],
dim=0,
)
gmm_var = torch.cat(
[gmm_var, torch.tensor([[0.15, 0.07, 0.09]]).pow(2)], dim=0
)
gmm_norm = torch.cat(
[
gmm_norm,
2 * pi * gmm_var[3] * torch.tensor([[0.5, 0.7, 0.9]]),
],
dim=0,
)
gmm_covar = gmm_var.view(*gmm_var.shape, 1, 1) * torch.eye(
2, dtype=gmm_var.dtype, device=gmm_var.device
)
self.register_buffer("gmm_pos", gmm_pos)
self.register_buffer("gmm_covar", gmm_covar)
self.register_buffer("gmm_norm", gmm_norm)
def evaluate_true(self, X: Tensor) -> Tensor:
r"""Evaluate the GMMs."""
# This needs to be reinstantiated because MVN apparently does not
# have a `to` method to make it device/dtype agnostic.
mvn = MultivariateNormal(loc=self.gmm_pos, covariance_matrix=self.gmm_covar)
view_shape = (
X.shape[:-1]
+ torch.Size([1] * (self.gmm_pos.ndim - 1))
+ self.gmm_pos.shape[-1:]
)
expand_shape = X.shape[:-1] + self.gmm_pos.shape
pdf_X = mvn.log_prob(X.view(view_shape).expand(expand_shape)).exp()
# Multiply by -1 to make this a minimization problem by default
return -(self.gmm_norm * pdf_X).sum(dim=-1)
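# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# the GMM problem returns negated mixture densities, so all values are non-positive.
def _demo_gmm() -> None:
    problem = GMM(num_objectives=3)
    X = torch.rand(10, problem.dim)
    Y = problem(X)  # `10 x 3` tensor of negated GMM densities
    assert Y.shape == (10, 3)
    assert torch.all(Y <= 0)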
class Penicillin(MultiObjectiveTestProblem):
r"""A penicillin production simulator from [Liang2021]_.
This implementation is adapted from
https://github.com/HarryQL/TuRBO-Penicillin.
The goal is to maximize the penicillin yield while minimizing
time to ferment and the CO2 byproduct.
The function is defined for minimization of all objectives.
The reference point was set using the `infer_reference_point` heuristic
on the Pareto frontier over a large discrete set of random designs.
"""
dim = 7
num_objectives = 3
_bounds = [
(60.0, 120.0),
(0.05, 18.0),
(293.0, 303.0),
(0.05, 18.0),
(0.01, 0.5),
(500.0, 700.0),
(5.0, 6.5),
]
_ref_point = [1.85, 86.93, 514.70]
Y_xs = 0.45
Y_ps = 0.90
K_1 = 10 ** (-10)
K_2 = 7 * 10 ** (-5)
m_X = 0.014
alpha_1 = 0.143
alpha_2 = 4 * 10 ** (-7)
alpha_3 = 10 ** (-4)
mu_X = 0.092
K_X = 0.15
mu_p = 0.005
K_p = 0.0002
K_I = 0.10
K = 0.04
k_g = 7.0 * 10**3
E_g = 5100.0
k_d = 10.0**33
E_d = 50000.0
lambd = 2.5 * 10 ** (-4)
T_v = 273.0 # Kelvin
T_o = 373.0
R = 1.9872 # CAL/(MOL K)
V_max = 180.0
@classmethod
def penicillin_vectorized(cls, X_input: Tensor) -> Tensor:
r"""Penicillin simulator, simplified and vectorized.
The 7 input parameters are (in order): culture volume, biomass
concentration, temperature, glucose concentration, substrate feed
rate, substrate feed concentration, and H+ concentration.
Args:
X_input: A `n x 7`-dim tensor of inputs.
Returns:
An `n x 3`-dim tensor of (negative) penicillin yield, CO2 and time.
"""
V, X, T, S, F, s_f, H_ = torch.split(X_input, 1, -1)
P, CO2 = torch.zeros_like(V), torch.zeros_like(V)
H = torch.full_like(H_, 10.0).pow(-H_)
active = torch.ones_like(V).bool()
t_tensor = torch.full_like(V, 2500)
for t in range(1, 2501):
if active.sum() == 0:
break
F_loss = (
V[active]
* cls.lambd
* (torch.exp(5 * ((T[active] - cls.T_o) / (cls.T_v - cls.T_o))) - 1)
)
dV_dt = F[active] - F_loss
mu = (
(cls.mu_X / (1 + cls.K_1 / H[active] + H[active] / cls.K_2))
* (S[active] / (cls.K_X * X[active] + S[active]))
* (
(cls.k_g * torch.exp(-cls.E_g / (cls.R * T[active])))
- (cls.k_d * torch.exp(-cls.E_d / (cls.R * T[active])))
)
)
dX_dt = mu * X[active] - (X[active] / V[active]) * dV_dt
mu_pp = cls.mu_p * (
S[active] / (cls.K_p + S[active] + S[active].pow(2) / cls.K_I)
)
dS_dt = (
-(mu / cls.Y_xs) * X[active]
- (mu_pp / cls.Y_ps) * X[active]
- cls.m_X * X[active]
+ F[active] * s_f[active] / V[active]
- (S[active] / V[active]) * dV_dt
)
dP_dt = (
(mu_pp * X[active])
- cls.K * P[active]
- (P[active] / V[active]) * dV_dt
)
dCO2_dt = cls.alpha_1 * dX_dt + cls.alpha_2 * X[active] + cls.alpha_3
            # Update the state variables (forward Euler step with unit step size)
P[active] = P[active] + dP_dt # Penicillin concentration
V[active] = V[active] + dV_dt # Culture medium volume
X[active] = X[active] + dX_dt # Biomass concentration
S[active] = S[active] + dS_dt # Glucose concentration
CO2[active] = CO2[active] + dCO2_dt # CO2 concentration
# Update active indices
full_dpdt = torch.ones_like(P)
full_dpdt[active] = dP_dt
inactive = (V > cls.V_max) + (S < 0) + (full_dpdt < 10e-12)
t_tensor[inactive] = torch.minimum(
t_tensor[inactive], torch.full_like(t_tensor[inactive], t)
)
active[inactive] = 0
return torch.stack([-P, CO2, t_tensor], dim=-1)
def evaluate_true(self, X: Tensor) -> Tensor:
# This uses in-place operations. Hence, the clone is to avoid modifying
# the original X in-place.
return self.penicillin_vectorized(X.view(-1, self.dim).clone()).view(
*X.shape[:-1], self.num_objectives
)
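# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# evaluates the penicillin simulator on random designs drawn from its bounds via
# `unnormalize` (already imported at the top of this module).
def _demo_penicillin() -> None:
    problem = Penicillin()
    X = unnormalize(torch.rand(4, problem.dim, dtype=torch.double), problem.bounds)
    Y = problem(X)  # `4 x 3` tensor: (negative yield, CO2, fermentation time)
    assert Y.shape == (4, 3)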
class ToyRobust(MultiObjectiveTestProblem):
r"""A 1D problem where the Pareto frontier is sensitive to input noise.
Specifically, the pareto frontier over the nominal objectives is
sensitive to input noise. The first objective is a mixture of a linear
function and a sinusoidal function, and the second objective is a modified
Levy function, where the second parameter is fixed.
This function comes from [Daulton2022]_.
The reference point was set using the `infer_reference_point`
heuristic on the Pareto frontier over a large discrete set of
random designs.
"""
dim = 1
_bounds = [(0.0, 0.7)]
_ref_point = [-6.1397, -8.1942]
num_objectives = 2
levy = Levy()
def f_1(self, X: Tensor) -> Tensor:
p1 = 2.4 - 10 * X - 0.1 * X.pow(2)
p2 = 2 * X - 0.1 * X.pow(2)
smoother = (X - 0.5).pow(2) + torch.sin(30 * X) * 0.1
x_mask = torch.sigmoid((0.2 - X) / 0.005)
return -(p1 * x_mask + p2 * (1 - x_mask) + smoother) * 30 + 30
def f_2(self, X: Tensor) -> Tensor:
X = torch.cat(
[X, torch.zeros_like(X)],
dim=-1,
)
# Cut out the first part of the function.
X = X * 0.95 + 0.03
X = unnormalize(X, self.levy.bounds.to(X))
Y = self.levy(X).unsqueeze(-1)
Y -= X[..., :1].pow(2) * 0.75
return Y
def evaluate_true(self, X: Tensor) -> Tensor:
return -torch.cat([self.f_1(X), self.f_2(X)], dim=-1)
class VehicleSafety(MultiObjectiveTestProblem):
r"""Optimize Vehicle crash-worthiness.
See [Tanabe2020]_ for details.
    The reference point is 1.1 * the nadir point obtained from the
    approximate Pareto front provided by [Tanabe2020]_.
The maximum hypervolume is computed using the approximate
pareto front from [Tanabe2020]_.
"""
_ref_point = [1864.72022, 11.81993945, 0.2903999384]
_max_hv = 246.81607081187002
_bounds = [(1.0, 3.0)] * 5
dim = 5
num_objectives = 3
def evaluate_true(self, X: Tensor) -> Tensor:
X1, X2, X3, X4, X5 = torch.split(X, 1, -1)
f1 = (
1640.2823
+ 2.3573285 * X1
+ 2.3220035 * X2
+ 4.5688768 * X3
+ 7.7213633 * X4
+ 4.4559504 * X5
)
f2 = (
6.5856
+ 1.15 * X1
- 1.0427 * X2
+ 0.9738 * X3
+ 0.8364 * X4
- 0.3695 * X1 * X4
+ 0.0861 * X1 * X5
+ 0.3628 * X2 * X4
- 0.1106 * X1.pow(2)
- 0.3437 * X3.pow(2)
+ 0.1764 * X4.pow(2)
)
f3 = (
-0.0551
+ 0.0181 * X1
+ 0.1024 * X2
+ 0.0421 * X3
- 0.0073 * X1 * X2
+ 0.024 * X2 * X3
- 0.0118 * X2 * X4
- 0.0204 * X3 * X4
- 0.008 * X3 * X5
- 0.0241 * X2.pow(2)
+ 0.0109 * X4.pow(2)
)
f_X = torch.cat([f1, f2, f3], dim=-1)
return f_X
class ZDT(MultiObjectiveTestProblem):
r"""Base class for ZDT problems.
See [Zitzler2000]_ for more details on ZDT.
"""
_ref_point = [11.0, 11.0]
def __init__(
self,
dim: int,
num_objectives: int = 2,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
r"""
Args:
dim: The (input) dimension of the function.
num_objectives: Number of objectives. Must not be larger than dim.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
if num_objectives != 2:
raise NotImplementedError(
f"{type(self).__name__} currently only supports 2 objectives."
)
if dim < num_objectives:
raise ValueError(
f"dim must be >= num_objectives, but got {dim} and {num_objectives}"
)
self.num_objectives = num_objectives
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
super().__init__(noise_std=noise_std, negate=negate)
@staticmethod
def _g(X: Tensor) -> Tensor:
return 1 + 9 * X[..., 1:].mean(dim=-1)
class ZDT1(ZDT):
r"""ZDT1 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = g(x) * (1 - sqrt(x_0 / g(x))
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front is convex.
"""
_max_hv = 120 + 2 / 3
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = g * (1 - (f_0 / g).sqrt())
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
f_0 = torch.linspace(
0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
)
f_1 = 1 - f_0.sqrt()
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
class ZDT2(ZDT):
r"""ZDT2 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = g(x) * (1 - (x_0 / g(x))^2)
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front is concave.
"""
_max_hv = 120 + 1 / 3
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = g * (1 - (f_0 / g).pow(2))
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
f_0 = torch.linspace(
0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
)
f_1 = 1 - f_0.pow(2)
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
class ZDT3(ZDT):
r"""ZDT3 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = 1 - sqrt(x_0 / g(x)) - x_0 / g * sin(10 * pi * x_0)
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front consists of several discontinuous convex parts.
"""
_max_hv = 128.77811613069076060
_parts = [
# this interval includes both end points
[0, 0.0830015349],
# this interval includes only the right end points
[0.1822287280, 0.2577623634],
[0.4093136748, 0.4538821041],
[0.6183967944, 0.6525117038],
[0.8233317983, 0.8518328654],
]
# nugget to make sure linspace returns elements within the specified range
_eps = 1e-6
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = 1 - (f_0 / g).sqrt() - f_0 / g * torch.sin(10 * math.pi * f_0)
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
n_parts = len(self._parts)
n_per_part = torch.full(
torch.Size([n_parts]),
n // n_parts,
dtype=torch.long,
device=self.bounds.device,
)
left_over = n % n_parts
n_per_part[:left_over] += 1
f_0s = []
for i, p in enumerate(self._parts):
left, right = p
f_0s.append(
torch.linspace(
left + self._eps,
right - self._eps,
n_per_part[i],
dtype=self.bounds.dtype,
device=self.bounds.device,
)
)
f_0 = torch.cat(f_0s, dim=0)
f_1 = 1 - f_0.sqrt() - f_0 * torch.sin(10 * math.pi * f_0)
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
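# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# first-objective values on the generated ZDT3 front fall inside the disconnected
# intervals listed in `_parts`.
def _demo_zdt3_front() -> None:
    problem = ZDT3(dim=6)
    f_0 = problem.gen_pareto_front(n=25)[..., 0]
    in_some_part = torch.zeros_like(f_0, dtype=torch.bool)
    for left, right in problem._parts:
        in_some_part |= (f_0 >= left) & (f_0 <= right)
    assert bool(in_some_part.all())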
class CarSideImpact(MultiObjectiveTestProblem):
r"""Car side impact problem.
See [Tanabe2020]_ for details.
The reference point is `nadir + 0.1 * (ideal - nadir)`
where the ideal and nadir points come from the approximate
Pareto frontier from [Tanabe2020]_. The max_hv was computed
based on the approximate Pareto frontier from [Tanabe2020]_.
"""
num_objectives: int = 4
dim: int = 7
_bounds = [
(0.5, 1.5),
(0.45, 1.35),
(0.5, 1.5),
(0.5, 1.5),
(0.875, 2.625),
(0.4, 1.2),
(0.4, 1.2),
]
_ref_point = [45.4872, 4.5114, 13.3394, 10.3942]
_max_hv = 484.72654347642793
def evaluate_true(self, X: Tensor) -> Tensor:
X1, X2, X3, X4, X5, X6, X7 = torch.split(X, 1, -1)
f1 = (
1.98
+ 4.9 * X1
+ 6.67 * X2
+ 6.98 * X3
+ 4.01 * X4
+ 1.78 * X5
+ 10**-5 * X6
+ 2.73 * X7
)
f2 = 4.72 - 0.5 * X4 - 0.19 * X2 * X3
V_MBP = 10.58 - 0.674 * X1 * X2 - 0.67275 * X2
V_FD = 16.45 - 0.489 * X3 * X7 - 0.843 * X5 * X6
f3 = 0.5 * (V_MBP + V_FD)
g1 = 1 - 1.16 + 0.3717 * X2 * X4 + 0.0092928 * X3
g2 = (
0.32
- 0.261
+ 0.0159 * X1 * X2
+ 0.06486 * X1
+ 0.019 * X2 * X7
- 0.0144 * X3 * X5
- 0.0154464 * X6
)
g3 = (
0.32
- 0.214
- 0.00817 * X5
+ 0.045195 * X1
+ 0.0135168 * X1
- 0.03099 * X2 * X6
+ 0.018 * X2 * X7
- 0.007176 * X3
- 0.023232 * X3
+ 0.00364 * X5 * X6
+ 0.018 * X2.pow(2)
)
g4 = 0.32 - 0.74 + 0.61 * X2 + 0.031296 * X3 + 0.031872 * X7 - 0.227 * X2.pow(2)
g5 = 32 - 28.98 - 3.818 * X3 + 4.2 * X1 * X2 - 1.27296 * X6 + 2.68065 * X7
g6 = (
32
- 33.86
- 2.95 * X3
+ 5.057 * X1 * X2
+ 3.795 * X2
+ 3.4431 * X7
- 1.45728
)
g7 = 32 - 46.36 + 9.9 * X2 + 4.4505 * X1
g8 = 4 - f2
g9 = 9.9 - V_MBP
g10 = 15.7 - V_FD
g = torch.cat([g1, g2, g3, g4, g5, g6, g7, g8, g9, g10], dim=-1)
zero = torch.tensor(0.0, dtype=X.dtype, device=X.device)
g = torch.where(g < 0, -g, zero)
f4 = g.sum(dim=-1, keepdim=True)
return torch.cat([f1, f2, f3, f4], dim=-1)
# ------ Constrained Multi-Objective Test Problems ----- #
class BNH(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained BNH problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(0.0, 5.0), (0.0, 3.0)]
_ref_point = [0.0, 0.0] # TODO: Determine proper reference point
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.stack(
[4.0 * (X**2).sum(dim=-1), ((X - 5.0) ** 2).sum(dim=-1)], dim=-1
)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 25.0 - (X[..., 0] - 5.0) ** 2 - X[..., 1] ** 2
c2 = (X[..., 0] - 8.0) ** 2 + (X[..., 1] + 3.0) ** 2 - 7.7
return torch.stack([c1, c2], dim=-1)
class CONSTR(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained CONSTR problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(0.1, 10.0), (0.0, 5.0)]
_ref_point = [10.0, 10.0]
def evaluate_true(self, X: Tensor) -> Tensor:
obj1 = X[..., 0]
obj2 = (1.0 + X[..., 1]) / X[..., 0]
return torch.stack([obj1, obj2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 9.0 * X[..., 0] + X[..., 1] - 6.0
c2 = 9.0 * X[..., 0] - X[..., 1] - 1.0
return torch.stack([c1, c2], dim=-1)
class ConstrainedBraninCurrin(BraninCurrin, ConstrainedBaseTestProblem):
r"""Constrained Branin Currin Function.
This uses the disk constraint from [Gelbart2014]_.
"""
dim = 2
num_objectives = 2
num_constraints = 1
_bounds = [(0.0, 1.0), (0.0, 1.0)]
_con_bounds = [(-5.0, 10.0), (0.0, 15.0)]
_ref_point = [80.0, 12.0]
_max_hv = 608.4004237022673 # from NSGA-II with 90k evaluations
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
super().__init__(noise_std=noise_std, negate=negate)
con_bounds = torch.tensor(self._con_bounds, dtype=torch.float).transpose(-1, -2)
self.register_buffer("con_bounds", con_bounds)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
X_tf = unnormalize(X, self.con_bounds)
return 50 - (X_tf[..., 0:1] - 2.5).pow(2) - (X_tf[..., 1:2] - 7.5).pow(2)
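# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# combines the BraninCurrin objectives with the disk constraint; `is_feasible` from
# `ConstrainedBaseTestProblem` thresholds the constraint slack at zero.
def _demo_constrained_branin_currin() -> None:
    problem = ConstrainedBraninCurrin()
    X = torch.rand(16, problem.dim)
    Y = problem(X)  # `16 x 2` tensor of objectives
    feas = problem.is_feasible(X)  # 16-dim boolean mask
    assert Y.shape == (16, 2) and feas.shape == (16,)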
class C2DTLZ2(DTLZ2, ConstrainedBaseTestProblem):
num_constraints = 1
_r = 0.2
# approximate from nsga-ii, TODO: replace with analytic
_max_hv = 0.3996406303723544
def evaluate_slack_true(self, X: Tensor) -> Tensor:
if X.ndim > 2:
raise NotImplementedError("Batch X is not supported.")
f_X = self.evaluate_true(X)
term1 = (f_X - 1).pow(2)
mask = ~(torch.eye(f_X.shape[-1], device=f_X.device).bool())
indices = torch.arange(f_X.shape[1], device=f_X.device).repeat(f_X.shape[1], 1)
indexer = indices[mask].view(f_X.shape[1], f_X.shape[-1] - 1)
term2_inner = (
f_X.unsqueeze(1)
.expand(f_X.shape[0], f_X.shape[-1], f_X.shape[-1])
.gather(dim=-1, index=indexer.repeat(f_X.shape[0], 1, 1))
)
term2 = (term2_inner.pow(2) - self._r**2).sum(dim=-1)
min1 = (term1 + term2).min(dim=-1).values
min2 = ((f_X - 1 / math.sqrt(f_X.shape[-1])).pow(2) - self._r**2).sum(dim=-1)
return -torch.min(min1, min2).unsqueeze(-1)
class DiscBrake(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The Disc Brake problem.
There are 2 objectives and 4 constraints.
Both objectives should be minimized.
See [Tanabe2020]_ for details.
The reference point was set using the `infer_reference_point`
heuristic on the Pareto frontier over a large discrete set of
random designs.
"""
dim = 4
num_objectives = 2
num_constraints = 4
_bounds = [(55.0, 80.0), (75.0, 110.0), (1000.0, 3000.0), (11.0, 20.0)]
_ref_point = [5.7771, 3.9651]
def evaluate_true(self, X: Tensor) -> Tensor:
f = torch.zeros(
*X.shape[:-1], self.num_objectives, dtype=X.dtype, device=X.device
)
X1, X2, X3, X4 = torch.split(X, 1, -1)
sq_diff = X2.pow(2) - X1.pow(2)
f[..., :1] = 4.9 * 1e-5 * sq_diff * (X4 - 1.0)
f[..., 1:] = (9.82 * 1e6) * sq_diff / (X3 * X4 * (X2.pow(3) - X1.pow(3)))
return f
def evaluate_slack_true(self, X: Tensor) -> Tensor:
g = torch.zeros(
*X.shape[:-1], self.num_constraints, dtype=X.dtype, device=X.device
)
X1, X2, X3, X4 = torch.split(X, 1, -1)
sq_diff = X2.pow(2) - X1.pow(2)
cub_diff = X2.pow(3) - X1.pow(3)
g[..., :1] = X2 - X1 - 20.0
g[..., 1:2] = 0.4 - X3 / (3.14 * sq_diff)
g[..., 2:3] = 1.0 - (2.22 * 1e-3 * X3 * cub_diff) / sq_diff.pow(2)
g[..., 3:] = (2.66 * 1e-2 * X3 * X4 * cub_diff) / sq_diff - 900.0
return g
class MW7(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The MW7 problem.
This problem has 2 objectives, 2 constraints, and a disconnected Pareto
frontier. It supports arbitrary input dimension > 1. See [Ma2019]_ for details.
This implementation is adapted from:
https://github.com/anyoptimization/pymoo/blob/master/pymoo/problems/multi/mw.py
"""
num_constraints = 2
num_objectives = 2
_ref_point = [1.2, 1.2]
def __init__(
self,
dim: int,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
r"""
Args:
dim: The (input) dimension of the function. Must be at least 2.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
if dim < 2:
raise ValueError("dim must be greater than or equal to 2.")
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
super().__init__(noise_std=noise_std, negate=negate)
def LA2(self, A, B, C, D, theta):
return A * torch.sin(B * theta.pow(C)).pow(D)
def evaluate_true(self, X: Tensor) -> Tensor:
a = X[..., :-1] - 0.5
contrib = 2 * (X[..., 1:] + a.pow(2) - 1).pow(2)
g = 1 + contrib.sum(dim=-1)
f0 = g * X[..., 0]
f1 = g * torch.sqrt(1 - (f0 / g).pow(2))
return torch.stack([f0, f1], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
ff = self.evaluate_true(X)
f0, f1 = ff[..., 0], ff[..., 1]
atan = torch.arctan(f1 / f0)
g0 = (
f0.pow(2)
+ f1.pow(2)
- (1.2 + (self.LA2(0.4, 4.0, 1.0, 16.0, atan)).abs()).pow(2)
)
g1 = (1.15 - self.LA2(0.2, 4.0, 1.0, 8.0, atan)).pow(2) - f0.pow(2) - f1.pow(2)
return -torch.stack([g0, g1], dim=-1)
class OSY(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""
The OSY test problem from [Oszycka1995]_.
Implementation from
https://github.com/msu-coinlab/pymoo/blob/master/pymoo/problems/multi/osy.py
Note that this implementation assumes minimization, so please choose negate=True.
"""
dim = 6
num_constraints = 6
num_objectives = 2
_bounds = [
(0.0, 10.0),
(0.0, 10.0),
(1.0, 5.0),
(0.0, 6.0),
(1.0, 5.0),
(0.0, 10.0),
]
_ref_point = [-75.0, 75.0]
def evaluate_true(self, X: Tensor) -> Tensor:
f1 = -(
25 * (X[..., 0] - 2) ** 2
+ (X[..., 1] - 2) ** 2
+ (X[..., 2] - 1) ** 2
+ (X[..., 3] - 4) ** 2
+ (X[..., 4] - 1) ** 2
)
f2 = (X**2).sum(-1)
return torch.stack([f1, f2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
g1 = X[..., 0] + X[..., 1] - 2.0
g2 = 6.0 - X[..., 0] - X[..., 1]
g3 = 2.0 - X[..., 1] + X[..., 0]
g4 = 2.0 - X[..., 0] + 3.0 * X[..., 1]
g5 = 4.0 - (X[..., 2] - 3.0) ** 2 - X[..., 3]
g6 = (X[..., 4] - 3.0) ** 2 + X[..., 5] - 4.0
return torch.stack([g1, g2, g3, g4, g5, g6], dim=-1)
class SRN(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained SRN problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(-20.0, 20.0), (-20.0, 20.0)]
_ref_point = [0.0, 0.0] # TODO: Determine proper reference point
def evaluate_true(self, X: Tensor) -> Tensor:
obj1 = 2.0 + ((X - 2.0) ** 2).sum(dim=-1)
obj2 = 9.0 * X[..., 0] - (X[..., 1] - 1.0) ** 2
return torch.stack([obj1, obj2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 225.0 - ((X**2) ** 2).sum(dim=-1)
c2 = -10.0 - X[..., 0] + 3 * X[..., 1]
return torch.stack([c1, c2], dim=-1)
class WeldedBeam(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""
The Welded Beam multi-objective test problem. Similar to `WeldedBeamSO` in
    `botorch.test_functions.synthetic`, but with an additional output, somewhat
modified constraints, and a different domain.
Implementation from
https://github.com/msu-coinlab/pymoo/blob/master/pymoo/problems/multi/welded_beam.py
Note that this implementation assumes minimization, so please choose negate=True.
"""
dim = 4
num_constraints = 4
num_objectives = 2
_bounds = [
(0.125, 5.0),
(0.1, 10.0),
(0.1, 10.0),
(0.125, 5.0),
]
_ref_point = [40, 0.015]
def evaluate_true(self, X: Tensor) -> Tensor:
# We could do the following, but the constraints are using somewhat
# different numbers (see below).
        # f1 = WeldedBeamSO.evaluate_true(self, X)
x1, x2, x3, x4 = X.unbind(-1)
f1 = 1.10471 * (x1**2) * x2 + 0.04811 * x3 * x4 * (14.0 + x2)
f2 = 2.1952 / (x4 * x3**3)
return torch.stack([f1, f2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
x1, x2, x3, x4 = X.unbind(-1)
P = 6000.0
L = 14.0
t_max = 13600.0
s_max = 30000.0
# Ideally, we could just do the following, but the numbers in the
# single-outcome WeldedBeam are different (see below)
        # g1_, g2_, g3_, _, _, g6_ = WeldedBeamSO.evaluate_slack_true(self, X)
# g1 = g1_ / t_max
# g2 = g2_ / s_max
# g3 = 1 / (5 - 0.125) * g3_
# g4 = 1 / P * g6_
R = torch.sqrt(0.25 * (x2**2 + (x1 + x3) ** 2))
M = P * (L + x2 / 2)
# This `J` is different than the one in [CoelloCoello2002constraint]_
# by a factor of 2 (sqrt(2) instead of sqrt(0.5))
J = 2 * math.sqrt(0.5) * x1 * x2 * (x2**2 / 12 + 0.25 * (x1 + x3) ** 2)
t1 = P / (math.sqrt(2) * x1 * x2)
t2 = M * R / J
t = torch.sqrt(t1**2 + t1 * t2 * x2 / R + t2**2)
s = 6 * P * L / (x4 * x3**2)
# These numbers are also different from [CoelloCoello2002constraint]_
P_c = 64746.022 * (1 - 0.0282346 * x3) * x3 * x4**3
g1 = (t - t_max) / t_max
g2 = (s - s_max) / s_max
g3 = 1 / (5 - 0.125) * (x1 - x4)
g4 = (P - P_c) / P
return torch.stack([g1, g2, g3, g4], dim=-1)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Multi-objective multi-fidelity optimization benchmark problems.
References
.. [Irshad2021]
F. Irshad, S. Karsch, and A. Döpp. Expected hypervolume improvement for
simultaneous multi-objective and multi-fidelity optimization.
arXiv preprint arXiv:2112.13901, 2021.
"""
import math
import torch
from botorch.test_functions.base import MultiObjectiveTestProblem
from torch import Tensor
class MOMFBraninCurrin(MultiObjectiveTestProblem):
r"""Branin-Currin problem for multi-objective-multi-fidelity optimization.
(2+1)-dimensional function with domain `[0,1]^3` where the last dimension
is the fidelity parameter `s`.
Both functions assume minimization. See [Irshad2021]_ for more details.
Modified Branin function:
        B(x,s) = (21 - ((
        15 * x_2 - b(s) * (15 * x_1 - 5) ** 2 + c(s) * (15 * x_1 - 5) - 6) ** 2
        + 10 * (1 - t(s)) * cos(15 * x_1 - 5) + 10)) / 22
Here `b`, `c`, `r` and `t` are constants and `s` is the fidelity parameter:
where `b = 5.1 / (4 * math.pi ** 2) - 0.01(1-s)`,
`c = 5 / math.pi - 0.1*(1 - s)`,
`r = 6`,
`t = 1 / (8 * math.pi) + 0.05*(1-s)`
Modified Currin function:
        C(x,s) = (14 - (1 - 0.1 * (1 - s) * exp(-1 / (2 * x_2))) * (
        2300 * x_1 ** 3 + 1900 * x_1 ** 2 + 2092 * x_1 + 60
        ) / (100 * x_1 ** 3 + 500 * x_1 ** 2 + 4 * x_1 + 20)) / 15
"""
dim = 3
num_objectives = 2
_bounds = [(0.0, 1.0) for _ in range(dim)]
_ref_point = [0, 0]
_max_hv = 0.5235514158034145
def _branin(self, X: Tensor) -> Tensor:
x1 = X[..., 0]
x2 = X[..., 1]
s = X[..., 2]
x11 = 15 * x1 - 5
x22 = 15 * x2
b = 5.1 / (4 * math.pi**2) - 0.01 * (1 - s)
c = 5 / math.pi - 0.1 * (1 - s)
r = 6
t = 1 / (8 * math.pi) + 0.05 * (1 - s)
y = (x22 - b * x11**2 + c * x11 - r) ** 2 + 10 * (1 - t) * torch.cos(x11) + 10
B = 21 - y
return B / 22
def _currin(self, X: Tensor) -> Tensor:
x1 = X[..., 0]
x2 = X[..., 1]
s = X[..., 2]
A = 2300 * x1**3 + 1900 * x1**2 + 2092 * x1 + 60
B = 100 * x1**3 + 500 * x1**2 + 4 * x1 + 20
y = (1 - 0.1 * (1 - s) * torch.exp(-1 / (2 * x2))) * A / B
C = -y + 14
return C / 15
def evaluate_true(self, X: Tensor) -> Tensor:
branin = self._branin(X)
currin = self._currin(X)
return torch.stack([-branin, -currin], dim=-1)
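# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# the last input dimension is the fidelity `s`; evaluating the same design at
# `s = 0` and `s = 1` shows the effect of the fidelity parameter.
def _demo_momf_branin_currin() -> None:
    problem = MOMFBraninCurrin()
    x = torch.rand(2)
    X = torch.stack(
        [torch.cat([x, torch.zeros(1)]), torch.cat([x, torch.ones(1)])], dim=0
    )
    Y = problem(X)  # `2 x 2` tensor: objectives at low (s=0) and high (s=1) fidelity
    assert Y.shape == (2, 2)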
class MOMFPark(MultiObjectiveTestProblem):
r"""Modified Park test functions for multi-objective multi-fidelity optimization.
(4+1)-dimensional function with domain `[0,1]^5` where the last dimension
is the fidelity parameter `s`. See [Irshad2021]_ for more details.
The first modified Park function is
P1(x, s)=A*(T1(x,s)+T2(x,s)-B)/22-0.8
The second modified Park function is
        P2(x,s) = A * (5 - 2/3 * exp(x1 + x2) + x4 * sin(x3) * A - x3 + B) / 4 - 0.7
Here
T_1(x,s) = (x1+0.001*(1-s))/2*sqrt(1+(x2+x3**2)*x4/(x1**2))
T_2(x, s) = (x1+3*x4)*exp(1+sin(x3))
and `A(s)=(0.9+0.1*s)`, `B(s)=0.1*(1-s)`.
"""
dim = 5
num_objectives = 2
_bounds = [(0.0, 1.0) for _ in range(dim)]
_ref_point = [0, 0]
_max_hv = 0.08551927363087991
def _transform(self, X: Tensor) -> Tensor:
x1 = X[..., 0]
x2 = X[..., 1]
x3 = X[..., 2]
x4 = X[..., 3]
s = X[..., 4]
_x1 = 1 - 2 * (x1 - 0.6) ** 2
_x2 = x2
_x3 = 1 - 3 * (x3 - 0.5) ** 2
_x4 = 1 - (x4 - 0.8) ** 2
return torch.stack([_x1, _x2, _x3, _x4, s], dim=-1)
def _park1(self, X: Tensor) -> Tensor:
x1 = X[..., 0]
x2 = X[..., 1]
x3 = X[..., 2]
x4 = X[..., 3]
s = X[..., 4]
T1 = (
(x1 + 1e-3 * (1 - s))
/ 2
* torch.sqrt(1 + (x2 + x3**2) * x4 / (x1**2 + 1e-4))
)
T2 = (x1 + 3 * x4) * torch.exp(1 + torch.sin(x3))
A = 0.9 + 0.1 * s
B = 0.1 * (1 - s)
return A * (T1 + T2 - B) / 22 - 0.8
def _park2(self, X: Tensor) -> Tensor:
x1 = X[..., 0]
x2 = X[..., 1]
x3 = X[..., 2]
x4 = X[..., 3]
s = X[..., 4]
A = 0.9 + 0.1 * s
B = 0.1 * (1 - s)
return (
A * (5 - 2 / 3 * torch.exp(x1 + x2) + x4 * torch.sin(x3) * A - x3 + B) / 4
- 0.7
)
def evaluate_true(self, X: Tensor) -> Tensor:
X = self._transform(X)
park1 = self._park1(X)
park2 = self._park2(X)
return torch.stack([-park1, -park2], dim=-1)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.test_functions.multi_fidelity import (
AugmentedBranin,
AugmentedHartmann,
AugmentedRosenbrock,
)
from botorch.test_functions.multi_objective import (
BNH,
BraninCurrin,
C2DTLZ2,
CarSideImpact,
CONSTR,
ConstrainedBraninCurrin,
DiscBrake,
DTLZ1,
DTLZ2,
DTLZ3,
DTLZ4,
DTLZ5,
DTLZ7,
GMM,
MW7,
OSY,
Penicillin,
SRN,
ToyRobust,
VehicleSafety,
WeldedBeam,
ZDT1,
ZDT2,
ZDT3,
)
from botorch.test_functions.multi_objective_multi_fidelity import (
MOMFBraninCurrin,
MOMFPark,
)
from botorch.test_functions.synthetic import (
Ackley,
Beale,
Branin,
Bukin,
Cosine8,
DixonPrice,
DropWave,
EggHolder,
Griewank,
Hartmann,
HolderTable,
Levy,
Michalewicz,
Powell,
PressureVessel,
Rastrigin,
Rosenbrock,
Shekel,
SixHumpCamel,
SpeedReducer,
StyblinskiTang,
SyntheticTestFunction,
TensionCompressionString,
ThreeHumpCamel,
WeldedBeamSO,
)
__all__ = [
"Ackley",
"AugmentedBranin",
"AugmentedHartmann",
"AugmentedRosenbrock",
"Beale",
"BNH",
"Branin",
"BraninCurrin",
"Bukin",
"CONSTR",
"Cosine8",
"CarSideImpact",
"ConstrainedBraninCurrin",
"C2DTLZ2",
"DiscBrake",
"DixonPrice",
"DropWave",
"DTLZ1",
"DTLZ2",
"DTLZ3",
"DTLZ4",
"DTLZ5",
"DTLZ7",
"EggHolder",
"GMM",
"Griewank",
"Hartmann",
"HolderTable",
"Levy",
"Michalewicz",
"MW7",
"OSY",
"Penicillin",
"Powell",
"PressureVessel",
"Rastrigin",
"Rosenbrock",
"Shekel",
"SixHumpCamel",
"SpeedReducer",
"SRN",
"StyblinskiTang",
"SyntheticTestFunction",
"TensionCompressionString",
"ThreeHumpCamel",
"ToyRobust",
"VehicleSafety",
"WeldedBeam",
"WeldedBeamSO",
"ZDT1",
"ZDT2",
"ZDT3",
"MOMFBraninCurrin",
"MOMFPark",
]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Optional, Tuple
import torch
from botorch.test_functions.synthetic import SyntheticTestFunction
from torch import Tensor
class Ishigami(SyntheticTestFunction):
r"""Ishigami test function.
three-dimensional function (usually evaluated on `[-pi, pi]^3`):
f(x) = sin(x_1) + a sin(x_2)^2 + b x_3^4 sin(x_1)
Here `a` and `b` are constants where a=7 and b=0.1 or b=0.05
Proposed to test sensitivity analysis methods because it exhibits strong
nonlinearity and nonmonotonicity and a peculiar dependence on x_3.
"""
def __init__(
self, b: float = 0.1, noise_std: Optional[float] = None, negate: bool = False
) -> None:
r"""
Args:
b: the b constant, should be 0.1 or 0.05.
noise_std: Standard deviation of the observation noise.
            negate: If True, negate the objective.
"""
self._optimizers = None
if b not in (0.1, 0.05):
raise ValueError("b parameter should be 0.1 or 0.05")
self.dim = 3
if b == 0.1:
self.si = [0.3138, 0.4424, 0]
self.si_t = [0.558, 0.442, 0.244]
self.s_ij = [0, 0.244, 0]
self.dgsm_gradient = [-0.0004, -0.0004, -0.0004]
self.dgsm_gradient_abs = [1.9, 4.45, 1.97]
self.dgsm_gradient_square = [7.7, 24.5, 11]
elif b == 0.05:
self.si = [0.218, 0.687, 0]
self.si_t = [0.3131, 0.6868, 0.095]
self.s_ij = [0, 0.094, 0]
self.dgsm_gradient = [-0.0002, -0.0002, -0.0002]
self.dgsm_gradient_abs = [1.26, 4.45, 1.97]
self.dgsm_gradient_square = [2.8, 24.5, 11]
self._bounds = [(-math.pi, math.pi) for _ in range(self.dim)]
self.b = b
super().__init__(noise_std=noise_std, negate=negate)
@property
def _optimal_value(self) -> float:
raise NotImplementedError
def compute_dgsm(self, X: Tensor) -> Tuple[List[float], List[float], List[float]]:
r"""Compute derivative global sensitivity measures.
        This function can be called separately to estimate the DGSM measures.
        The exact global integrals of these values are already available as the
        attributes `dgsm_gradient`, `dgsm_gradient_abs`, and `dgsm_gradient_square`.
Args:
X: Set of points at which to compute derivative measures.
Returns: The average gradient, absolute gradient, and square gradients.
"""
dx_1 = torch.cos(X[..., 0]) * (1 + self.b * (X[..., 2] ** 4))
dx_2 = 14 * torch.cos(X[..., 1]) * torch.sin(X[..., 1])
        dx_3 = 4 * self.b * (X[..., 2] ** 3) * torch.sin(X[..., 0])
        gradient_measure = [
            torch.mean(dx_1).item(),
            torch.mean(dx_2).item(),
            torch.mean(dx_3).item(),
        ]
gradient_absolute_measure = [
torch.mean(torch.abs(dx_1)).item(),
torch.mean(torch.abs(dx_2)).item(),
torch.mean(torch.abs(dx_3)).item(),
]
gradient_square_measure = [
torch.mean(torch.pow(dx_1, 2)).item(),
torch.mean(torch.pow(dx_2, 2)).item(),
torch.mean(torch.pow(dx_3, 2)).item(),
]
return gradient_measure, gradient_absolute_measure, gradient_square_measure
def evaluate_true(self, X: Tensor) -> Tensor:
self.to(device=X.device, dtype=X.dtype)
t = (
torch.sin(X[..., 0])
+ 7 * (torch.sin(X[..., 1]) ** 2)
+ self.b * (X[..., 2] ** 4) * torch.sin(X[..., 0])
)
return t
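# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# evaluates the Ishigami function and its derivative-based global sensitivity
# measures on uniform random points in `[-pi, pi]^3`.
def _demo_ishigami() -> None:
    problem = Ishigami(b=0.1)
    X = math.pi * (2 * torch.rand(1024, 3) - 1)
    Y = problem(X)
    grad, grad_abs, grad_sq = problem.compute_dgsm(X)
    assert Y.shape == (1024,) and len(grad_abs) == 3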
class Gsobol(SyntheticTestFunction):
r"""Gsobol test function.
d-dimensional function (usually evaluated on `[0, 1]^d`):
f(x) = Prod_{i=1}\^{d} ((\|4x_i-2\|+a_i)/(1+a_i)), a_i >=0
common combinations of dimension and a vector:
dim=8, a= [0, 1, 4.5, 9, 99, 99, 99, 99]
dim=6, a=[0, 0.5, 3, 9, 99, 99]
dim = 15, a= [1, 2, 5, 10, 20, 50, 100, 500, 1000, ..., 1000]
Proposed to test sensitivity analysis methods
    First-order Sobol indices have the closed-form expression S_i = V_i / V with:
V_i= 1/(3(1+a_i)\^2)
V= Prod_{i=1}\^{d} (1+V_i) - 1
"""
def __init__(
self,
dim: int,
a: List = None,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
r"""
Args:
dim: Dimensionality of the problem. If 6, 8, or 15, will use standard a.
a: a parameter, unless dim is 6, 8, or 15.
noise_std: Standard deviation of observation noise.
            negate: Return negative of function.
"""
self._optimizers = None
self.dim = dim
self._bounds = [(0, 1) for _ in range(self.dim)]
if self.dim == 6:
self.a = [0, 0.5, 3, 9, 99, 99]
elif self.dim == 8:
self.a = [0, 1, 4.5, 9, 99, 99, 99, 99]
elif self.dim == 15:
self.a = [
1,
2,
5,
10,
20,
50,
100,
500,
1000,
1000,
1000,
1000,
1000,
1000,
1000,
]
else:
self.a = a
self.optimal_sobol_indicies()
super().__init__(noise_std=noise_std, negate=negate)
@property
def _optimal_value(self) -> float:
raise NotImplementedError
def optimal_sobol_indicies(self):
vi = []
for i in range(self.dim):
vi.append(1 / (3 * ((1 + self.a[i]) ** 2)))
self.vi = Tensor(vi)
self.V = torch.prod((1 + self.vi)) - 1
self.si = self.vi / self.V
si_t = []
for i in range(self.dim):
si_t.append(
(
self.vi[i]
* torch.prod(self.vi[:i] + 1)
* torch.prod(self.vi[i + 1 :] + 1)
)
/ self.V
)
self.si_t = Tensor(si_t)
def evaluate_true(self, X: Tensor) -> Tensor:
self.to(device=X.device, dtype=X.dtype)
t = 1
for i in range(self.dim):
t = t * (torch.abs(4 * X[..., i] - 2) + self.a[i]) / (1 + self.a[i])
return t
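# Illustrative usage sketch (a hypothetical helper, not part of the library API):
# for the standard 6-dimensional setup the closed-form first-order Sobol indices
# are available as `problem.si` and sum to at most one.
def _demo_gsobol() -> None:
    problem = Gsobol(dim=6)  # uses the standard a = [0, 0.5, 3, 9, 99, 99]
    X = torch.rand(256, problem.dim)
    Y = problem(X)
    assert Y.shape == (256,)
    assert torch.all(problem.si >= 0)
    assert float(problem.si.sum()) <= 1.0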
class Morris(SyntheticTestFunction):
r"""Morris test function.
20-dimensional function (usually evaluated on `[0, 1]^20`):
f(x) = sum_{i=1}\^20 beta_i w_i + sum_{i<j}\^20 beta_ij w_i w_j
+ sum_{i<j<l}\^20 beta_ijl w_i w_j w_l + 5w_1 w_2 w_3 w_4
Proposed to test sensitivity analysis methods
"""
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""
Args:
noise_std: Standard deviation of observation noise.
negate: Return negative of function.
"""
self._optimizers = None
self.dim = 20
self._bounds = [(0, 1) for _ in range(self.dim)]
self.si = [
0.005,
0.008,
0.017,
0.009,
0.016,
0,
0.069,
0.1,
0.15,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
super().__init__(noise_std=noise_std, negate=negate)
@property
def _optimal_value(self) -> float:
raise NotImplementedError
def evaluate_true(self, X: Tensor) -> Tensor:
self.to(device=X.device, dtype=X.dtype)
W = []
t1 = 0
t2 = 0
t3 = 0
for i in range(self.dim):
if i in [2, 4, 6]:
wi = 2 * (1.1 * X[..., i] / (X[..., i] + 0.1) - 0.5)
else:
wi = 2 * (X[..., i] - 0.5)
W.append(wi)
if i < 10:
betai = 20
else:
betai = (-1) ** (i + 1)
t1 = t1 + betai * wi
for i in range(self.dim):
for j in range(i + 1, self.dim):
if i < 6 or j < 6:
beta_ij = -15
else:
beta_ij = (-1) ** (i + j + 2)
t2 = t2 + beta_ij * W[i] * W[j]
for k in range(j + 1, self.dim):
if i < 5 or j < 5 or k < 5:
beta_ijk = -10
else:
beta_ijk = 0
t3 = t3 + beta_ijk * W[i] * W[j] * W[k]
t4 = 5 * W[0] * W[1] * W[2] * W[3]
return t1 + t2 + t3 + t4
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional, Tuple
import torch
from torch import Tensor
def round_nearest(
X: Tensor, increment: float, bounds: Optional[Tuple[float, float]]
) -> Tensor:
r"""Rounds the input tensor to the nearest multiple of `increment`.
Args:
X: The input to be rounded.
increment: The increment to round to.
bounds: An optional tuple of two floats representing the lower and upper
bounds on `X`. If provided, this will round to the nearest multiple
of `increment` that lies within the bounds.
Returns:
The rounded input.
"""
X_round = torch.round(X / increment) * increment
if bounds is not None:
X_round = torch.where(X_round < bounds[0], X_round + increment, X_round)
X_round = torch.where(X_round > bounds[1], X_round - increment, X_round)
return X_round
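# Illustrative usage sketch (hypothetical example values, not part of the library API):
# round to the nearest multiple of 0.25 while staying within the provided bounds.
def _demo_round_nearest() -> None:
    X = torch.tensor([0.02, 0.3, 0.96])
    rounded = round_nearest(X, increment=0.25, bounds=(0.1, 0.9))
    # 0.02 rounds to 0.0 and is pushed up to 0.25; 0.96 rounds to 1.0 and is pushed
    # down to 0.75; 0.3 rounds to 0.25, which already lies within the bounds.
    assert torch.allclose(rounded, torch.tensor([0.25, 0.25, 0.75]))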
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Base class for test functions for optimization benchmarks.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Optional, Tuple
import torch
from botorch.exceptions.errors import InputDataError
from torch import Tensor
from torch.nn import Module
class BaseTestProblem(Module, ABC):
r"""Base class for test functions."""
dim: int
_bounds: List[Tuple[float, float]]
_check_grad_at_opt: bool = True
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""Base constructor for test functions.
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
super().__init__()
self.noise_std = noise_std
self.negate = negate
if len(self._bounds) != self.dim:
raise InputDataError(
"Expected the bounds to match the dimensionality of the domain. "
f"Got {self.dim=} and {len(self._bounds)=}."
)
self.register_buffer(
"bounds", torch.tensor(self._bounds, dtype=torch.double).transpose(-1, -2)
)
def forward(self, X: Tensor, noise: bool = True) -> Tensor:
r"""Evaluate the function on a set of points.
Args:
X: A `batch_shape x d`-dim tensor of point(s) at which to evaluate the
function.
noise: If `True`, add observation noise as specified by `noise_std`.
Returns:
            A `batch_shape`-dim tensor of function evaluations.
"""
batch = X.ndimension() > 1
X = X if batch else X.unsqueeze(0)
f = self.evaluate_true(X=X)
if noise and self.noise_std is not None:
f += self.noise_std * torch.randn_like(f)
if self.negate:
f = -f
return f if batch else f.squeeze(0)
@abstractmethod
def evaluate_true(self, X: Tensor) -> Tensor:
r"""Evaluate the function (w/o observation noise) on a set of points."""
pass # pragma: no cover
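# Illustrative sketch (a hypothetical class, not part of the library API): a minimal
# custom problem following the `BaseTestProblem` contract above -- set `dim` and
# `_bounds` as class attributes and implement `evaluate_true`.
class _ExampleSphere(BaseTestProblem):
    r"""Hypothetical 2-dimensional sphere function, used only as a usage example."""
    dim = 2
    _bounds = [(-1.0, 1.0), (-1.0, 1.0)]
    def evaluate_true(self, X: Tensor) -> Tensor:
        return X.pow(2).sum(dim=-1)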
class ConstrainedBaseTestProblem(BaseTestProblem, ABC):
r"""Base class for test functions with constraints.
In addition to one or more objectives, a problem may have a number of outcome
constraints of the form `c_i(x) >= 0` for `i=1, ..., n_c`.
This base class provides common functionality for such problems.
"""
num_constraints: int
_check_grad_at_opt: bool = False
def evaluate_slack(self, X: Tensor, noise: bool = True) -> Tensor:
r"""Evaluate the constraint slack on a set of points.
        Constraint `i` is assumed to be feasible at `x` if the associated slack
`c_i(x)` is positive. Zero slack means that the constraint is active. Negative
slack means that the constraint is violated.
Args:
X: A `batch_shape x d`-dim tensor of point(s) at which to evaluate the
                constraint slacks: `c_1(X), ..., c_{n_c}(X)`.
noise: If `True`, add observation noise to the slack as specified by
`noise_std`.
Returns:
A `batch_shape x n_c`-dim tensor of constraint slack (where positive slack
corresponds to the constraint being feasible).
"""
cons = self.evaluate_slack_true(X=X)
if noise and self.noise_std is not None:
# TODO: Allow different noise levels for objective and constraints (and
# different noise levels between different constraints)
cons += self.noise_std * torch.randn_like(cons)
return cons
def is_feasible(self, X: Tensor, noise: bool = True) -> Tensor:
r"""Evaluate whether the constraints are feasible on a set of points.
Args:
X: A `batch_shape x d`-dim tensor of point(s) at which to evaluate the
constraints.
noise: If `True`, add observation noise as specified by `noise_std`.
Returns:
A `batch_shape`-dim boolean tensor that is `True` iff all constraint
slacks (potentially including observation noise) are positive.
"""
return (self.evaluate_slack(X=X, noise=noise) >= 0.0).all(dim=-1)
@abstractmethod
def evaluate_slack_true(self, X: Tensor) -> Tensor:
r"""Evaluate the constraint slack (w/o observation noise) on a set of points.
Args:
X: A `batch_shape x d`-dim tensor of point(s) at which to evaluate the
                constraint slacks: `c_1(X), ..., c_{n_c}(X)`.
Returns:
A `batch_shape x n_c`-dim tensor of constraint slack (where positive slack
corresponds to the constraint being feasible).
"""
pass # pragma: no cover
class MultiObjectiveTestProblem(BaseTestProblem):
r"""Base class for test multi-objective test functions.
TODO: add a pareto distance function that returns the distance
between a provided point and the closest point on the true pareto front.
"""
num_objectives: int
_ref_point: List[float]
_max_hv: float
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""Base constructor for multi-objective test functions.
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the objectives.
"""
super().__init__(noise_std=noise_std, negate=negate)
ref_point = torch.tensor(self._ref_point, dtype=torch.float)
if negate:
ref_point *= -1
self.register_buffer("ref_point", ref_point)
@property
def max_hv(self) -> float:
try:
return self._max_hv
except AttributeError:
raise NotImplementedError(
f"Problem {self.__class__.__name__} does not specify maximal "
"hypervolume."
)
def gen_pareto_front(self, n: int) -> Tensor:
r"""Generate `n` pareto optimal points."""
raise NotImplementedError
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Synthetic functions for multi-fidelity optimization benchmarks.
"""
from __future__ import annotations
import math
from typing import Optional
import torch
from botorch.test_functions.synthetic import SyntheticTestFunction
from torch import Tensor
class AugmentedBranin(SyntheticTestFunction):
r"""Augmented Branin test function for multi-fidelity optimization.
    3-dimensional function with domain `[-5, 10] x [0, 15] x [0, 1]`, where
    the last dimension is the fidelity parameter:
B(x) = (x_2 - (b - 0.1 * (1 - x_3))x_1^2 + c x_1 - r)^2 +
10 (1-t) cos(x_1) + 10
    Here `b`, `c`, `r` and `t` are constants where `b = 5.1 / (4 * math.pi ** 2)`,
    `c = 5 / math.pi`, `r = 6`, and `t = 1 / (8 * math.pi)`.
B has infinitely many minimizers with `x_1 = -pi, pi, 3pi`
and `B_min = 0.397887`
"""
dim = 3
_bounds = [(-5.0, 10.0), (0.0, 15.0), (0.0, 1.0)]
_optimal_value = 0.397887
    _optimizers = [  # this is a subset; there are infinitely many optimizers
(-math.pi, 12.275, 1),
(math.pi, 1.3867356039019576, 0.1),
(math.pi, 1.781519779945532, 0.5),
(math.pi, 2.1763039559891064, 0.9),
]
def evaluate_true(self, X: Tensor) -> Tensor:
t1 = (
X[..., 1]
            - (5.1 / (4 * math.pi**2) - 0.1 * (1 - X[..., 2])) * X[..., 0] ** 2
+ 5 / math.pi * X[..., 0]
- 6
)
t2 = 10 * (1 - 1 / (8 * math.pi)) * torch.cos(X[..., 0])
return t1**2 + t2 + 10
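# A minimal evaluation sketch for AugmentedBranin: the last input dimension is the
# fidelity, so setting it to 1.0 recovers the standard Branin values (the first
# point below is a full-fidelity global minimizer).
def _augmented_branin_sketch() -> None:
    import math
    import torch
    problem = AugmentedBranin()
    X = torch.tensor([[-math.pi, 12.275, 1.0], [-math.pi, 12.275, 0.5]])
    vals = problem(X)  # vals[0] is approximately 0.397887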
class AugmentedHartmann(SyntheticTestFunction):
r"""Augmented Hartmann synthetic test function.
7-dimensional function (typically evaluated on `[0, 1]^7`), where the last
dimension is the fidelity parameter.
H(x) = -(ALPHA_1 - 0.1 * (1-x_7)) * exp(- sum_{j=1}^6 A_1j (x_j - P_1j) ** 2) -
sum_{i=2}^4 ALPHA_i exp( - sum_{j=1}^6 A_ij (x_j - P_ij) ** 2)
H has a unique global minimizer
`x = [0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1.0]`
with `H_min = -3.32237`
"""
dim = 7
_bounds = [(0.0, 1.0) for _ in range(7)]
_optimal_value = -3.32237
_optimizers = [(0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1.0)]
_check_grad_at_opt = False
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
super().__init__(noise_std=noise_std, negate=negate)
self.register_buffer("ALPHA", torch.tensor([1.0, 1.2, 3.0, 3.2]))
A = [
[10, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14],
]
P = [
[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381],
]
self.register_buffer("A", torch.tensor(A, dtype=torch.float))
self.register_buffer("P", torch.tensor(P, dtype=torch.float))
def evaluate_true(self, X: Tensor) -> Tensor:
self.to(device=X.device, dtype=X.dtype)
inner_sum = torch.sum(
self.A * (X[..., :6].unsqueeze(-2) - 0.0001 * self.P) ** 2, dim=-1
)
alpha1 = self.ALPHA[0] - 0.1 * (1 - X[..., 6])
H = (
-(torch.sum(self.ALPHA[1:] * torch.exp(-inner_sum)[..., 1:], dim=-1))
- alpha1 * torch.exp(-inner_sum)[..., 0]
)
return H
class AugmentedRosenbrock(SyntheticTestFunction):
r"""Augmented Rosenbrock synthetic test function for multi-fidelity optimization.
    d-dimensional function (usually evaluated on `[-5, 10]^(d-2) x [0, 1]^2`),
where the last two dimensions are the fidelity parameters:
f(x) = sum_{i=1}^{d-1} (100 (x_{i+1} - x_i^2 + 0.1 * (1-x_{d-1}))^2 +
(x_i - 1 + 0.1 * (1 - x_d)^2)^2)
    f has one minimizer for its global minimum at `z_1 = (1, 1, ..., 1)` with
    `f(z_1) = 0.0`.
"""
_optimal_value = 0.0
def __init__(
self, dim=3, noise_std: Optional[float] = None, negate: bool = False
) -> None:
r"""
Args:
dim: The (input) dimension. Must be at least 3.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
if dim < 3:
raise ValueError(
"AugmentedRosenbrock must be defined it at least 3 dimensions"
)
self.dim = dim
self._bounds = [(-5.0, 10.0) for _ in range(self.dim)]
self._optimizers = [tuple(1.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate)
def evaluate_true(self, X: Tensor) -> Tensor:
X_curr = X[..., :-3]
X_next = X[..., 1:-2]
t1 = 100 * (X_next - X_curr**2 + 0.1 * (1 - X[..., -2:-1])) ** 2
t2 = (X_curr - 1 + 0.1 * (1 - X[..., -1:]) ** 2) ** 2
return -((t1 + t2).sum(dim=-1))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Preference acquisition functions. This includes:
Analytical EUBO acquisition function as introduced in [Lin2022preference]_.
.. [Lin2022preference]
Lin, Z.J., Astudillo, R., Frazier, P.I. and Bakshy, E. Preference Exploration
for Efficient Bayesian Optimization with Multiple Outcomes. International
Conference on Artificial Intelligence and Statistics (AISTATS), 2022.
.. [Houlsby2011bald]
Houlsby, N., Huszár, F., Ghahramani, Z. and Lengyel, M.
Bayesian Active Learning for Gaussian Process Classification.
NIPS Workshop on Bayesian optimization, experimental design and bandits:
Theory and applications, 2011.
"""
from __future__ import annotations
from typing import Any, Optional
import torch
from botorch.acquisition import AnalyticAcquisitionFunction
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.exceptions.errors import UnsupportedError
from botorch.models.deterministic import DeterministicModel
from botorch.models.model import Model
from botorch.utils.transforms import match_batch_shape, t_batch_mode_transform
from torch import Tensor
from torch.distributions import Bernoulli, Normal
SIGMA_JITTER = 1e-8
class AnalyticExpectedUtilityOfBestOption(AnalyticAcquisitionFunction):
r"""Analytic Prefential Expected Utility of Best Options, i.e., Analytical EUBO"""
def __init__(
self,
pref_model: Model,
outcome_model: Optional[DeterministicModel] = None,
previous_winner: Optional[Tensor] = None,
) -> None:
r"""Analytic implementation of Expected Utility of the Best Option under the
Laplace model (assumes a PairwiseGP is used as the preference model) as
proposed in [Lin2022preference]_.
Args:
pref_model: The preference model that maps the outcomes (i.e., Y) to
scalar-valued utility.
outcome_model: A deterministic model that maps parameters (i.e., X) to
outcomes (i.e., Y). The outcome model f defines the search space of
                Y = f(X). If `outcome_model` is None, we are directly calculating
                EUBO on the parameter space. When used with `OneSamplePosteriorDrawModel`,
we are obtaining EUBO-zeta as described in [Lin2022preference]_.
previous_winner: Tensor representing the previous winner in the Y space.
"""
super().__init__(model=pref_model)
# ensure the model is in eval mode
self.add_module("outcome_model", outcome_model)
self.register_buffer("previous_winner", previous_winner)
tkwargs = {
"dtype": pref_model.datapoints.dtype,
"device": pref_model.datapoints.device,
}
std_norm = torch.distributions.normal.Normal(
torch.zeros(1, **tkwargs),
torch.ones(1, **tkwargs),
)
self.std_norm = std_norm
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate analytical EUBO on the candidate set X.
Args:
X: A `batch_shape x q x d`-dim Tensor, where `q = 2` if `previous_winner`
is not `None`, and `q = 1` otherwise.
Returns:
The acquisition value for each batch as a tensor of shape `batch_shape`.
"""
if not (
((X.shape[-2] == 2) and (self.previous_winner is None))
or ((X.shape[-2] == 1) and (self.previous_winner is not None))
):
raise UnsupportedError(
f"{self.__class__.__name__} only support q=2 or q=1"
"with a previous winner specified"
)
Y = X if self.outcome_model is None else self.outcome_model(X)
if self.previous_winner is not None:
Y = torch.cat([Y, match_batch_shape(self.previous_winner, Y)], dim=-2)
pref_posterior = self.model.posterior(Y)
pref_mean = pref_posterior.mean.squeeze(-1)
pref_cov = pref_posterior.covariance_matrix
delta = pref_mean[..., 0] - pref_mean[..., 1]
w = torch.tensor([1.0, -1.0], dtype=pref_cov.dtype, device=pref_cov.device)
var = w @ pref_cov @ w
sigma = torch.sqrt(var.clamp(min=SIGMA_JITTER))
u = delta / sigma
ucdf = self.std_norm.cdf(u)
updf = torch.exp(self.std_norm.log_prob(u))
acqf_val = sigma * (updf + u * ucdf)
if self.previous_winner is None:
acqf_val = acqf_val + pref_mean[..., 1]
return acqf_val
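# A minimal usage sketch for AnalyticExpectedUtilityOfBestOption (EUBO), assuming a
# PairwiseGP preference model; the outcome data and comparisons below are made up
# and the model is used with default (unfitted) hyperparameters for brevity.
def _eubo_usage_sketch() -> None:
    import torch
    from botorch.models.pairwise_gp import PairwiseGP
    train_Y = torch.rand(4, 2)  # 4 previously observed outcomes with 2 attributes
    train_comps = torch.tensor([[0, 1], [2, 3]])  # item 0 preferred over 1, 2 over 3
    pref_model = PairwiseGP(train_Y, train_comps)
    eubo = AnalyticExpectedUtilityOfBestOption(pref_model=pref_model)
    # With outcome_model=None, candidates live directly in the outcome space (q=2).
    cand_Y = torch.rand(5, 2, 2)  # batch_shape=5, q=2, m=2
    acq_vals = eubo(cand_Y)  # shape: (5,)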
class PairwiseBayesianActiveLearningByDisagreement(MCAcquisitionFunction):
r"""MC Bayesian Active Learning by Disagreement"""
def __init__(
self,
pref_model: Model,
outcome_model: Optional[DeterministicModel] = None,
num_samples: Optional[int] = 1024,
std_noise: Optional[float] = 0.0,
**kwargs: Any,
) -> None:
"""
Monte Carlo implementation of Bayesian Active Learning by Disagreement (BALD)
proposed in [Houlsby2011bald]_.
Args:
pref_model: The preference model that maps the outcomes (i.e., Y) to
scalar-valued utility.
outcome_model: A deterministic model that maps parameters (i.e., X) to
outcomes (i.e., Y). The outcome model f defines the search space of
                Y = f(X). If `outcome_model` is None, we are directly calculating BALD on
the parameter space.
num_samples: number of samples to approximate the conditional_entropy.
std_noise: Additional observational noise to include. Defaults to 0.
"""
super().__init__(model=pref_model)
# ensure the model is in eval mode
self.add_module("outcome_model", outcome_model)
self.num_samples = num_samples
# assuming the relative observation noise is fixed at 1.0 (e.g., in PairwiseGP)
self.std_noise = std_noise
self.std_normal = Normal(0, 1)
@t_batch_mode_transform(expected_q=2)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate MC BALD on the candidate set `X`.
Args:
X: A `batch_shape x 2 x d`-dim Tensor of t-batches with `q=2`
`d`-dim design points each.
Returns:
A `batch_shape'`-dim Tensor of MC BALD values at the given
design points pair `X`, where `batch_shape'` is the broadcasted
batch shape of model and input `X`.
"""
Y = X if self.outcome_model is None else self.outcome_model(X)
pref_posterior = self.model.posterior(Y)
pref_mean = pref_posterior.mean.squeeze(-1)
pref_cov = pref_posterior.covariance_matrix
mu = pref_mean[..., 0] - pref_mean[..., 1]
w = torch.tensor([1.0, -1.0], dtype=pref_cov.dtype, device=pref_cov.device)
var = 2 * self.std_noise + w @ pref_cov @ w
sigma = torch.sqrt(var.clamp(min=SIGMA_JITTER))
# eq (3) in Houlsby, et al. (2011)
posterior_entropies = Bernoulli(
self.std_normal.cdf(mu / torch.sqrt(var + 1))
).entropy()
# Sample-based approx to eq (4) in Houlsby, et al. (2011)
obj_samples = self.std_normal.cdf(
Normal(loc=mu, scale=sigma).rsample(torch.Size([self.num_samples]))
)
sample_entropies = Bernoulli(obj_samples).entropy()
conditional_entropies = sample_entropies.mean(dim=0)
return posterior_entropies - conditional_entropies
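# A small numeric sketch of the BALD quantity computed in `forward` above: the
# entropy of the marginal Bernoulli predictive minus the expected entropy under
# samples of the latent utility difference. mu and sigma below are made up.
def _bald_quantity_sketch() -> None:
    import torch
    from torch.distributions import Bernoulli, Normal
    std_normal = Normal(0.0, 1.0)
    mu, sigma = torch.tensor(0.3), torch.tensor(1.5)
    # eq (3) in Houlsby, et al. (2011): entropy of the marginal predictive
    posterior_entropy = Bernoulli(std_normal.cdf(mu / torch.sqrt(sigma**2 + 1))).entropy()
    # sample-based approximation to eq (4): expected conditional entropy
    f = Normal(mu, sigma).rsample(torch.Size([1024]))
    conditional_entropy = Bernoulli(std_normal.cdf(f)).entropy().mean()
    bald_value = posterior_entropy - conditional_entropy  # non-negative in expectation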
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Modules to add regularization to acquisition functions.
"""
from __future__ import annotations
import math
from typing import Any, Callable, List, Optional
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import AnalyticAcquisitionFunction
from botorch.acquisition.objective import GenericMCObjective
from botorch.exceptions import UnsupportedError
from torch import Tensor
class L2Penalty(torch.nn.Module):
r"""L2 penalty class to be added to any arbitrary acquisition function
to construct a PenalizedAcquisitionFunction."""
def __init__(self, init_point: Tensor):
r"""Initializing L2 regularization.
Args:
init_point: The "1 x dim" reference point against which
we want to regularize.
"""
super().__init__()
self.init_point = init_point
def forward(self, X: Tensor) -> Tensor:
r"""
Args:
X: A "batch_shape x q x dim" representing the points to be evaluated.
Returns:
A tensor of size "batch_shape" representing the acqfn for each q-batch.
"""
regularization_term = (
torch.linalg.norm((X - self.init_point), ord=2, dim=-1).max(dim=-1).values
** 2
)
return regularization_term
class L1Penalty(torch.nn.Module):
r"""L1 penalty class to be added to any arbitrary acquisition function
to construct a PenalizedAcquisitionFunction."""
def __init__(self, init_point: Tensor):
r"""Initializing L1 regularization.
Args:
init_point: The "1 x dim" reference point against which
we want to regularize.
"""
super().__init__()
self.init_point = init_point
def forward(self, X: Tensor) -> Tensor:
r"""
Args:
X: A "batch_shape x q x dim" representing the points to be evaluated.
Returns:
A tensor of size "batch_shape" representing the acqfn for each q-batch.
"""
regularization_term = (
torch.linalg.norm((X - self.init_point), ord=1, dim=-1).max(dim=-1).values
)
return regularization_term
class GaussianPenalty(torch.nn.Module):
r"""Gaussian penalty class to be added to any arbitrary acquisition function
to construct a PenalizedAcquisitionFunction."""
def __init__(self, init_point: Tensor, sigma: float):
r"""Initializing Gaussian regularization.
Args:
init_point: The "1 x dim" reference point against which
we want to regularize.
sigma: The parameter used in gaussian function.
"""
super().__init__()
self.init_point = init_point
self.sigma = sigma
def forward(self, X: Tensor) -> Tensor:
r"""
Args:
X: A "batch_shape x q x dim" representing the points to be evaluated.
Returns:
A tensor of size "batch_shape" representing the acqfn for each q-batch.
"""
sq_diff = torch.linalg.norm((X - self.init_point), ord=2, dim=-1) ** 2
pdf = torch.exp(sq_diff / 2 / self.sigma**2)
regularization_term = pdf.max(dim=-1).values
return regularization_term
class GroupLassoPenalty(torch.nn.Module):
r"""Group lasso penalty class to be added to any arbitrary acquisition function
to construct a PenalizedAcquisitionFunction."""
def __init__(self, init_point: Tensor, groups: List[List[int]]):
r"""Initializing Group-Lasso regularization.
Args:
init_point: The "1 x dim" reference point against which we want
to regularize.
groups: Groups of indices used in group lasso.
"""
super().__init__()
self.init_point = init_point
self.groups = groups
def forward(self, X: Tensor) -> Tensor:
r"""
X should be batch_shape x 1 x dim tensor. Evaluation for q-batch is not
implemented yet.
"""
if X.shape[-2] != 1:
raise NotImplementedError(
"group-lasso has not been implemented for q>1 yet."
)
regularization_term = group_lasso_regularizer(
X=X.squeeze(-2) - self.init_point, groups=self.groups
)
return regularization_term
def narrow_gaussian(X: Tensor, a: Tensor) -> Tensor:
return torch.exp(-0.5 * (X / a) ** 2)
def nnz_approx(X: Tensor, target_point: Tensor, a: Tensor) -> Tensor:
r"""Differentiable relaxation of ||X - target_point||_0
Args:
X: An `n x d` tensor of inputs.
        target_point: A tensor of size `d` corresponding to the target point.
a: A scalar tensor that controls the differentiable relaxation.
"""
d = X.shape[-1]
if d != target_point.shape[-1]:
raise ValueError("X and target_point have different shapes.")
return d - narrow_gaussian(X - target_point, a).sum(dim=-1, keepdim=True)
class L0Approximation(torch.nn.Module):
r"""Differentiable relaxation of the L0 norm using a Gaussian basis function."""
def __init__(self, target_point: Tensor, a: float = 1.0, **tkwargs: Any) -> None:
r"""Initializing L0 penalty with differentiable relaxation.
Args:
target_point: A tensor corresponding to the target point.
a: A hyperparameter that controls the differentiable relaxation.
"""
super().__init__()
self.target_point = target_point
# hyperparameter to control the differentiable relaxation in L0 norm function.
self.register_buffer("a", torch.tensor(a, **tkwargs))
def __call__(self, X: Tensor) -> Tensor:
return nnz_approx(X=X, target_point=self.target_point, a=self.a)
class L0PenaltyApprox(L0Approximation):
r"""Differentiable relaxation of the L0 norm to be added to any arbitrary
acquisition function to construct a PenalizedAcquisitionFunction."""
def __init__(self, target_point: Tensor, a: float = 1.0, **tkwargs: Any) -> None:
r"""Initializing L0 penalty with differentiable relaxation.
Args:
target_point: A tensor corresponding to the target point.
a: A hyperparameter that controls the differentiable relaxation.
"""
super().__init__(target_point=target_point, a=a, **tkwargs)
def __call__(self, X: Tensor) -> Tensor:
r"""
Args:
X: A "batch_shape x q x dim" representing the points to be evaluated.
Returns:
A tensor of size "batch_shape" representing the acqfn for each q-batch.
"""
return super().__call__(X=X).squeeze(dim=-1).min(dim=-1).values
class PenalizedAcquisitionFunction(AcquisitionFunction):
r"""Single-outcome acquisition function regularized by the given penalty.
The usage is similar to:
raw_acqf = NoisyExpectedImprovement(...)
penalty = GroupLassoPenalty(...)
acqf = PenalizedAcquisitionFunction(raw_acqf, penalty)
"""
def __init__(
self,
raw_acqf: AcquisitionFunction,
penalty_func: torch.nn.Module,
regularization_parameter: float,
) -> None:
r"""Initializing Group-Lasso regularization.
Args:
raw_acqf: The raw acquisition function that is going to be regularized.
penalty_func: The regularization function.
regularization_parameter: Regularization parameter used in optimization.
"""
super().__init__(model=raw_acqf.model)
self.raw_acqf = raw_acqf
self.penalty_func = penalty_func
self.regularization_parameter = regularization_parameter
def forward(self, X: Tensor) -> Tensor:
raw_value = self.raw_acqf(X=X)
penalty_term = self.penalty_func(X)
return raw_value - self.regularization_parameter * penalty_term
@property
def X_pending(self) -> Optional[Tensor]:
return self.raw_acqf.X_pending
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
if not isinstance(self.raw_acqf, AnalyticAcquisitionFunction):
self.raw_acqf.set_X_pending(X_pending=X_pending)
else:
raise UnsupportedError(
"The raw acquisition function is Analytic and does not account "
"for X_pending yet."
)
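# A minimal end-to-end sketch of PenalizedAcquisitionFunction, assuming a
# SingleTaskGP; the training data and regularization parameter are illustrative and
# no hyperparameter fitting is performed for brevity.
def _penalized_acqf_sketch() -> None:
    import torch
    from botorch.acquisition.analytic import ExpectedImprovement
    from botorch.models import SingleTaskGP
    train_X = torch.rand(10, 3, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    raw_acqf = ExpectedImprovement(model=model, best_f=train_Y.max())
    penalty = L2Penalty(init_point=train_X[-1:])  # regularize towards the last point
    acqf = PenalizedAcquisitionFunction(
        raw_acqf=raw_acqf, penalty_func=penalty, regularization_parameter=0.1
    )
    values = acqf(torch.rand(5, 1, 3, dtype=torch.double))  # batch_shape=5, q=1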
def group_lasso_regularizer(X: Tensor, groups: List[List[int]]) -> Tensor:
r"""Computes the group lasso regularization function for the given point.
Args:
X: A bxd tensor representing the points to evaluate the regularization at.
groups: List of indices of different groups.
Returns:
Computed group lasso norm of at the given points.
"""
return torch.sum(
torch.stack(
[
math.sqrt(len(g)) * torch.linalg.norm(X[..., g], ord=2, dim=-1)
for g in groups
],
dim=-1,
),
dim=-1,
)
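# A small numeric check of the group lasso norm above: for X = [3, 4, 2] and
# groups [[0, 1], [2]] the value is sqrt(2) * ||[3, 4]|| + sqrt(1) * ||[2]||.
def _group_lasso_sketch() -> None:
    import math
    import torch
    X = torch.tensor([3.0, 4.0, 2.0])
    val = group_lasso_regularizer(X=X, groups=[[0, 1], [2]])
    assert torch.isclose(val, torch.tensor(math.sqrt(2) * 5.0 + 2.0))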
class L1PenaltyObjective(torch.nn.Module):
r"""
L1 penalty objective class. An instance of this class can be added to any
arbitrary objective to construct a PenalizedMCObjective.
"""
def __init__(self, init_point: Tensor):
r"""Initializing L1 penalty objective.
Args:
init_point: The "1 x dim" reference point against which
we want to regularize.
"""
super().__init__()
self.init_point = init_point
def forward(self, X: Tensor) -> Tensor:
r"""
Args:
X: A "batch_shape x q x dim" representing the points to be evaluated.
Returns:
A "1 x batch_shape x q" tensor representing the penalty for each point.
The first dimension corresponds to the dimension of MC samples.
"""
return torch.linalg.norm((X - self.init_point), ord=1, dim=-1).unsqueeze(dim=0)
class PenalizedMCObjective(GenericMCObjective):
r"""Penalized MC objective.
    Allows one to construct a penalized MC objective by adding a penalty term to
    the original objective.
mc_acq(X) = objective(X) + penalty_objective(X)
Note: PenalizedMCObjective allows adding penalty at the MCObjective level,
different from the AcquisitionFunction level in PenalizedAcquisitionFunction.
Example:
>>> regularization_parameter = 0.01
>>> init_point = torch.zeros(3) # assume data dim is 3
>>> objective = lambda Y, X: torch.sqrt(Y).sum(dim=-1)
>>> l1_penalty_objective = L1PenaltyObjective(init_point=init_point)
>>> l1_penalized_objective = PenalizedMCObjective(
objective, l1_penalty_objective, regularization_parameter
)
>>> samples = sampler(posterior)
        >>> objective = l1_penalized_objective(samples, X=X)
"""
def __init__(
self,
objective: Callable[[Tensor, Optional[Tensor]], Tensor],
penalty_objective: torch.nn.Module,
regularization_parameter: float,
expand_dim: Optional[int] = None,
) -> None:
r"""Penalized MC objective.
Args:
objective: A callable `f(samples, X)` mapping a
`sample_shape x batch-shape x q x m`-dim Tensor `samples` and
an optional `batch-shape x q x d`-dim Tensor `X` to a
`sample_shape x batch-shape x q`-dim Tensor of objective values.
penalty_objective: A torch.nn.Module `f(X)` that takes in a
`batch-shape x q x d`-dim Tensor `X` and outputs a
`1 x batch-shape x q`-dim Tensor of penalty objective values.
regularization_parameter: weight of the penalty (regularization) term
expand_dim: dim to expand penalty_objective to match with objective when
fully bayesian model is used. If None, no expansion is performed.
"""
super().__init__(objective=objective)
self.penalty_objective = penalty_objective
self.regularization_parameter = regularization_parameter
self.expand_dim = expand_dim
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Evaluate the penalized objective on the samples.
Args:
samples: A `sample_shape x batch_shape x q x m`-dim Tensors of
samples from a model posterior.
X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if
the objective depends on the inputs explicitly.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of objective values
with penalty added for each point.
"""
obj = super().forward(samples=samples, X=X)
penalty_obj = self.penalty_objective(X)
        # when a fully Bayesian model is used, `expand_dim` is used to match the
        # shape between obj `sample_shape x batch-shape x mcmc_samples x q` and
        # penalty_obj `1 x batch-shape x q`
if self.expand_dim is not None:
# reshape penalty_obj to match the dim
penalty_obj = penalty_obj.unsqueeze(self.expand_dim)
return obj - self.regularization_parameter * penalty_obj
class L0PenaltyApproxObjective(L0Approximation):
r"""Differentiable relaxation of the L0 norm penalty objective class.
An instance of this class can be added to any arbitrary objective to
construct a PenalizedMCObjective.
"""
def __init__(self, target_point: Tensor, a: float = 1.0, **tkwargs: Any) -> None:
r"""Initializing L0 penalty with differentiable relaxation.
Args:
target_point: A tensor corresponding to the target point.
a: A hyperparameter that controls the differentiable relaxation.
"""
super().__init__(target_point=target_point, a=a, **tkwargs)
def __call__(self, X: Tensor) -> Tensor:
r"""
Args:
X: A "batch_shape x q x dim" representing the points to be evaluated.
Returns:
A "1 x batch_shape x q" tensor representing the penalty for each point.
The first dimension corresponds to the dimension of MC samples.
"""
return super().__call__(X=X).squeeze(dim=-1).unsqueeze(dim=0)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Objective Modules to be used with acquisition functions."""
from __future__ import annotations
import inspect
import warnings
from abc import ABC, abstractmethod
from typing import Callable, List, Optional, TYPE_CHECKING, Union
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError
from botorch.exceptions.warnings import InputDataWarning
from botorch.models.model import Model
from botorch.models.transforms.outcome import Standardize
from botorch.posteriors.gpytorch import GPyTorchPosterior, scalarize_posterior
from botorch.sampling import IIDNormalSampler, MCSampler
from botorch.utils import apply_constraints
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from linear_operator.operators.dense_linear_operator import to_linear_operator
from torch import Tensor
from torch.nn import Module
if TYPE_CHECKING:
from botorch.posteriors.posterior import Posterior # pragma: no cover
from botorch.posteriors.posterior_list import PosteriorList # pragma: no cover
class PosteriorTransform(Module, ABC):
r"""
Abstract base class for objectives that transform the posterior.
:meta private:
"""
@abstractmethod
def evaluate(self, Y: Tensor) -> Tensor:
r"""Evaluate the transform on a set of outcomes.
Args:
Y: A `batch_shape x q x m`-dim tensor of outcomes.
Returns:
A `batch_shape x q' [x m']`-dim tensor of transformed outcomes.
"""
pass # pragma: no cover
@abstractmethod
def forward(self, posterior) -> Posterior:
r"""Compute the transformed posterior.
Args:
posterior: The posterior to be transformed.
Returns:
The transformed posterior object.
"""
pass # pragma: no cover
# import DeterministicModel after PosteriorTransform to avoid circular import
from botorch.models.deterministic import DeterministicModel # noqa
class ScalarizedPosteriorTransform(PosteriorTransform):
r"""An affine posterior transform for scalarizing multi-output posteriors.
For a Gaussian posterior at a single point (`q=1`) with mean `mu` and
covariance matrix `Sigma`, this yields a single-output posterior with mean
    `weights^T * mu` and variance `weights^T Sigma weights`.
Example:
Example for a model with two outcomes:
>>> weights = torch.tensor([0.5, 0.25])
>>> posterior_transform = ScalarizedPosteriorTransform(weights)
>>> EI = ExpectedImprovement(
... model, best_f=0.1, posterior_transform=posterior_transform
... )
"""
scalarize: bool = True
def __init__(self, weights: Tensor, offset: float = 0.0) -> None:
r"""
Args:
weights: A one-dimensional tensor with `m` elements representing the
linear weights on the outputs.
offset: An offset to be added to posterior mean.
"""
if weights.dim() != 1:
raise ValueError("weights must be a one-dimensional tensor.")
super().__init__()
self.register_buffer("weights", weights)
self.offset = offset
def evaluate(self, Y: Tensor) -> Tensor:
r"""Evaluate the transform on a set of outcomes.
Args:
Y: A `batch_shape x q x m`-dim tensor of outcomes.
Returns:
A `batch_shape x q`-dim tensor of transformed outcomes.
"""
return self.offset + Y @ self.weights
def forward(
self, posterior: Union[GPyTorchPosterior, PosteriorList]
) -> GPyTorchPosterior:
r"""Compute the posterior of the affine transformation.
Args:
posterior: A posterior with the same number of outputs as the
elements in `self.weights`.
Returns:
A single-output posterior.
"""
return scalarize_posterior(
posterior=posterior, weights=self.weights, offset=self.offset
)
class ExpectationPosteriorTransform(PosteriorTransform):
r"""Transform the `batch x (q * n_w) x m` posterior into a `batch x q x m`
posterior of the expectation. The expectation is calculated over each
consecutive `n_w` block of points in the posterior.
This is intended for use with `InputPerturbation` or `AppendFeatures` for
optimizing the expectation over `n_w` points. This should not be used when
there are constraints present, since this does not take into account
the feasibility of the objectives.
Note: This is different than `ScalarizedPosteriorTransform` in that
this operates over the q-batch dimension.
"""
def __init__(self, n_w: int, weights: Optional[Tensor] = None) -> None:
r"""A posterior transform calculating the expectation over the q-batch
dimension.
Args:
n_w: The number of points in the q-batch of the posterior to compute
the expectation over. This corresponds to the size of the
`feature_set` of `AppendFeatures` or the size of the `perturbation_set`
of `InputPerturbation`.
weights: An optional `n_w x m`-dim tensor of weights. Can be used to
compute a weighted expectation. Weights are normalized before use.
"""
super().__init__()
if weights is not None:
if weights.dim() != 2 or weights.shape[0] != n_w:
raise ValueError("`weights` must be a tensor of size `n_w x m`.")
if torch.any(weights < 0):
raise ValueError("`weights` must be non-negative.")
else:
weights = torch.ones(n_w, 1)
# Normalize the weights.
weights = weights / weights.sum(dim=0)
self.register_buffer("weights", weights)
self.n_w = n_w
def evaluate(self, Y: Tensor) -> Tensor:
r"""Evaluate the expectation of a set of outcomes.
Args:
Y: A `batch_shape x (q * n_w) x m`-dim tensor of outcomes.
Returns:
A `batch_shape x q x m`-dim tensor of expectation outcomes.
"""
batch_shape, m = Y.shape[:-2], Y.shape[-1]
weighted_Y = Y.view(*batch_shape, -1, self.n_w, m) * self.weights.to(Y)
return weighted_Y.sum(dim=-2)
def forward(self, posterior: GPyTorchPosterior) -> GPyTorchPosterior:
r"""Compute the posterior of the expectation.
Args:
posterior: An `m`-outcome joint posterior over `q * n_w` points.
Returns:
An `m`-outcome joint posterior over `q` expectations.
"""
org_mvn = posterior.distribution
if getattr(org_mvn, "_interleaved", False):
raise UnsupportedError(
"`ExpectationPosteriorTransform` does not support "
"interleaved posteriors."
)
# Initialize the weight matrix of shape compatible with the mvn.
org_event_shape = org_mvn.event_shape
batch_shape = org_mvn.batch_shape
q = org_event_shape[0] // self.n_w
m = 1 if len(org_event_shape) == 1 else org_event_shape[-1]
tkwargs = {"device": org_mvn.loc.device, "dtype": org_mvn.loc.dtype}
weights = torch.zeros(q * m, q * self.n_w * m, **tkwargs)
# Make sure self.weights has the correct dtype/device and shape.
self.weights = self.weights.to(org_mvn.loc).expand(self.n_w, m)
# Fill in the non-zero entries of the weight matrix.
# We want each row to have non-zero weights for the corresponding
# `n_w` sized diagonal. The `m` outcomes are not interleaved.
for i in range(q * m):
weights[i, self.n_w * i : self.n_w * (i + 1)] = self.weights[:, i // q]
        # Transform the mean.
new_loc = (
(weights @ org_mvn.loc.unsqueeze(-1))
.view(*batch_shape, m, q)
.transpose(-1, -2)
)
# Transform the covariance matrix.
org_cov = (
org_mvn.lazy_covariance_matrix
if org_mvn.islazy
else org_mvn.covariance_matrix
)
new_cov = weights @ (org_cov @ weights.t())
if m == 1:
new_mvn = MultivariateNormal(
new_loc.squeeze(-1), to_linear_operator(new_cov)
)
else:
# Using MTMVN since we pass a single loc and covar for all `m` outputs.
new_mvn = MultitaskMultivariateNormal(
new_loc, to_linear_operator(new_cov), interleaved=False
)
return GPyTorchPosterior(distribution=new_mvn)
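# A small sketch of the q-batch expectation computed by ExpectationPosteriorTransform:
# with n_w=2, `evaluate` averages each consecutive pair of outcomes. The values
# below are illustrative.
def _expectation_transform_sketch() -> None:
    import torch
    tf = ExpectationPosteriorTransform(n_w=2)
    Y = torch.tensor([[[1.0], [3.0], [2.0], [6.0]]])  # batch=1, q * n_w = 4, m=1
    means = tf.evaluate(Y)  # tensor([[[2.0], [4.0]]])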
class UnstandardizePosteriorTransform(PosteriorTransform):
r"""Posterior transform that unstandardizes the posterior.
TODO: remove this when MultiTask models support outcome transforms.
Example:
>>> unstd_transform = UnstandardizePosteriorTransform(Y_mean, Y_std)
>>> unstd_posterior = unstd_transform(posterior)
"""
def __init__(self, Y_mean: Tensor, Y_std: Tensor) -> None:
r"""Initialize objective.
Args:
Y_mean: `m`-dim tensor of outcome means
Y_std: `m`-dim tensor of outcome standard deviations
"""
if Y_mean.ndim > 1 or Y_std.ndim > 1:
raise BotorchTensorDimensionError(
"Y_mean and Y_std must both be 1-dimensional, but got "
f"{Y_mean.ndim} and {Y_std.ndim}"
)
super().__init__()
self.outcome_transform = Standardize(m=Y_mean.shape[0]).to(Y_mean)
Y_std_unsqueezed = Y_std.unsqueeze(0)
self.outcome_transform.means = Y_mean.unsqueeze(0)
self.outcome_transform.stdvs = Y_std_unsqueezed
self.outcome_transform._stdvs_sq = Y_std_unsqueezed.pow(2)
self.outcome_transform._is_trained = torch.tensor(True)
self.outcome_transform.eval()
def evaluate(self, Y: Tensor) -> Tensor:
return self.outcome_transform.untransform(Y)[0]
def forward(self, posterior: GPyTorchPosterior) -> Tensor:
return self.outcome_transform.untransform_posterior(posterior)
class MCAcquisitionObjective(Module, ABC):
r"""Abstract base class for MC-based objectives.
Args:
_verify_output_shape: If True and `X` is given, check that the q-batch
shape of the objectives agrees with that of X.
_is_mo: A boolean denoting whether the objectives are multi-output.
:meta private:
"""
_verify_output_shape: bool = True
_is_mo: bool = False
@abstractmethod
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Evaluate the objective on the samples.
Args:
samples: A `sample_shape x batch_shape x q x m`-dim Tensors of
samples from a model posterior.
X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if
the objective depends on the inputs explicitly.
Returns:
Tensor: A `sample_shape x batch_shape x q`-dim Tensor of objective
values (assuming maximization).
This method is usually not called directly, but via the objectives.
Example:
>>> # `__call__` method:
>>> samples = sampler(posterior)
>>> outcome = mc_obj(samples)
"""
pass # pragma: no cover
def __call__(
self, samples: Tensor, X: Optional[Tensor] = None, *args, **kwargs
) -> Tensor:
output = super().__call__(samples=samples, X=X, *args, **kwargs)
# q-batch dimension is at -1 for single-output objectives and at
# -2 for multi-output objectives.
q_batch_idx = -2 if self._is_mo else -1
if (
X is not None
and self._verify_output_shape
and output.shape[q_batch_idx] != X.shape[-2]
):
raise RuntimeError(
"The q-batch shape of the objective values does not agree with "
f"the q-batch shape of X. Got {output.shape[q_batch_idx]} and "
f"{X.shape[-2]}. This may happen if you used a one-to-many input "
"transform but forgot to use a corresponding objective."
)
return output
class IdentityMCObjective(MCAcquisitionObjective):
r"""Trivial objective extracting the last dimension.
Example:
>>> identity_objective = IdentityMCObjective()
>>> samples = sampler(posterior)
>>> objective = identity_objective(samples)
"""
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
return samples.squeeze(-1)
class LinearMCObjective(MCAcquisitionObjective):
r"""Linear objective constructed from a weight tensor.
For input `samples` and `mc_obj = LinearMCObjective(weights)`, this produces
`mc_obj(samples) = sum_{i} weights[i] * samples[..., i]`
Example:
Example for a model with two outcomes:
>>> weights = torch.tensor([0.75, 0.25])
>>> linear_objective = LinearMCObjective(weights)
>>> samples = sampler(posterior)
>>> objective = linear_objective(samples)
"""
def __init__(self, weights: Tensor) -> None:
r"""
Args:
weights: A one-dimensional tensor with `m` elements representing the
linear weights on the outputs.
"""
super().__init__()
if weights.dim() != 1:
raise ValueError("weights must be a one-dimensional tensor.")
self.register_buffer("weights", weights)
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Evaluate the linear objective on the samples.
Args:
samples: A `sample_shape x batch_shape x q x m`-dim tensors of
samples from a model posterior.
X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if
the objective depends on the inputs explicitly.
Returns:
A `sample_shape x batch_shape x q`-dim tensor of objective values.
"""
if samples.shape[-1] != self.weights.shape[-1]:
raise RuntimeError("Output shape of samples not equal to that of weights")
return torch.einsum("...m, m", [samples, self.weights])
class GenericMCObjective(MCAcquisitionObjective):
r"""Objective generated from a generic callable.
    Allows one to construct arbitrary MC-objective functions from a generic
callable. In order to be able to use gradient-based acquisition function
optimization it should be possible to backpropagate through the callable.
Example:
>>> generic_objective = GenericMCObjective(
lambda Y, X: torch.sqrt(Y).sum(dim=-1),
)
>>> samples = sampler(posterior)
>>> objective = generic_objective(samples)
"""
def __init__(self, objective: Callable[[Tensor, Optional[Tensor]], Tensor]) -> None:
r"""
Args:
objective: A callable `f(samples, X)` mapping a
`sample_shape x batch-shape x q x m`-dim Tensor `samples` and
an optional `batch-shape x q x d`-dim Tensor `X` to a
`sample_shape x batch-shape x q`-dim Tensor of objective values.
"""
super().__init__()
if len(inspect.signature(objective).parameters) == 1:
warnings.warn(
"The `objective` callable of `GenericMCObjective` is expected to "
"take two arguments. Passing a callable that expects a single "
"argument will result in an error in future versions.",
DeprecationWarning,
)
def obj(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
return objective(samples)
self.objective = obj
else:
self.objective = objective
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Evaluate the objective on the samples.
Args:
samples: A `sample_shape x batch_shape x q x m`-dim Tensors of
samples from a model posterior.
X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if
the objective depends on the inputs explicitly.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of objective values.
"""
return self.objective(samples, X=X)
class ConstrainedMCObjective(GenericMCObjective):
r"""Feasibility-weighted objective.
    An objective that allows maximizing some scalable objective on the model
    outputs subject to a number of constraints. Constraint feasibility is
    approximated by a sigmoid function.
mc_acq(X) = (
(objective(X) + infeasible_cost) * \prod_i (1 - sigmoid(constraint_i(X)))
) - infeasible_cost
See `botorch.utils.objective.apply_constraints` for details on the constraint
handling.
Example:
>>> bound = 0.0
>>> objective = lambda Y: Y[..., 0]
>>> # apply non-negativity constraint on f(x)[1]
>>> constraint = lambda Y: bound - Y[..., 1]
>>> constrained_objective = ConstrainedMCObjective(objective, [constraint])
>>> samples = sampler(posterior)
>>> objective = constrained_objective(samples)
TODO: Deprecate this as default way to handle constraints with MC acquisition
functions once we have data on how well SampleReducingMCAcquisitionFunction works.
"""
def __init__(
self,
objective: Callable[[Tensor, Optional[Tensor]], Tensor],
constraints: List[Callable[[Tensor], Tensor]],
infeasible_cost: Union[Tensor, float] = 0.0,
eta: Union[Tensor, float] = 1e-3,
) -> None:
r"""
Args:
objective: A callable `f(samples, X)` mapping a
`sample_shape x batch-shape x q x m`-dim Tensor `samples` and
an optional `batch-shape x q x d`-dim Tensor `X` to a
`sample_shape x batch-shape x q`-dim Tensor of objective values.
constraints: A list of callables, each mapping a Tensor of dimension
`sample_shape x batch-shape x q x m` to a Tensor of dimension
`sample_shape x batch-shape x q`, where negative values imply
feasibility.
infeasible_cost: The cost of a design if all associated samples are
infeasible.
eta: The temperature parameter of the sigmoid function approximating
the constraint. Can be either a float or a 1-dim tensor. In case
of a float the same eta is used for every constraint in
constraints. In case of a tensor the length of the tensor must
match the number of provided constraints. The i-th constraint is
then estimated with the i-th eta value.
"""
super().__init__(objective=objective)
self.constraints = constraints
if type(eta) is not Tensor:
eta = torch.full((len(constraints),), eta)
self.register_buffer("eta", eta)
self.register_buffer("infeasible_cost", torch.as_tensor(infeasible_cost))
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Evaluate the feasibility-weighted objective on the samples.
Args:
samples: A `sample_shape x batch_shape x q x m`-dim Tensors of
samples from a model posterior.
X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if
the objective depends on the inputs explicitly.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of objective values
weighted by feasibility (assuming maximization).
"""
obj = super().forward(samples=samples)
return apply_constraints(
obj=obj,
constraints=self.constraints,
samples=samples,
infeasible_cost=self.infeasible_cost,
eta=self.eta,
)
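# A minimal sketch of ConstrainedMCObjective on made-up posterior samples: the
# objective is the first outcome and the second outcome acts as a constraint that
# is feasible when non-positive (negative constraint values imply feasibility).
def _constrained_mc_objective_sketch() -> None:
    import torch
    constrained_obj = ConstrainedMCObjective(
        objective=lambda Y, X=None: Y[..., 0],
        constraints=[lambda Y: Y[..., 1]],  # feasible where Y[..., 1] <= 0
        infeasible_cost=0.0,
        eta=1e-3,
    )
    samples = torch.tensor([[[1.0, -5.0]], [[1.0, 5.0]]])  # 2 samples, q=1, m=2
    weighted = constrained_obj(samples)  # approximately [[1.0], [0.0]]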
LEARNED_OBJECTIVE_PREF_MODEL_MIXED_DTYPE_WARN = (
"pref_model has double-precision data, but single-precision data "
"was passed to the LearnedObjective. Upcasting to double."
)
class LearnedObjective(MCAcquisitionObjective):
r"""Learned preference objective constructed from a preference model.
For input `samples`, it samples each individual sample again from the latent
    preference posterior distribution using `pref_model` and returns the posterior mean.
Example:
>>> train_X = torch.rand(2, 2)
>>> train_comps = torch.LongTensor([[0, 1]])
>>> pref_model = PairwiseGP(train_X, train_comps)
>>> learned_pref_obj = LearnedObjective(pref_model)
>>> samples = sampler(posterior)
>>> objective = learned_pref_obj(samples)
"""
def __init__(
self,
pref_model: Model,
sampler: Optional[MCSampler] = None,
):
r"""
Args:
pref_model: A BoTorch model, which models the latent preference/utility
function. Given an input tensor of size
`sample_size x batch_shape x N x d`, its `posterior` method should
return a `Posterior` object with single outcome representing the
utility values of the input.
sampler: Sampler for the preference model to account for uncertainty in
                preference when calculating the objective; it's not the one used
in MC acquisition functions. If None,
it uses `IIDNormalSampler(sample_shape=torch.Size([1]))`.
"""
super().__init__()
self.pref_model = pref_model
if isinstance(pref_model, DeterministicModel):
assert sampler is None
self.sampler = None
else:
if sampler is None:
self.sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
else:
self.sampler = sampler
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Sample each element of samples.
Args:
samples: A `sample_size x batch_shape x N x d`-dim Tensors of
samples from a model posterior.
Returns:
A `(sample_size * num_samples) x batch_shape x N`-dim Tensor of
objective values sampled from utility posterior using `pref_model`.
"""
if samples.dtype == torch.float32 and any(
d == torch.float64 for d in self.pref_model.dtypes_of_buffers
):
warnings.warn(
LEARNED_OBJECTIVE_PREF_MODEL_MIXED_DTYPE_WARN,
InputDataWarning,
stacklevel=2,
)
samples = samples.to(torch.float64)
post = self.pref_model.posterior(samples)
if isinstance(self.pref_model, DeterministicModel):
# return preference posterior mean
return post.mean.squeeze(-1)
else:
# return preference posterior sample mean
samples = self.sampler(post).squeeze(-1)
return samples.reshape(-1, *samples.shape[2:]) # batch_shape x N
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Abstract class for acquisition functions leveraging a cached Cholesky
decomposition of the posterior covariance over f(X_baseline).
"""
from __future__ import annotations
import warnings
from abc import ABC
import torch
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.higher_order_gp import HigherOrderGP
from botorch.models.model import Model
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.multitask import KroneckerMultiTaskGP, MultiTaskGP
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.posteriors.posterior import Posterior
from botorch.utils.low_rank import extract_batch_covar, sample_cached_cholesky
from gpytorch.distributions.multitask_multivariate_normal import (
MultitaskMultivariateNormal,
)
from linear_operator.utils.errors import NanError, NotPSDError
from torch import Tensor
def supports_cache_root(model: Model) -> bool:
r"""Checks if a model supports the cache_root functionality.
The two criteria are that the model is not multi-task and the model
produces a GPyTorchPosterior.
"""
if isinstance(model, ModelListGP):
return all(supports_cache_root(m) for m in model.models)
# Multi task models and non-GPyTorch models are not supported.
if isinstance(
model, (MultiTaskGP, KroneckerMultiTaskGP, HigherOrderGP)
) or not isinstance(model, GPyTorchModel):
return False
# Models that return a TransformedPosterior are not supported.
if hasattr(model, "outcome_transform") and (not model.outcome_transform._is_linear):
return False
return True
def _get_cache_root_not_supported_message(model_cls: type) -> str:
msg = (
"`cache_root` is only supported for GPyTorchModels that "
"are not MultiTask models and don't produce a "
f"TransformedPosterior. Got a model of type {model_cls}. Setting "
"`cache_root = False`."
)
return msg
class CachedCholeskyMCAcquisitionFunction(ABC):
r"""Abstract class for acquisition functions using a cached Cholesky.
Specifically, this is for acquisition functions that require sampling from
the posterior P(f(X_baseline, X) | D). The Cholesky of the posterior
covariance over f(X_baseline) is cached.
:meta private:
"""
def _setup(
self,
model: Model,
cache_root: bool = False,
) -> None:
r"""Set class attributes and perform compatibility checks.
Args:
model: A model.
cache_root: A boolean indicating whether to cache the Cholesky.
                This might be overridden if the model is not compatible.
"""
if cache_root and not supports_cache_root(model):
warnings.warn(
_get_cache_root_not_supported_message(type(model)),
RuntimeWarning,
)
cache_root = False
self._cache_root = cache_root
def _compute_root_decomposition(
self,
posterior: Posterior,
) -> Tensor:
r"""Cache Cholesky of the posterior covariance over f(X_baseline).
Because `LinearOperator.root_decomposition` is decorated with LinearOperator's
@cached decorator, this function is doing a lot implicitly:
1) Check if a root decomposition has already been cached to `lazy_covar`.
Note that it will not have been if `posterior.mvn` is a
`MultitaskMultivariateNormal`, since we construct `lazy_covar` in that
case.
2) If the root decomposition has not been found in the cache, compute it.
        3) Write it to the cache of `lazy_covar`. Note that this will become inaccessible
if `posterior.mvn` is a `MultitaskMultivariateNormal`, since in that case
`lazy_covar`'s scope is only this function.
Args:
posterior: The posterior over f(X_baseline).
"""
if isinstance(posterior.distribution, MultitaskMultivariateNormal):
lazy_covar = extract_batch_covar(posterior.distribution)
else:
lazy_covar = posterior.distribution.lazy_covariance_matrix
lazy_covar_root = lazy_covar.root_decomposition()
return lazy_covar_root.root.to_dense()
def _get_f_X_samples(self, posterior: GPyTorchPosterior, q_in: int) -> Tensor:
r"""Get posterior samples at the `q_in` new points from the joint posterior.
Args:
posterior: The joint posterior is over (X_baseline, X).
q_in: The number of new points in the posterior. See `_set_sampler` for
more information.
Returns:
A `sample_shape x batch_shape x q x m`-dim tensor of posterior
samples at the new points.
"""
# Technically we should make sure that we add a consistent nugget to the
# cached covariance (and box decompositions) and the new block.
# But recomputing box decompositions every time the jitter changes would
# be quite slow.
if self._cache_root and hasattr(self, "_baseline_L"):
try:
return sample_cached_cholesky(
posterior=posterior,
baseline_L=self._baseline_L,
q=q_in,
base_samples=self.sampler.base_samples,
sample_shape=self.sampler.sample_shape,
)
except (NanError, NotPSDError):
warnings.warn(
"Low-rank cholesky updates failed due NaNs or due to an "
"ill-conditioned covariance matrix. "
"Falling back to standard sampling.",
BotorchWarning,
)
# TODO: improve efficiency for multi-task models
samples = self.get_posterior_samples(posterior)
if isinstance(self.model, HigherOrderGP):
# Select the correct q-batch dimension for HOGP.
q_dim = -self.model._num_dimensions
q_idcs = (
torch.arange(-q_in, 0, device=samples.device) + samples.shape[q_dim]
)
return samples.index_select(q_dim, q_idcs)
else:
return samples[..., -q_in:, :]
def _set_sampler(
self,
q_in: int,
posterior: Posterior,
) -> None:
r"""Update the sampler to use the original base samples for X_baseline.
Args:
q_in: The effective input batch size. This is typically equal to the
q-batch size of `X`. However, if using a one-to-many input transform,
e.g., `InputPerturbation` with `n_w` perturbations, the posterior will
have `n_w` points on the q-batch for each point on the q-batch of `X`.
In which case, `q_in = q * n_w` is used.
posterior: The posterior.
"""
if self.q_in != q_in and self.base_sampler is not None:
self.sampler._update_base_samples(
posterior=posterior, base_sampler=self.base_sampler
)
self.q_in = q_in
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
A wrapper around AcquisitionFunctions to add proximal weighting of the
acquisition function.
"""
from __future__ import annotations
from typing import Optional
import torch
from botorch.acquisition import AcquisitionFunction
from botorch.exceptions.errors import UnsupportedError
from botorch.models import ModelListGP
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
from botorch.models.model import Model
from botorch.models.transforms.input import InputTransform
from botorch.utils import t_batch_mode_transform
from torch import Tensor
from torch.nn import Module
class ProximalAcquisitionFunction(AcquisitionFunction):
"""A wrapper around AcquisitionFunctions to add proximal weighting of the
acquisition function. The acquisition function is
weighted via a squared exponential centered at the last training point,
with varying lengthscales corresponding to `proximal_weights`. Can only be used
with acquisition functions based on single batch models. Acquisition functions
must be positive or `beta` must be specified to apply a SoftPlus transform before
proximal weighting.
    Small values of `proximal_weights` correspond to strong biasing towards recently
    observed points, which smooths optimization with a small potential decrease in
    convergence rate.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> EI = ExpectedImprovement(model, best_f=0.0)
>>> proximal_weights = torch.ones(d)
>>> EI_proximal = ProximalAcquisitionFunction(EI, proximal_weights)
>>> eip = EI_proximal(test_X)
"""
def __init__(
self,
acq_function: AcquisitionFunction,
proximal_weights: Tensor,
transformed_weighting: Optional[bool] = True,
beta: Optional[float] = None,
) -> None:
r"""Derived Acquisition Function weighted by proximity to recently
observed point.
Args:
acq_function: The base acquisition function, operating on input tensors
of feature dimension `d`.
proximal_weights: A `d` dim tensor used to bias locality
along each axis.
transformed_weighting: If True, the proximal weights are applied in
the transformed input space given by
`acq_function.model.input_transform` (if available), otherwise
proximal weights are applied in real input space.
            beta: If not None, apply a softplus transform to the base acquisition
                function; this allows negative base acquisition function values.
"""
Module.__init__(self)
self.acq_func = acq_function
model = self.acq_func.model
if hasattr(acq_function, "X_pending"):
if acq_function.X_pending is not None:
raise UnsupportedError(
"Proximal acquisition function requires `X_pending` to be None."
)
self.X_pending = acq_function.X_pending
self.register_buffer("proximal_weights", proximal_weights)
self.register_buffer(
"transformed_weighting", torch.tensor(transformed_weighting)
)
self.register_buffer("beta", None if beta is None else torch.tensor(beta))
_validate_model(model, proximal_weights)
@t_batch_mode_transform(expected_q=1, assert_output_shape=False)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate base acquisition function with proximal weighting.
Args:
X: Input tensor of feature dimension `d` .
Returns:
Base acquisition function evaluated on tensor `X` multiplied by proximal
weighting.
"""
model = self.acq_func.model
train_inputs = model.train_inputs[0]
# if the model is ModelListGP then get the first model
if isinstance(model, ModelListGP):
train_inputs = train_inputs[0]
model = model.models[0]
# if the model has more than one output get the first copy of training inputs
if isinstance(model, BatchedMultiOutputGPyTorchModel) and model.num_outputs > 1:
train_inputs = train_inputs[0]
input_transform = _get_input_transform(model)
last_X = train_inputs[-1].reshape(1, 1, -1)
# if transformed_weighting, transform X to calculate diff
# (proximal weighting in transformed space)
        # otherwise, un-transform the last observed point to real space
# (proximal weighting in real space)
if input_transform is not None:
if self.transformed_weighting:
# transformed space weighting
diff = input_transform.transform(X) - last_X
else:
# real space weighting
diff = X - input_transform.untransform(last_X)
else:
# no transformation
diff = X - last_X
M = torch.linalg.norm(diff / self.proximal_weights, dim=-1) ** 2
proximal_acq_weight = torch.exp(-0.5 * M)
base_acqf = self.acq_func(X)
if self.beta is None:
if torch.any(base_acqf < 0):
raise RuntimeError(
"Cannot use proximal biasing for negative "
"acquisition function values, set a value for beta to "
"fix this with a softplus transform"
)
else:
base_acqf = torch.nn.functional.softplus(base_acqf, beta=self.beta)
return base_acqf * proximal_acq_weight.flatten()
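# A small numeric sketch of the proximal weight used in `forward` above: a candidate
# one lengthscale away from the last training point is down-weighted by exp(-0.5).
# The weights and points below are made up.
def _proximal_weight_sketch() -> None:
    import torch
    proximal_weights = torch.tensor([0.5, 0.5])
    last_X = torch.zeros(1, 1, 2)
    X = torch.tensor([[[0.5, 0.0]]])  # one lengthscale away along the first dim
    M = torch.linalg.norm((X - last_X) / proximal_weights, dim=-1) ** 2
    weight = torch.exp(-0.5 * M)  # approximately exp(-0.5) ~= 0.61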
def _validate_model(model: Model, proximal_weights: Tensor) -> None:
r"""Validate model
    Perform validation checks on model used in base acquisition function to make sure
it is compatible with proximal weighting.
Args:
model: Model associated with base acquisition function to be validated.
proximal_weights: A `d` dim tensor used to bias locality
along each axis.
"""
# check model for train_inputs and single batch
if not hasattr(model, "train_inputs"):
raise UnsupportedError("Acquisition function model must have `train_inputs`.")
# get train inputs for each type of possible model
if isinstance(model, ModelListGP):
# ModelListGP models
# check to make sure that the training inputs and input transformers for each
# model match and are reversible
train_inputs = model.train_inputs[0][0]
input_transform = _get_input_transform(model.models[0])
for i in range(len(model.train_inputs)):
if not torch.equal(train_inputs, model.train_inputs[i][0]):
raise UnsupportedError(
"Proximal acquisition function does not support unequal "
"training inputs"
)
if not input_transform == _get_input_transform(model.models[i]):
raise UnsupportedError(
"Proximal acquisition function does not support non-identical "
"input transforms"
)
else:
# any non-ModelListGP model
train_inputs = model.train_inputs[0]
# check to make sure that the model is single t-batch (q-batches are allowed)
if model.batch_shape != torch.Size([]) and train_inputs.shape[1] != 1:
raise UnsupportedError(
"Proximal acquisition function requires a single batch model"
)
# check to make sure that weights match the training data shape
if (
len(proximal_weights.shape) != 1
or proximal_weights.shape[0] != train_inputs.shape[-1]
):
raise ValueError(
"`proximal_weights` must be a one dimensional tensor with "
"same feature dimension as model."
)
def _get_input_transform(model: Model) -> Optional[InputTransform]:
"""get input transform if defined"""
try:
return model.input_transform
except AttributeError:
return None
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Active learning acquisition functions.
.. [Seo2014activedata]
S. Seo, M. Wallat, T. Graepel, and K. Obermayer. Gaussian process regression:
Active data selection and test point rejection. IJCNN 2000.
.. [Chen2014seqexpdesign]
X. Chen and Q. Zhou. Sequential experimental designs for stochastic kriging.
Winter Simulation Conference 2014.
.. [Binois2017repexp]
M. Binois, J. Huang, R. B. Gramacy, and M. Ludkovski. Replication or
exploration? Sequential design for stochastic simulation experiments.
ArXiv 2017.
"""
from __future__ import annotations
from typing import Optional
import torch
from botorch import settings
from botorch.acquisition.analytic import AnalyticAcquisitionFunction
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.acquisition.objective import MCAcquisitionObjective, PosteriorTransform
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
from torch import Tensor
class qNegIntegratedPosteriorVariance(AnalyticAcquisitionFunction):
r"""Batch Integrated Negative Posterior Variance for Active Learning.
This acquisition function quantifies the (negative) integrated posterior variance
(excluding observation noise, computed using MC integration) of the model.
In that, it is a proxy for global model uncertainty, and thus purely focused on
"exploration", rather the "exploitation" of many of the classic Bayesian
Optimization acquisition functions.
See [Seo2014activedata]_, [Chen2014seqexpdesign]_, and [Binois2017repexp]_.
"""
def __init__(
self,
model: Model,
mc_points: Tensor,
sampler: Optional[MCSampler] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Integrated Negative Posterior Variance.
Args:
model: A fitted model.
mc_points: A `batch_shape x N x d` tensor of points to use for
MC-integrating the posterior variance. Usually, these are qMC
samples on the whole design space, but biased sampling directly
allows weighted integration of the posterior variance.
sampler: The sampler used for drawing fantasy samples. In the basic setting
of a standard GP (default) this is a dummy, since the variance of the
model after conditioning does not actually depend on the sampled values.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
            X_pending: A `n' x d`-dim Tensor of `n'` design points that have
                been submitted for function evaluation but have not yet been
                evaluated.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
if sampler is None:
# If no sampler is provided, we use the following dummy sampler for the
# fantasize() method in forward. IMPORTANT: This assumes that the posterior
# variance does not depend on the samples y (only on x), which is true for
# standard GP models, but not in general (e.g. for other likelihoods or
# heteroskedastic GPs using a separate noise model fit on data).
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([1]))
self.sampler = sampler
self.X_pending = X_pending
self.register_buffer("mc_points", mc_points)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
        # Construct the fantasy model (we actually do not use the full model,
        # this is just a convenient way of computing fast posterior covariances).
fantasy_model = self.model.fantasize(
X=X, sampler=self.sampler, observation_noise=True
)
bdims = tuple(1 for _ in X.shape[:-2])
if self.model.num_outputs > 1:
# We use q=1 here b/c ScalarizedObjective currently does not fully exploit
# LinearOperator operations and thus may be slow / overly memory-hungry.
# TODO (T52818288): Properly use LinearOperators in scalarize_posterior
mc_points = self.mc_points.view(-1, *bdims, 1, X.size(-1))
else:
# While we only need marginal variances, we can evaluate for q>1
# b/c for GPyTorch models lazy evaluation can make this quite a bit
            # faster than evaluating in t-batch mode with q-batch size of 1
mc_points = self.mc_points.view(*bdims, -1, X.size(-1))
# evaluate the posterior at the grid points
with settings.propagate_grads(True):
posterior = fantasy_model.posterior(
mc_points, posterior_transform=self.posterior_transform
)
neg_variance = posterior.variance.mul(-1.0)
if self.posterior_transform is None:
# if single-output, shape is 1 x batch_shape x num_grid_points x 1
return neg_variance.mean(dim=-2).squeeze(-1).squeeze(0)
else:
# if multi-output + obj, shape is num_grid_points x batch_shape x 1 x 1
return neg_variance.mean(dim=0).squeeze(-1).squeeze(-1)
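# Hedged usage sketch (illustrative only, not part of the library API): how one
# might construct qNegIntegratedPosteriorVariance with qMC integration points
# over the unit cube. `SingleTaskGP` and `SobolEngine` are standard components;
# the toy data and the number of integration points are assumptions.
def _example_qnipv_usage():  # pragma: no cover - illustrative only
    from botorch.models import SingleTaskGP
    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    # qMC integration grid over the design space (here [0, 1]^2).
    sobol = torch.quasirandom.SobolEngine(dimension=2, scramble=True)
    mc_points = sobol.draw(256).to(train_X)
    acqf = qNegIntegratedPosteriorVariance(model=model, mc_points=mc_points)
    # Evaluate on five q=1 candidates; larger values mean lower integrated variance.
    return acqf(torch.rand(5, 1, 2, dtype=torch.double))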
class PairwiseMCPosteriorVariance(MCAcquisitionFunction):
r"""Variance of difference for Active Learning
Given a model and an objective, calculate the posterior sample variance
of the objective on the difference of pairs of points. See more implementation
details in `forward`. This acquisition function is typically used with a
pairwise model (e.g., PairwiseGP) and a likelihood/link function
    on the pair difference (e.g., logistic or probit) for pure exploration.
"""
def __init__(
self,
model: Model,
objective: MCAcquisitionObjective,
sampler: Optional[MCSampler] = None,
) -> None:
r"""Pairwise Monte Carlo Posterior Variance
Args:
model: A fitted model.
            objective: An MCAcquisitionObjective representing the link function
                (e.g., logistic or probit) applied on the difference of two
                (usually 1-d) samples. Can be implemented via GenericMCObjective.
sampler: The sampler used for drawing MC samples.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=None
)
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate PairwiseMCPosteriorVariance on the candidate set `X`.
Args:
X: A `batch_size x q x d`-dim Tensor. q should be a multiple of 2.
Returns:
            A `batch_size`-dim Tensor representing the posterior variance of the
            link function at X, averaged over the q/2 pairs, which active
            learning hopes to maximize.
"""
if X.shape[-2] == 0 or X.shape[-2] % 2 != 0:
raise RuntimeError(
"q must be a multiple of 2 for PairwiseMCPosteriorVariance"
)
# The output is of shape batch_shape x 2 x d
# For PairwiseGP, d = 1
post = self.model.posterior(X)
samples = self.get_posterior_samples(post) # num_samples x batch_shape x 2 x d
# The output is of shape num_samples x batch_shape x q/2 x d
# assuming the comparison is made between the 2 * i and 2 * i + 1 elements
samples_diff = samples[..., ::2, :] - samples[..., 1::2, :]
mc_var = self.objective(samples_diff).var(dim=0)
mean_mc_var = mc_var.mean(dim=-1)
return mean_mc_var
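# Hedged usage sketch (illustrative only, not part of the library API): a
# logistic link on the pairwise difference wrapped in `GenericMCObjective`.
# `pairwise_model` stands in for a fitted preference model (e.g. a PairwiseGP)
# obtained elsewhere; the link function and shapes below are assumptions.
def _example_pairwise_mc_posterior_variance(pairwise_model: Model) -> Tensor:  # pragma: no cover
    from botorch.acquisition.objective import GenericMCObjective
    # Logistic link applied to the (usually 1-d) sample difference; squeeze the
    # output dimension so the objective returns a `... x q/2`-dim tensor.
    link = GenericMCObjective(lambda Y, X=None: torch.sigmoid(Y).squeeze(-1))
    acqf = PairwiseMCPosteriorVariance(model=pairwise_model, objective=link)
    # q must be a multiple of 2: consecutive points form the compared pairs.
    return acqf(torch.rand(4, 2, 3))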
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Risk Measures implemented as Monte-Carlo objectives, based on Bayesian
optimization of risk measures as introduced in [Cakmak2020risk]_. For a
broader discussion of Monte-Carlo methods for VaR and CVaR risk measures,
see also [Hong2014review]_.
.. [Cakmak2020risk]
S. Cakmak, R. Astudillo, P. Frazier, and E. Zhou. Bayesian Optimization of
Risk Measures. Advances in Neural Information Processing Systems 33, 2020.
.. [Hong2014review]
L. J. Hong, Z. Hu, and G. Liu. Monte carlo methods for value-at-risk and
conditional value-at-risk: a review. ACM Transactions on Modeling and
Computer Simulation, 2014.
"""
from abc import ABC, abstractmethod
from math import ceil
from typing import Callable, Optional
import torch
from botorch.acquisition.multi_objective.objective import IdentityMCMultiOutputObjective
from botorch.acquisition.objective import IdentityMCObjective, MCAcquisitionObjective
from torch import Tensor
class RiskMeasureMCObjective(MCAcquisitionObjective, ABC):
r"""Objective transforming the posterior samples to samples of a risk measure.
The risk measure is calculated over joint q-batch samples from the posterior.
If the q-batch includes samples corresponding to multiple inputs, it is assumed
    that the first `n_w` samples correspond to the first input, the second `n_w`
    samples correspond to the second input, etc.
The risk measures are commonly defined for minimization by considering the
upper tail of the distribution, i.e., treating larger values as being undesirable.
BoTorch by default assumes a maximization objective, so the default behavior here
is to calculate the risk measures w.r.t. the lower tail of the distribution.
    This can be changed by passing a preprocessing function that negates the
    samples, e.g., `LinearMCObjective(weights=torch.tensor([-1.0]))`.
:meta private:
"""
def __init__(
self,
n_w: int,
preprocessing_function: Optional[Callable[[Tensor], Tensor]] = None,
) -> None:
r"""Transform the posterior samples to samples of a risk measure.
Args:
n_w: The size of the `w_set` to calculate the risk measure over.
preprocessing_function: A preprocessing function to apply to the samples
before computing the risk measure. This can be used to scalarize
multi-output samples before calculating the risk measure.
For constrained optimization, this should also apply
feasibility-weighting to samples. Given a `batch x m`-dim
tensor of samples, this should return a `batch`-dim tensor.
"""
super().__init__()
self.n_w = n_w
if preprocessing_function is None:
if self._is_mo:
preprocessing_function = IdentityMCMultiOutputObjective()
else:
preprocessing_function = IdentityMCObjective()
self.preprocessing_function = preprocessing_function
def _prepare_samples(self, samples: Tensor) -> Tensor:
r"""Prepare samples for risk measure calculations by scalarizing and
separating out the q-batch dimension.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
Returns:
A `sample_shape x batch_shape x q x n_w`-dim tensor of prepared samples.
"""
if samples.shape[-1] > 1 and isinstance(
self.preprocessing_function, IdentityMCObjective
):
raise RuntimeError(
"Multi-output samples should be scalarized using a "
"`preprocessing_function`."
)
samples = self.preprocessing_function(samples)
return samples.view(*samples.shape[:-1], -1, self.n_w)
@abstractmethod
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the risk measure corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q`-dim tensor of risk measure samples.
"""
pass # pragma: no cover
class CVaR(RiskMeasureMCObjective):
r"""The Conditional Value-at-Risk risk measure.
The Conditional Value-at-Risk measures the expectation of the worst outcomes
(small rewards or large losses) with a total probability of `1 - alpha`. It
is commonly defined as the conditional expectation of the reward function,
with the condition that the reward is smaller than the corresponding
Value-at-Risk (also defined below).
Note: Due to the use of a discrete `w_set` of samples, the VaR and CVaR
calculated here are (possibly biased) Monte-Carlo approximations of
the true risk measures.
"""
def __init__(
self,
alpha: float,
n_w: int,
preprocessing_function: Optional[Callable[[Tensor], Tensor]] = None,
) -> None:
r"""Transform the posterior samples to samples of a risk measure.
Args:
alpha: The risk level, float in `(0.0, 1.0]`.
n_w: The size of the `w_set` to calculate the risk measure over.
preprocessing_function: A preprocessing function to apply to the samples
before computing the risk measure. This can be used to scalarize
multi-output samples before calculating the risk measure.
For constrained optimization, this should also apply
feasibility-weighting to samples. Given a `batch x m`-dim
tensor of samples, this should return a `batch`-dim tensor.
"""
super().__init__(n_w=n_w, preprocessing_function=preprocessing_function)
if not 0 < alpha <= 1:
raise ValueError("alpha must be in (0.0, 1.0]")
self.alpha = alpha
self.alpha_idx = ceil(n_w * alpha) - 1
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the CVaR corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q`-dim tensor of CVaR samples.
"""
prepared_samples = self._prepare_samples(samples)
return torch.topk(
prepared_samples,
k=prepared_samples.shape[-1] - self.alpha_idx,
largest=False,
dim=-1,
).values.mean(dim=-1)
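# Hedged numerical sketch (illustrative only): CVaR over a toy q-batch with
# `n_w = 5` perturbation samples per design. With `alpha = 0.6`,
# `alpha_idx = ceil(5 * 0.6) - 1 = 2`, so CVaR averages the 3 smallest values
# in each block. The numbers below are made up for illustration.
def _example_cvar():  # pragma: no cover - illustrative only
    cvar = CVaR(alpha=0.6, n_w=5)
    # `sample_shape x batch_shape x (q * n_w) x m` = 1 x 1 x 10 x 1, i.e. q = 2.
    samples = torch.tensor(
        [1.0, 2.0, 3.0, 4.0, 5.0, 0.0, 2.0, 4.0, 6.0, 8.0]
    ).view(1, 1, 10, 1)
    # First block: mean of {1, 2, 3} = 2.0; second block: mean of {0, 2, 4} = 2.0.
    return cvar(samples)  # shape 1 x 1 x 2, values [[[2.0, 2.0]]]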
class VaR(CVaR):
r"""The Value-at-Risk risk measure.
Value-at-Risk measures the smallest possible reward (or largest possible loss)
after excluding the worst outcomes with a total probability of `1 - alpha`. It
is commonly used in financial risk management, and it corresponds to the
`1 - alpha` quantile of a given random variable.
"""
def __init__(
self,
alpha: float,
n_w: int,
preprocessing_function: Optional[Callable[[Tensor], Tensor]] = None,
) -> None:
r"""Transform the posterior samples to samples of a risk measure.
Args:
alpha: The risk level, float in `(0.0, 1.0]`.
n_w: The size of the `w_set` to calculate the risk measure over.
preprocessing_function: A preprocessing function to apply to the samples
before computing the risk measure. This can be used to scalarize
multi-output samples before calculating the risk measure.
For constrained optimization, this should also apply
feasibility-weighting to samples. Given a `batch x m`-dim
tensor of samples, this should return a `batch`-dim tensor.
"""
super().__init__(
n_w=n_w,
alpha=alpha,
preprocessing_function=preprocessing_function,
)
self._q = 1 - self.alpha_idx / n_w
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the VaR corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q`-dim tensor of VaR samples.
"""
prepared_samples = self._prepare_samples(samples)
# this is equivalent to sorting along dim=-1 in descending order
# and taking the values at index self.alpha_idx. E.g.
# >>> sorted_res = prepared_samples.sort(dim=-1, descending=True)
# >>> sorted_res.values[..., self.alpha_idx]
# Using quantile is far more memory efficient since `torch.sort`
# produces values and indices tensors with shape
# `sample_shape x batch_shape x (q * n_w) x m`
return torch.quantile(
input=prepared_samples,
q=self._q,
dim=-1,
keepdim=False,
interpolation="lower",
)
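# Hedged sketch (illustrative only): the quantile-based VaR above matches the
# sort-based formulation referenced in the comment. The toy shapes and risk
# level are assumptions made for the example.
def _example_var_equivalence():  # pragma: no cover - illustrative only
    var = VaR(alpha=0.75, n_w=8)
    # `sample_shape x batch_shape x (q * n_w) x m` = 4 x 3 x 16 x 1, i.e. q = 2.
    samples = torch.randn(4, 3, 16, 1)
    via_quantile = var(samples)
    prepared = var._prepare_samples(samples)
    via_sort = prepared.sort(dim=-1, descending=True).values[..., var.alpha_idx]
    assert torch.equal(via_quantile, via_sort)
    return via_quantile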
class WorstCase(RiskMeasureMCObjective):
r"""The worst-case risk measure."""
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the worst-case measure corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q`-dim tensor of worst-case samples.
"""
prepared_samples = self._prepare_samples(samples)
return prepared_samples.min(dim=-1).values
class Expectation(RiskMeasureMCObjective):
r"""The expectation risk measure.
For unconstrained problems, we recommend using the `ExpectationPosteriorTransform`
instead. `ExpectationPosteriorTransform` directly transforms the posterior
distribution over `q * n_w` to a posterior of `q` expectations, significantly
reducing the cost of posterior sampling as a result.
"""
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the expectation corresponding to the given samples.
        This calculates the expectation / mean / average of each block of `n_w`
        samples. If a `preprocessing_function` is given, the samples are
        scalarized across the output dimension before taking the expectation.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q`-dim tensor of expectation samples.
"""
prepared_samples = self._prepare_samples(samples)
return prepared_samples.mean(dim=-1)
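# Hedged numerical sketch (illustrative only): `WorstCase` and `Expectation`
# reduce each block of `n_w` samples to its minimum and mean, respectively.
# The toy samples below are assumptions made for the example.
def _example_worst_case_and_expectation():  # pragma: no cover - illustrative only
    # `batch_shape x (q * n_w) x m` = 1 x 6 x 1, i.e. q = 2 with n_w = 3.
    samples = torch.tensor([1.0, 3.0, 5.0, 2.0, 4.0, 6.0]).view(1, 6, 1)
    worst = WorstCase(n_w=3)(samples)  # tensor([[1., 2.]])
    mean = Expectation(n_w=3)(samples)  # tensor([[3., 4.]])
    return worst, mean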
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.acquisition.acquisition import (
AcquisitionFunction,
OneShotAcquisitionFunction,
)
from botorch.acquisition.active_learning import (
PairwiseMCPosteriorVariance,
qNegIntegratedPosteriorVariance,
)
from botorch.acquisition.analytic import (
AnalyticAcquisitionFunction,
ConstrainedExpectedImprovement,
ExpectedImprovement,
LogExpectedImprovement,
LogNoisyExpectedImprovement,
NoisyExpectedImprovement,
PosteriorMean,
ProbabilityOfImprovement,
qAnalyticProbabilityOfImprovement,
UpperConfidenceBound,
)
from botorch.acquisition.cost_aware import (
GenericCostAwareUtility,
InverseCostWeightedUtility,
)
from botorch.acquisition.decoupled import DecoupledAcquisitionFunction
from botorch.acquisition.factory import get_acquisition_function
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.knowledge_gradient import (
qKnowledgeGradient,
qMultiFidelityKnowledgeGradient,
)
from botorch.acquisition.logei import (
LogImprovementMCAcquisitionFunction,
qLogExpectedImprovement,
qLogNoisyExpectedImprovement,
)
from botorch.acquisition.max_value_entropy_search import (
MaxValueBase,
qLowerBoundMaxValueEntropy,
qMaxValueEntropy,
qMultiFidelityLowerBoundMaxValueEntropy,
qMultiFidelityMaxValueEntropy,
)
from botorch.acquisition.monte_carlo import (
MCAcquisitionFunction,
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
SampleReducingMCAcquisitionFunction,
)
from botorch.acquisition.multi_step_lookahead import qMultiStepLookahead
from botorch.acquisition.objective import (
ConstrainedMCObjective,
GenericMCObjective,
IdentityMCObjective,
LearnedObjective,
LinearMCObjective,
MCAcquisitionObjective,
ScalarizedPosteriorTransform,
)
from botorch.acquisition.preference import (
AnalyticExpectedUtilityOfBestOption,
PairwiseBayesianActiveLearningByDisagreement,
)
from botorch.acquisition.prior_guided import PriorGuidedAcquisitionFunction
from botorch.acquisition.proximal import ProximalAcquisitionFunction
__all__ = [
"AcquisitionFunction",
"AnalyticAcquisitionFunction",
"AnalyticExpectedUtilityOfBestOption",
"ConstrainedExpectedImprovement",
"DecoupledAcquisitionFunction",
"ExpectedImprovement",
"LogExpectedImprovement",
"LogNoisyExpectedImprovement",
"FixedFeatureAcquisitionFunction",
"GenericCostAwareUtility",
"InverseCostWeightedUtility",
"NoisyExpectedImprovement",
"OneShotAcquisitionFunction",
"PairwiseBayesianActiveLearningByDisagreement",
"PairwiseMCPosteriorVariance",
"PosteriorMean",
"PriorGuidedAcquisitionFunction",
"ProbabilityOfImprovement",
"ProximalAcquisitionFunction",
"UpperConfidenceBound",
"qAnalyticProbabilityOfImprovement",
"qExpectedImprovement",
"LogImprovementMCAcquisitionFunction",
"qLogExpectedImprovement",
"qLogNoisyExpectedImprovement",
"qKnowledgeGradient",
"MaxValueBase",
"qMultiFidelityKnowledgeGradient",
"qMaxValueEntropy",
"qMultiFidelityLowerBoundMaxValueEntropy",
"qLowerBoundMaxValueEntropy",
"qMultiFidelityMaxValueEntropy",
"qMultiStepLookahead",
"qNoisyExpectedImprovement",
"qNegIntegratedPosteriorVariance",
"qProbabilityOfImprovement",
"qSimpleRegret",
"qUpperConfidenceBound",
"ConstrainedMCObjective",
"GenericMCObjective",
"IdentityMCObjective",
"LearnedObjective",
"LinearMCObjective",
"MCAcquisitionFunction",
"SampleReducingMCAcquisitionFunction",
"MCAcquisitionObjective",
"ScalarizedPosteriorTransform",
"get_acquisition_function",
"get_acqf_input_constructor",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Batch Knowledge Gradient (KG) via one-shot optimization as introduced in
[Balandat2020botorch]_. For broader discussion of KG see also [Frazier2008knowledge]_
and [Wu2016parallelkg]_.
.. [Balandat2020botorch]
M. Balandat, B. Karrer, D. R. Jiang, S. Daulton, B. Letham, A. G. Wilson, and
E. Bakshy. BoTorch: A Framework for Efficient Monte-Carlo Bayesian Optimization.
Advances in Neural Information Processing Systems 33, 2020.
.. [Frazier2008knowledge]
P. Frazier, W. Powell, and S. Dayanik. A Knowledge-Gradient policy for
sequential information collection. SIAM Journal on Control and Optimization,
2008.
.. [Wu2016parallelkg]
J. Wu and P. Frazier. The parallel knowledge gradient method for batch
bayesian optimization. NIPS 2016.
"""
from __future__ import annotations
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Tuple, Type
import torch
from botorch import settings
from botorch.acquisition.acquisition import (
AcquisitionFunction,
MCSamplerMixin,
OneShotAcquisitionFunction,
)
from botorch.acquisition.analytic import PosteriorMean
from botorch.acquisition.cost_aware import CostAwareUtility
from botorch.acquisition.monte_carlo import MCAcquisitionFunction, qSimpleRegret
from botorch.acquisition.objective import MCAcquisitionObjective, PosteriorTransform
from botorch.exceptions.errors import UnsupportedError
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from torch import Tensor
class qKnowledgeGradient(MCAcquisitionFunction, OneShotAcquisitionFunction):
r"""Batch Knowledge Gradient using one-shot optimization.
This computes the batch Knowledge Gradient using fantasies for the outer
expectation and either the model posterior mean or MC-sampling for the inner
expectation.
In addition to the design variables, the input `X` also includes variables
for the optimal designs for each of the fantasy models. For a fixed number
of fantasies, all parts of `X` can be optimized in a "one-shot" fashion.
"""
def __init__(
self,
model: Model,
num_fantasies: Optional[int] = 64,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
inner_sampler: Optional[MCSampler] = None,
X_pending: Optional[Tensor] = None,
current_value: Optional[Tensor] = None,
) -> None:
r"""q-Knowledge Gradient (one-shot optimization).
Args:
model: A fitted model. Must support fantasizing.
num_fantasies: The number of fantasy points to use. More fantasy
points result in a better approximation, at the expense of
memory and wall time. Unused if `sampler` is specified.
sampler: The sampler used to sample fantasy observations. Optional
if `num_fantasies` is specified.
objective: The objective under which the samples are evaluated. If
`None`, then the analytic posterior mean is used. Otherwise, the
objective is MC-evaluated (using inner_sampler).
posterior_transform: An optional PosteriorTransform. If given, this
transforms the posterior before evaluation. If `objective is None`,
then the analytic posterior mean of the transformed posterior is
used. If `objective` is given, the `inner_sampler` is used to draw
samples from the transformed posterior, which are then evaluated under
the `objective`.
inner_sampler: The sampler used for inner sampling. Ignored if the
objective is `None`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated.
current_value: The current value, i.e. the expected best objective
given the observed points `D`. If omitted, forward will not
return the actual KG value, but the expected best objective
given the data set `D u X`.
"""
if sampler is None:
if num_fantasies is None:
raise ValueError(
"Must specify `num_fantasies` if no `sampler` is provided."
)
# base samples should be fixed for joint optimization over X, X_fantasies
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([num_fantasies]))
elif num_fantasies is not None:
if sampler.sample_shape != torch.Size([num_fantasies]):
raise ValueError(
f"The sampler shape must match num_fantasies={num_fantasies}."
)
else:
num_fantasies = sampler.sample_shape[0]
super(MCAcquisitionFunction, self).__init__(model=model)
MCSamplerMixin.__init__(self, sampler=sampler)
# if not explicitly specified, we use the posterior mean for linear objs
if isinstance(objective, MCAcquisitionObjective) and inner_sampler is None:
inner_sampler = SobolQMCNormalSampler(sample_shape=torch.Size([128]))
elif objective is not None and not isinstance(
objective, MCAcquisitionObjective
):
raise UnsupportedError(
"Objectives that are not an `MCAcquisitionObjective` are not supported."
)
if objective is None and model.num_outputs != 1:
if posterior_transform is None:
raise UnsupportedError(
"Must specify an objective or a posterior transform when using "
"a multi-output model."
)
elif not posterior_transform.scalarize:
raise UnsupportedError(
"If using a multi-output model without an objective, "
"posterior_transform must scalarize the output."
)
self.objective = objective
self.posterior_transform = posterior_transform
self.set_X_pending(X_pending)
self.X_pending: Tensor = self.X_pending
self.inner_sampler = inner_sampler
self.num_fantasies: int = num_fantasies
self.current_value = current_value
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qKnowledgeGradient on the candidate set `X`.
Args:
X: A `b x (q + num_fantasies) x d` Tensor with `b` t-batches of
`q + num_fantasies` design points each. We split this X tensor
into two parts in the `q` dimension (`dim=-2`). The first `q`
are the q-batch of design points and the last num_fantasies are
the current solutions of the inner optimization problem.
`X_fantasies = X[..., -num_fantasies:, :]`
`X_fantasies.shape = b x num_fantasies x d`
`X_actual = X[..., :-num_fantasies, :]`
`X_actual.shape = b x q x d`
Returns:
A Tensor of shape `b`. For t-batch b, the q-KG value of the design
`X_actual[b]` is averaged across the fantasy models, where
`X_fantasies[b, i]` is chosen as the final selection for the
`i`-th fantasy model.
NOTE: If `current_value` is not provided, then this is not the
true KG value of `X_actual[b]`, and `X_fantasies[b, : ]` must be
maximized at fixed `X_actual[b]`.
"""
X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=self.num_fantasies)
# We only concatenate X_pending into the X part after splitting
if self.X_pending is not None:
X_actual = torch.cat(
[X_actual, match_batch_shape(self.X_pending, X_actual)], dim=-2
)
# construct the fantasy model of shape `num_fantasies x b`
fantasy_model = self.model.fantasize(
X=X_actual, sampler=self.sampler, observation_noise=True
)
# get the value function
value_function = _get_value_function(
model=fantasy_model,
objective=self.objective,
posterior_transform=self.posterior_transform,
sampler=self.inner_sampler,
)
# make sure to propagate gradients to the fantasy model train inputs
with settings.propagate_grads(True):
values = value_function(X=X_fantasies) # num_fantasies x b
if self.current_value is not None:
values = values - self.current_value
# return average over the fantasy samples
return values.mean(dim=0)
@concatenate_pending_points
@t_batch_mode_transform()
def evaluate(self, X: Tensor, bounds: Tensor, **kwargs: Any) -> Tensor:
r"""Evaluate qKnowledgeGradient on the candidate set `X_actual` by
solving the inner optimization problem.
Args:
X: A `b x q x d` Tensor with `b` t-batches of `q` design points
each. Unlike `forward()`, this does not include solutions of the
inner optimization problem.
bounds: A `2 x d` tensor of lower and upper bounds for each column of
the solutions to the inner problem.
kwargs: Additional keyword arguments. This includes the options for
optimization of the inner problem, i.e. `num_restarts`, `raw_samples`,
an `options` dictionary to be passed on to the optimization helpers, and
a `scipy_options` dictionary to be passed to `scipy.minimize`.
Returns:
A Tensor of shape `b`. For t-batch b, the q-KG value of the design
`X[b]` is averaged across the fantasy models.
NOTE: If `current_value` is not provided, then this is not the
true KG value of `X[b]`.
"""
if hasattr(self, "expand"):
X = self.expand(X)
# construct the fantasy model of shape `num_fantasies x b`
fantasy_model = self.model.fantasize(
X=X, sampler=self.sampler, observation_noise=True
)
# get the value function
value_function = _get_value_function(
model=fantasy_model,
objective=self.objective,
posterior_transform=self.posterior_transform,
sampler=self.inner_sampler,
project=getattr(self, "project", None),
)
from botorch.generation.gen import gen_candidates_scipy
# optimize the inner problem
from botorch.optim.initializers import gen_value_function_initial_conditions
initial_conditions = gen_value_function_initial_conditions(
acq_function=value_function,
bounds=bounds,
num_restarts=kwargs.get("num_restarts", 20),
raw_samples=kwargs.get("raw_samples", 1024),
current_model=self.model,
options={**kwargs.get("options", {}), **kwargs.get("scipy_options", {})},
)
_, values = gen_candidates_scipy(
initial_conditions=initial_conditions,
acquisition_function=value_function,
lower_bounds=bounds[0],
upper_bounds=bounds[1],
options=kwargs.get("scipy_options"),
)
# get the maximizer for each batch
values, _ = torch.max(values, dim=0)
if self.current_value is not None:
values = values - self.current_value
# NOTE: using getattr to cover both no-attribute with qKG and None with qMFKG
if getattr(self, "cost_aware_utility", None) is not None:
values = self.cost_aware_utility(
X=X, deltas=values, sampler=self.cost_sampler
)
# return average over the fantasy samples
return values.mean(dim=0)
def get_augmented_q_batch_size(self, q: int) -> int:
r"""Get augmented q batch size for one-shot optimization.
Args:
q: The number of candidates to consider jointly.
Returns:
The augmented size for one-shot optimization (including variables
parameterizing the fantasy solutions).
"""
return q + self.num_fantasies
def extract_candidates(self, X_full: Tensor) -> Tensor:
r"""We only return X as the set of candidates post-optimization.
Args:
X_full: A `b x (q + num_fantasies) x d`-dim Tensor with `b`
t-batches of `q + num_fantasies` design points each.
Returns:
A `b x q x d`-dim Tensor with `b` t-batches of `q` design points each.
"""
return X_full[..., : -self.num_fantasies, :]
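# Hedged usage sketch (illustrative only, not part of the library API): the
# one-shot input layout described in `forward` above. `SingleTaskGP` is a
# standard BoTorch model; the toy data, `num_fantasies`, and shapes are
# assumptions made for the example.
def _example_one_shot_kg_shapes():  # pragma: no cover - illustrative only
    from botorch.models import SingleTaskGP
    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    qkg = qKnowledgeGradient(SingleTaskGP(train_X, train_Y), num_fantasies=16)
    q = 2
    # One-shot input: q actual design points plus one point per fantasy model.
    X_full = torch.rand(5, qkg.get_augmented_q_batch_size(q), 2, dtype=torch.double)
    values = qkg(X_full)  # shape: 5 (one value per t-batch)
    candidates = qkg.extract_candidates(X_full)  # shape: 5 x 2 x 2
    return values, candidates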
class qMultiFidelityKnowledgeGradient(qKnowledgeGradient):
r"""Batch Knowledge Gradient for multi-fidelity optimization.
A version of `qKnowledgeGradient` that supports multi-fidelity optimization
via a `CostAwareUtility` and the `project` and `expand` operators. If none
of these are set, this acquisition function reduces to `qKnowledgeGradient`.
Through `valfunc_cls` and `valfunc_argfac`, this can be changed into a custom
multi-fidelity acquisition function (it is only KG if the terminal value is
computed using a posterior mean).
"""
def __init__(
self,
model: Model,
num_fantasies: Optional[int] = 64,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
inner_sampler: Optional[MCSampler] = None,
X_pending: Optional[Tensor] = None,
current_value: Optional[Tensor] = None,
cost_aware_utility: Optional[CostAwareUtility] = None,
project: Callable[[Tensor], Tensor] = lambda X: X,
expand: Callable[[Tensor], Tensor] = lambda X: X,
valfunc_cls: Optional[Type[AcquisitionFunction]] = None,
valfunc_argfac: Optional[Callable[[Model], Dict[str, Any]]] = None,
) -> None:
r"""Multi-Fidelity q-Knowledge Gradient (one-shot optimization).
Args:
model: A fitted model. Must support fantasizing.
num_fantasies: The number of fantasy points to use. More fantasy
points result in a better approximation, at the expense of
memory and wall time. Unused if `sampler` is specified.
sampler: The sampler used to sample fantasy observations. Optional
if `num_fantasies` is specified.
objective: The objective under which the samples are evaluated. If
`None`, then the analytic posterior mean is used. Otherwise, the
objective is MC-evaluated (using inner_sampler).
posterior_transform: An optional PosteriorTransform. If given, this
transforms the posterior before evaluation. If `objective is None`,
then the analytic posterior mean of the transformed posterior is
used. If `objective` is given, the `inner_sampler` is used to draw
samples from the transformed posterior, which are then evaluated under
the `objective`.
inner_sampler: The sampler used for inner sampling. Ignored if the
objective is `None`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated.
current_value: The current value, i.e. the expected best objective
given the observed points `D`. If omitted, forward will not
return the actual KG value, but the expected best objective
given the data set `D u X`.
cost_aware_utility: A CostAwareUtility computing the cost-transformed
utility from a candidate set and samples of increases in utility.
project: A callable mapping a `batch_shape x q x d` tensor of design
points to a tensor with shape `batch_shape x q_term x d` projected
to the desired target set (e.g. the target fidelities in case of
multi-fidelity optimization). For the basic case, `q_term = q`.
expand: A callable mapping a `batch_shape x q x d` input tensor to
                a `batch_shape x (q + q_e) x d`-dim output tensor, where the
`q_e` additional points in each q-batch correspond to
additional ("trace") observations.
valfunc_cls: An acquisition function class to be used as the terminal
value function.
valfunc_argfac: An argument factory, i.e. callable that maps a `Model`
to a dictionary of kwargs for the terminal value function (e.g.
`best_f` for `ExpectedImprovement`).
"""
if current_value is None and cost_aware_utility is not None:
raise UnsupportedError(
"Cost-aware KG requires current_value to be specified."
)
super().__init__(
model=model,
num_fantasies=num_fantasies,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
inner_sampler=inner_sampler,
X_pending=X_pending,
current_value=current_value,
)
self.cost_aware_utility = cost_aware_utility
self.project = project
self.expand = expand
self._cost_sampler = None
self.valfunc_cls = valfunc_cls
self.valfunc_argfac = valfunc_argfac
@property
def cost_sampler(self):
if self._cost_sampler is None:
# Note: Using the deepcopy here is essential. Removing this poses a
# problem if the base model and the cost model have a different number
# of outputs or test points (this would be caused by expand), as this
# would trigger re-sampling the base samples in the fantasy sampler.
            # By cloning the sampler here, the right thing will happen if the
            # sizes are compatible; if they are not, this will result in
            # samples being drawn using different base samples, but it will at
            # least avoid changing the state of the fantasy sampler.
self._cost_sampler = deepcopy(self.sampler)
return self._cost_sampler
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qMultiFidelityKnowledgeGradient on the candidate set `X`.
Args:
X: A `b x (q + num_fantasies) x d` Tensor with `b` t-batches of
`q + num_fantasies` design points each. We split this X tensor
into two parts in the `q` dimension (`dim=-2`). The first `q`
are the q-batch of design points and the last num_fantasies are
the current solutions of the inner optimization problem.
`X_fantasies = X[..., -num_fantasies:, :]`
`X_fantasies.shape = b x num_fantasies x d`
`X_actual = X[..., :-num_fantasies, :]`
`X_actual.shape = b x q x d`
                In addition, `X` may be augmented with fidelity parameters as
                part of the `d`-dimension. Projecting fidelities to the target
                fidelity is handled by `project`.
Returns:
A Tensor of shape `b`. For t-batch b, the q-KG value of the design
`X_actual[b]` is averaged across the fantasy models, where
`X_fantasies[b, i]` is chosen as the final selection for the
`i`-th fantasy model.
NOTE: If `current_value` is not provided, then this is not the
true KG value of `X_actual[b]`, and `X_fantasies[b, : ]` must be
maximized at fixed `X_actual[b]`.
"""
X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=self.num_fantasies)
# We only concatenate X_pending into the X part after splitting
if self.X_pending is not None:
X_eval = torch.cat(
[X_actual, match_batch_shape(self.X_pending, X_actual)], dim=-2
)
else:
X_eval = X_actual
# construct the fantasy model of shape `num_fantasies x b`
# expand X (to potentially add trace observations)
fantasy_model = self.model.fantasize(
X=self.expand(X_eval), sampler=self.sampler, observation_noise=True
)
# get the value function
value_function = _get_value_function(
model=fantasy_model,
objective=self.objective,
posterior_transform=self.posterior_transform,
sampler=self.inner_sampler,
project=self.project,
valfunc_cls=self.valfunc_cls,
valfunc_argfac=self.valfunc_argfac,
)
# make sure to propagate gradients to the fantasy model train inputs
# project the fantasy points
with settings.propagate_grads(True):
values = value_function(X=X_fantasies) # num_fantasies x b
if self.current_value is not None:
values = values - self.current_value
if self.cost_aware_utility is not None:
values = self.cost_aware_utility(
X=X_actual, deltas=values, sampler=self.cost_sampler
)
# return average over the fantasy samples
return values.mean(dim=0)
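# Hedged sketch (illustrative only): `project` / `expand` callables of the kind
# described in the constructor docstring above, assuming the last input column
# is a fidelity parameter with target fidelity 1.0. BoTorch also provides
# `project_to_target_fidelity` and `expand_trace_observations` in
# `botorch.acquisition.utils` for this purpose.
def _example_fidelity_callables():  # pragma: no cover - illustrative only
    def project_to_full_fidelity(X: Tensor) -> Tensor:
        # Replace the (assumed) fidelity column with the target fidelity 1.0.
        X_proj = X.clone()
        X_proj[..., -1] = 1.0
        return X_proj
    def expand_with_trace_observation(X: Tensor) -> Tensor:
        # Append a copy of each q-batch at half fidelity as a "trace" observation.
        X_trace = X.clone()
        X_trace[..., -1] = 0.5
        return torch.cat([X, X_trace], dim=-2)
    return project_to_full_fidelity, expand_with_trace_observation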
class ProjectedAcquisitionFunction(AcquisitionFunction):
r"""
Defines a wrapper around an `AcquisitionFunction` that incorporates the project
operator. Typically used to handle value functions in look-ahead methods.
"""
def __init__(
self,
base_value_function: AcquisitionFunction,
project: Callable[[Tensor], Tensor],
) -> None:
r"""
Args:
base_value_function: The wrapped `AcquisitionFunction`.
project: A callable mapping a `batch_shape x q x d` tensor of design
points to a tensor with shape `batch_shape x q_term x d` projected
to the desired target set (e.g. the target fidelities in case of
multi-fidelity optimization). For the basic case, `q_term = q`.
"""
super().__init__(base_value_function.model)
self.base_value_function = base_value_function
self.project = project
self.objective = getattr(base_value_function, "objective", None)
self.posterior_transform = base_value_function.posterior_transform
self.sampler = getattr(base_value_function, "sampler", None)
def forward(self, X: Tensor) -> Tensor:
return self.base_value_function(self.project(X))
def _get_value_function(
model: Model,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
sampler: Optional[MCSampler] = None,
project: Optional[Callable[[Tensor], Tensor]] = None,
valfunc_cls: Optional[Type[AcquisitionFunction]] = None,
valfunc_argfac: Optional[Callable[[Model], Dict[str, Any]]] = None,
) -> AcquisitionFunction:
r"""Construct value function (i.e. inner acquisition function)."""
if valfunc_cls is not None:
common_kwargs: Dict[str, Any] = {
"model": model,
"posterior_transform": posterior_transform,
}
if issubclass(valfunc_cls, MCAcquisitionFunction):
common_kwargs["sampler"] = sampler
common_kwargs["objective"] = objective
kwargs = valfunc_argfac(model=model) if valfunc_argfac is not None else {}
base_value_function = valfunc_cls(**common_kwargs, **kwargs)
else:
if objective is not None:
base_value_function = qSimpleRegret(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
)
else:
base_value_function = PosteriorMean(
model=model, posterior_transform=posterior_transform
)
if project is None:
return base_value_function
else:
return ProjectedAcquisitionFunction(
base_value_function=base_value_function,
project=project,
)
def _split_fantasy_points(X: Tensor, n_f: int) -> Tuple[Tensor, Tensor]:
r"""Split a one-shot optimization input into actual and fantasy points
Args:
X: A `batch_shape x (q + n_f) x d`-dim tensor of actual and fantasy
points
Returns:
2-element tuple containing
- A `batch_shape x q x d`-dim tensor `X_actual` of input candidates.
- A `n_f x batch_shape x 1 x d`-dim tensor `X_fantasies` of fantasy
points, where `X_fantasies[i, batch_idx]` is the i-th fantasy point
associated with the batch indexed by `batch_idx`.
"""
if n_f > X.size(-2):
raise ValueError(
f"n_f ({n_f}) must be less than the q-batch dimension of X ({X.size(-2)})"
)
split_sizes = [X.size(-2) - n_f, n_f]
X_actual, X_fantasies = torch.split(X, split_sizes, dim=-2)
# X_fantasies is b x num_fantasies x d, needs to be num_fantasies x b x 1 x d
# for batch mode evaluation with batch shape num_fantasies x b.
# b x num_fantasies x d --> num_fantasies x b x d
X_fantasies = X_fantasies.permute(-2, *range(X_fantasies.dim() - 2), -1)
# num_fantasies x b x 1 x d
X_fantasies = X_fantasies.unsqueeze(dim=-2)
return X_actual, X_fantasies
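# Hedged shape sketch (illustrative only) for `_split_fantasy_points`: a
# `b x (q + n_f) x d` one-shot input splits into a `b x q x d` tensor of actual
# candidates and an `n_f x b x 1 x d` tensor of fantasy points.
def _example_split_fantasy_points():  # pragma: no cover - illustrative only
    X = torch.rand(5, 2 + 16, 3)  # b = 5, q = 2, n_f = 16, d = 3
    X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=16)
    assert X_actual.shape == torch.Size([5, 2, 3])
    assert X_fantasies.shape == torch.Size([16, 5, 1, 3])
    return X_actual, X_fantasies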
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_, [Wilson2017reparam]_ and
[Balandat2020botorch]_.
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
from __future__ import annotations
import math
from abc import ABC, abstractmethod
from copy import deepcopy
from functools import partial
from typing import Callable, List, Optional, Protocol, Tuple, Union
import torch
from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin
from botorch.acquisition.cached_cholesky import CachedCholeskyMCAcquisitionFunction
from botorch.acquisition.objective import (
ConstrainedMCObjective,
IdentityMCObjective,
MCAcquisitionObjective,
PosteriorTransform,
)
from botorch.acquisition.utils import (
compute_best_feasible_objective,
prune_inferior_points,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.utils.objective import compute_smoothed_feasibility_indicator
from botorch.utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from torch import Tensor
class MCAcquisitionFunction(AcquisitionFunction, MCSamplerMixin, ABC):
r"""
Abstract base class for Monte-Carlo based batch acquisition functions.
:meta private:
"""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. If not given,
a sampler is generated on the fly within the
`get_posterior_samples` method using
`botorch.sampling.get_sampler`.
NOTE: For posteriors that do not support base samples,
                a sampler compatible with the intended use case must be provided.
See `ForkedRNGSampler` and `StochasticSampler` as examples.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
            X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
                that have been submitted for function evaluation but have not
                yet been evaluated.
"""
super().__init__(model=model)
MCSamplerMixin.__init__(self, sampler=sampler)
if objective is None and model.num_outputs != 1:
if posterior_transform is None:
raise UnsupportedError(
"Must specify an objective or a posterior transform when using "
"a multi-output model."
)
elif not posterior_transform.scalarize:
raise UnsupportedError(
"If using a multi-output model without an objective, "
"posterior_transform must scalarize the output."
)
if objective is None:
objective = IdentityMCObjective()
self.posterior_transform = posterior_transform
self.objective: MCAcquisitionObjective = objective
self.set_X_pending(X_pending)
def _get_samples_and_objectives(self, X: Tensor) -> Tuple[Tensor, Tensor]:
"""Computes posterior samples and objective values at input X.
Args:
X: A `batch_shape x q x d`-dim Tensor of model inputs.
Returns:
A two-tuple `(samples, obj)`, where `samples` is a tensor of posterior
samples with shape `sample_shape x batch_shape x q x m`, and `obj` is a
tensor of MC objective values with shape `sample_shape x batch_shape x q`.
"""
posterior = self.model.posterior(
X=X, posterior_transform=self.posterior_transform
)
samples = self.get_posterior_samples(posterior)
return samples, self.objective(samples=samples, X=X)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `batch_shape x q x d` X Tensor of t-batches with `q` `d`-dim
design points each, and returns a Tensor with shape `batch_shape'`, where
`batch_shape'` is the broadcasted batch shape of model and input `X`. Should
utilize the result of `set_X_pending` as needed to account for pending function
evaluations.
"""
pass # pragma: no cover
class SampleReductionProtocol(Protocol):
"""For static type check of SampleReducingMCAcquisitionFunction's mc_reduction."""
@staticmethod
def __call__(X: Tensor, *, dim: torch.Size) -> Tensor:
pass # pragma: no cover
class SampleReducingMCAcquisitionFunction(MCAcquisitionFunction):
r"""MC-based batch acquisition function that reduces across samples and implements
a general treatment of outcome constraints.
This class's `forward` computes the - possibly constrained - acquisition value by
(1) computing the unconstrained utility for each MC sample using `_sample_forward`,
(2) weighing the utility values by the constraint indicator per MC sample, and
(3) reducing (e.g. averaging) the weighted utility values over the MC dimension.
NOTE: Do *NOT* override the `forward` method, unless you have thought about it well.
`forward` is implemented generically to incorporate constraints in a principled way,
and takes care of reducing over the Monte Carlo and batch dimensions via the
`sample_reduction` and `q_reduction` arguments, which default to `torch.mean` and
`torch.max`, respectively.
In order to implement a custom SampleReducingMCAcquisitionFunction, we only need to
implement the `_sample_forward(obj: Tensor) -> Tensor` method, which maps objective
samples to acquisition utility values without reducing the Monte Carlo and batch
(i.e. q) dimensions (see details in the docstring of `_sample_forward`).
A note on design choices:
    The primary purpose of `SampleReducingMCAcquisitionFunction` is to support outcome
constraints. On the surface, designing a wrapper `ConstrainedMCAcquisitionFunction`
could be an elegant solution to this end, but it would still require the acquisition
functions to implement a `_sample_forward` method to weigh acquisition utilities at
the sample level. Further, `qNoisyExpectedImprovement` is a special case that is
hard to encompass in this pattern, since it requires the computation of the best
*feasible* objective, which requires access to the constraint functions. However,
if the constraints are stored in a wrapper class, they will be inaccessible to the
forward pass. These problems are circumvented by the design of this class.
"""
_log: bool = False # whether the acquisition utilities are in log-space
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sample_reduction: SampleReductionProtocol = torch.mean,
q_reduction: SampleReductionProtocol = torch.amax,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
fat: bool = False,
):
r"""Constructor of SampleReducingMCAcquisitionFunction.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. If not given, a
sampler is generated on the fly within the
`get_posterior_samples` method using
`botorch.sampling.get_sampler`.
NOTE: For posteriors that do not support base samples,
                a sampler compatible with the intended use case must be provided.
See `ForkedRNGSampler` and `StochasticSampler` as examples.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
NOTE: `ConstrainedMCObjective` for outcome constraints is deprecated in
favor of passing the `constraints` directly to this constructor.
posterior_transform: A `PosteriorTransform` (optional).
            X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
                that have been submitted for function evaluation but have not
                yet been evaluated.
sample_reduction: A callable that takes in a `sample_shape x batch_shape`
Tensor of acquisition utility values, a keyword-argument `dim` that
specifies the sample dimensions to reduce over, and returns a
`batch_shape`-dim Tensor of acquisition values.
q_reduction: A callable that takes in a `sample_shape x batch_shape x q`
Tensor of acquisition utility values, a keyword-argument `dim` that
specifies the q dimension to reduce over (i.e. -1), and returns a
`sample_shape x batch_shape`-dim Tensor of acquisition values.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are considered satisfied if the output is less than zero.
NOTE: Constraint-weighting is only compatible with non-negative
                acquisition utilities, e.g. all improvement-based acquisition functions.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
                approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
            fat: Whether to apply a fat-tailed smooth approximation to the feasibility
indicator or the canonical sigmoid approximation.
"""
if constraints is not None and isinstance(objective, ConstrainedMCObjective):
raise ValueError(
"ConstrainedMCObjective as well as constraints passed to constructor."
"Choose one or the other, preferably the latter."
)
# TODO: deprecate ConstrainedMCObjective
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
)
        # Should the need arise, sample_dim could be exposed in the constructor.
sample_dim = tuple(range(len(self.sample_shape)))
self._sample_reduction = partial(sample_reduction, dim=sample_dim)
self._q_reduction = partial(q_reduction, dim=-1)
self._constraints = constraints
self._eta = eta
self._fat = fat
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Computes the acquisition value associated with the input `X`. Weighs the
acquisition utility values by smoothed constraint indicators if `constraints`
was passed to the constructor of the class. Applies `self.sample_reduction` and
`self.q_reduction` to reduce over the Monte Carlo and batch (q) dimensions.
NOTE: Do *NOT* override the `forward` method for a custom acquisition function.
Instead, implement the `_sample_forward` method. See the docstring of this class
for details.
Args:
X: A `batch_shape x q x d` Tensor of t-batches with `q` `d`-dim
design points each.
Returns:
A Tensor with shape `batch_shape'`, where `batch_shape'` is the broadcasted
batch shape of model and input `X`.
"""
non_reduced_acqval = self._non_reduced_forward(X=X)
return self._sample_reduction(self._q_reduction(non_reduced_acqval))
def _non_reduced_forward(self, X: Tensor) -> Tensor:
"""Compute the constrained acquisition values at the MC-sample, q level.
Args:
X: A `batch_shape x q x d` Tensor of t-batches with `q` `d`-dim
design points each.
Returns:
            A Tensor with shape `sample_shape x batch_shape x q`.
"""
samples, obj = self._get_samples_and_objectives(X)
        acqval = self._sample_forward(obj)  # `sample_shape x batch_shape x q`
return self._apply_constraints(acqval=acqval, samples=samples)
@abstractmethod
def _sample_forward(self, obj: Tensor) -> Tensor:
"""Evaluates the acquisition utility per MC sample based on objective value obj.
Should utilize the result of `set_X_pending` as needed to account for pending
function evaluations.
Args:
obj: A `sample_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of acquisition utility values.
"""
pass # pragma: no cover
def _apply_constraints(self, acqval: Tensor, samples: Tensor) -> Tensor:
"""Multiplies the acquisition utility by constraint indicators.
Args:
acqval: `sample_shape x batch_shape x q`-dim acquisition utility values.
samples: `sample_shape x batch_shape x q x m`-dim posterior samples.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of acquisition utility values
multiplied by a smoothed constraint indicator per sample.
"""
if self._constraints is not None:
if not self._log and (acqval < 0).any():
raise ValueError(
"Constraint-weighting requires unconstrained "
"acquisition values to be non-negative."
)
ind = compute_smoothed_feasibility_indicator(
constraints=self._constraints,
samples=samples,
eta=self._eta,
log=self._log,
fat=self._fat,
)
acqval = acqval.add(ind) if self._log else acqval.mul(ind)
return acqval
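# Hedged sketch (illustrative only, not part of the library): a minimal custom
# subclass following the pattern described in the class docstring above -- only
# `_sample_forward` is implemented, while the generic `forward` handles
# constraint weighting and the sample / q reductions. The threshold utility is
# an assumption made for illustration.
class _qThresholdUtilitySketch(SampleReducingMCAcquisitionFunction):
    r"""Per-sample utility: smoothed indicator that the objective exceeds `tau`."""
    def __init__(self, model: Model, tau: float, **kwargs) -> None:
        super().__init__(model=model, **kwargs)
        self.register_buffer("tau", torch.as_tensor(tau))
    def _sample_forward(self, obj: Tensor) -> Tensor:
        # Non-negative per-sample utilities, as required for constraint weighting.
        return torch.sigmoid(obj - self.tau.to(obj))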
class qExpectedImprovement(SampleReducingMCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
        >>> best_f = train_Y.max()
        >>> sampler = SobolQMCNormalSampler(sample_shape=torch.Size([1024]))
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless). Can be
a `batch_shape`-shaped tensor, which in case of a batched model
specifies potentially different values for each element of the batch.
sampler: The sampler used to draw base samples. See `MCAcquisitionFunction`
                for more details.
objective: The MCAcquisitionObjective under which the samples are evaluated.
Defaults to `IdentityMCObjective()`.
NOTE: `ConstrainedMCObjective` for outcome constraints is deprecated in
favor of passing the `constraints` directly to this constructor.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call. Copied and set to have no
gradient.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are considered satisfied if the output is less than zero.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
                approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
constraints=constraints,
eta=eta,
)
self.register_buffer("best_f", torch.as_tensor(best_f, dtype=float))
def _sample_forward(self, obj: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement per sample on the candidate set `X`.
Args:
obj: A `sample_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of improvement utility values.
"""
return (obj - self.best_f.unsqueeze(-1).to(obj)).clamp_min(0)
class qNoisyExpectedImprovement(
SampleReducingMCAcquisitionFunction, CachedCholeskyMCAcquisitionFunction
):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
        >>> sampler = SobolQMCNormalSampler(sample_shape=torch.Size([1024]))
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = True,
cache_root: bool = True,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
marginalize_dim: Optional[int] = None,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `batch_shape x r x d`-dim Tensor of `r` design points
                that have already been observed. These points are considered as
                potential candidates for the best design point.
sampler: The sampler used to draw base samples. See `MCAcquisitionFunction`
                for more details.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
NOTE: `ConstrainedMCObjective` for outcome constraints is deprecated in
favor of passing the `constraints` directly to this constructor.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
                that have been submitted for function evaluation but have not
                yet been evaluated. Concatenated into `X` upon forward call.
                Copied and set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
cache_root: A boolean indicating whether to cache the root
decomposition over `X_baseline` and use low-rank updates.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are considered satisfied if the output is less than zero.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
marginalize_dim: The dimension to marginalize over.
TODO: similar to qNEHVI, when we are using sequential greedy candidate
selection, we could incorporate pending points X_baseline and compute
the incremental qNEI from the new point. This would greatly increase
efficiency for large batches.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
constraints=constraints,
eta=eta,
)
self._setup(model=model, cache_root=cache_root)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model,
X=X_baseline,
objective=objective,
posterior_transform=posterior_transform,
marginalize_dim=marginalize_dim,
)
self.register_buffer("X_baseline", X_baseline)
# registering buffers for _get_samples_and_objectives in the next `if` block
self.register_buffer("baseline_samples", None)
self.register_buffer("baseline_obj", None)
if self._cache_root:
self.q_in = -1
# set baseline samples
with torch.no_grad(): # this is _get_samples_and_objectives(X_baseline)
posterior = self.model.posterior(
X_baseline, posterior_transform=self.posterior_transform
)
# Note: The root decomposition is cached in two different places. It
# may be confusing to have two different caches, but this is not
# trivial to change since each is needed for a different reason:
# - LinearOperator caching to `posterior.mvn` allows for reuse within
# this function, which may be helpful if the same root decomposition
# is produced by the calls to `self.base_sampler` and
# `self._cache_root_decomposition`.
# - self._baseline_L allows a root decomposition to be persisted outside
# this method.
baseline_samples = self.get_posterior_samples(posterior)
baseline_obj = self.objective(baseline_samples, X=X_baseline)
# We make a copy here because we will write an attribute `base_samples`
# to `self.base_sampler.base_samples`, and we don't want to mutate
# `self.sampler`.
self.base_sampler = deepcopy(self.sampler)
self.baseline_samples = baseline_samples
self.baseline_obj = baseline_obj
self.register_buffer(
"_baseline_best_f",
self._compute_best_feasible_objective(
samples=baseline_samples, obj=baseline_obj
),
)
self._baseline_L = self._compute_root_decomposition(posterior=posterior)
def compute_best_f(self, obj: Tensor) -> Tensor:
"""Computes the best (feasible) noisy objective value.
Args:
obj: `sample_shape x batch_shape x q`-dim Tensor of objectives in forward.
Returns:
A `sample_shape x batch_shape x 1`-dim Tensor of best feasible objectives.
"""
if self._cache_root:
val = self._baseline_best_f
else:
val = self._compute_best_feasible_objective(
samples=self.baseline_samples, obj=self.baseline_obj
)
# ensuring shape, dtype, device compatibility with obj
n_sample_dims = len(self.sample_shape)
view_shape = torch.Size(
[
*val.shape[:n_sample_dims], # sample dimensions
*(1,) * (obj.ndim - val.ndim), # pad to match obj
*val.shape[n_sample_dims:], # the rest
]
)
return val.view(view_shape).to(obj)
def _sample_forward(self, obj: Tensor) -> Tensor:
"""Evaluate qNoisyExpectedImprovement per objective value in `obj`.
Args:
obj: A `sample_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of noisy improvement values.
"""
return (obj - self.compute_best_f(obj)).clamp_min(0)
def _get_samples_and_objectives(self, X: Tensor) -> Tuple[Tensor, Tensor]:
r"""Compute samples at new points, using the cached root decomposition.
Args:
X: A `batch_shape x q x d`-dim tensor of inputs.
Returns:
A two-tuple `(samples, obj)`, where `samples` is a tensor of posterior
samples with shape `sample_shape x batch_shape x q x m`, and `obj` is a
tensor of MC objective values with shape `sample_shape x batch_shape x q`.
"""
q = X.shape[-2]
X_full = torch.cat([match_batch_shape(self.X_baseline, X), X], dim=-2)
# TODO: Implement more efficient way to compute posterior over both training and
# test points in GPyTorch (https://github.com/cornellius-gp/gpytorch/issues/567)
posterior = self.model.posterior(
X_full, posterior_transform=self.posterior_transform
)
if not self._cache_root:
samples_full = super().get_posterior_samples(posterior)
samples = samples_full[..., -q:, :]
obj_full = self.objective(samples_full, X=X_full)
# assigning baseline buffers so `best_f` can be computed in _sample_forward
self.baseline_obj, obj = obj_full[..., :-q], obj_full[..., -q:]
self.baseline_samples = samples_full[..., :-q, :]
return samples, obj
# handle one-to-many input transforms
n_plus_q = X_full.shape[-2]
n_w = posterior._extended_shape()[-2] // n_plus_q
q_in = q * n_w
self._set_sampler(q_in=q_in, posterior=posterior)
samples = self._get_f_X_samples(posterior=posterior, q_in=q_in)
obj = self.objective(samples, X=X_full[..., -q:, :])
return samples, obj
def _compute_best_feasible_objective(self, samples: Tensor, obj: Tensor) -> Tensor:
r"""Computes best feasible objective value from samples.
Args:
samples: `sample_shape x batch_shape x q x m`-dim posterior samples.
obj: A `sample_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `sample_shape x batch_shape x 1`-dim Tensor of best feasible objectives.
"""
return compute_best_feasible_objective(
samples=samples,
obj=obj,
constraints=self._constraints,
model=self.model,
objective=self.objective,
posterior_transform=self.posterior_transform,
X_baseline=self.X_baseline,
)
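# Editorial sketch (illustration only): instantiating the constrained variant of
# `qNoisyExpectedImprovement` described above. The two-output model, the constraint
# callable (feasible iff the second output is <= 0), and all sizes are placeholder
# assumptions; root caching is disabled here to keep the sketch simple.
def _example_constrained_qnei() -> Tensor:
    from botorch.acquisition.objective import GenericMCObjective
    from botorch.models import SingleTaskGP
    from botorch.sampling.normal import SobolQMCNormalSampler

    train_X = torch.rand(16, 3, dtype=torch.double)
    # outcome 0: objective value; outcome 1: constraint slack (feasible iff <= 0)
    train_Y = torch.cat(
        [train_X.sum(dim=-1, keepdim=True), train_X.norm(dim=-1, keepdim=True) - 1.0],
        dim=-1,
    )
    model = SingleTaskGP(train_X, train_Y)
    acqf = qNoisyExpectedImprovement(
        model=model,
        X_baseline=train_X,
        sampler=SobolQMCNormalSampler(sample_shape=torch.Size([128])),
        objective=GenericMCObjective(lambda samples, X=None: samples[..., 0]),
        constraints=[lambda samples: samples[..., 1]],
        prune_baseline=True,
        cache_root=False,
    )
    test_X = torch.rand(4, 2, 3, dtype=torch.double)  # 4 t-batches of q=2 candidates
    return acqf(test_X)  # -> a `4`-dim tensor of acquisition values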
class qProbabilityOfImprovement(SampleReducingMCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimates of a probability involve taking the expectation of an
indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1024)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless). Can
be a `batch_shape`-shaped tensor, which in case of a batched model
specifies potentially different values for each element of the batch.
sampler: The sampler used to draw base samples. See `MCAcquisitionFunction`
for more details.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
NOTE: `ConstrainedMCObjective` for outcome constraints is deprecated in
favor of passing the `constraints` directly to this constructor.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradients
estimates with higher variance.
constraints: A list of constraint callables which map posterior samples to
a scalar. The associated constraint is considered satisfied if this
scalar is less than zero.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
constraints=constraints,
eta=eta,
)
best_f = torch.as_tensor(best_f, dtype=float).unsqueeze(-1) # adding batch dim
self.register_buffer("best_f", best_f)
self.register_buffer("tau", torch.as_tensor(tau, dtype=float))
def _sample_forward(self, obj: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement per sample on the candidate set `X`.
Args:
obj: A `sample_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of improvement indicators.
"""
improvement = obj - self.best_f.to(obj)
return torch.sigmoid(improvement / self.tau)
class qSimpleRegret(SampleReducingMCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1024)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
def _sample_forward(self, obj: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret per sample on the candidate set `X`.
Args:
obj: A `sample_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of simple regret values.
"""
return obj
class qUpperConfidenceBound(SampleReducingMCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A
of [Wilson2017reparam].)
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1024)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. See `MCAcquisitionFunction`
for more details.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation but have not yet
been evaluated. Concatenated into X upon forward call. Copied and set to
have no gradient.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
def _sample_forward(self, obj: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound per sample on the candidate set `X`.
Args:
obj: A `sample_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of acquisition values.
"""
mean = obj.mean(dim=0)
return mean + self.beta_prime * (obj - mean).abs()
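# Editorial sketch (illustration only): for q = 1, the reparameterization above matches
# the analytic UCB value `mean + sqrt(beta) * std` in expectation, since
# E|Y - mu| = sigma * sqrt(2 / pi) for Y ~ N(mu, sigma^2) and
# beta_prime = sqrt(beta * pi / 2). The toy comparison below uses placeholder data and
# the acquisition function's default sampler.
def _example_qucb_sanity_check() -> None:
    from botorch.models import SingleTaskGP

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    beta = 0.5
    acqf = qUpperConfidenceBound(model=model, beta=beta)
    test_X = torch.rand(1, 1, 2, dtype=torch.double)  # a single q=1 candidate
    posterior = model.posterior(test_X)
    analytic = (posterior.mean + math.sqrt(beta) * posterior.variance.sqrt()).reshape(-1)
    # The MC estimate should be close to the analytic value.
    print(acqf(test_X), analytic)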
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for acquisition functions.
"""
from __future__ import annotations
from typing import Callable, List, Optional, Union
import torch
from botorch.acquisition import monte_carlo
from botorch.acquisition.multi_objective import monte_carlo as moo_monte_carlo
from botorch.acquisition.objective import MCAcquisitionObjective, PosteriorTransform
from botorch.acquisition.utils import compute_best_feasible_objective
from botorch.models.model import Model
from botorch.sampling.get_sampler import get_sampler
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from torch import Tensor
def get_acquisition_function(
acquisition_function_name: str,
model: Model,
objective: MCAcquisitionObjective,
X_observed: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Optional[Union[Tensor, float]] = 1e-3,
mc_samples: int = 512,
seed: Optional[int] = None,
*,
# optional parameters that are only needed for certain acquisition functions
tau: float = 1e-3,
prune_baseline: bool = True,
marginalize_dim: Optional[int] = None,
cache_root: bool = True,
beta: Optional[float] = None,
ref_point: Union[None, List[float], Tensor] = None,
Y: Optional[Tensor] = None,
alpha: float = 0.0,
) -> monte_carlo.MCAcquisitionFunction:
r"""Convenience function for initializing botorch acquisition functions.
Args:
acquisition_function_name: Name of the acquisition function.
model: A fitted model.
objective: A MCAcquisitionObjective.
X_observed: A `m1 x d`-dim Tensor of `m1` design points that have
already been observed.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `m2 x d`-dim Tensor of `m2` design points whose evaluation
is pending.
constraints: A list of callables, each mapping a Tensor of dimension
`sample_shape x batch-shape x q x m` to a Tensor of dimension
`sample_shape x batch-shape x q`, where negative values imply
feasibility. Used for all acquisition functions except qSR and qUCB.
eta: The temperature parameter for the sigmoid function used for the
differentiable approximation of the constraints. In case of a float the
same eta is used for every constraint in constraints. In case of a
tensor the length of the tensor must match the number of provided
constraints. The i-th constraint is then estimated with the i-th
eta value. Used for all acquisition functions except qSR and qUCB.
mc_samples: The number of samples to use for (q)MC evaluation of the
acquisition function.
seed: If provided, perform deterministic optimization (i.e. the
function to optimize is fixed and not stochastic).
Returns:
The requested acquisition function.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> obj = LinearMCObjective(weights=torch.tensor([1.0, 2.0]))
>>> acqf = get_acquisition_function("qEI", model, obj, train_X)
"""
# initialize the sampler
sampler = get_sampler(
posterior=model.posterior(X_observed[:1]),
sample_shape=torch.Size([mc_samples]),
seed=seed,
)
if posterior_transform is not None and acquisition_function_name in [
"qEHVI",
"qNEHVI",
]:
raise NotImplementedError(
"PosteriorTransforms are not yet implemented for multi-objective "
"acquisition functions."
)
# instantiate and return the requested acquisition function
if acquisition_function_name in ("qEI", "qLogEI", "qPI"):
# Since these are the non-noisy variants, use the posterior mean at the observed
# inputs directly to compute the best feasible value without sampling.
Y = model.posterior(X_observed, posterior_transform=posterior_transform).mean
obj = objective(samples=Y, X=X_observed)
best_f = compute_best_feasible_objective(
samples=Y,
obj=obj,
constraints=constraints,
model=model,
objective=objective,
posterior_transform=posterior_transform,
X_baseline=X_observed,
)
if acquisition_function_name == "qEI":
return monte_carlo.qExpectedImprovement(
model=model,
best_f=best_f,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
constraints=constraints,
eta=eta,
)
if acquisition_function_name == "qLogEI":
# putting the import here to avoid circular imports
# ideally, the entire function should be moved out of this file,
# but since it is used for legacy code to be deprecated, we keep it here.
from botorch.acquisition.logei import qLogExpectedImprovement
return qLogExpectedImprovement(
model=model,
best_f=best_f,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
constraints=constraints,
eta=eta,
)
elif acquisition_function_name == "qPI":
return monte_carlo.qProbabilityOfImprovement(
model=model,
best_f=best_f,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
tau=tau,
constraints=constraints,
eta=eta,
)
elif acquisition_function_name == "qNEI":
return monte_carlo.qNoisyExpectedImprovement(
model=model,
X_baseline=X_observed,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
prune_baseline=prune_baseline,
marginalize_dim=marginalize_dim,
cache_root=cache_root,
constraints=constraints,
eta=eta,
)
elif acquisition_function_name == "qLogNEI":
from botorch.acquisition.logei import qLogNoisyExpectedImprovement
return qLogNoisyExpectedImprovement(
model=model,
X_baseline=X_observed,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
prune_baseline=prune_baseline,
marginalize_dim=marginalize_dim,
cache_root=cache_root,
constraints=constraints,
eta=eta,
)
elif acquisition_function_name == "qSR":
return monte_carlo.qSimpleRegret(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
)
elif acquisition_function_name == "qUCB":
if beta is None:
raise ValueError("`beta` must be not be None for qUCB.")
return monte_carlo.qUpperConfidenceBound(
model=model,
beta=beta,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
)
elif acquisition_function_name == "qEHVI":
if Y is None:
raise ValueError("`Y` must not be None for qEHVI")
if ref_point is None:
raise ValueError("`ref_point` must not be None for qEHVI")
# get feasible points
if constraints is not None:
feas = torch.stack([c(Y) <= 0 for c in constraints], dim=-1).all(dim=-1)
Y = Y[feas]
obj = objective(Y)
if alpha > 0:
partitioning = NondominatedPartitioning(
ref_point=torch.as_tensor(ref_point, dtype=Y.dtype, device=Y.device),
Y=obj,
alpha=alpha,
)
else:
partitioning = FastNondominatedPartitioning(
ref_point=torch.as_tensor(ref_point, dtype=Y.dtype, device=Y.device),
Y=obj,
)
return moo_monte_carlo.qExpectedHypervolumeImprovement(
model=model,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
objective=objective,
constraints=constraints,
eta=eta,
X_pending=X_pending,
)
elif acquisition_function_name == "qNEHVI":
if ref_point is None:
raise ValueError("`ref_point` must not be None for qNEHVI")
return moo_monte_carlo.qNoisyExpectedHypervolumeImprovement(
model=model,
ref_point=ref_point,
X_baseline=X_observed,
sampler=sampler,
objective=objective,
constraints=constraints,
eta=eta,
prune_baseline=prune_baseline,
alpha=alpha,
X_pending=X_pending,
marginalize_dim=marginalize_dim,
cache_root=cache_root,
)
raise NotImplementedError(
f"Unknown acquisition function {acquisition_function_name}"
)
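# Editorial sketch (illustration only): a minimal end-to-end call of the convenience
# function above. The single-output model and data are placeholder assumptions;
# "qNEI" is used so that no `best_f`, `beta`, or `ref_point` is required.
def _example_get_acquisition_function() -> monte_carlo.MCAcquisitionFunction:
    from botorch.acquisition.objective import IdentityMCObjective
    from botorch.models import SingleTaskGP

    train_X = torch.rand(12, 3, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    return get_acquisition_function(
        acquisition_function_name="qNEI",
        model=model,
        objective=IdentityMCObjective(),
        X_observed=train_X,
        mc_samples=128,
        seed=0,
    )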
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Acquisition function for joint entropy search (JES).
.. [Hvarfner2022joint]
C. Hvarfner, F. Hutter, L. Nardi,
Joint Entropy Search for Maximally-informed Bayesian Optimization.
In Proceedings of the Annual Conference on Neural Information
Processing Systems (NeurIPS), 2022.
.. [Tu2022joint]
B. Tu, A. Gandy, N. Kantas, B. Shafei,
Joint Entropy Search for Multi-objective Bayesian Optimization.
In Proceedings of the Annual Conference on Neural Information
Processing Systems (NeurIPS), 2022.
"""
from __future__ import annotations
import warnings
from math import log, pi
from typing import Optional
import torch
from botorch import settings
from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin
from botorch.acquisition.objective import PosteriorTransform
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.gp_regression import MIN_INFERRED_NOISE_LEVEL
from botorch.models.model import Model
from botorch.models.utils import check_no_nans, fantasize as fantasize_flag
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
from torch import Tensor
from torch.distributions import Normal
MCMC_DIM = -3 # Only relevant if you do Fully Bayesian GPs.
ESTIMATION_TYPES = ["MC", "LB"]
MC_ADD_TERM = 0.5 * (1 + log(2 * pi))
# The CDF query cannot be strictly zero in the division
# and this clamping helps assure that it is always positive.
CLAMP_LB = torch.finfo(torch.float32).eps
FULLY_BAYESIAN_ERROR_MSG = (
"JES is not yet available with Fully Bayesian GPs. Track the issue, "
"which regards conditioning on a number of optima on a collection "
"of models, in detail at https://github.com/pytorch/botorch/issues/1680"
)
class qJointEntropySearch(AcquisitionFunction, MCSamplerMixin):
r"""The acquisition function for the Joint Entropy Search, where the batches
`q > 1` are supported through the lower bound formulation.
This acquisition function computes the mutual information between the observation
at a candidate point `X` and the optimal input-output pair.
See [Tu2022joint]_ for a discussion on the estimation procedure.
"""
def __init__(
self,
model: Model,
optimal_inputs: Tensor,
optimal_outputs: Tensor,
condition_noiseless: bool = True,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
estimation_type: str = "LB",
maximize: bool = True,
num_samples: int = 64,
) -> None:
r"""Joint entropy search acquisition function.
Args:
model: A fitted single-outcome model.
optimal_inputs: A `num_samples x d`-dim tensor containing the sampled
optimal inputs of dimension `d`. We assume for simplicity that each
sample only contains one optimal set of inputs.
optimal_outputs: A `num_samples x 1`-dim Tensor containing the optimal
set of objectives of dimension `1`.
condition_noiseless: Whether to condition on noiseless optimal observations
`f*` [Hvarfner2022joint]_ or noisy optimal observations `y*`
[Tu2022joint]_. These are sampled identically, so this only controls
the fashion in which the GP is reshaped as a result of conditioning
on the optimum.
estimation_type: A string to determine which entropy estimate is
computed: "Lower bound" ("LB") or "Monte Carlo" ("MC"). "Lower bound"
is recommended due to the relatively high variance
of the MC estimator.
maximize: If true, we consider a maximization problem.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation, but have not yet been evaluated.
num_samples: The number of Monte Carlo samples used for the Monte Carlo
estimate.
"""
super().__init__(model=model)
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([num_samples]))
MCSamplerMixin.__init__(self, sampler=sampler)
# To enable fully bayesian GP conditioning, we need to unsqueeze
# to get num_optima x num_gps unique GPs
# inputs come as num_optima_per_model x (num_models) x d
# but we want it four-dimensional in the Fully bayesian case,
# and three-dimensional otherwise.
self.optimal_inputs = optimal_inputs.unsqueeze(-2)
self.optimal_outputs = optimal_outputs.unsqueeze(-2)
self.posterior_transform = posterior_transform
self.maximize = maximize
# The optima (can be maxima, can be minima) come in as the largest
# values if we optimize, or the smallest (likely substantially negative)
# if we minimize. Inside the acquisition function, however, we always
# want to consider MAX-values. As such, we need to flip them if
# we want to minimize.
if not self.maximize:
optimal_outputs = -optimal_outputs
self.num_samples = optimal_inputs.shape[0]
self.condition_noiseless = condition_noiseless
self.initial_model = model
# Here, the optimal inputs have shapes num_optima x [num_models if FB] x 1 x D
# and the optimal outputs have shapes num_optima x [num_models if FB] x 1 x 1
# The third dimension equaling 1 is required to get one optimum per model,
# which raises a BotorchTensorDimensionWarning.
if isinstance(model, SaasFullyBayesianSingleTaskGP):
raise NotImplementedError(FULLY_BAYESIAN_ERROR_MSG)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
with fantasize_flag():
with settings.propagate_grads(False):
# We must do a forward pass once before conditioning.
self.initial_model.posterior(
self.optimal_inputs[:1], observation_noise=False
)
# This equates to the JES version proposed by Hvarfner et. al.
if self.condition_noiseless:
opt_noise = torch.full_like(
self.optimal_outputs, MIN_INFERRED_NOISE_LEVEL
)
# conditional (batch) model of shape (num_models)
# x num_optima_per_model
self.conditional_model = (
self.initial_model.condition_on_observations(
X=self.initial_model.transform_inputs(self.optimal_inputs),
Y=self.optimal_outputs,
noise=opt_noise,
)
)
else:
self.conditional_model = (
self.initial_model.condition_on_observations(
X=self.initial_model.transform_inputs(self.optimal_inputs),
Y=self.optimal_outputs,
)
)
self.estimation_type = estimation_type
self.set_X_pending(X_pending)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluates qJointEntropySearch at the design points `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of `batch_shape` t-batches with `q`
`d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of acquisition values at the given design
points `X`.
"""
if self.estimation_type == "LB":
res = self._compute_lower_bound_information_gain(X)
elif self.estimation_type == "MC":
res = self._compute_monte_carlo_information_gain(X)
else:
raise ValueError(
f"Estimation type {self.estimation_type} is not valid. "
f"Please specify any of {ESTIMATION_TYPES}"
)
return res
def _compute_lower_bound_information_gain(
self, X: Tensor, return_parts: bool = False
) -> Tensor:
r"""Evaluates the lower bound information gain at the design points `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of `batch_shape` t-batches with `q`
`d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of acquisition values at the given design
points `X`.
"""
initial_posterior = self.initial_model.posterior(X, observation_noise=True)
# need to check if there is a two-dimensional batch shape -
# the sampled optima appear in the dimension right after
batch_shape = X.shape[:-2]
sample_dim = len(batch_shape)
# We DISREGARD the additional constant term.
initial_entropy = 0.5 * torch.logdet(
initial_posterior.mvn.lazy_covariance_matrix
)
# initial_entropy of shape batch_size or batch_size x num_models if FBGP
# first need to unsqueeze the sample dim (after batch dim) and then the two last
initial_entropy = (
initial_entropy.unsqueeze(sample_dim).unsqueeze(-1).unsqueeze(-1)
)
# Compute the mixture mean and variance
posterior_m = self.conditional_model.posterior(
X.unsqueeze(MCMC_DIM), observation_noise=True
)
noiseless_var = self.conditional_model.posterior(
X.unsqueeze(MCMC_DIM), observation_noise=False
).variance
mean_m = posterior_m.mean
if not self.maximize:
mean_m = -mean_m
variance_m = posterior_m.variance
check_no_nans(variance_m)
# get stdv of noiseless variance
stdv = noiseless_var.sqrt()
# batch_shape x 1
normal = Normal(
torch.zeros(1, device=X.device, dtype=X.dtype),
torch.ones(1, device=X.device, dtype=X.dtype),
)
normalized_mvs = (self.optimal_outputs - mean_m) / stdv
cdf_mvs = normal.cdf(normalized_mvs).clamp_min(CLAMP_LB)
pdf_mvs = torch.exp(normal.log_prob(normalized_mvs))
ratio = pdf_mvs / cdf_mvs
var_truncated = noiseless_var * (
1 - (normalized_mvs + ratio) * ratio
).clamp_min(CLAMP_LB)
var_truncated = var_truncated + (variance_m - noiseless_var)
conditional_entropy = 0.5 * torch.log(var_truncated)
# Shape batch_size x num_optima x [num_models if FB] x q x num_outputs
# squeeze the num_outputs dim (since it's 1)
entropy_reduction = (
initial_entropy - conditional_entropy.sum(dim=-2, keepdim=True)
).squeeze(-1)
# average over the number of optima and squeeze the q-batch
entropy_reduction = entropy_reduction.mean(dim=sample_dim).squeeze(-1)
return entropy_reduction
def _compute_monte_carlo_variables(self, posterior):
"""Retrieves monte carlo samples and their log probabilities from the posterior.
Args:
posterior: The posterior distribution.
Returns:
A two-element tuple containing:
- samples: a num_optima x batch_shape x num_mc_samples x q x 1
tensor of samples drawn from the posterior.
- samples_log_prob: a num_optima x batch_shape x num_mc_samples x q x 1
tensor of associated log probabilities.
"""
samples = self.get_posterior_samples(posterior)
samples_log_prob = (
posterior.mvn.log_prob(samples.squeeze(-1)).unsqueeze(-1).unsqueeze(-1)
)
return samples, samples_log_prob
def _compute_monte_carlo_information_gain(
self, X: Tensor, return_parts: bool = False
) -> Tensor:
r"""Evaluates the lower bound information gain at the design points `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of `batch_shape` t-batches with `q`
`d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of acquisition values at the given design
points `X`.
"""
initial_posterior = self.initial_model.posterior(X, observation_noise=True)
batch_shape = X.shape[:-2]
sample_dim = len(batch_shape)
# We DISREGARD the additional constant term.
initial_entropy = MC_ADD_TERM + 0.5 * torch.logdet(
initial_posterior.mvn.lazy_covariance_matrix
)
# initial_entropy of shape batch_size or batch_size x num_models if FBGP
# first need to unsqueeze the sample dim (after batch dim), then the two last
initial_entropy = (
initial_entropy.unsqueeze(sample_dim).unsqueeze(-1).unsqueeze(-1)
)
# Compute the mixture mean and variance
posterior_m = self.conditional_model.posterior(
X.unsqueeze(MCMC_DIM), observation_noise=True
)
noiseless_var = self.conditional_model.posterior(
X.unsqueeze(MCMC_DIM), observation_noise=False
).variance
mean_m = posterior_m.mean
if not self.maximize:
mean_m = -mean_m
variance_m = posterior_m.variance.clamp_min(CLAMP_LB)
conditional_samples, conditional_logprobs = self._compute_monte_carlo_variables(
posterior_m
)
normalized_samples = (conditional_samples - mean_m) / variance_m.sqrt()
# Correlation between noisy observations and noiseless values f
rho = (noiseless_var / variance_m).sqrt()
normal = Normal(
torch.zeros(1, device=X.device, dtype=X.dtype),
torch.ones(1, device=X.device, dtype=X.dtype),
)
# prepare max value quantities and re-scale as required
normalized_mvs = (self.optimal_outputs - mean_m) / noiseless_var.sqrt()
mvs_rescaled_mc = (normalized_mvs - rho * normalized_samples) / (1 - rho**2)
cdf_mvs = normal.cdf(normalized_mvs).clamp_min(CLAMP_LB)
cdf_rescaled_mvs = normal.cdf(mvs_rescaled_mc).clamp_min(CLAMP_LB)
mv_ratio = cdf_rescaled_mvs / cdf_mvs
log_term = torch.log(mv_ratio) + conditional_logprobs
conditional_entropy = -(mv_ratio * log_term).mean(0)
entropy_reduction = (
initial_entropy - conditional_entropy.sum(dim=-2, keepdim=True)
).squeeze(-1)
# average over the number of optima and squeeze the q-batch
entropy_reduction = entropy_reduction.mean(dim=sample_dim).squeeze(-1)
return entropy_reduction
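# Editorial sketch (illustration only): constructing qJointEntropySearch from posterior
# samples of the optimum. `get_optimal_samples` is used here to draw (input, output)
# samples of the optimizer; the model, bounds, and sample counts are placeholder
# assumptions, and the exact helper signature may differ across BoTorch versions.
def _example_qjes() -> Tensor:
    from botorch.acquisition.utils import get_optimal_samples
    from botorch.models import SingleTaskGP

    train_X = torch.rand(15, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]], dtype=torch.double)
    optimal_inputs, optimal_outputs = get_optimal_samples(
        model=model, bounds=bounds, num_optima=8
    )
    acqf = qJointEntropySearch(
        model=model,
        optimal_inputs=optimal_inputs,
        optimal_outputs=optimal_outputs,
        estimation_type="LB",
    )
    test_X = torch.rand(4, 1, 2, dtype=torch.double)  # 4 t-batches of q=1 candidates
    return acqf(test_X)  # -> a `4`-dim tensor of acquisition values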
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
A registry of helpers for generating inputs to acquisition function
constructors programmatically from a consistent input format.
"""
from __future__ import annotations
import inspect
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import (
ConstrainedExpectedImprovement,
ExpectedImprovement,
LogConstrainedExpectedImprovement,
LogExpectedImprovement,
LogNoisyExpectedImprovement,
LogProbabilityOfImprovement,
NoisyExpectedImprovement,
PosteriorMean,
ProbabilityOfImprovement,
UpperConfidenceBound,
)
from botorch.acquisition.cost_aware import InverseCostWeightedUtility
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.joint_entropy_search import qJointEntropySearch
from botorch.acquisition.knowledge_gradient import (
qKnowledgeGradient,
qMultiFidelityKnowledgeGradient,
)
from botorch.acquisition.logei import (
qLogExpectedImprovement,
qLogNoisyExpectedImprovement,
TAU_MAX,
TAU_RELU,
)
from botorch.acquisition.max_value_entropy_search import (
qMaxValueEntropy,
qMultiFidelityMaxValueEntropy,
)
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
)
from botorch.acquisition.multi_objective import (
ExpectedHypervolumeImprovement,
MCMultiOutputObjective,
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.acquisition.multi_objective.objective import (
AnalyticMultiOutputObjective,
IdentityAnalyticMultiOutputObjective,
IdentityMCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import get_default_partitioning_alpha
from botorch.acquisition.objective import (
IdentityMCObjective,
MCAcquisitionObjective,
PosteriorTransform,
)
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
from botorch.acquisition.risk_measures import RiskMeasureMCObjective
from botorch.acquisition.utils import (
compute_best_feasible_objective,
expand_trace_observations,
get_optimal_samples,
project_to_target_fidelity,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models.cost import AffineFidelityCostModel
from botorch.models.deterministic import FixedSingleSampleModel
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import Model
from botorch.optim.optimize import optimize_acqf
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.containers import BotorchContainer
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from torch import Tensor
ACQF_INPUT_CONSTRUCTOR_REGISTRY = {}
T = TypeVar("T")
MaybeDict = Union[T, Dict[Hashable, T]]
TOptimizeObjectiveKwargs = Union[
None,
MCAcquisitionObjective,
PosteriorTransform,
Tuple[Tensor, Tensor],
Dict[int, float],
bool,
int,
Dict[str, Any],
Callable[[Tensor], Tensor],
Tensor,
]
def _field_is_shared(
datasets: Union[Iterable[SupervisedDataset], Dict[Hashable, SupervisedDataset]],
fieldname: str,
) -> bool:
r"""Determines whether or not a given field is shared by all datasets."""
if isinstance(datasets, dict):
datasets = datasets.values()
base = None
for dataset in datasets:
if not hasattr(dataset, fieldname):
raise AttributeError(f"{type(dataset)} object has no field `{fieldname}`.")
obj = getattr(dataset, fieldname)
if base is None:
base = obj
elif isinstance(base, Tensor):
if not torch.equal(base, obj):
return False
elif base != obj: # pragma: no cover
return False
return True
def _get_dataset_field(
dataset: MaybeDict[SupervisedDataset],
fieldname: str,
transform: Optional[Callable[[BotorchContainer], Any]] = None,
join_rule: Optional[Callable[[Sequence[Any]], Any]] = None,
first_only: bool = False,
assert_shared: bool = False,
) -> Any:
r"""Convenience method for extracting a given field from one or more datasets."""
if isinstance(dataset, dict):
if assert_shared and not _field_is_shared(dataset, fieldname):
raise ValueError(f"Field `{fieldname}` must be shared.")
if not first_only:
fields = (
_get_dataset_field(d, fieldname, transform) for d in dataset.values()
)
return join_rule(tuple(fields)) if join_rule else tuple(fields)
dataset = next(iter(dataset.values()))
field = getattr(dataset, fieldname)
return transform(field) if transform else field
def get_acqf_input_constructor(
acqf_cls: Type[AcquisitionFunction],
) -> Callable[..., Dict[str, Any]]:
r"""Get acqusition function input constructor from registry.
Args:
acqf_cls: The AcquisitionFunction class (not instance) for which
to retrieve the input constructor.
Returns:
The input constructor associated with `acqf_cls`.
"""
if acqf_cls not in ACQF_INPUT_CONSTRUCTOR_REGISTRY:
raise RuntimeError(
f"Input constructor for acquisition class `{acqf_cls.__name__}` not "
"registered. Use the `@acqf_input_constructor` decorator to register "
"a new method."
)
return ACQF_INPUT_CONSTRUCTOR_REGISTRY[acqf_cls]
def allow_only_specific_variable_kwargs(f: Callable[..., T]) -> Callable[..., T]:
"""
Decorator for allowing a function to accept keyword arguments that are not
explicitly listed in the function signature, but only specific ones.
This decorator is applied in `acqf_input_constructor` so that all constructors
obtained with `acqf_input_constructor` allow keyword
arguments such as `training_data` and `objective`, even if they do not appear
in the signature of `f`. Any other keyword arguments will raise an error.
"""
allowed = {
"training_data",
"objective",
"posterior_transform",
"X_baseline",
"X_pending",
"objective_thresholds",
"constraints",
"target_fidelities",
"bounds",
}
def g(*args: Any, **kwargs: Any) -> T:
new_kwargs = {}
accepted_kwargs = inspect.signature(f).parameters.keys()
for k, v in kwargs.items():
if k in accepted_kwargs:
new_kwargs[k] = v
elif k not in allowed:
raise TypeError(
f"Unexpected keyword argument `{k}` when"
f" constructing input arguments for {f.__name__}."
)
return f(*args, **new_kwargs)
return g
def acqf_input_constructor(
*acqf_cls: Type[AcquisitionFunction],
) -> Callable[..., AcquisitionFunction]:
r"""Decorator for registering acquisition function input constructors.
Args:
acqf_cls: The AcquisitionFunction classes (not instances) for which
to register the input constructor.
"""
for acqf_cls_ in acqf_cls:
if acqf_cls_ in ACQF_INPUT_CONSTRUCTOR_REGISTRY:
raise ValueError(
"Cannot register duplicate arg constructor for acquisition "
f"class `{acqf_cls_.__name__}`"
)
def decorator(method):
method_kwargs = allow_only_specific_variable_kwargs(method)
for acqf_cls_ in acqf_cls:
ACQF_INPUT_CONSTRUCTOR_REGISTRY[acqf_cls_] = method_kwargs
return method
return decorator
def _register_acqf_input_constructor(
acqf_cls: Type[AcquisitionFunction],
input_constructor: Callable[..., Dict[str, Any]],
) -> None:
ACQF_INPUT_CONSTRUCTOR_REGISTRY[acqf_cls] = input_constructor
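# Editorial sketch (illustration only): registering and retrieving an input constructor
# for a hypothetical acquisition class. `_ExampleAcqf` and its constructor are made up
# here to demonstrate the decorator; unknown-but-allowed kwargs (e.g. `training_data`)
# are silently dropped by `allow_only_specific_variable_kwargs`.
def _example_register_input_constructor() -> None:
    class _ExampleAcqf(qSimpleRegret):  # hypothetical subclass, illustration only
        pass

    @acqf_input_constructor(_ExampleAcqf)
    def _construct_inputs_example(model: Model) -> Dict[str, Any]:
        return {"model": model}

    constructor = get_acqf_input_constructor(_ExampleAcqf)
    kwargs = constructor(model=None, training_data=None)  # `training_data` is dropped
    assert kwargs == {"model": None}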
# --------------------- Input argument constructors --------------------- #
@acqf_input_constructor(PosteriorMean)
def construct_inputs_posterior_mean(
model: Model,
posterior_transform: Optional[PosteriorTransform] = None,
) -> Dict[str, Union[Model, Optional[PosteriorTransform]]]:
r"""Construct kwargs for PosteriorMean acquisition function.
Args:
model: The model to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
return {"model": model, "posterior_transform": posterior_transform}
@acqf_input_constructor(
ExpectedImprovement,
LogExpectedImprovement,
ProbabilityOfImprovement,
LogProbabilityOfImprovement,
)
def construct_inputs_best_f(
model: Model,
training_data: MaybeDict[SupervisedDataset],
posterior_transform: Optional[PosteriorTransform] = None,
best_f: Optional[Union[float, Tensor]] = None,
maximize: bool = True,
) -> Dict[str, Any]:
r"""Construct kwargs for the acquisition functions requiring `best_f`.
Args:
model: The model to be used in the acquisition function.
training_data: Dataset(s) used to train the model.
Used to determine default value for `best_f`.
best_f: Threshold above (or below) which improvement is defined.
posterior_transform: The posterior transform to be used in the
acquisition function.
maximize: If True, consider the problem a maximization problem.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
if best_f is None:
best_f = get_best_f_analytic(
training_data=training_data,
posterior_transform=posterior_transform,
)
return {
"model": model,
"posterior_transform": posterior_transform,
"best_f": best_f,
"maximize": maximize,
}
@acqf_input_constructor(UpperConfidenceBound)
def construct_inputs_ucb(
model: Model,
posterior_transform: Optional[PosteriorTransform] = None,
beta: Union[float, Tensor] = 0.2,
maximize: bool = True,
) -> Dict[str, Any]:
r"""Construct kwargs for `UpperConfidenceBound`.
Args:
model: The model to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
beta: Either a scalar or a one-dim tensor with `b` elements (batch mode)
representing the trade-off parameter between mean and covariance
maximize: If True, consider the problem a maximization problem.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
return {
"model": model,
"posterior_transform": posterior_transform,
"beta": beta,
"maximize": maximize,
}
@acqf_input_constructor(
ConstrainedExpectedImprovement, LogConstrainedExpectedImprovement
)
def construct_inputs_constrained_ei(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective_index: int,
constraints: Dict[int, Tuple[Optional[float], Optional[float]]],
maximize: bool = True,
) -> Dict[str, Any]:
r"""Construct kwargs for `ConstrainedExpectedImprovement`.
Args:
model: The model to be used in the acquisition function.
training_data: Dataset(s) used to train the model.
objective_index: The index of the objective.
constraints: A dictionary of the form `{i: [lower, upper]}`, where
`i` is the output index, and `lower` and `upper` are lower and upper
bounds on that output (resp. interpreted as -Inf / Inf if None)
maximize: If True, consider the problem a maximization problem.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
# TODO: Implement best point computation from training data
# best_f =
# return {
# "model": model,
# "best_f": best_f,
# "objective_index": objective_index,
# "constraints": constraints,
# "maximize": maximize,
# }
raise NotImplementedError # pragma: nocover
@acqf_input_constructor(NoisyExpectedImprovement, LogNoisyExpectedImprovement)
def construct_inputs_noisy_ei(
model: Model,
training_data: MaybeDict[SupervisedDataset],
num_fantasies: int = 20,
maximize: bool = True,
) -> Dict[str, Any]:
r"""Construct kwargs for `NoisyExpectedImprovement`.
Args:
model: The model to be used in the acquisition function.
training_data: Dataset(s) used to train the model.
num_fantasies: The number of fantasies to generate. The higher this
number the more accurate the model (at the expense of model
complexity and performance).
maximize: If True, consider the problem a maximization problem.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
# TODO: Add prune_baseline functionality as for qNEI
X = _get_dataset_field(training_data, "X", first_only=True, assert_shared=True)
return {
"model": model,
"X_observed": X,
"num_fantasies": num_fantasies,
"maximize": maximize,
}
@acqf_input_constructor(qSimpleRegret)
def construct_inputs_qSimpleRegret(
model: Model,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sampler: Optional[MCSampler] = None,
) -> Dict[str, Any]:
r"""Construct kwargs for qSimpleRegret.
Args:
model: The model to be used in the acquisition function.
objective: The objective to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
that have been submitted for function evaluation
but have not yet been evaluated.
sampler: The sampler used to draw base samples. If omitted, uses
the acquisition function's default sampler.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
return {
"model": model,
"objective": objective,
"posterior_transform": posterior_transform,
"X_pending": X_pending,
"sampler": sampler,
}
@acqf_input_constructor(qExpectedImprovement)
def construct_inputs_qEI(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sampler: Optional[MCSampler] = None,
best_f: Optional[Union[float, Tensor]] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
) -> Dict[str, Any]:
r"""Construct kwargs for the `qExpectedImprovement` constructor.
Args:
model: The model to be used in the acquisition function.
training_data: Dataset(s) used to train the model.
objective: The objective to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call.
sampler: The sampler used to draw base samples. If omitted, uses
the acquisition function's default sampler.
best_f: Threshold above (or below) which improvement is defined.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are considered satisfied if the output is less than zero.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
if best_f is None:
best_f = get_best_f_mc(
training_data=training_data,
objective=objective,
posterior_transform=posterior_transform,
constraints=constraints,
model=model,
)
return {
"model": model,
"objective": objective,
"posterior_transform": posterior_transform,
"X_pending": X_pending,
"sampler": sampler,
"best_f": best_f,
"constraints": constraints,
"eta": eta,
}
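# Editorial sketch (illustration only): obtaining qEI constructor kwargs through the
# registry populated above and instantiating the acquisition function from them. The
# data and model are placeholder assumptions; note that the `SupervisedDataset`
# constructor signature has changed across BoTorch versions (some require explicit
# feature/outcome names), so treat the dataset construction below as a sketch.
def _example_constructor_qei() -> qExpectedImprovement:
    from botorch.models import SingleTaskGP

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    dataset = SupervisedDataset(X=train_X, Y=train_Y)
    constructor = get_acqf_input_constructor(qExpectedImprovement)
    kwargs = constructor(model=model, training_data=dataset)
    return qExpectedImprovement(**kwargs)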
@acqf_input_constructor(qLogExpectedImprovement)
def construct_inputs_qLogEI(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sampler: Optional[MCSampler] = None,
best_f: Optional[Union[float, Tensor]] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
fat: bool = True,
tau_max: float = TAU_MAX,
tau_relu: float = TAU_RELU,
) -> Dict[str, Any]:
r"""Construct kwargs for the `qExpectedImprovement` constructor.
Args:
model: The model to be used in the acquisition function.
training_data: Dataset(s) used to train the model.
objective: The objective to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call.
sampler: The sampler used to draw base samples. If omitted, uses
the acquisition function's default sampler.
best_f: Threshold above (or below) which improvement is defined.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are considered satisfied if the output is less than zero.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
fat: Toggles the logarithmic / linear asymptotic behavior of the smooth
approximation to the ReLU.
tau_max: Temperature parameter controlling the sharpness of the smooth
approximations to max.
tau_relu: Temperature parameter controlling the sharpness of the smooth
approximations to ReLU.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
return {
**construct_inputs_qEI(
model=model,
training_data=training_data,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
sampler=sampler,
best_f=best_f,
constraints=constraints,
eta=eta,
),
"fat": fat,
"tau_max": tau_max,
"tau_relu": tau_relu,
}
@acqf_input_constructor(qNoisyExpectedImprovement)
def construct_inputs_qNEI(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sampler: Optional[MCSampler] = None,
X_baseline: Optional[Tensor] = None,
prune_baseline: Optional[bool] = True,
cache_root: Optional[bool] = True,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
) -> Dict[str, Any]:
r"""Construct kwargs for the `qNoisyExpectedImprovement` constructor.
Args:
model: The model to be used in the acquisition function.
training_data: Dataset(s) used to train the model.
objective: The objective to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call.
sampler: The sampler used to draw base samples. If omitted, uses
the acquisition function's default sampler.
X_baseline: A `batch_shape x r x d`-dim Tensor of `r` design points
that have already been observed. These points are considered as
the potential best design point. If omitted, checks that all
training_data have the same input features and takes the first `X`.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are considered satisfied if the output is less than zero.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
if X_baseline is None:
X_baseline = _get_dataset_field(
training_data,
fieldname="X",
assert_shared=True,
first_only=True,
)
return {
"model": model,
"objective": objective,
"posterior_transform": posterior_transform,
"X_pending": X_pending,
"sampler": sampler,
"X_baseline": X_baseline,
"prune_baseline": prune_baseline,
"cache_root": cache_root,
"constraints": constraints,
"eta": eta,
}
@acqf_input_constructor(qLogNoisyExpectedImprovement)
def construct_inputs_qLogNEI(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sampler: Optional[MCSampler] = None,
X_baseline: Optional[Tensor] = None,
prune_baseline: Optional[bool] = True,
cache_root: Optional[bool] = True,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
fat: bool = True,
tau_max: float = TAU_MAX,
tau_relu: float = TAU_RELU,
):
r"""Construct kwargs for the `qLogNoisyExpectedImprovement` constructor.
Args:
model: The model to be used in the acquisition function.
training_data: Dataset(s) used to train the model.
objective: The objective to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call.
sampler: The sampler used to draw base samples. If omitted, uses
the acquisition function's default sampler.
X_baseline: A `batch_shape x r x d`-dim Tensor of `r` design points
that have already been observed. These points are considered as
the potential best design point. If omitted, checks that all
training_data have the same input features and takes the first `X`.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are considered satisfied if the output is less than zero.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
fat: Toggles the logarithmic / linear asymptotic behavior of the smooth
approximation to the ReLU.
tau_max: Temperature parameter controlling the sharpness of the smooth
approximations to max.
tau_relu: Temperature parameter controlling the sharpness of the smooth
approximations to ReLU.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
return {
**construct_inputs_qNEI(
model=model,
training_data=training_data,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
sampler=sampler,
X_baseline=X_baseline,
prune_baseline=prune_baseline,
cache_root=cache_root,
constraints=constraints,
eta=eta,
),
"fat": fat,
"tau_max": tau_max,
"tau_relu": tau_relu,
}
@acqf_input_constructor(qProbabilityOfImprovement)
def construct_inputs_qPI(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sampler: Optional[MCSampler] = None,
tau: float = 1e-3,
best_f: Optional[Union[float, Tensor]] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
) -> Dict[str, Any]:
r"""Construct kwargs for the `qProbabilityOfImprovement` constructor.
Args:
model: The model to be used in the acquisition function.
training_data: Dataset(s) used to train the model.
objective: The objective to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call.
sampler: The sampler used to draw base samples. If omitted, uses
the acquisition function's default sampler.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradients
estimates with higher variance.
best_f: The best objective value observed so far (assumed noiseless). Can
be a `batch_shape`-shaped tensor, which in case of a batched model
specifies potentially different values for each element of the batch.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are considered satisfied if the output is less than zero.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. For more details on this
parameter, see the docs of `compute_smoothed_feasibility_indicator`.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
if best_f is None:
best_f = get_best_f_mc(
training_data=training_data,
objective=objective,
posterior_transform=posterior_transform,
constraints=constraints,
model=model,
)
return {
"model": model,
"objective": objective,
"posterior_transform": posterior_transform,
"X_pending": X_pending,
"sampler": sampler,
"tau": tau,
"best_f": best_f,
"constraints": constraints,
"eta": eta,
}
@acqf_input_constructor(qUpperConfidenceBound)
def construct_inputs_qUCB(
model: Model,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sampler: Optional[MCSampler] = None,
beta: float = 0.2,
) -> Dict[str, Any]:
r"""Construct kwargs for the `qUpperConfidenceBound` constructor.
Args:
model: The model to be used in the acquisition function.
objective: The objective to be used in the acquisition function.
posterior_transform: The posterior transform to be used in the
acquisition function.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call.
sampler: The sampler used to draw base samples. If omitted, uses
            the acquisition function's default sampler.
beta: Controls tradeoff between mean and standard deviation in UCB.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
return {
"model": model,
"objective": objective,
"posterior_transform": posterior_transform,
"X_pending": X_pending,
"sampler": sampler,
"beta": beta,
}
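# Illustrative sketch (not part of the library source): the constructors registered
# above are typically resolved through the input-constructor registry rather than
# called directly. `get_acqf_input_constructor` is assumed here to be the lookup
# helper defined alongside the `acqf_input_constructor` decorator; `model` is a
# placeholder for a fitted BoTorch model.
def _example_qucb_inputs_from_registry(model: Model) -> Dict[str, Any]:
    # Resolve the registered constructor for qUpperConfidenceBound and build the
    # kwargs it needs; pass-through arguments such as `beta` are forwarded as-is.
    constructor = get_acqf_input_constructor(qUpperConfidenceBound)
    kwargs = constructor(model=model, beta=0.5)
    # The resulting dict can be splatted into the acquisition function:
    # qUpperConfidenceBound(**kwargs)
    return kwargs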
def _get_sampler(mc_samples: int, qmc: bool) -> MCSampler:
"""Set up MC sampler for q(N)EHVI."""
# initialize the sampler
shape = torch.Size([mc_samples])
if qmc:
return SobolQMCNormalSampler(sample_shape=shape)
return IIDNormalSampler(sample_shape=shape)
@acqf_input_constructor(ExpectedHypervolumeImprovement)
def construct_inputs_EHVI(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective_thresholds: Tensor,
objective: Optional[AnalyticMultiOutputObjective] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
alpha: Optional[float] = None,
Y_pmean: Optional[Tensor] = None,
) -> Dict[str, Any]:
r"""Construct kwargs for `ExpectedHypervolumeImprovement` constructor."""
num_objectives = objective_thresholds.shape[0]
if constraints is not None:
raise NotImplementedError("EHVI does not yet support outcome constraints.")
X = _get_dataset_field(
training_data,
fieldname="X",
first_only=True,
assert_shared=True,
)
alpha = (
get_default_partitioning_alpha(num_objectives=num_objectives)
if alpha is None
else alpha
)
    # This selects the objectives (a subset of the outcomes) and sets each
    # objective threshold to have the proper optimization direction.
if objective is None:
objective = IdentityAnalyticMultiOutputObjective()
if isinstance(objective, RiskMeasureMCObjective):
pre_obj = objective.preprocessing_function
else:
pre_obj = objective
ref_point = pre_obj(objective_thresholds)
    # Compute the posterior mean (used to construct the reference Pareto frontier
    # for the partitioning) if one is not provided among the arguments.
if Y_pmean is None:
with torch.no_grad():
Y_pmean = model.posterior(X).mean
if alpha > 0:
partitioning = NondominatedPartitioning(
ref_point=ref_point,
Y=pre_obj(Y_pmean),
alpha=alpha,
)
else:
partitioning = FastNondominatedPartitioning(
ref_point=ref_point,
Y=pre_obj(Y_pmean),
)
return {
"model": model,
"ref_point": ref_point,
"partitioning": partitioning,
"objective": objective,
}
@acqf_input_constructor(qExpectedHypervolumeImprovement)
def construct_inputs_qEHVI(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective_thresholds: Tensor,
objective: Optional[MCMultiOutputObjective] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
alpha: Optional[float] = None,
sampler: Optional[MCSampler] = None,
X_pending: Optional[Tensor] = None,
eta: float = 1e-3,
mc_samples: int = 128,
qmc: bool = True,
) -> Dict[str, Any]:
r"""Construct kwargs for `qExpectedHypervolumeImprovement` constructor."""
X = _get_dataset_field(
training_data,
fieldname="X",
first_only=True,
assert_shared=True,
)
    # Compute the posterior mean (used to construct the reference Pareto frontier).
with torch.no_grad():
Y_pmean = model.posterior(X).mean
# For HV-based acquisition functions we pass the constraint transform directly
if constraints is not None:
        # Adjust `Y_pmean` to contain feasible points only.
feas = torch.stack([c(Y_pmean) <= 0 for c in constraints], dim=-1).all(dim=-1)
Y_pmean = Y_pmean[feas]
if objective is None:
objective = IdentityMCMultiOutputObjective()
ehvi_kwargs = construct_inputs_EHVI(
model=model,
training_data=training_data,
objective_thresholds=objective_thresholds,
objective=objective,
constraints=None,
alpha=alpha,
# Pass `Y_pmean` that accounts for constraints to `construct_inputs_EHVI`
# to ensure that correct non-dominated partitioning is produced.
Y_pmean=Y_pmean,
)
if sampler is None and isinstance(model, GPyTorchModel):
sampler = _get_sampler(mc_samples=mc_samples, qmc=qmc)
add_qehvi_kwargs = {
"sampler": sampler,
"X_pending": X_pending,
"constraints": constraints,
"eta": eta,
}
return {**ehvi_kwargs, **add_qehvi_kwargs}
@acqf_input_constructor(qNoisyExpectedHypervolumeImprovement)
def construct_inputs_qNEHVI(
model: Model,
training_data: MaybeDict[SupervisedDataset],
objective_thresholds: Tensor,
objective: Optional[MCMultiOutputObjective] = None,
X_baseline: Optional[Tensor] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
alpha: Optional[float] = None,
sampler: Optional[MCSampler] = None,
X_pending: Optional[Tensor] = None,
eta: float = 1e-3,
mc_samples: int = 128,
qmc: bool = True,
prune_baseline: bool = True,
cache_pending: bool = True,
max_iep: int = 0,
incremental_nehvi: bool = True,
cache_root: bool = True,
) -> Dict[str, Any]:
r"""Construct kwargs for `qNoisyExpectedHypervolumeImprovement` constructor."""
if X_baseline is None:
X_baseline = _get_dataset_field(
training_data,
fieldname="X",
first_only=True,
assert_shared=True,
)
    # This selects the objectives (a subset of the outcomes) and sets each
    # objective threshold to have the proper optimization direction.
if objective is None:
objective = IdentityMCMultiOutputObjective()
if constraints is not None:
if isinstance(objective, RiskMeasureMCObjective):
raise UnsupportedError(
"Outcome constraints are not supported with risk measures. "
"Use a feasibility-weighted risk measure instead."
)
if sampler is None and isinstance(model, GPyTorchModel):
sampler = _get_sampler(mc_samples=mc_samples, qmc=qmc)
if isinstance(objective, RiskMeasureMCObjective):
ref_point = objective.preprocessing_function(objective_thresholds)
else:
ref_point = objective(objective_thresholds)
num_objectives = objective_thresholds[~torch.isnan(objective_thresholds)].shape[0]
if alpha is None:
alpha = get_default_partitioning_alpha(num_objectives=num_objectives)
return {
"model": model,
"ref_point": ref_point,
"X_baseline": X_baseline,
"sampler": sampler,
"objective": objective,
"constraints": constraints,
"X_pending": X_pending,
"eta": eta,
"prune_baseline": prune_baseline,
"alpha": alpha,
"cache_pending": cache_pending,
"max_iep": max_iep,
"incremental_nehvi": incremental_nehvi,
"cache_root": cache_root,
}
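# Illustrative sketch (hypothetical usage): assembling the
# `qNoisyExpectedHypervolumeImprovement` kwargs for a two-objective problem.
# `moo_model` and `dataset` are placeholders for a fitted multi-output model and
# its `SupervisedDataset` of observed data.
def _example_qnehvi_inputs(
    moo_model: Model, dataset: SupervisedDataset
) -> Dict[str, Any]:
    # One threshold per objective; hypervolume below this reference point is ignored.
    objective_thresholds = torch.tensor([0.0, 0.0])
    return construct_inputs_qNEHVI(
        model=moo_model,
        training_data=dataset,
        objective_thresholds=objective_thresholds,
        mc_samples=128,
        qmc=True,
    )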
@acqf_input_constructor(qMaxValueEntropy)
def construct_inputs_qMES(
model: Model,
training_data: MaybeDict[SupervisedDataset],
bounds: List[Tuple[float, float]],
candidate_size: int = 1000,
maximize: bool = True,
# TODO: qMES also supports other inputs, such as num_fantasies
) -> Dict[str, Any]:
r"""Construct kwargs for `qMaxValueEntropy` constructor."""
X = _get_dataset_field(training_data, "X", first_only=True)
_kw = {"device": X.device, "dtype": X.dtype}
_rvs = torch.rand(candidate_size, len(bounds), **_kw)
_bounds = torch.tensor(bounds, **_kw).transpose(0, 1)
return {
"model": model,
"candidate_set": _bounds[0] + (_bounds[1] - _bounds[0]) * _rvs,
"maximize": maximize,
}
def construct_inputs_mf_base(
target_fidelities: Dict[int, Union[int, float]],
fidelity_weights: Optional[Dict[int, float]] = None,
cost_intercept: float = 1.0,
num_trace_observations: int = 0,
) -> Dict[str, Any]:
r"""Construct kwargs for a multifidelity acquisition function's constructor."""
if fidelity_weights is None:
fidelity_weights = {f: 1.0 for f in target_fidelities}
if set(target_fidelities) != set(fidelity_weights):
raise RuntimeError(
"Must provide the same indices for target_fidelities "
f"({set(target_fidelities)}) and fidelity_weights "
f" ({set(fidelity_weights)})."
)
cost_aware_utility = InverseCostWeightedUtility(
cost_model=AffineFidelityCostModel(
fidelity_weights=fidelity_weights, fixed_cost=cost_intercept
)
)
return {
"cost_aware_utility": cost_aware_utility,
"expand": lambda X: expand_trace_observations(
X=X,
fidelity_dims=sorted(target_fidelities),
num_trace_obs=num_trace_observations,
),
"project": lambda X: project_to_target_fidelity(
X=X, target_fidelities=target_fidelities
),
}
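# Illustrative sketch (hypothetical values): shared multi-fidelity pieces for a
# problem whose third input dimension (index 2) is a fidelity parameter in [0, 1].
def _example_mf_base_inputs() -> Dict[str, Any]:
    return construct_inputs_mf_base(
        target_fidelities={2: 1.0},  # project candidates to full fidelity
        fidelity_weights={2: 1.0},  # cost grows linearly in the fidelity value
        cost_intercept=5.0,  # fixed overhead per evaluation
        num_trace_observations=0,
    )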
@acqf_input_constructor(qKnowledgeGradient)
def construct_inputs_qKG(
model: Model,
training_data: MaybeDict[SupervisedDataset],
bounds: List[Tuple[float, float]],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
num_fantasies: int = 64,
**optimize_objective_kwargs: TOptimizeObjectiveKwargs,
) -> Dict[str, Any]:
r"""Construct kwargs for `qKnowledgeGradient` constructor."""
X = _get_dataset_field(training_data, "X", first_only=True)
_bounds = torch.tensor(bounds, dtype=X.dtype, device=X.device)
_, current_value = optimize_objective(
model=model,
bounds=_bounds.t(),
q=1,
objective=objective,
posterior_transform=posterior_transform,
**optimize_objective_kwargs,
)
return {
"model": model,
"objective": objective,
"posterior_transform": posterior_transform,
"num_fantasies": num_fantasies,
"current_value": current_value.detach().cpu().max(),
}
@acqf_input_constructor(qMultiFidelityKnowledgeGradient)
def construct_inputs_qMFKG(
model: Model,
training_data: MaybeDict[SupervisedDataset],
bounds: List[Tuple[float, float]],
target_fidelities: Dict[int, Union[int, float]],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
fidelity_weights: Optional[Dict[int, float]] = None,
cost_intercept: float = 1.0,
num_trace_observations: int = 0,
num_fantasies: int = 64,
) -> Dict[str, Any]:
r"""Construct kwargs for `qMultiFidelityKnowledgeGradient` constructor."""
inputs_mf = construct_inputs_mf_base(
target_fidelities=target_fidelities,
fidelity_weights=fidelity_weights,
cost_intercept=cost_intercept,
num_trace_observations=num_trace_observations,
)
inputs_kg = construct_inputs_qKG(
model=model,
training_data=training_data,
bounds=bounds,
objective=objective,
posterior_transform=posterior_transform,
num_fantasies=num_fantasies,
)
return {**inputs_mf, **inputs_kg}
@acqf_input_constructor(qMultiFidelityMaxValueEntropy)
def construct_inputs_qMFMES(
model: Model,
training_data: MaybeDict[SupervisedDataset],
bounds: List[Tuple[float, float]],
target_fidelities: Dict[int, Union[int, float]],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
num_fantasies: int = 64,
X_baseline: Optional[Tensor] = None,
X_pending: Optional[Tensor] = None,
objective_thresholds: Optional[Tensor] = None,
outcome_constraints: Optional[List[Tuple[Tensor, Tensor]]] = None,
fidelity_weights: Optional[Dict[int, float]] = None,
cost_intercept: float = 1.0,
num_trace_observations: int = 0,
candidate_size: int = 1000,
maximize: bool = True,
**optimize_objective_kwargs: TOptimizeObjectiveKwargs,
) -> Dict[str, Any]:
r"""Construct kwargs for `qMultiFidelityMaxValueEntropy` constructor."""
inputs_mf = construct_inputs_mf_base(
target_fidelities=target_fidelities,
fidelity_weights=fidelity_weights,
cost_intercept=cost_intercept,
num_trace_observations=num_trace_observations,
)
inputs_qmes = construct_inputs_qMES(
model=model,
training_data=training_data,
bounds=bounds,
candidate_size=candidate_size,
maximize=maximize,
)
X = _get_dataset_field(training_data, "X", first_only=True)
_bounds = torch.tensor(bounds, dtype=X.dtype, device=X.device)
_, current_value = optimize_objective(
model=model,
bounds=_bounds.t(),
q=1,
objective=objective,
posterior_transform=posterior_transform,
**optimize_objective_kwargs,
)
return {
**inputs_mf,
**inputs_qmes,
"current_value": current_value.detach().cpu().max(),
"target_fidelities": target_fidelities,
}
@acqf_input_constructor(AnalyticExpectedUtilityOfBestOption)
def construct_inputs_analytic_eubo(
model: Model,
pref_model: Model,
previous_winner: Optional[Tensor] = None,
sample_multiplier: Optional[float] = 1.0,
) -> Dict[str, Any]:
r"""Construct kwargs for the `AnalyticExpectedUtilityOfBestOption` constructor.
Args:
model: The outcome model to be used in the acquisition function.
pref_model: The preference model to be used in preference exploration.
previous_winner: The previous winner of the best option.
sample_multiplier: The scale factor for the single-sample model.
Returns:
A dict mapping kwarg names of the constructor to values.
"""
# construct a deterministic fixed single sample model from `model`
# i.e., performing EUBO-zeta by default as described
# in https://arxiv.org/abs/2203.11382
# using pref_model.dim instead of model.num_outputs here as MTGP's
# num_outputs could be tied to the number of tasks
w = torch.randn(pref_model.dim) * sample_multiplier
one_sample_outcome_model = FixedSingleSampleModel(model=model, w=w)
return {
"pref_model": pref_model,
"outcome_model": one_sample_outcome_model,
"previous_winner": previous_winner,
}
def get_best_f_analytic(
training_data: MaybeDict[SupervisedDataset],
posterior_transform: Optional[PosteriorTransform] = None,
) -> Tensor:
if isinstance(training_data, dict) and not _field_is_shared(
training_data, fieldname="X"
):
raise NotImplementedError("Currently only block designs are supported.")
Y = _get_dataset_field(
training_data,
fieldname="Y",
join_rule=lambda field_tensors: torch.cat(field_tensors, dim=-1),
)
if posterior_transform is not None:
return posterior_transform.evaluate(Y).max(-1).values
if Y.shape[-1] > 1:
raise NotImplementedError(
"Analytic acquisition functions currently only work with "
"multi-output models if provided with a `ScalarizedObjective`."
)
return Y.max(-2).values.squeeze(-1)
def get_best_f_mc(
training_data: MaybeDict[SupervisedDataset],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
model: Optional[Model] = None,
) -> Tensor:
if isinstance(training_data, dict) and not _field_is_shared(
training_data, fieldname="X"
):
raise NotImplementedError("Currently only block designs are supported.")
X_baseline = _get_dataset_field(
training_data,
fieldname="X",
assert_shared=True,
first_only=True,
)
Y = _get_dataset_field(
training_data,
fieldname="Y",
join_rule=lambda field_tensors: torch.cat(field_tensors, dim=-1),
) # batch_shape x n x d
if posterior_transform is not None:
# retain the original tensor dimension since objective expects explicit
# output dimension.
Y_dim = Y.dim()
Y = posterior_transform.evaluate(Y)
if Y.dim() < Y_dim:
Y = Y.unsqueeze(-1)
if objective is None:
if Y.shape[-1] > 1:
raise UnsupportedError(
"Acquisition functions require an objective when "
"used with multi-output models (execpt for multi-objective"
"acquisition functions)."
)
objective = IdentityMCObjective()
obj = objective(Y, X=X_baseline) # batch_shape x n
return compute_best_feasible_objective(
samples=Y,
obj=obj,
constraints=constraints,
model=model,
objective=objective,
posterior_transform=posterior_transform,
X_baseline=X_baseline,
)
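# Illustrative sketch (hypothetical usage): the incumbent value consumed by the
# qEI/qPI constructors above. For a single-output dataset no objective, constraints,
# or model are required; `dataset` is a placeholder for the observed training data.
def _example_best_f(dataset: SupervisedDataset) -> Tensor:
    return get_best_f_mc(training_data=dataset)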
def optimize_objective(
model: Model,
bounds: Tensor,
q: int,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
qmc: bool = True,
mc_samples: int = 512,
seed_inner: Optional[int] = None,
optimizer_options: Optional[Dict[str, Any]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
batch_initial_conditions: Optional[Tensor] = None,
sequential: bool = False,
) -> Tuple[Tensor, Tensor]:
r"""Optimize an objective under the given model.
Args:
model: The model to be used in the objective.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
q: The cardinality of input sets on which the objective is to be evaluated.
objective: The objective to optimize.
posterior_transform: The posterior transform to be used in the
acquisition function.
linear_constraints: A tuple of (A, b). Given `k` linear constraints on a
`d`-dimensional space, `A` is `k x d` and `b` is `k x 1` such that
`A x <= b`. (Not used by single task models).
fixed_features: A dictionary of feature assignments `{feature_index: value}` to
hold fixed during generation.
        qmc: If True, use quasi-Monte Carlo (Sobol) base samples; if False, use
            i.i.d. normal base samples.
mc_samples: Integer number of samples used to estimate Monte Carlo objectives.
seed_inner: Integer seed used to initialize the sampler passed to MCObjective.
        optimizer_options: A dictionary used to look up keyword arguments for the optimizer.
post_processing_func: A function that post-processes an optimization
result appropriately (i.e. according to `round-trip` transformations).
batch_initial_conditions: A Tensor of initial values for the optimizer.
sequential: If False, uses joint optimization, otherwise uses sequential
optimization.
Returns:
A tuple containing the best input locations and corresponding objective values.
"""
if optimizer_options is None:
optimizer_options = {}
if objective is not None:
sampler_cls = SobolQMCNormalSampler if qmc else IIDNormalSampler
acq_function = qSimpleRegret(
model=model,
objective=objective,
posterior_transform=posterior_transform,
sampler=sampler_cls(sample_shape=torch.Size([mc_samples]), seed=seed_inner),
)
else:
acq_function = PosteriorMean(
model=model, posterior_transform=posterior_transform
)
if fixed_features:
acq_function = FixedFeatureAcquisitionFunction(
acq_function=acq_function,
d=bounds.shape[-1],
columns=list(fixed_features.keys()),
values=list(fixed_features.values()),
)
        free_feature_dims = [
            i for i in range(bounds.shape[-1]) if i not in fixed_features
        ]
free_feature_bounds = bounds[:, free_feature_dims] # (2, d' <= d)
else:
free_feature_bounds = bounds
if linear_constraints is None:
inequality_constraints = None
else:
A, b = linear_constraints
inequality_constraints = []
k, d = A.shape
for i in range(k):
            indices = A[i, :].nonzero(as_tuple=False).squeeze()
            coefficients = -A[i, indices]
            rhs = -b[i, 0]
            inequality_constraints.append((indices, coefficients, rhs))
return optimize_acqf(
acq_function=acq_function,
bounds=free_feature_bounds,
q=q,
num_restarts=optimizer_options.get("num_restarts", 60),
raw_samples=optimizer_options.get("raw_samples", 1024),
options={
"batch_limit": optimizer_options.get("batch_limit", 8),
"maxiter": optimizer_options.get("maxiter", 200),
"nonnegative": optimizer_options.get("nonnegative", False),
"method": optimizer_options.get("method", "L-BFGS-B"),
},
inequality_constraints=inequality_constraints,
fixed_features=None, # handled inside the acquisition function
post_processing_func=post_processing_func,
batch_initial_conditions=batch_initial_conditions,
return_best_only=True,
sequential=sequential,
)
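# Worked example (illustrative, hypothetical numbers): how `optimize_objective`
# re-expresses a row of `A x <= b` in the `(indices, coefficients, rhs)` form used
# by `optimize_acqf`, which encodes `sum_i coefficients[i] * x[indices[i]] >= rhs`.
# For A = [[1., 1., 0.]] and b = [[1.]] (i.e. x_0 + x_1 <= 1) this yields
# indices = [0, 1], coefficients = [-1., -1.], rhs = -1. (i.e. -x_0 - x_1 >= -1).
def _example_linear_constraint_conversion() -> Tuple[Tensor, Tensor, float]:
    A = torch.tensor([[1.0, 1.0, 0.0]])
    b = torch.tensor([[1.0]])
    indices = A[0, :].nonzero(as_tuple=False).squeeze()
    coefficients = -A[0, indices]
    rhs = -b[0, 0]
    return indices, coefficients, float(rhs)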
@acqf_input_constructor(qJointEntropySearch)
def construct_inputs_qJES(
model: Model,
bounds: List[Tuple[float, float]],
num_optima: int = 64,
maximize: bool = True,
condition_noiseless: bool = True,
X_pending: Optional[Tensor] = None,
estimation_type: str = "LB",
num_samples: int = 64,
):
dtype = model.train_targets.dtype
optimal_inputs, optimal_outputs = get_optimal_samples(
model=model,
bounds=torch.as_tensor(bounds, dtype=dtype).T,
num_optima=num_optima,
maximize=maximize,
)
inputs = {
"model": model,
"optimal_inputs": optimal_inputs,
"optimal_outputs": optimal_outputs,
"condition_noiseless": condition_noiseless,
"maximize": maximize,
"X_pending": X_pending,
"estimation_type": estimation_type,
"num_samples": num_samples,
}
return inputs
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
A general implementation of multi-step look-ahead acquisition functions with configurable
value functions. See [Jiang2020multistep]_.
.. [Jiang2020multistep]
S. Jiang, D. R. Jiang, M. Balandat, B. Karrer, J. Gardner, and R. Garnett.
Efficient Nonmyopic Bayesian Optimization via One-Shot Multi-Step Trees.
In Advances in Neural Information Processing Systems 33, 2020.
"""
from __future__ import annotations
import math
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Type
import numpy as np
import torch
from botorch.acquisition import AcquisitionFunction, OneShotAcquisitionFunction
from botorch.acquisition.analytic import AnalyticAcquisitionFunction, PosteriorMean
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.acquisition.objective import MCAcquisitionObjective, PosteriorTransform
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.model import Model
from botorch.optim.initializers import initialize_q_batch
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import (
match_batch_shape,
t_batch_mode_transform,
unnormalize,
)
from torch import Size, Tensor
from torch.distributions import Beta
from torch.nn import ModuleList
TAcqfArgConstructor = Callable[[Model, Tensor], Dict[str, Any]]
class qMultiStepLookahead(MCAcquisitionFunction, OneShotAcquisitionFunction):
r"""MC-based batch Multi-Step Look-Ahead (one-shot optimization)."""
def __init__(
self,
model: Model,
batch_sizes: List[int],
num_fantasies: Optional[List[int]] = None,
samplers: Optional[List[MCSampler]] = None,
valfunc_cls: Optional[List[Optional[Type[AcquisitionFunction]]]] = None,
valfunc_argfacs: Optional[List[Optional[TAcqfArgConstructor]]] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
inner_mc_samples: Optional[List[int]] = None,
X_pending: Optional[Tensor] = None,
collapse_fantasy_base_samples: bool = True,
) -> None:
r"""q-Multi-Step Look-Ahead (one-shot optimization).
Performs a `k`-step lookahead by means of repeated fantasizing.
Allows to specify the stage value functions by passing the respective class
objects via the `valfunc_cls` list. Optionally, `valfunc_argfacs` takes a list
of callables that generate additional kwargs for these constructors. By default,
`valfunc_cls` will be chosen as `[None, ..., None, PosteriorMean]`, which
corresponds to the (parallel) multi-step KnowledgeGradient. If, in addition,
`k=1` and `q_1 = 1`, this reduces to the classic Knowledge Gradient.
WARNING: The complexity of evaluating this function is exponential in the number
of lookahead steps!
Args:
model: A fitted model.
batch_sizes: A list `[q_1, ..., q_k]` containing the batch sizes for the
`k` look-ahead steps.
num_fantasies: A list `[f_1, ..., f_k]` containing the number of fantasy
points to use for the `k` look-ahead steps.
samplers: A list of MCSampler objects to be used for sampling fantasies in
each stage.
valfunc_cls: A list of `k + 1` acquisition function classes to be used as
the (stage + terminal) value functions. Each element (except for the
last one) can be `None`, in which case a zero stage value is assumed for
the respective stage. If `None`, this defaults to
`[None, ..., None, PosteriorMean]`
valfunc_argfacs: A list of `k + 1` "argument factories", i.e. callables that
map a `Model` and input tensor `X` to a dictionary of kwargs for the
respective stage value function constructor (e.g. `best_f` for
`ExpectedImprovement`). If None, only the standard (`model`, `sampler`
and `objective`) kwargs will be used.
objective: The objective under which the output is evaluated. If `None`, use
the model output (requires a single-output model or a posterior
transform). Otherwise the objective is MC-evaluated
(using `inner_sampler`).
posterior_transform: An optional PosteriorTransform. If given, this
transforms the posterior before evaluation. If `objective is None`,
then the output of the transformed posterior is used. If `objective` is
given, the `inner_sampler` is used to draw samples from the transformed
posterior, which are then evaluated under the `objective`.
inner_mc_samples: A list `[n_0, ..., n_k]` containing the number of MC
samples to be used for evaluating the stage value function. Ignored if
the objective is `None`.
            X_pending: A `m x d`-dim Tensor of `m` design points that
have been submitted for function evaluation but have not yet been
evaluated. Concatenated into `X` upon forward call. Copied and set to
have no gradient.
collapse_fantasy_base_samples: If True, collapse_batch_dims of the Samplers
will be applied on fantasy batch dimensions as well, meaning that base
samples are the same in all subtrees starting from the same level.
"""
if objective is not None and not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"`qMultiStepLookahead` got a non-MC `objective`. This is not supported."
" Use `posterior_transform` and `objective=None` instead."
)
super(MCAcquisitionFunction, self).__init__(model=model)
self.batch_sizes = batch_sizes
if not ((num_fantasies is None) ^ (samplers is None)):
raise UnsupportedError(
"qMultiStepLookahead requires exactly one of `num_fantasies` or "
"`samplers` as arguments."
)
if samplers is None:
# If collapse_fantasy_base_samples is False, the `batch_range_override`
# is set on the samplers during the forward call.
samplers: List[MCSampler] = [
SobolQMCNormalSampler(sample_shape=torch.Size([nf]))
for nf in num_fantasies
]
else:
num_fantasies = [sampler.sample_shape[0] for sampler in samplers]
self.num_fantasies = num_fantasies
# By default do not use stage values and use PosteriorMean as terminal value
# function (= multi-step KG)
if valfunc_cls is None:
valfunc_cls = [None for _ in num_fantasies] + [PosteriorMean]
if inner_mc_samples is None:
inner_mc_samples = [None] * (1 + len(num_fantasies))
# TODO: Allow passing in inner samplers directly
inner_samplers = _construct_inner_samplers(
batch_sizes=batch_sizes,
valfunc_cls=valfunc_cls,
objective=objective,
inner_mc_samples=inner_mc_samples,
)
if valfunc_argfacs is None:
valfunc_argfacs = [None] * (1 + len(batch_sizes))
self.objective = objective
self.posterior_transform = posterior_transform
self.set_X_pending(X_pending)
self.samplers = ModuleList(samplers)
self.inner_samplers = ModuleList(inner_samplers)
self._valfunc_cls = valfunc_cls
self._valfunc_argfacs = valfunc_argfacs
self._collapse_fantasy_base_samples = collapse_fantasy_base_samples
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qMultiStepLookahead on the candidate set X.
Args:
X: A `batch_shape x q' x d`-dim Tensor with `q'` design points for each
batch, where `q' = q_0 + f_1 q_1 + f_2 f_1 q_2 + ...`. Here `q_i`
is the number of candidates jointly considered in look-ahead step
                `i`, and `f_i` is the respective number of fantasies.
Returns:
The acquisition value for each batch as a tensor of shape `batch_shape`.
"""
Xs = self.get_multi_step_tree_input_representation(X)
# set batch_range on samplers if not collapsing on fantasy dims
if not self._collapse_fantasy_base_samples:
self._set_samplers_batch_range(batch_shape=X.shape[:-2])
return _step(
model=self.model,
Xs=Xs,
samplers=self.samplers,
valfunc_cls=self._valfunc_cls,
valfunc_argfacs=self._valfunc_argfacs,
inner_samplers=self.inner_samplers,
objective=self.objective,
posterior_transform=self.posterior_transform,
running_val=None,
)
@property
def _num_auxiliary(self) -> int:
r"""Number of auxiliary variables in the q-batch dimension.
Returns:
`q_aux` s.t. `q + q_aux = augmented_q_batch_size`
"""
return np.dot(self.batch_sizes, np.cumprod(self.num_fantasies)).item()
def _set_samplers_batch_range(self, batch_shape: Size) -> None:
r"""Set batch_range on samplers.
Args:
batch_shape: The batch shape of the input tensor `X`.
"""
tbatch_dim_start = -2 - len(batch_shape)
for s in self.samplers:
s.batch_range_override = (tbatch_dim_start, -2)
def get_augmented_q_batch_size(self, q: int) -> int:
r"""Get augmented q batch size for one-shot optimzation.
Args:
q: The number of candidates to consider jointly.
Returns:
            The augmented size for one-shot optimization (including variables
parameterizing the fantasy solutions): `q_0 + f_1 q_1 + f_2 f_1 q_2 + ...`
"""
return q + self._num_auxiliary
def get_split_shapes(self, X: Tensor) -> Tuple[Size, List[Size], List[int]]:
r"""Get the split shapes from X.
Args:
X: A `batch_shape x q_aug x d`-dim tensor including fantasy points.
Returns:
A 3-tuple `(batch_shape, shapes, sizes)`, where
            `shapes[i] = f_i x ... x f_1 x batch_shape x q_i x d` and
            `sizes[i] = f_i * ... * f_1 * q_i`.
"""
batch_shape, (q_aug, d) = X.shape[:-2], X.shape[-2:]
q = q_aug - self._num_auxiliary
batch_sizes = [q] + self.batch_sizes
# X_i needs to have shape f_i x .... x f_1 x batch_shape x q_i x d
shapes = [
torch.Size(self.num_fantasies[:i][::-1] + [*batch_shape, q_i, d])
for i, q_i in enumerate(batch_sizes)
]
# Each X_i in the split X has shape batch_shape x qtilde x d with
# qtilde = f_i * ... * f_1 * q_i
sizes = [s[: (-2 - len(batch_shape))].numel() * s[-2] for s in shapes]
return batch_shape, shapes, sizes
def get_multi_step_tree_input_representation(self, X: Tensor) -> List[Tensor]:
r"""Get the multi-step tree representation of X.
Args:
X: A `batch_shape x q' x d`-dim Tensor with `q'` design points for each
batch, where `q' = q_0 + f_1 q_1 + f_2 f_1 q_2 + ...`. Here `q_i`
is the number of candidates jointly considered in look-ahead step
                `i`, and `f_i` is the respective number of fantasies.
Returns:
A list `[X_j, ..., X_k]` of tensors, where `X_i` has shape
`f_i x .... x f_1 x batch_shape x q_i x d`.
"""
batch_shape, shapes, sizes = self.get_split_shapes(X=X)
# Each X_i in Xsplit has shape batch_shape x qtilde x d with
# qtilde = f_i * ... * f_1 * q_i
Xsplit = torch.split(X, sizes, dim=-2)
# now reshape (need to permute batch_shape and qtilde dimensions for i > 0)
perm = [-2] + list(range(len(batch_shape))) + [-1]
X0 = Xsplit[0].reshape(shapes[0])
Xother = [
X.permute(*perm).reshape(shape) for X, shape in zip(Xsplit[1:], shapes[1:])
]
# concatenate in pending points
if self.X_pending is not None:
X0 = torch.cat([X0, match_batch_shape(self.X_pending, X0)], dim=-2)
return [X0] + Xother
def extract_candidates(self, X_full: Tensor) -> Tensor:
r"""We only return X as the set of candidates post-optimization.
Args:
X_full: A `batch_shape x q' x d`-dim Tensor with `q'` design points for
each batch, where `q' = q + f_1 q_1 + f_2 f_1 q_2 + ...`.
Returns:
A `batch_shape x q x d`-dim Tensor with `q` design points for each batch.
"""
return X_full[..., : -self._num_auxiliary, :]
def get_induced_fantasy_model(self, X: Tensor) -> Model:
r"""Fantasy model induced by X.
Args:
X: A `batch_shape x q' x d`-dim Tensor with `q'` design points for each
batch, where `q' = q_0 + f_1 q_1 + f_2 f_1 q_2 + ...`. Here `q_i`
is the number of candidates jointly considered in look-ahead step
                `i`, and `f_i` is the respective number of fantasies.
Returns:
The fantasy model induced by X.
"""
Xs = self.get_multi_step_tree_input_representation(X)
# set batch_range on samplers if not collapsing on fantasy dims
if not self._collapse_fantasy_base_samples:
self._set_samplers_batch_range(batch_shape=X.shape[:-2])
return _get_induced_fantasy_model(
model=self.model, Xs=Xs, samplers=self.samplers
)
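# Illustrative sketch (hypothetical setup): a two-step lookahead with one candidate
# per stage and 4 / 2 fantasies. With the default value functions this corresponds
# to the (parallel) multi-step knowledge gradient; `model` is a placeholder for a
# fitted single-output model.
def _example_two_step_lookahead(model: Model) -> qMultiStepLookahead:
    acqf = qMultiStepLookahead(
        model=model,
        batch_sizes=[1, 1],  # q_1 = q_2 = 1 look-ahead candidates per stage
        num_fantasies=[4, 2],  # f_1 = 4, f_2 = 2 fantasy samples per stage
    )
    # One-shot optimization operates on the augmented q-batch:
    # q' = q + f_1 * q_1 + f_2 * f_1 * q_2 = 1 + 4 + 8 = 13 points for q = 1.
    assert acqf.get_augmented_q_batch_size(1) == 13
    return acqf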
def _step(
model: Model,
Xs: List[Tensor],
samplers: List[Optional[MCSampler]],
valfunc_cls: List[Optional[Type[AcquisitionFunction]]],
valfunc_argfacs: List[Optional[TAcqfArgConstructor]],
inner_samplers: List[Optional[MCSampler]],
objective: MCAcquisitionObjective,
posterior_transform: Optional[PosteriorTransform],
running_val: Optional[Tensor] = None,
sample_weights: Optional[Tensor] = None,
step_index: int = 0,
) -> Tensor:
r"""Recursive multi-step look-ahead computation.
Helper function computing the "value-to-go" of a multi-step lookahead scheme.
Args:
model: A Model of appropriate batch size. Specifically, it must be possible to
evaluate the model's posterior at `Xs[0]`.
Xs: A list `[X_j, ..., X_k]` of tensors, where `X_i` has shape
`f_i x .... x f_1 x batch_shape x q_i x d`.
samplers: A list of `k - j` samplers, such that the number of samples of sampler
`i` is `f_i`. The last element of this list is considered the
"inner sampler", which is used for evaluating the objective in case it is an
MCAcquisitionObjective.
valfunc_cls: A list of acquisition function class to be used as the (stage +
terminal) value functions. Each element (except for the last one) can be
`None`, in which case a zero stage value is assumed for the respective
stage.
valfunc_argfacs: A list of callables that map a `Model` and input tensor `X` to
a dictionary of kwargs for the respective stage value function constructor.
If `None`, only the standard `model`, `sampler` and `objective` kwargs will
be used.
inner_samplers: A list of `MCSampler` objects, each to be used in the stage
value function at the corresponding index.
objective: The MCAcquisitionObjective under which the model output is evaluated.
posterior_transform: A PosteriorTransform. Used to transform the posterior
before sampling / evaluating the model output.
        running_val: A `batch_shape`-dim tensor containing the current running value.
sample_weights: A tensor of shape `f_i x .... x f_1 x batch_shape` when called
in the `i`-th step by which to weight the stage value samples. Used in
conjunction with Gauss-Hermite integration or importance sampling. Assumed
to be `None` in the initial step (when `step_index=0`).
step_index: The index of the look-ahead step. `step_index=0` indicates the
initial step.
Returns:
A `b`-dim tensor containing the multi-step value of the design `X`.
"""
X = Xs[0]
if sample_weights is None: # only happens in the initial step
sample_weights = torch.ones(*X.shape[:-2], device=X.device, dtype=X.dtype)
# compute stage value
stage_val = _compute_stage_value(
model=model,
valfunc_cls=valfunc_cls[0],
X=X,
objective=objective,
posterior_transform=posterior_transform,
inner_sampler=inner_samplers[0],
arg_fac=valfunc_argfacs[0],
)
if stage_val is not None: # update running value
# if not None, running_val has shape f_{i-1} x ... x f_1 x batch_shape
# stage_val has shape f_i x ... x f_1 x batch_shape
# this sum will add a dimension to running_val so that
# updated running_val has shape f_i x ... x f_1 x batch_shape
running_val = stage_val if running_val is None else running_val + stage_val
# base case: no more fantasizing, return value
if len(Xs) == 1:
# compute weighted average over all leaf nodes of the tree
batch_shape = running_val.shape[step_index:]
# expand sample weights to make sure it is the same shape as running_val,
# because we need to take a sum over sample weights for computing the
# weighted average
sample_weights = sample_weights.expand(running_val.shape)
return (running_val * sample_weights).view(-1, *batch_shape).sum(dim=0)
# construct fantasy model (with batch shape f_{j+1} x ... x f_1 x batch_shape)
prop_grads = step_index > 0 # need to propagate gradients for steps > 0
fantasy_model = model.fantasize(
X=X, sampler=samplers[0], observation_noise=True, propagate_grads=prop_grads
)
# augment sample weights appropriately
sample_weights = _construct_sample_weights(
prev_weights=sample_weights, sampler=samplers[0]
)
return _step(
model=fantasy_model,
Xs=Xs[1:],
samplers=samplers[1:],
valfunc_cls=valfunc_cls[1:],
valfunc_argfacs=valfunc_argfacs[1:],
inner_samplers=inner_samplers[1:],
objective=objective,
posterior_transform=posterior_transform,
sample_weights=sample_weights,
running_val=running_val,
step_index=step_index + 1,
)
def _compute_stage_value(
model: Model,
valfunc_cls: Optional[Type[AcquisitionFunction]],
X: Tensor,
objective: MCAcquisitionObjective,
posterior_transform: Optional[PosteriorTransform],
inner_sampler: Optional[MCSampler] = None,
arg_fac: Optional[TAcqfArgConstructor] = None,
) -> Optional[Tensor]:
r"""Compute the stage value of a multi-step look-ahead policy.
Args:
model: A Model of appropriate batch size. Specifically, it must be possible to
evaluate the model's posterior at `Xs[0]`.
valfunc_cls: The acquisition function class to be used as the stage value
functions. If `None`, a zero stage value is assumed (returns `None`)
X: A tensor with shape `f_i x .... x f_1 x batch_shape x q_i x d` when called in
the `i`-th step.
objective: The MCAcquisitionObjective under which the model output is evaluated.
posterior_transform: A PosteriorTransform.
inner_sampler: An `MCSampler` object to be used in the stage value function. Can
be `None` for analytic acquisition functions or when using the default
sampler of the acquisition function class.
arg_fac: A callable mapping a `Model` and the input tensor `X` to a dictionary
of kwargs for the stage value function constructor. If `None`, only the
standard `model`, `sampler` and `objective` kwargs will be used.
Returns:
A `f_i x ... x f_1 x batch_shape`-dim tensor of stage values, or `None`
(= zero stage value).
"""
if valfunc_cls is None:
return None
common_kwargs: Dict[str, Any] = {
"model": model,
"posterior_transform": posterior_transform,
}
if issubclass(valfunc_cls, MCAcquisitionFunction):
common_kwargs["sampler"] = inner_sampler
common_kwargs["objective"] = objective
kwargs = arg_fac(model=model, X=X) if arg_fac is not None else {}
stage_val_func = valfunc_cls(**common_kwargs, **kwargs)
# shape of stage_val is f_i x ... x f_1 x batch_shape
stage_val = stage_val_func(X=X)
return stage_val
def _construct_sample_weights(
prev_weights: Tensor, sampler: MCSampler
) -> Optional[Tensor]:
r"""Iteratively construct tensor of sample weights for multi-step look-ahead.
Args:
prev_weights: A `f_i x .... x f_1 x batch_shape` tensor of previous sample
weights.
sampler: A `MCSampler` that may have sample weights as the `base_weights`
attribute. If the sampler does not have a `base_weights` attribute,
samples are weighted uniformly.
Returns:
A `f_{i+1} x .... x f_1 x batch_shape` tensor of sample weights for the next
step.
"""
new_weights = getattr(sampler, "base_weights", None) # TODO: generalize this
if new_weights is None:
# uniform weights
nf = sampler.sample_shape[0]
new_weights = torch.ones(
nf, device=prev_weights.device, dtype=prev_weights.dtype
)
# reshape new_weights to be f_{i+1} x 1 x ... x 1
new_weights = new_weights.view(-1, *(1 for _ in prev_weights.shape))
# normalize new_weights to sum to 1.0
new_weights = new_weights / new_weights.sum()
return new_weights * prev_weights
def _construct_inner_samplers(
batch_sizes: List[int],
valfunc_cls: List[Optional[Type[AcquisitionFunction]]],
inner_mc_samples: List[Optional[int]],
objective: Optional[MCAcquisitionObjective] = None,
) -> List[Optional[MCSampler]]:
r"""Check validity of inputs and construct inner samplers.
Helper function to be used internally for constructing inner samplers.
Args:
batch_sizes: A list `[q_1, ..., q_k]` containing the batch sizes for the
`k` look-ahead steps.
valfunc_cls: A list of `k + 1` acquisition function classes to be used as the
(stage + terminal) value functions. Each element (except for the last one)
can be `None`, in which case a zero stage value is assumed for the
respective stage.
inner_mc_samples: A list `[n_0, ..., n_k]` containing the number of MC
samples to be used for evaluating the stage value function. Ignored if
the objective is `None`.
objective: The objective under which the output is evaluated. If `None`, use
the model output (requires a single-output model or a posterior transform).
Otherwise the objective is MC-evaluated (using `inner_sampler`).
Returns:
        A list with `k + 1` elements that are either `MCSampler`s or `None`.
"""
inner_samplers = []
for q, vfc, mcs in zip([None] + batch_sizes, valfunc_cls, inner_mc_samples):
if vfc is None:
inner_samplers.append(None)
elif vfc == qMultiStepLookahead:
raise UnsupportedError(
"qMultiStepLookahead not supported as a value function "
"(I see what you did there, nice try...)."
)
elif issubclass(vfc, AnalyticAcquisitionFunction):
if objective is not None:
raise UnsupportedError(
"Only PosteriorTransforms are supported for analytic value "
f"functions. Received a {objective.__class__.__name__}."
)
            # At this point, we do not yet know the initial q-batch size
if q is not None and q > 1:
raise UnsupportedError(
"Only batch sizes of q=1 are supported for analytic value "
"functions."
)
if q is not None and mcs is not None:
warnings.warn(
"inner_mc_samples is ignored for analytic acquistion functions",
BotorchWarning,
)
inner_samplers.append(None)
else:
inner_sampler = SobolQMCNormalSampler(
sample_shape=torch.Size([32 if mcs is None else mcs])
)
inner_samplers.append(inner_sampler)
return inner_samplers
def _get_induced_fantasy_model(
model: Model, Xs: List[Tensor], samplers: List[Optional[MCSampler]]
) -> Model:
r"""Recursive computation of the fantasy model induced by an input tree.
Args:
model: A Model of appropriate batch size. Specifically, it must be possible to
evaluate the model's posterior at `Xs[0]`.
Xs: A list `[X_j, ..., X_k]` of tensors, where `X_i` has shape
`f_i x .... x f_1 x batch_shape x q_i x d`.
samplers: A list of `k - j` samplers, such that the number of samples of sampler
`i` is `f_i`. The last element of this list is considered the
"inner sampler", which is used for evaluating the objective in case it is an
MCAcquisitionObjective.
Returns:
A Model obtained by iteratively fantasizing over the input tree `Xs`.
"""
if len(Xs) == 1:
return model
else:
fantasy_model = model.fantasize(
X=Xs[0],
sampler=samplers[0],
observation_noise=True,
)
return _get_induced_fantasy_model(
model=fantasy_model, Xs=Xs[1:], samplers=samplers[1:]
)
def warmstart_multistep(
acq_function: qMultiStepLookahead,
bounds: Tensor,
num_restarts: int,
raw_samples: int,
full_optimizer: Tensor,
**kwargs: Any,
) -> Tensor:
r"""Warm-start initialization for multi-step look-ahead acquisition functions.
For now uses the same q' as in `full_optimizer`. TODO: allow different `q`.
Args:
acq_function: A qMultiStepLookahead acquisition function.
bounds: A `2 x d` tensor of lower and upper bounds for each column of features.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of raw samples to consider in the initialization
heuristic.
full_optimizer: The full tree of optimizers of the previous iteration of shape
`batch_shape x q' x d`. Typically obtained by passing
`return_best_only=False` and `return_full_tree=True` into `optimize_acqf`.
kwargs: Optimization kwargs.
Returns:
A `num_restarts x q' x d` tensor for initial points for optimization.
This is a very simple initialization heuristic.
TODO: Use the observed values to identify the fantasy sub-tree that is closest to
the observed value.
"""
batch_shape, shapes, sizes = acq_function.get_split_shapes(full_optimizer)
Xopts = torch.split(full_optimizer, sizes, dim=-2)
tkwargs = {"device": Xopts[0].device, "dtype": Xopts[0].dtype}
B = Beta(torch.ones(1, **tkwargs), 3 * torch.ones(1, **tkwargs))
def mixin_layer(X: Tensor, bounds: Tensor, eta: float) -> Tensor:
perturbations = unnormalize(B.sample(X.shape).squeeze(-1), bounds)
return (1 - eta) * X + eta * perturbations
def make_init_tree(Xopts: List[Tensor], bounds: Tensor, etas: Tensor) -> Tensor:
Xtrs = [mixin_layer(X=X, bounds=bounds, eta=eta) for eta, X in zip(etas, Xopts)]
return torch.cat(Xtrs, dim=-2)
def mixin_tree(T: Tensor, bounds: Tensor, alpha: float) -> Tensor:
return (1 - alpha) * T + alpha * unnormalize(torch.rand_like(T), bounds)
n_repeat = math.ceil(raw_samples / batch_shape[0])
alphas = torch.linspace(0, 0.75, n_repeat, **tkwargs)
etas = torch.linspace(0.1, 1.0, len(Xopts), **tkwargs)
X_full = torch.cat(
[
mixin_tree(
T=make_init_tree(Xopts=Xopts, bounds=bounds, etas=etas),
bounds=bounds,
alpha=alpha,
)
for alpha in alphas
],
dim=0,
)
with torch.no_grad():
Y_full = acq_function(X_full)
X_init = initialize_q_batch(X=X_full, Y=Y_full, n=num_restarts, eta=1.0)
return X_init[:raw_samples]
def make_best_f(model: Model, X: Tensor) -> Dict[str, Any]:
r"""Extract the best observed training input from the model."""
return {"best_f": model.train_targets.max(dim=-1).values}
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
A wrapper around AcquisitionFunctions to fix certain features for optimization.
This is useful e.g. for performing contextual optimization.
"""
from __future__ import annotations
from numbers import Number
from typing import List, Optional, Sequence, Union
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from torch import Tensor
from torch.nn import Module
def get_dtype_of_sequence(values: Sequence[Union[Tensor, float]]) -> torch.dtype:
"""
Return torch.float32 if everything is single-precision and torch.float64
otherwise.
Numbers (non-tensors) are double-precision.
"""
def _is_single(value: Union[Tensor, float]) -> bool:
return isinstance(value, Tensor) and value.dtype == torch.float32
all_single_precision = all(_is_single(value) for value in values)
return torch.float32 if all_single_precision else torch.float64
def get_device_of_sequence(values: Sequence[Union[Tensor, float]]) -> torch.device:
"""
    CPU if everything is on the CPU; CUDA otherwise.
Numbers (non-tensors) are considered to be on the CPU.
"""
def _is_cuda(value: Union[Tensor, float]) -> bool:
return hasattr(value, "device") and value.device == torch.device("cuda")
any_cuda = any(_is_cuda(value) for value in values)
return torch.device("cuda") if any_cuda else torch.device("cpu")
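# Illustrative example: mixing a plain Python float (treated as double precision)
# with a single-precision tensor promotes the whole sequence to torch.float64.
def _example_sequence_dtype() -> torch.dtype:
    values = [torch.tensor([0.5], dtype=torch.float32), 1.0]
    return get_dtype_of_sequence(values)  # torch.float64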
class FixedFeatureAcquisitionFunction(AcquisitionFunction):
"""A wrapper around AquisitionFunctions to fix a subset of features.
Example:
>>> model = SingleTaskGP(train_X, train_Y) # d = 5
>>> qEI = qExpectedImprovement(model, best_f=0.0)
>>> columns = [2, 4]
>>> values = X[..., columns]
>>> qEI_FF = FixedFeatureAcquisitionFunction(qEI, 5, columns, values)
>>> qei = qEI_FF(test_X) # d' = 3
"""
def __init__(
self,
acq_function: AcquisitionFunction,
d: int,
columns: List[int],
values: Union[Tensor, Sequence[Union[Tensor, float]]],
) -> None:
r"""Derived Acquisition Function by fixing a subset of input features.
Args:
acq_function: The base acquisition function, operating on input
tensors `X_full` of feature dimension `d`.
d: The feature dimension expected by `acq_function`.
columns: `d_f < d` indices of columns in `X_full` that are to be
fixed to the provided values.
values: The values to which to fix the columns in `columns`. Either
a full `batch_shape x q x d_f` tensor of values (if values are
different for each of the `q` input points), or an array-like of
values that is broadcastable to the input across `t`-batch and
`q`-batch dimensions, e.g. a list of length `d_f` if values
are the same across all `t` and `q`-batch dimensions, or a
combination of `Tensor`s and numbers which can be broadcasted
to form a tensor with trailing dimension size of `d_f`.
"""
Module.__init__(self)
self.acq_func = acq_function
self.d = d
if isinstance(values, Tensor):
new_values = values.detach().clone()
else:
dtype = get_dtype_of_sequence(values)
device = get_device_of_sequence(values)
new_values = []
for value in values:
if isinstance(value, Number):
value = torch.tensor([value], dtype=dtype)
else:
if value.ndim == 0: # since we can't broadcast with zero-d tensors
value = value.unsqueeze(0)
value = value.detach().clone()
new_values.append(value.to(dtype=dtype, device=device))
# There are 3 cases for when `values` is a `Sequence`.
# 1) `values` == list of floats as earlier.
# 2) `values` == combination of floats and `Tensor`s.
# 3) `values` == a list of `Tensor`s.
# For 1), the below step creates a vector of length `len(values)`
# For 2), the below step creates a `Tensor` of shape `batch_shape x q x d_f`
# with the broadcasting functionality.
# For 3), this is simply a concatenation, yielding a `Tensor` with the
# same shape as in 2).
# The key difference arises when `_construct_X_full` is invoked.
# In 1), the expansion (`self.values.expand`) will expand the `Tensor` to
# size `batch_shape x q x d_f`.
# In 2) and 3), this expansion is a no-op because they are already of the
# required size. However, 2) and 3) _cannot_ support varying `batch_shape`,
# which means that all calls to `FixedFeatureAcquisitionFunction` have
# to have the same size throughout when `values` contains a `Tensor`.
# This is consistent with the scenario when a singular `Tensor` is passed
# as the `values` argument.
new_values = torch.cat(torch.broadcast_tensors(*new_values), dim=-1)
self.register_buffer("values", new_values)
# build selector for _construct_X_full
self._selector = []
idx_X, idx_f = 0, d - new_values.shape[-1]
for i in range(self.d):
if i in columns:
self._selector.append(idx_f)
idx_f += 1
else:
self._selector.append(idx_X)
idx_X += 1
def forward(self, X: Tensor):
r"""Evaluate base acquisition function under the fixed features.
Args:
X: Input tensor of feature dimension `d' < d` such that `d' + d_f = d`.
Returns:
Base acquisition function evaluated on tensor `X_full` constructed
by adding `values` in the appropriate places (see
`_construct_X_full`).
"""
X_full = self._construct_X_full(X)
return self.acq_func(X_full)
@property
def X_pending(self):
r"""Return the `X_pending` of the base acquisition function."""
try:
return self.acq_func.X_pending
except (ValueError, AttributeError):
raise ValueError(
f"Base acquisition function {type(self.acq_func).__name__} "
"does not have an `X_pending` attribute."
)
@X_pending.setter
def X_pending(self, X_pending: Optional[Tensor]):
r"""Sets the `X_pending` of the base acquisition function."""
if X_pending is not None:
self.acq_func.X_pending = self._construct_X_full(X_pending)
else:
self.acq_func.X_pending = X_pending
def _construct_X_full(self, X: Tensor) -> Tensor:
r"""Constructs the full input for the base acquisition function.
Args:
X: Input tensor with shape `batch_shape x q x d'` such that
`d' + d_f = d`.
Returns:
Tensor `X_full` of shape `batch_shape x q x d`, where
`X_full[..., i] = values[..., i]` if `i in columns`,
and `X_full[..., i] = X[..., j]`, with
            `j = i - sum_{l<=i} 1_{l in fixed_columns}`.
"""
d_prime, d_f = X.shape[-1], self.values.shape[-1]
if d_prime + d_f != self.d:
raise ValueError(
f"Feature dimension d' ({d_prime}) of input must be "
f"d - d_f ({self.d - d_f})."
)
# concatenate values to the end
values = self.values.to(X).expand(*X.shape[:-1], d_f)
X_perm = torch.cat([X, values], dim=-1)
# now select the appropriate column order
return X_perm[..., self._selector]
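# Worked example (illustrative, hypothetical numbers): the index bookkeeping from
# `__init__` for d = 5 with fixed columns [2, 4]. The resulting selector is
# [0, 1, 3, 2, 4], so an input X = [x0, x1, x3] concatenated with values [v2, v4]
# is reordered by `_construct_X_full` into X_full = [x0, x1, v2, x3, v4].
def _example_fixed_feature_selector() -> List[int]:
    d, columns, d_f = 5, [2, 4], 2
    selector, idx_X, idx_f = [], 0, d - d_f
    for i in range(d):
        if i in columns:
            selector.append(idx_f)
            idx_f += 1
        else:
            selector.append(idx_X)
            idx_X += 1
    return selector  # [0, 1, 3, 2, 4]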
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Cost functions for cost-aware acquisition functions, e.g. multi-fidelity KG.
To be used in a context where there is an objective/cost tradeoff.
"""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from typing import Any, Callable, Optional, Union
import torch
from botorch import settings
from botorch.acquisition.objective import (
GenericMCObjective,
IdentityMCObjective,
MCAcquisitionObjective,
)
from botorch.exceptions.warnings import CostAwareWarning
from botorch.models.deterministic import DeterministicModel
from botorch.models.gpytorch import GPyTorchModel
from botorch.sampling.base import MCSampler
from torch import Tensor
from torch.nn import Module
class CostAwareUtility(Module, ABC):
r"""
Abstract base class for cost-aware utilities.
:meta private:
"""
@abstractmethod
def forward(self, X: Tensor, deltas: Tensor, **kwargs: Any) -> Tensor:
r"""Evaluate the cost-aware utility on the candidates and improvements.
Args:
            X: A `batch_shape x q x d`-dim Tensor with `q` `d`-dim design
                points for each t-batch.
deltas: A `num_fantasies x batch_shape`-dim Tensor of `num_fantasy`
samples from the marginal improvement in utility over the
current state at `X` for each t-batch.
Returns:
A `num_fantasies x batch_shape`-dim Tensor of cost-transformed utilities.
"""
pass # pragma: no cover
class GenericCostAwareUtility(CostAwareUtility):
r"""Generic cost-aware utility wrapping a callable."""
def __init__(self, cost: Callable[[Tensor, Tensor], Tensor]) -> None:
r"""Generic cost-aware utility wrapping a callable.
Args:
cost: A callable mapping a `batch_shape x q x d'`-dim candidate set
to a `batch_shape`-dim tensor of costs
"""
super().__init__()
self._cost_callable: Callable[[Tensor, Tensor], Tensor] = cost
def forward(self, X: Tensor, deltas: Tensor, **kwargs: Any) -> Tensor:
r"""Evaluate the cost function on the candidates and improvements.
Args:
            X: A `batch_shape x q x d'`-dim Tensor with `q` `d'`-dim design
                points for each t-batch.
deltas: A `num_fantasies x batch_shape`-dim Tensor of `num_fantasy`
samples from the marginal improvement in utility over the
current state at `X` for each t-batch.
Returns:
A `num_fantasies x batch_shape`-dim Tensor of cost-weighted utilities.
"""
return self._cost_callable(X, deltas)
class InverseCostWeightedUtility(CostAwareUtility):
r"""A cost-aware utility using inverse cost weighting based on a model.
Computes the cost-aware utility by inverse-weighting samples
`U = (u_1, ..., u_N)` of the increase in utility. If `use_mean=True`, this
uses the posterior mean `mean_cost` of the cost model, i.e.
`weighted utility = mean(U) / mean_cost`. If `use_mean=False`, it uses
samples `C = (c_1, ..., c_N)` from the posterior of the cost model and
performs the inverse weighting on the sample level:
`weighted utility = mean(u_1 / c_1, ..., u_N / c_N)`.
The cost is additive across multiple elements of a q-batch.
"""
def __init__(
self,
cost_model: Union[DeterministicModel, GPyTorchModel],
use_mean: bool = True,
cost_objective: Optional[MCAcquisitionObjective] = None,
min_cost: float = 1e-2,
) -> None:
r"""Cost-aware utility that weights increase in utiltiy by inverse cost.
Args:
cost_model: A model of the cost of evaluating a candidate
set `X`, where `X` are the same features as in the model for the
acquisition function this is to be used with. If no cost_objective
is specified, the outputs are required to be non-negative.
use_mean: If True, use the posterior mean, otherwise use posterior
samples from the cost model.
cost_objective: If specified, transform the posterior mean / the
posterior samples from the cost model. This can be used e.g. to
un-transform predictions/samples of a cost model fit on the
log-transformed cost (often done to ensure non-negativity). If the
cost model is multi-output, then by default this will sum the cost
across outputs.
min_cost: A value used to clamp the cost samples so that they are not
too close to zero, which may cause numerical issues.
Returns:
            The inverse-cost-weighted utility.
"""
super().__init__()
if cost_objective is None:
if cost_model.num_outputs == 1:
cost_objective = IdentityMCObjective()
else:
# sum over outputs
cost_objective = GenericMCObjective(lambda Y, X: Y.sum(dim=-1))
self.cost_model = cost_model
self.cost_objective = cost_objective
self._use_mean = use_mean
self._min_cost = min_cost
def forward(
self,
X: Tensor,
deltas: Tensor,
sampler: Optional[MCSampler] = None,
X_evaluation_mask: Optional[Tensor] = None,
**kwargs: Any,
) -> Tensor:
r"""Evaluate the cost function on the candidates and improvements.
Args:
            X: A `batch_shape x q x d`-dim Tensor with `q` `d`-dim design
                points for each t-batch.
deltas: A `num_fantasies x batch_shape`-dim Tensor of `num_fantasy`
samples from the marginal improvement in utility over the
current state at `X` for each t-batch.
sampler: A sampler used for sampling from the posterior of the cost
model (required if `use_mean=False`, ignored if `use_mean=True`).
X_evaluation_mask: A `q x m`-dim boolean tensor indicating which
outcomes should be evaluated for each design in the batch.
Returns:
A `num_fantasies x batch_shape`-dim Tensor of cost-weighted utilities.
"""
if not self._use_mean and sampler is None:
raise RuntimeError("Must provide `sampler` if `use_mean=False`")
if X_evaluation_mask is not None:
# TODO: support different evaluation masks for each X. This requires
# either passing evaluation_mask to `cost_model.posterior`
# or assuming that evaluating `cost_model.posterior(X)` on all
# `q` points and then only selecting the costs for relevant points
# does not change the cost function for each point. This would not be
# true for instance if the incremental cost of evaluating an additional
# point decreased as the number of points increased.
if not all(
torch.equal(X_evaluation_mask[0], X_evaluation_mask[i])
for i in range(1, X_evaluation_mask.shape[0])
):
raise NotImplementedError(
"Currently, all candidates must be evaluated on the same outputs."
)
output_indices = X_evaluation_mask[0].nonzero().view(-1).tolist()
else:
output_indices = None
cost_posterior = self.cost_model.posterior(X, output_indices=output_indices)
if self._use_mean:
cost = cost_posterior.mean # batch_shape x q x m'
else:
# This will be of shape num_fantasies x batch_shape x q x m'
cost = sampler(cost_posterior)
# TODO: Make sure this doesn't change base samples in-place
cost = self.cost_objective(cost)
# Ensure non-negativity of the cost
if settings.debug.on():
if torch.any(cost < -1e-7):
warnings.warn(
"Encountered negative cost values in InverseCostWeightedUtility",
CostAwareWarning,
)
# clamp (away from zero) and sum cost across elements of the q-batch -
# this will be of shape `num_fantasies x batch_shape` or `batch_shape`
cost = cost.clamp_min(self._min_cost).sum(dim=-1)
# if we are doing inverse weighting on the sample level, clamp numerator.
if not self._use_mean:
deltas = deltas.clamp_min(0.0)
# compute and return the ratio on the sample level - If `use_mean=True`
# this operation involves broadcasting the cost across fantasies
return deltas / cost
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for acquisition functions.
"""
from __future__ import annotations
import math
from typing import Callable, Dict, List, Optional, Tuple
import torch
from botorch.acquisition.objective import (
IdentityMCObjective,
MCAcquisitionObjective,
PosteriorTransform,
)
from botorch.exceptions.errors import DeprecationError, UnsupportedError
from botorch.models.fully_bayesian import MCMC_DIM
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.sampling.get_sampler import get_sampler
from botorch.sampling.pathwise import draw_matheron_paths
from botorch.utils.objective import compute_feasibility_indicator
from botorch.utils.sampling import optimize_posterior_samples
from botorch.utils.transforms import is_fully_bayesian
from torch import Tensor
def get_acquisition_function(*args, **kwargs) -> None:
raise DeprecationError(
"`get_acquisition_function` has been moved to `botorch.acquisition.factory`."
)
def compute_best_feasible_objective(
samples: Tensor,
obj: Tensor,
constraints: Optional[List[Callable[[Tensor], Tensor]]],
model: Optional[Model] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_baseline: Optional[Tensor] = None,
infeasible_obj: Optional[Tensor] = None,
) -> Tensor:
"""Computes the largest `obj` value that is feasible under the `constraints`. If
`constraints` is None, returns the best unconstrained objective value.
When no feasible observations exist and `infeasible_obj` is not `None`, returns
`infeasible_obj` (potentially reshaped). When no feasible observations exist and
`infeasible_obj` is `None`, uses `model`, `objective`, `posterior_transform`, and
`X_baseline` to infer and return an `infeasible_obj` `M` s.t. `M < min_x f(x)`.
Args:
samples: `(sample_shape) x batch_shape x q x m`-dim posterior samples.
obj: A `(sample_shape) x batch_shape x q`-dim Tensor of MC objective values.
constraints: A list of constraint callables which map posterior samples to
a scalar. The associated constraint is considered satisfied if this
scalar is less than zero.
model: A Model, only required when there are no feasible observations.
objective: An MCAcquisitionObjective, only optionally used when there are no
feasible observations.
posterior_transform: A PosteriorTransform, only optionally used when there are
no feasible observations.
X_baseline: A `batch_shape x d`-dim Tensor of baseline points, only required
when there are no feasible observations.
infeasible_obj: A Tensor to be returned when no feasible points exist.
Returns:
A `(sample_shape) x batch_shape x 1`-dim Tensor of best feasible objectives.
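A minimal sketch (here `samples` and `obj` are assumed to come from an MC sampler
and objective, and the first outcome is feasible when negative):
Example:
>>> constraints = [lambda Y: Y[..., 0]]  # feasible iff the first outcome is < 0
>>> best_f = compute_best_feasible_objective(
...     samples=samples, obj=obj, constraints=constraints, model=model, X_baseline=train_X
... )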
"""
if constraints is None: # unconstrained case
# we don't need to differentiate through X_baseline for now, so taking
# the regular max over the n points to get best_f is fine
with torch.no_grad():
return obj.amax(dim=-1, keepdim=True)
is_feasible = compute_feasibility_indicator(
constraints=constraints, samples=samples
) # sample_shape x batch_shape x q
if is_feasible.any(dim=-1).all():
infeasible_value = -torch.inf
elif infeasible_obj is not None:
infeasible_value = infeasible_obj.item()
else:
if model is None:
raise ValueError(
"Must specify `model` when no feasible observation exists."
)
if X_baseline is None:
raise ValueError(
"Must specify `X_baseline` when no feasible observation exists."
)
infeasible_value = _estimate_objective_lower_bound(
model=model,
objective=objective,
posterior_transform=posterior_transform,
X=X_baseline,
).item()
obj = torch.where(is_feasible, obj, infeasible_value)
with torch.no_grad():
return obj.amax(dim=-1, keepdim=True)
def _estimate_objective_lower_bound(
model: Model,
objective: Optional[MCAcquisitionObjective],
posterior_transform: Optional[PosteriorTransform],
X: Tensor,
) -> Tensor:
"""Estimates a lower bound on the objective values by evaluating the model at convex
combinations of `X`, returning the 6-sigma lower bound of the computed statistics.
Args:
model: A fitted model.
objective: An MCAcquisitionObjective with `m` outputs.
posterior_transform: A PosteriorTransform.
X: A `n x d`-dim Tensor of design points from which to draw convex combinations.
Returns:
A `m`-dimensional Tensor of lower bounds of the objectives.
"""
convex_weights = torch.rand(
32,
X.shape[-2],
dtype=X.dtype,
device=X.device,
)
weights_sum = convex_weights.sum(dim=0, keepdim=True)
convex_weights = convex_weights / weights_sum
# infeasible cost M is such that -M < min_x f(x), thus
# 0 < min_x f(x) - (-M), so we should take -M as a lower
# bound on the best feasible objective
return -get_infeasible_cost(
X=convex_weights @ X,
model=model,
objective=objective,
posterior_transform=posterior_transform,
)
def get_infeasible_cost(
X: Tensor,
model: Model,
objective: Optional[Callable[[Tensor, Optional[Tensor]], Tensor]] = None,
posterior_transform: Optional[PosteriorTransform] = None,
) -> Tensor:
r"""Get infeasible cost for a model and objective.
For each outcome, computes an infeasible cost `M` such that
`-M < min_x f(x)` almost always, so that feasible points are preferred.
Args:
X: A `n x d` Tensor of `n` design points to use in evaluating the
minimum. These points should cover the design space well. The more
points the better the estimate, at the expense of added computation.
model: A fitted botorch model with `m` outcomes.
objective: The objective with which to evaluate the model output.
posterior_transform: A PosteriorTransform (optional).
Returns:
An `m`-dim tensor of infeasible cost values.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> objective = lambda Y: Y[..., -1] ** 2
>>> M = get_infeasible_cost(train_X, model, objective)
"""
if objective is None:
def objective(Y: Tensor, X: Optional[Tensor] = None):
return Y.squeeze(-1)
posterior = model.posterior(X, posterior_transform=posterior_transform)
lb = objective(posterior.mean - 6 * posterior.variance.clamp_min(0).sqrt(), X=X)
if lb.ndim < posterior.mean.ndim:
lb = lb.unsqueeze(-1)
# Take outcome-wise min. Loop over batch dims to handle batched models.
while lb.dim() > 1:
lb = lb.min(dim=-2).values
return -(lb.clamp_max(0.0))
def prune_inferior_points(
model: Model,
X: Tensor,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
num_samples: int = 2048,
max_frac: float = 1.0,
sampler: Optional[MCSampler] = None,
marginalize_dim: Optional[int] = None,
) -> Tensor:
r"""Prune points from an input tensor that are unlikely to be the best point.
Given a model, an objective, and an input tensor `X`, this function returns
the subset of points in `X` that have some probability of being the best
point under the objective. This function uses sampling to estimate the
probabilities; the higher the number of points `n` in `X`, the higher the
number of samples `num_samples` should be to obtain accurate estimates.
Args:
model: A fitted model. Batched models are currently not supported.
X: An input tensor of shape `n x d`. Batched inputs are currently not
supported.
objective: The objective under which to evaluate the posterior.
posterior_transform: A PosteriorTransform (optional).
num_samples: The number of samples used to compute empirical
probabilities of being the best point.
max_frac: The maximum fraction of points to retain. Must satisfy
`0 < max_frac <= 1`. Ensures that the number of elements in the
returned tensor does not exceed `ceil(max_frac * n)`.
sampler: If provided, will use this customized sampler instead of
automatically constructing one with `num_samples`.
marginalize_dim: A batch dimension that should be marginalized.
For example, this is useful when using a batched fully Bayesian
model.
Returns:
A `n' x d`-dim Tensor with a subset of the points in `X`, where
n' = min(N_nz, ceil(max_frac * n))
with `N_nz` the number of points in `X` that have non-zero (empirical,
under `num_samples` samples) probability of being the best point.
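A minimal sketch, assuming a fitted single-outcome `model` and an `n x d`
candidate tensor `X`:
Example:
>>> X_pruned = prune_inferior_points(model, X, max_frac=0.25)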
"""
if marginalize_dim is None and is_fully_bayesian(model):
# TODO: Properly deal with marginalizing fully Bayesian models
marginalize_dim = MCMC_DIM
if X.ndim > 2:
# TODO: support batched inputs (req. dealing with ragged tensors)
raise UnsupportedError(
"Batched inputs `X` are currently unsupported by prune_inferior_points"
)
max_points = math.ceil(max_frac * X.size(-2))
if max_points < 1 or max_points > X.size(-2):
raise ValueError(f"max_frac must take values in (0, 1], is {max_frac}")
with torch.no_grad():
posterior = model.posterior(X=X, posterior_transform=posterior_transform)
if sampler is None:
sampler = get_sampler(
posterior=posterior, sample_shape=torch.Size([num_samples])
)
samples = sampler(posterior)
if objective is None:
objective = IdentityMCObjective()
obj_vals = objective(samples, X=X)
if obj_vals.ndim > 2:
if obj_vals.ndim == 3 and marginalize_dim is not None:
obj_vals = obj_vals.mean(dim=marginalize_dim)
else:
# TODO: support batched inputs (req. dealing with ragged tensors)
raise UnsupportedError(
"Models with multiple batch dims are currently unsupported by"
" prune_inferior_points."
)
is_best = torch.argmax(obj_vals, dim=-1)
idcs, counts = torch.unique(is_best, return_counts=True)
if len(idcs) > max_points:
counts, order_idcs = torch.sort(counts, descending=True)
idcs = order_idcs[:max_points]
return X[idcs]
def project_to_target_fidelity(
X: Tensor, target_fidelities: Optional[Dict[int, float]] = None
) -> Tensor:
r"""Project `X` onto the target set of fidelities.
This function assumes that the set of feasible fidelities is a box, so
projecting here just means setting each fidelity parameter to its target
value.
Args:
X: A `batch_shape x q x d`-dim Tensor with `q` `d`-dim design points
for each t-batch.
target_fidelities: A dictionary mapping a subset of columns of `X` (the
fidelity parameters) to their respective target fidelity value. If
omitted, assumes that the last column of X is the fidelity parameter
with a target value of 1.0.
Return:
A `batch_shape x q x d`-dim Tensor `X_proj` with fidelity parameters
projected to the provided fidelity values.
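A minimal sketch in which columns 2 and 3 of `X` are fidelity parameters:
Example:
>>> X = torch.rand(4, 3, 5)
>>> X_proj = project_to_target_fidelity(X, target_fidelities={2: 1.0, 3: 0.75})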
"""
if target_fidelities is None:
target_fidelities = {-1: 1.0}
d = X.size(-1)
# normalize to positive indices
tfs = {k if k >= 0 else d + k: v for k, v in target_fidelities.items()}
ones = torch.ones(*X.shape[:-1], device=X.device, dtype=X.dtype)
# here we're looping through the feature dimension of X - this could be
# slow for large `d`, we should optimize this for that case
X_proj = torch.stack(
[X[..., i] if i not in tfs else tfs[i] * ones for i in range(d)], dim=-1
)
return X_proj
def expand_trace_observations(
X: Tensor, fidelity_dims: Optional[List[int]] = None, num_trace_obs: int = 0
) -> Tensor:
r"""Expand `X` with trace observations.
Expand a tensor of inputs with "trace observations" that are obtained during
the evaluation of the candidate set. This is used in multi-fidelity
optimization. It can be thought of as augmenting the `q`-batch with additional
points that are the expected trace observations.
Let `f_i` be the `i`-th fidelity parameter. Then this function assumes that
for each element of the q-batch, besides the fidelity `f_i`, we will observe
additional fidelities `f_i1, ..., f_iK`, where `K = num_trace_obs`, during
evaluation of the candidate set `X`. Specifically, this function assumes
that `f_ij = (K-j) / (num_trace_obs + 1) * f_i` for all `i`. That is, the
expansion is performed in parallel for all fidelities (it does not expand
out all possible combinations).
Args:
X: A `batch_shape x q x d`-dim Tensor with `q` `d`-dim design points
(incl. the fidelity parameters) for each t-batch.
fidelity_dims: The indices of the fidelity parameters. If omitted,
assumes that the last column of X contains the fidelity parameters.
num_trace_obs: The number of trace observations to use.
Return:
A `batch_shape x (q + num_trace_obs x q) x d` Tensor `X_expanded` that
expands `X` with trace observations.
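A minimal sketch, assuming the last column of `X` is the only fidelity parameter:
Example:
>>> X = torch.rand(4, 2, 3)
>>> # resulting shape is 4 x (2 + 2 * 2) x 3
>>> X_expanded = expand_trace_observations(X, fidelity_dims=[-1], num_trace_obs=2)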
"""
if num_trace_obs == 0: # No need to expand if we don't use trace observations
return X
if fidelity_dims is None:
fidelity_dims = [-1]
# The general strategy in the following is to expand `X` to the desired
# shape, and then multiply it (point-wise) with a tensor of scaling factors
reps = [1] * (X.ndim - 2) + [1 + num_trace_obs, 1]
X_expanded = X.repeat(*reps) # batch_shape x (q + num_trace_obs x q) x d
scale_fac = torch.ones_like(X_expanded)
s_pad = 1 / (num_trace_obs + 1)
# tensor of num_trace_obs scaling factors equally spaced between 1-s_pad and s_pad
sf = torch.linspace(1 - s_pad, s_pad, num_trace_obs, device=X.device, dtype=X.dtype)
# repeat each element q times
q = X.size(-2)
sf = torch.repeat_interleave(sf, q) # num_trace_obs * q
# now expand this to num_trace_obs x q x num_fidelities
sf = sf.unsqueeze(-1).expand(X_expanded.size(-2) - q, len(fidelity_dims))
# change relevant entries of the scaling tensor
scale_fac[..., q:, fidelity_dims] = sf
return scale_fac * X_expanded
def project_to_sample_points(X: Tensor, sample_points: Tensor) -> Tensor:
r"""Augment `X` with sample points at which to take weighted average.
Args:
X: A `batch_shape x 1 x d`-dim Tensor with one `d`-dim design point
for each t-batch.
sample_points: `p x d'`-dim Tensor (`d' < d`) of `d'`-dim sample points at
which to compute the expectation. The `d'`-dims refer to the trailing
columns of X.
Returns:
A `batch_shape x p x d` Tensor where the q-batch includes the `p` sample points.
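A minimal sketch with `d = 3` and `d' = 2`:
Example:
>>> X = torch.rand(4, 1, 3)
>>> sample_points = torch.rand(5, 2)
>>> X_aug = project_to_sample_points(X, sample_points)  # 4 x 5 x 3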
"""
batch_shape = X.shape[:-2]
p, d_prime = sample_points.shape
X_new = X.repeat(*(1 for _ in batch_shape), p, 1) # batch_shape x p x d
X_new[..., -d_prime:] = sample_points
return X_new
def get_optimal_samples(
model: Model,
bounds: Tensor,
num_optima: int,
raw_samples: int = 1024,
num_restarts: int = 20,
maximize: bool = True,
) -> Tuple[Tensor, Tensor]:
"""Draws sample paths from the posterior and maximizes the samples using GD.
Args:
model: The model from which samples are drawn.
bounds: Bounds of the search space. If the model inputs are
normalized, the bounds should be normalized as well.
num_optima: The number of paths to be drawn and optimized.
raw_samples: The number of candidates to sample randomly. Defaults to 1024.
num_restarts: The number of candidates for gradient-based
optimization. Defaults to 20.
maximize: Whether to maximize or minimize the samples.
Returns:
Tuple[Tensor, Tensor]: The optimal input locations and corresponding
outputs, x* and f*.
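A minimal sketch, assuming a fitted `model` over a `d`-dimensional unit cube:
Example:
>>> bounds = torch.stack([torch.zeros(d), torch.ones(d)])
>>> optimal_inputs, optimal_outputs = get_optimal_samples(model, bounds, num_optima=8)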
"""
paths = draw_matheron_paths(model, sample_shape=torch.Size([num_optima]))
optimal_inputs, optimal_outputs = optimize_posterior_samples(
paths,
bounds=bounds,
raw_samples=raw_samples,
num_restarts=num_restarts,
maximize=maximize,
)
return optimal_inputs, optimal_outputs
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Acquisition function for predictive entropy search (PES). The code utilizes the
implementation designed for the multi-objective batch setting.
NOTE: The PES acquisition might not be differentiable. As a result, we recommend
optimizing the acquisition function using finite differences.
"""
from __future__ import annotations
from typing import Any, Optional
from botorch.acquisition.multi_objective.predictive_entropy_search import (
qMultiObjectivePredictiveEntropySearch,
)
from botorch.models.model import Model
from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
from torch import Tensor
class qPredictiveEntropySearch(qMultiObjectivePredictiveEntropySearch):
r"""The acquisition function for Predictive Entropy Search.
This acquisition function approximates the mutual information between the
observation at a candidate point `X` and the optimal set of inputs using
expectation propagation (EP).
NOTES:
(i) The expectation propagation procedure can potentially fail due to unstable
EP updates. This is, however, unlikely to happen in the single-objective setting
because there are far fewer EP factors. The jitter added in the training phase
(`ep_jitter`) and testing phase (`test_jitter`) can be increased to prevent
these failures from happening. More details in the description of
`qMultiObjectivePredictiveEntropySearch`.
(ii) The estimated acquisition value could be negative.
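A minimal sketch, assuming `optimal_inputs` were sampled from the model posterior
(e.g. via `botorch.acquisition.utils.get_optimal_samples`):
Example:
>>> qPES = qPredictiveEntropySearch(model, optimal_inputs=optimal_inputs)
>>> pes = qPES(test_X)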
"""
def __init__(
self,
model: Model,
optimal_inputs: Tensor,
maximize: bool = True,
X_pending: Optional[Tensor] = None,
max_ep_iterations: int = 250,
ep_jitter: float = 1e-4,
test_jitter: float = 1e-4,
threshold: float = 1e-2,
**kwargs: Any,
) -> None:
r"""Predictive entropy search acquisition function.
Args:
model: A fitted single-outcome model.
optimal_inputs: A `num_samples x d`-dim tensor containing the sampled
optimal inputs of dimension `d`. We assume for simplicity that each
sample only contains one optimal set of inputs.
maximize: If true, we consider a maximization problem.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation, but have not yet been evaluated.
max_ep_iterations: The maximum number of expectation propagation
iterations. (The minimum number of iterations is set at 3.)
ep_jitter: The amount of jitter added for the matrix inversion that
occurs during the expectation propagation update during the training
phase.
test_jitter: The amount of jitter added for the matrix inversion that
occurs during the expectation propagation update in the testing
phase.
threshold: The convergence threshold for expectation propagation. This
assesses the relative change in the mean and covariance. We default
to one percent change i.e. `threshold = 1e-2`.
"""
super().__init__(
model=model,
pareto_sets=optimal_inputs.unsqueeze(-2),
maximize=maximize,
X_pending=X_pending,
max_ep_iterations=max_ep_iterations,
ep_jitter=ep_jitter,
test_jitter=test_jitter,
threshold=threshold,
)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qPredictiveEntropySearch on the candidate set `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim
design points each.
Returns:
A `batch_shape'`-dim Tensor of Predictive Entropy Search values at the
given design points `X`.
"""
return self._compute_information_gain(X)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Acquisition functions for Max-value Entropy Search (MES), General
Information-Based Bayesian Optimization (GIBBON), and
multi-fidelity MES with noisy observations and trace observations.
References
.. [Moss2021gibbon]
Moss, H. B., et al.,
GIBBON: General-purpose Information-Based Bayesian OptimisatioN.
Journal of Machine Learning Research, 2021.
.. [Takeno2020mfmves]
S. Takeno, H. Fukuoka, Y. Tsukada, T. Koyama, M. Shiga, I. Takeuchi,
M. Karasuyama. Multi-fidelity Bayesian Optimization with Max-value Entropy
Search and its Parallelization. Proceedings of the 37th International
Conference on Machine Learning, 2020.
.. [Wang2017mves]
Z. Wang, S. Jegelka, Max-value Entropy Search for Efficient
Bayesian Optimization. Proceedings of the 34th International
Conference on Machine Learning, 2017.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from copy import deepcopy
from math import log
from typing import Any, Callable, Optional
import numpy as np
import torch
from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin
from botorch.acquisition.cost_aware import CostAwareUtility, InverseCostWeightedUtility
from botorch.acquisition.objective import PosteriorTransform
from botorch.exceptions.errors import UnsupportedError
from botorch.models.cost import AffineFidelityCostModel
from botorch.models.model import Model
from botorch.models.utils import check_no_nans
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import match_batch_shape, t_batch_mode_transform
from linear_operator.functions import inv_quad
from linear_operator.utils.cholesky import psd_safe_cholesky
from scipy.optimize import brentq
from scipy.stats import norm
from torch import Tensor
CLAMP_LB = 1.0e-8
class MaxValueBase(AcquisitionFunction, ABC):
r"""Abstract base class for acquisition functions based on Max-value Entropy Search.
This class provides the basic building blocks for constructing max-value
entropy-based acquisition functions along the lines of [Wang2017mves]_.
Subclasses need to implement the `_sample_max_values` and `_compute_information_gain`
methods.
:meta private:
"""
def __init__(
self,
model: Model,
num_mv_samples: int,
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Single-outcome max-value entropy search-based acquisition functions.
Args:
model: A fitted single-outcome model.
num_mv_samples: Number of max value samples.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
"""
super().__init__(model=model)
if posterior_transform is None and model.num_outputs != 1:
raise UnsupportedError(
"Must specify a posterior transform when using a multi-output model."
)
# Batched GP models are not currently supported
try:
batch_shape = model.batch_shape
except NotImplementedError:
batch_shape = torch.Size()
if len(batch_shape) > 0:
raise NotImplementedError(
"Batched GP models (e.g., fantasized models) are not yet "
f"supported by `{self.__class__.__name__}`."
)
self.num_mv_samples = num_mv_samples
self.posterior_transform = posterior_transform
self.maximize = maximize
self.weight = 1.0 if maximize else -1.0
self.set_X_pending(X_pending)
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Compute max-value entropy at the design points `X`.
Args:
X: A `batch_shape x 1 x d`-dim Tensor of `batch_shape` t-batches
with `1` `d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of MVE values at the given design points `X`.
"""
# Compute the posterior, posterior mean, variance and std
posterior = self.model.posterior(
X.unsqueeze(-3),
observation_noise=False,
posterior_transform=self.posterior_transform,
)
# batch_shape x num_fantasies x (m) x 1
mean = self.weight * posterior.mean.squeeze(-1).squeeze(-1)
variance = posterior.variance.clamp_min(CLAMP_LB).view_as(mean)
ig = self._compute_information_gain(
X=X, mean_M=mean, variance_M=variance, covar_mM=variance.unsqueeze(-1)
)
return ig.mean(dim=0) # average over fantasies
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
r"""Set pending design points.
Set "pending points" to inform the acquisition function of the candidate
points that have been generated but are pending evaluation.
Args:
X_pending: `n x d` Tensor with `n` `d`-dim design points that have
been submitted for evaluation but have not yet been evaluated.
"""
if X_pending is not None:
X_pending = X_pending.detach().clone()
self._sample_max_values(num_samples=self.num_mv_samples, X_pending=X_pending)
self.X_pending = X_pending
# ------- Abstract methods that need to be implemented by subclasses ------- #
@abstractmethod
def _compute_information_gain(self, X: Tensor, **kwargs: Any) -> Tensor:
r"""Compute the information gain at the design points `X`.
`num_fantasies = 1` for non-fantasized models.
Args:
X: A `batch_shape x 1 x d`-dim Tensor of `batch_shape` t-batches
with `1` `d`-dim design point each.
kwargs: Other keyword arguments used by subclasses.
Returns:
A `num_fantasies x batch_shape`-dim Tensor of information gains at the
given design points `X` (`num_fantasies=1` for non-fantasized models).
"""
pass # pragma: no cover
@abstractmethod
def _sample_max_values(
self, num_samples: int, X_pending: Optional[Tensor] = None
) -> Tensor:
r"""Draw samples from the posterior over maximum values.
These samples are used to compute Monte Carlo approximations of expectations
over the posterior over the function maximum.
Args:
num_samples: The number of samples to draw.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Returns:
A `num_samples x num_fantasies` Tensor of posterior max value samples
(`num_fantasies=1` for non-fantasized models).
"""
pass # pragma: no cover
class DiscreteMaxValueBase(MaxValueBase):
r"""Abstract base class for MES-like methods using discrete max posterior sampling.
This class provides basic functionality for sampling posterior maximum values from
a surrogate Gaussian process model using a discrete set of candidates. It supports
either exact sampling (w.r.t. the candidate set) or a Gumbel approximation.
"""
def __init__(
self,
model: Model,
candidate_set: Tensor,
num_mv_samples: int = 10,
posterior_transform: Optional[PosteriorTransform] = None,
use_gumbel: bool = True,
maximize: bool = True,
X_pending: Optional[Tensor] = None,
train_inputs: Optional[Tensor] = None,
) -> None:
r"""Single-outcome MES-like acquisition functions based on discrete MV sampling.
Args:
model: A fitted single-outcome model.
candidate_set: A `n x d` Tensor including `n` candidate points to
discretize the design space. Max values are sampled from the
(joint) model posterior over these points.
num_mv_samples: Number of max value samples.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
use_gumbel: If True, use Gumbel approximation to sample the max values.
maximize: If True, consider the problem a maximization problem.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
train_inputs: A `n_train x d` Tensor that the model has been fitted on.
Not required if the model is an instance of a GPyTorch ExactGP model.
"""
self.use_gumbel = use_gumbel
if train_inputs is None and hasattr(model, "train_inputs"):
train_inputs = model.train_inputs[0]
if train_inputs is not None:
if train_inputs.ndim > 2:
raise NotImplementedError(
"Batch GP models (e.g. fantasized models) "
"are not yet supported by `MaxValueBase`"
)
train_inputs = match_batch_shape(train_inputs, candidate_set)
candidate_set = torch.cat([candidate_set, train_inputs], dim=0)
self.candidate_set = candidate_set
super().__init__(
model=model,
num_mv_samples=num_mv_samples,
posterior_transform=posterior_transform,
maximize=maximize,
X_pending=X_pending,
)
def _sample_max_values(
self, num_samples: int, X_pending: Optional[Tensor] = None
) -> Tensor:
r"""Draw samples from the posterior over maximum values on a discrete set.
These samples are used to compute Monte Carlo approximations of expectations
over the posterior over the function maximum.
Args:
num_samples: The number of samples to draw.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Returns:
A `num_samples x num_fantasies` Tensor of posterior max value samples
(`num_fantasies=1` for non-fantasized models).
"""
if self.use_gumbel:
sample_max_values = _sample_max_value_Gumbel
else:
sample_max_values = _sample_max_value_Thompson
candidate_set = self.candidate_set
with torch.no_grad():
if X_pending is not None:
# Append X_pending to candidate set
X_pending = match_batch_shape(X_pending, self.candidate_set)
candidate_set = torch.cat([self.candidate_set, X_pending], dim=0)
# project the candidate_set to the highest fidelity,
# which is needed for the multi-fidelity MES
try:
candidate_set = self.project(candidate_set)
except AttributeError:
pass
self.posterior_max_values = sample_max_values(
model=self.model,
candidate_set=candidate_set,
num_samples=self.num_mv_samples,
posterior_transform=self.posterior_transform,
maximize=self.maximize,
)
class qMaxValueEntropy(DiscreteMaxValueBase, MCSamplerMixin):
r"""The acquisition function for Max-value Entropy Search.
This acquisition function computes the mutual information of max values and
a candidate point X. See [Wang2017mves]_ for a detailed discussion.
The model must be single-outcome. The batch case `q > 1` is supported
through cyclic optimization and fantasies.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> candidate_set = torch.rand(1000, bounds.size(1))
>>> candidate_set = bounds[0] + (bounds[1] - bounds[0]) * candidate_set
>>> MES = qMaxValueEntropy(model, candidate_set)
>>> mes = MES(test_X)
"""
def __init__(
self,
model: Model,
candidate_set: Tensor,
num_fantasies: int = 16,
num_mv_samples: int = 10,
num_y_samples: int = 128,
posterior_transform: Optional[PosteriorTransform] = None,
use_gumbel: bool = True,
maximize: bool = True,
X_pending: Optional[Tensor] = None,
train_inputs: Optional[Tensor] = None,
) -> None:
r"""Single-outcome max-value entropy search acquisition function.
Args:
model: A fitted single-outcome model.
candidate_set: A `n x d` Tensor including `n` candidate points to
discretize the design space. Max values are sampled from the
(joint) model posterior over these points.
num_fantasies: Number of fantasies to generate. The higher this
number, the more accurate the model (at the expense of model
complexity, wall time and memory). Ignored if `X_pending` is `None`.
num_mv_samples: Number of max value samples.
num_y_samples: Number of posterior samples at specific design point `X`.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
use_gumbel: If True, use Gumbel approximation to sample the max values.
maximize: If True, consider the problem a maximization problem.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
train_inputs: A `n_train x d` Tensor that the model has been fitted on.
Not required if the model is an instance of a GPyTorch ExactGP model.
"""
super().__init__(
model=model,
candidate_set=candidate_set,
num_mv_samples=num_mv_samples,
posterior_transform=posterior_transform,
use_gumbel=use_gumbel,
maximize=maximize,
X_pending=X_pending,
train_inputs=train_inputs,
)
MCSamplerMixin.__init__(
self,
sampler=SobolQMCNormalSampler(sample_shape=torch.Size([num_y_samples])),
)
self._init_model = model # used for `fantasize()` when setting `X_pending`
self.fantasies_sampler = SobolQMCNormalSampler(
sample_shape=torch.Size([num_fantasies])
)
self.num_fantasies = num_fantasies
self.set_X_pending(X_pending) # this did not happen in the super constructor
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
r"""Set pending points.
Informs the acquisition function about pending design points,
fantasizes the model on the pending points and draws max-value samples
from the fantasized model posterior.
Args:
X_pending: `m x d` Tensor with `m` `d`-dim design points that have
been submitted for evaluation but have not yet been evaluated.
"""
try:
init_model = self._init_model
except AttributeError:
# Short-circuit (this allows calling the super constructor)
return
if X_pending is not None:
# fantasize the model and use this as the new model
self.model = init_model.fantasize(
X=X_pending, sampler=self.fantasies_sampler, observation_noise=True
)
else:
self.model = init_model
super().set_X_pending(X_pending)
def _compute_information_gain(
self, X: Tensor, mean_M: Tensor, variance_M: Tensor, covar_mM: Tensor
) -> Tensor:
r"""Computes the information gain at the design points `X`.
Approximately computes the information gain at the design points `X`,
for both MES with noisy observations and multi-fidelity MES with noisy
observation and trace observations.
The implementation is inspired from the papers on multi-fidelity MES by
[Takeno2020mfmves]_. The notation in the comments in this function follows
the Appendix C of [Takeno2020mfmves]_.
`num_fantasies = 1` for non-fantasized models.
Args:
X: A `batch_shape x 1 x d`-dim Tensor of `batch_shape` t-batches
with `1` `d`-dim design point each.
mean_M: A `batch_shape x num_fantasies x (m)`-dim Tensor of means.
variance_M: A `batch_shape x num_fantasies x (m)`-dim Tensor of variances.
covar_mM: A
`batch_shape x num_fantasies x (m) x (1 + num_trace_observations)`-dim
Tensor of covariances.
Returns:
A `num_fantasies x batch_shape`-dim Tensor of information gains at the
given design points `X` (`num_fantasies=1` for non-fantasized models).
"""
# compute the std_m, variance_m with noisy observation
posterior_m = self.model.posterior(
X.unsqueeze(-3),
observation_noise=True,
posterior_transform=self.posterior_transform,
)
# batch_shape x num_fantasies x (m) x (1 + num_trace_observations)
mean_m = self.weight * posterior_m.mean.squeeze(-1)
# batch_shape x num_fantasies x (m) x (1 + num_trace_observations)
variance_m = posterior_m.distribution.covariance_matrix
check_no_nans(variance_m)
# compute mean and std for fM|ym, x, Dt ~ N(u, s^2)
samples_m = self.weight * self.get_posterior_samples(posterior_m).squeeze(-1)
# s_m x batch_shape x num_fantasies x (m) x (1 + num_trace_observations)
L = psd_safe_cholesky(variance_m)
temp_term = torch.cholesky_solve(covar_mM.unsqueeze(-1), L).transpose(-2, -1)
# equivalent to torch.matmul(covar_mM.unsqueeze(-2), torch.inverse(variance_m))
# batch_shape x num_fantasies (m) x 1 x (1 + num_trace_observations)
mean_pt1 = torch.matmul(temp_term, (samples_m - mean_m).unsqueeze(-1))
mean_new = mean_pt1.squeeze(-1).squeeze(-1) + mean_M
# s_m x batch_shape x num_fantasies x (m)
variance_pt1 = torch.matmul(temp_term, covar_mM.unsqueeze(-1))
variance_new = variance_M - variance_pt1.squeeze(-1).squeeze(-1)
# batch_shape x num_fantasies x (m)
stdv_new = variance_new.clamp_min(CLAMP_LB).sqrt()
# batch_shape x num_fantasies x (m)
# define normal distribution to compute cdf and pdf
normal = torch.distributions.Normal(
torch.zeros(1, device=X.device, dtype=X.dtype),
torch.ones(1, device=X.device, dtype=X.dtype),
)
# Compute p(fM <= f* | ym, x, Dt)
view_shape = torch.Size(
[
self.posterior_max_values.shape[0],
# add 1s to broadcast across the batch_shape of X
*[1 for _ in range(X.ndim - self.posterior_max_values.ndim)],
*self.posterior_max_values.shape[1:],
]
) # s_M x batch_shape x num_fantasies x (m)
max_vals = self.posterior_max_values.view(view_shape).unsqueeze(1)
# s_M x 1 x batch_shape x num_fantasies x (m)
normalized_mvs_new = (max_vals - mean_new) / stdv_new
# s_M x s_m x batch_shape x num_fantasies x (m) =
# s_M x 1 x batch_shape x num_fantasies x (m)
# - s_m x batch_shape x num_fantasies x (m)
cdf_mvs_new = normal.cdf(normalized_mvs_new).clamp_min(CLAMP_LB)
# Compute p(fM <= f* | x, Dt)
stdv_M = variance_M.sqrt()
normalized_mvs = (max_vals - mean_M) / stdv_M
# s_M x 1 x batch_shape x num_fantasies x (m) =
# s_M x 1 x 1 x num_fantasies x (m) - batch_shape x num_fantasies x (m)
cdf_mvs = normal.cdf(normalized_mvs).clamp_min(CLAMP_LB)
# s_M x 1 x batch_shape x num_fantasies x (m)
# Compute log(p(ym | x, Dt))
log_pdf_fm = posterior_m.distribution.log_prob(
self.weight * samples_m
).unsqueeze(0)
# 1 x s_m x batch_shape x num_fantasies x (m)
# H0 = H(ym | x, Dt)
H0 = posterior_m.distribution.entropy() # batch_shape x num_fantasies x (m)
# regression adjusted H1 estimation, H1_hat = H1_bar - beta * (H0_bar - H0)
# H1 = E_{f*|x, Dt}[H(ym|f*, x, Dt)]
Z = cdf_mvs_new / cdf_mvs # s_M x s_m x batch_shape x num_fantasies x (m)
# s_M x s_m x batch_shape x num_fantasies x (m)
h1 = -Z * Z.log() - Z * log_pdf_fm
check_no_nans(h1)
dim = [0, 1] # dimension of fm samples, fM samples
H1_bar = h1.mean(dim=dim)
h0 = -log_pdf_fm
H0_bar = h0.mean(dim=dim)
cov = ((h1 - H1_bar) * (h0 - H0_bar)).mean(dim=dim)
beta = cov / (h0.var(dim=dim) * h1.var(dim=dim)).sqrt()
H1_hat = H1_bar - beta * (H0_bar - H0)
ig = H0 - H1_hat # batch_shape x num_fantasies x (m)
if self.posterior_max_values.ndim == 2:
permute_idcs = [-1, *range(ig.ndim - 1)]
else:
permute_idcs = [-2, *range(ig.ndim - 2), -1]
ig = ig.permute(*permute_idcs) # num_fantasies x batch_shape x (m)
return ig
class qLowerBoundMaxValueEntropy(DiscreteMaxValueBase):
r"""The acquisition function for General-purpose Information-Based
Bayesian Optimisation (GIBBON).
This acquisition function provides a computationally cheap approximation of
the mutual information between max values and a batch of candidate points `X`.
See [Moss2021gibbon]_ for a detailed discussion.
The model must be single-outcome, unless using a PosteriorTransform.
q > 1 is supported through greedy batch filling.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> candidate_set = torch.rand(1000, bounds.size(1))
>>> candidate_set = bounds[0] + (bounds[1] - bounds[0]) * candidate_set
>>> qGIBBON = qLowerBoundMaxValueEntropy(model, candidate_set)
>>> candidates, _ = optimize_acqf(qGIBBON, bounds, q=5)
"""
def _compute_information_gain(
self, X: Tensor, mean_M: Tensor, variance_M: Tensor, covar_mM: Tensor
) -> Tensor:
r"""Compute GIBBON's approximation of information gain at the design points `X`.
When using GIBBON for batch optimization (i.e., `q > 1`), we calculate the
additional information provided by adding a new candidate point to the current
batch of design points (`X_pending`), rather than calculating the information
provided by the whole batch. This allows a modest computational saving.
Args:
X: A `batch_shape x 1 x d`-dim Tensor of `batch_shape` t-batches
with `1` `d`-dim design point each.
mean_M: A `batch_shape x 1`-dim Tensor of means.
variance_M: A `batch_shape x 1`-dim Tensor of variances
consisting of `batch_shape` t-batches with `num_fantasies` fantasies.
covar_mM: A `batch_shape x num_fantasies x (1 + num_trace_observations)`
-dim Tensor of covariances.
Returns:
A `num_fantasies x batch_shape`-dim Tensor of information gains at the
given design points `X`.
"""
# TODO: give the Posterior API an add_observation_noise function to avoid
# doing posterior computations twice
# compute the mean_m, variance_m with noisy observation
posterior_m = self.model.posterior(
X, observation_noise=True, posterior_transform=self.posterior_transform
)
mean_m = self.weight * posterior_m.mean.squeeze(-1)
# batch_shape x 1
variance_m = posterior_m.variance.clamp_min(CLAMP_LB).squeeze(-1)
# batch_shape x 1
check_no_nans(variance_m)
# get stdv of noiseless variance
stdv = variance_M.sqrt()
# batch_shape x 1
# define normal distribution to compute cdf and pdf
normal = torch.distributions.Normal(
torch.zeros(1, device=X.device, dtype=X.dtype),
torch.ones(1, device=X.device, dtype=X.dtype),
)
# prepare max value quantities required by GIBBON
mvs = torch.transpose(self.posterior_max_values, 0, 1)
# 1 x s_M
normalized_mvs = (mvs - mean_m) / stdv
# batch_shape x s_M
cdf_mvs = normal.cdf(normalized_mvs).clamp_min(CLAMP_LB)
pdf_mvs = torch.exp(normal.log_prob(normalized_mvs))
ratio = pdf_mvs / cdf_mvs
check_no_nans(ratio)
# prepare squared correlation between current and target fidelity
rhos_squared = torch.pow(covar_mM.squeeze(-1), 2) / (variance_m * variance_M)
# batch_shape x 1
check_no_nans(rhos_squared)
# calculate quality contribution to the GIBBON acquisition function
inner_term = 1 - rhos_squared * ratio * (normalized_mvs + ratio)
acq = -0.5 * inner_term.clamp_min(CLAMP_LB).log()
# average over posterior max samples
acq = acq.mean(dim=1).unsqueeze(0)
if self.X_pending is None:
# for q=1, no repulsion term required
return acq
# for q>1 GIBBON requires repulsion terms r_i, where
# r_i = log |C_i| for the predictive
# correlation matrices C_i between each candidate point in X and
# the m current batch elements in X_pending.
# Each predictive covariance matrix can be expressed as
# V_i = [[v_i, A_i], [A_i, B]] for a shared m x m tensor B.
# So we can efficiently calculate |V_i| using the formula for the
# determinant of block matrices, i.e.
# |V_i| = (v_i - A_i^T * B^{-1} * A_i) * |B|
# As the |B| term does not depend on X and we later take its log,
# it provides only a translation of the acquisition function surface
# and can thus be ignored.
if self.posterior_transform is not None:
raise UnsupportedError(
"qLowerBoundMaxValueEntropy does not support PosteriorTransforms"
"when X_pending is not None."
)
X_batches = torch.cat(
[X, self.X_pending.unsqueeze(0).repeat(X.shape[0], 1, 1)], 1
)
# batch_shape x (1 + m) x d
# NOTE: This is the blocker for supporting posterior transforms.
# We would have to process this MVN, applying whatever operations
# are typically applied for the corresponding posterior, then applying
# the posterior transform onto the resulting object.
V = self.model(X_batches)
# Evaluate terms required for A
A = V.lazy_covariance_matrix[:, 0, 1:].unsqueeze(1)
# batch_shape x 1 x m
# Evaluate terms required for B
B = self.model.posterior(
self.X_pending,
observation_noise=True,
posterior_transform=self.posterior_transform,
).distribution.covariance_matrix.unsqueeze(0)
# 1 x m x m
# use determinant of block matrix formula
inv_quad_term = inv_quad(B, A.transpose(1, 2)).unsqueeze(1)
# NOTE: Even when using Cholesky to compute inv_quad, `V_determinant` can be
# negative due to numerical issues. To avoid this, we clamp the variance
# so that `V_determinant` > 0, while still allowing gradients to be
# propagated through `inv_quad_term`, as well as through `variance_m`
# in the expression for `r` below.
# choosing eps to be small while avoiding numerical underflow
eps = 1e-6 if inv_quad_term.dtype == torch.float32 else 1e-12
V_determinant = variance_m.clamp(inv_quad_term * (1 + eps)) - inv_quad_term
# batch_shape x 1
# Take logs and convert covariances to correlations.
r = V_determinant.log() - variance_m.log() # = log(1 - inv_quad / var)
r = 0.5 * r.transpose(0, 1)
return acq + r
class qMultiFidelityMaxValueEntropy(qMaxValueEntropy):
r"""Multi-fidelity max-value entropy.
The acquisition function for multi-fidelity max-value entropy search
with support for trace observations. See [Takeno2020mfmves]_
for a detailed discussion of the basic ideas on multi-fidelity MES
(note that this implementation is somewhat different).
The model must be single-outcome, unless using a PosteriorTransform.
The batch case `q > 1` is supported through cyclic optimization and fantasies.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> candidate_set = torch.rand(1000, bounds.size(1))
>>> candidate_set = bounds[0] + (bounds[1] - bounds[0]) * candidate_set
>>> MF_MES = qMultiFidelityMaxValueEntropy(model, candidate_set)
>>> mf_mes = MF_MES(test_X)
"""
def __init__(
self,
model: Model,
candidate_set: Tensor,
num_fantasies: int = 16,
num_mv_samples: int = 10,
num_y_samples: int = 128,
posterior_transform: Optional[PosteriorTransform] = None,
use_gumbel: bool = True,
maximize: bool = True,
X_pending: Optional[Tensor] = None,
cost_aware_utility: Optional[CostAwareUtility] = None,
project: Callable[[Tensor], Tensor] = lambda X: X,
expand: Callable[[Tensor], Tensor] = lambda X: X,
) -> None:
r"""Single-outcome max-value entropy search acquisition function.
Args:
model: A fitted single-outcome model.
candidate_set: A `n x d` Tensor including `n` candidate points to
discretize the design space, which will be used to sample the
max values from their posteriors.
num_fantasies: Number of fantasies to generate. The higher this
number, the more accurate the model (at the expense of model
complexity and performance); only used when `X_pending` is not `None`.
num_mv_samples: Number of max value samples.
num_y_samples: Number of posterior samples at specific design point `X`.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
use_gumbel: If True, use Gumbel approximation to sample the max values.
maximize: If True, consider the problem a maximization problem.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
cost_aware_utility: A CostAwareUtility computing the cost-transformed
utility from a candidate set and samples of increases in utility.
project: A callable mapping a `batch_shape x q x d` tensor of design
points to a tensor of the same shape projected to the desired
target set (e.g. the target fidelities in case of multi-fidelity
optimization).
expand: A callable mapping a `batch_shape x q x d` input tensor to
a `batch_shape x (q + q_e) x d`-dim output tensor, where the
`q_e` additional points in each q-batch correspond to
additional ("trace") observations.
"""
super().__init__(
model=model,
candidate_set=candidate_set,
num_fantasies=num_fantasies,
num_mv_samples=num_mv_samples,
num_y_samples=num_y_samples,
posterior_transform=posterior_transform,
use_gumbel=use_gumbel,
maximize=maximize,
X_pending=X_pending,
)
if cost_aware_utility is None:
cost_model = AffineFidelityCostModel(fidelity_weights={-1: 1.0})
cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)
self.cost_aware_utility = cost_aware_utility
self.expand = expand
self.project = project
self._cost_sampler = None
# @TODO make sure fidelity_dims align in project, expand & cost_aware_utility
# It seems very difficult due to the current way of handling project/expand
# resample max values after initializing self.project
# so that the max value samples are at the highest fidelity
self._sample_max_values(self.num_mv_samples)
@property
def cost_sampler(self):
if self._cost_sampler is None:
# Note: Using the deepcopy here is essential. Removing this poses a
# problem if the base model and the cost model have a different number
# of outputs or test points (this would be caused by expand), as this
# would trigger re-sampling the base samples in the fantasy sampler.
# By cloning the sampler here, the right thing will happen if the
# sizes are compatible; if they are not, this will result in
# samples being drawn using different base samples, but it will at
# least avoid changing state of the fantasy sampler.
self._cost_sampler = deepcopy(self.fantasies_sampler)
return self._cost_sampler
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluates `qMultifidelityMaxValueEntropy` at the design points `X`
Args:
X: A `batch_shape x 1 x d`-dim Tensor of `batch_shape` t-batches
with `1` `d`-dim design point each.
Returns:
A `batch_shape`-dim Tensor of MF-MVES values at the design points `X`.
"""
X_expand = self.expand(X) # batch_shape x (1 + num_trace_observations) x d
X_max_fidelity = self.project(X) # batch_shape x 1 x d
X_all = torch.cat((X_expand, X_max_fidelity), dim=-2).unsqueeze(-3)
# batch_shape x num_fantasies x (2 + num_trace_observations) x d
# Compute the posterior, posterior mean, variance without noise
# `_m` and `_M` in the var names means the current and the max fidelity.
posterior = self.model.posterior(
X_all, observation_noise=False, posterior_transform=self.posterior_transform
)
mean_M = self.weight * posterior.mean[..., -1, 0] # batch_shape x num_fantasies
variance_M = posterior.variance[..., -1, 0].clamp_min(CLAMP_LB)
# get the covariance between the low fidelities and max fidelity
covar_mM = posterior.distribution.covariance_matrix[..., :-1, -1]
# batch_shape x num_fantasies x (1 + num_trace_observations)
check_no_nans(mean_M)
check_no_nans(variance_M)
check_no_nans(covar_mM)
# compute the information gain (IG)
ig = self._compute_information_gain(
X=X_expand, mean_M=mean_M, variance_M=variance_M, covar_mM=covar_mM
)
ig = self.cost_aware_utility(X=X, deltas=ig, sampler=self.cost_sampler)
return ig.mean(dim=0) # average over the fantasies
class qMultiFidelityLowerBoundMaxValueEntropy(qMultiFidelityMaxValueEntropy):
r"""Multi-fidelity acquisition function for General-purpose Information-Based
Bayesian optimization (GIBBON).
The acquisition function for multi-fidelity max-value entropy search
with support for trace observations. See [Takeno2020mfmves]_
for a detailed discussion of the basic ideas on multi-fidelity MES
(note that this implementation is somewhat different). This acquisition function
is similar to `qMultiFidelityMaxValueEntropy` but computes the information gain
from the lower bound described in [Moss2021gibbon]_.
The model must be single-outcome, unless using a PosteriorTransform.
The batch case `q > 1` is supported through cyclic optimization and fantasies.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> candidate_set = torch.rand(1000, bounds.size(1))
>>> candidate_set = bounds[0] + (bounds[1] - bounds[0]) * candidate_set
>>> MF_qGIBBON = qMultiFidelityLowerBoundMaxValueEntropy(model, candidate_set)
>>> mf_gibbon = MF_qGIBBON(test_X)
"""
def _compute_information_gain(
self, X: Tensor, mean_M: Tensor, variance_M: Tensor, covar_mM: Tensor
) -> Tensor:
r"""Compute GIBBON's approximation of information gain at the design points `X`.
When using GIBBON for batch optimization (i.e., `q > 1`), we calculate the
additional information provided by adding a new candidate point to the current
batch of design points (`X_pending`), rather than calculating the information
provided by the whole batch. This allows a modest computational saving.
Args:
X: A `batch_shape x 1 x d`-dim Tensor of `batch_shape` t-batches
with `1` `d`-dim design point each.
mean_M: A `batch_shape x 1`-dim Tensor of means.
variance_M: A `batch_shape x 1`-dim Tensor of variances
consisting of `batch_shape` t-batches with `num_fantasies` fantasies.
covar_mM: A `batch_shape x num_fantasies x (1 + num_trace_observations)`
-dim Tensor of covariances.
Returns:
A `num_fantasies x batch_shape`-dim Tensor of information gains at the
given design points `X`.
"""
return qLowerBoundMaxValueEntropy._compute_information_gain(
self, X=X, mean_M=mean_M, variance_M=variance_M, covar_mM=covar_mM
)
def _sample_max_value_Thompson(
model: Model,
candidate_set: Tensor,
num_samples: int,
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
) -> Tensor:
"""Samples the max values by discrete Thompson sampling.
Should generally be called within a `with torch.no_grad()` context.
Args:
model: A fitted single-outcome model.
candidate_set: A `n x d` Tensor including `n` candidate points to
discretize the design space.
num_samples: Number of max value samples.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
Returns:
A `num_samples x num_fantasies` Tensor of posterior max value samples.
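A minimal sketch, assuming a fitted single-outcome `model` over the `d`-dim unit cube:
Example:
>>> candidate_set = torch.rand(1000, d)
>>> with torch.no_grad():
...     max_values = _sample_max_value_Thompson(model, candidate_set, num_samples=10)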
"""
posterior = model.posterior(candidate_set, posterior_transform=posterior_transform)
weight = 1.0 if maximize else -1.0
samples = weight * posterior.rsample(torch.Size([num_samples])).squeeze(-1)
# samples is num_samples x (num_fantasies) x n
max_values, _ = samples.max(dim=-1)
if len(samples.shape) == 2:
max_values = max_values.unsqueeze(-1) # num_samples x num_fantasies
return max_values
def _sample_max_value_Gumbel(
model: Model,
candidate_set: Tensor,
num_samples: int,
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
) -> Tensor:
"""Samples the max values by Gumbel approximation.
Should generally be called within a `with torch.no_grad()` context.
Args:
model: A fitted single-outcome model.
candidate_set: A `n x d` Tensor including `n` candidate points to
discretize the design space.
num_samples: Number of max value samples.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
Returns:
A `num_samples x num_fantasies` Tensor of posterior max value samples.
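A minimal sketch, assuming a fitted single-outcome `model` over the `d`-dim unit cube:
Example:
>>> candidate_set = torch.rand(1000, d)
>>> with torch.no_grad():
...     max_values = _sample_max_value_Gumbel(model, candidate_set, num_samples=10)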
"""
# define the approximate CDF for the max value under the independence assumption
posterior = model.posterior(candidate_set, posterior_transform=posterior_transform)
weight = 1.0 if maximize else -1.0
mu = weight * posterior.mean
sigma = posterior.variance.clamp_min(1e-8).sqrt()
# mu, sigma is (num_fantasies) X n X 1
if len(mu.shape) == 3 and mu.shape[-1] == 1:
mu = mu.squeeze(-1).T
sigma = sigma.squeeze(-1).T
# mu, sigma is now n X num_fantasies or n X 1
# bisect search to find the quantiles 25, 50, 75
lo = (mu - 3 * sigma).min(dim=0).values
hi = (mu + 5 * sigma).max(dim=0).values
num_fantasies = mu.shape[1]
device = candidate_set.device
dtype = candidate_set.dtype
quantiles = torch.zeros(num_fantasies, 3, device=device, dtype=dtype)
for i in range(num_fantasies):
lo_, hi_ = lo[i], hi[i]
N = norm(mu[:, i].cpu().numpy(), sigma[:, i].cpu().numpy())
quantiles[i, :] = torch.tensor(
[
brentq(lambda y: np.exp(np.sum(N.logcdf(y))) - p, lo_, hi_)
for p in [0.25, 0.50, 0.75]
]
)
q25, q50, q75 = quantiles[:, 0], quantiles[:, 1], quantiles[:, 2]
# q25, q50, q75 are 1 dimensional tensor with size of either 1 or num_fantasies
# parameter fitting based on matching percentiles for the Gumbel distribution
b = (q25 - q75) / (log(log(4.0 / 3.0)) - log(log(4.0)))
a = q50 + b * log(log(2.0))
# inverse sampling from the fitted Gumbel CDF distribution
sample_shape = (num_samples, num_fantasies)
eps = torch.rand(*sample_shape, device=device, dtype=dtype)
max_values = a - b * eps.log().mul(-1.0).log()
return max_values # num_samples x num_fantasies
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Abstract base module for all botorch acquisition functions."""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from typing import Optional
import torch
from botorch.exceptions import BotorchWarning
from botorch.models.model import Model, ModelDict
from botorch.posteriors.posterior import Posterior
from botorch.sampling.base import MCSampler
from botorch.sampling.get_sampler import get_sampler
from torch import Tensor
from torch.nn import Module
class AcquisitionFunction(Module, ABC):
r"""Abstract base class for acquisition functions.
Please note that if your acquisition requires a backwards call,
you will need to wrap the backwards call inside of an enable_grad
context to be able to optimize the acquisition. See #1164.
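A minimal sketch of a custom subclass (the class name and the use of the posterior
mean here are illustrative only, not part of the API):
Example:
>>> class ScalarizedPosteriorMeanAcqf(AcquisitionFunction):
...     def forward(self, X: Tensor) -> Tensor:
...         return self.model.posterior(X).mean.sum(dim=(-2, -1))
>>> acqf = ScalarizedPosteriorMeanAcqf(model)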
:meta private:
"""
def __init__(self, model: Model) -> None:
r"""Constructor for the AcquisitionFunction base class.
Args:
model: A fitted model.
"""
super().__init__()
self.model: Model = model
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
r"""Informs the acquisition function about pending design points.
Args:
X_pending: `n x d` Tensor with `n` `d`-dim design points that have
been submitted for evaluation but have not yet been evaluated.
"""
if X_pending is not None:
if X_pending.requires_grad:
warnings.warn(
"Pending points require a gradient but the acquisition function"
" will not provide a gradient to these points.",
BotorchWarning,
)
self.X_pending = X_pending.detach().clone()
else:
self.X_pending = X_pending
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate the acquisition function on the candidate set X.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of acquisition function values at the given
design points `X`.
"""
pass # pragma: no cover
class OneShotAcquisitionFunction(AcquisitionFunction, ABC):
r"""
Abstract base class for acquisition functions using one-shot optimization
:meta private:
"""
@abstractmethod
def get_augmented_q_batch_size(self, q: int) -> int:
r"""Get augmented q batch size for one-shot optimziation.
Args:
q: The number of candidates to consider jointly.
Returns:
The augmented size for one-shot optimization (including variables
parameterizing the fantasy solutions).
"""
pass # pragma: no cover
@abstractmethod
def extract_candidates(self, X_full: Tensor) -> Tensor:
r"""Extract the candidates from a full "one-shot" parameterization.
Args:
X_full: A `b x q_aug x d`-dim Tensor with `b` t-batches of `q_aug`
design points each.
Returns:
A `b x q x d`-dim Tensor with `b` t-batches of `q` design points each.
"""
pass # pragma: no cover
class MCSamplerMixin(ABC):
r"""A mix-in for adding sampler functionality into an acquisition function class.
Attributes:
_default_sample_shape: The `sample_shape` for the default sampler.
:meta private:
"""
_default_sample_shape = torch.Size([512])
def __init__(self, sampler: Optional[MCSampler] = None) -> None:
r"""Register the sampler on the acquisition function.
Args:
sampler: The sampler used to draw base samples for MC-based acquisition
functions. If `None`, a sampler is generated on the fly within
the `get_posterior_samples` method using `get_sampler`.
"""
self.sampler = sampler
def get_posterior_samples(self, posterior: Posterior) -> Tensor:
r"""Sample from the posterior using the sampler.
Args:
posterior: The posterior to sample from.
"""
if self.sampler is None:
self.sampler = get_sampler(
posterior=posterior, sample_shape=self._default_sample_shape
)
return self.sampler(posterior=posterior)
@property
def sample_shape(self) -> torch.Size:
return (
self.sampler.sample_shape
if self.sampler is not None
else self._default_sample_shape
)
class MultiModelAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for acquisition functions that require
multiple types of models.
The intended use case for these acquisition functions are those
where we have multiple models, each serving a distinct purpose.
As an example, we can have a "regression" model that predicts
one or more outcomes, and a "classification" model that predicts
    the probability that a given parameterization is feasible. The
multi-model acquisition function can then weight the acquisition
value computed with the "regression" model with the feasibility
value predicted by the "classification" model to produce the
composite acquisition value.
This is currently only a placeholder to help with some development
in Ax. We plan to add some acquisition functions utilizing multiple
models in the future.
:meta private:
"""
def __init__(self, model_dict: ModelDict) -> None:
r"""Constructor for the MultiModelAcquisitionFunction base class.
Args:
model_dict: A ModelDict mapping labels to models.
"""
super(AcquisitionFunction, self).__init__()
self.model_dict: ModelDict = model_dict
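# Illustrative sketch (not part of the original module): a minimal concrete
# `AcquisitionFunction` subclass. It assumes the wrapped model exposes
# `posterior(X).mean` and simply averages the posterior mean over the q-batch
# and output dimensions, mapping a `(b) x q x d` input to a `(b)`-dim output.
class _ExamplePosteriorMeanAcquisition(AcquisitionFunction):
    def forward(self, X: Tensor) -> Tensor:
        # posterior mean has shape `(b) x q x m`; reduce the last two dims
        return self.model.posterior(X).mean.mean(dim=(-2, -1))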
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Batch implementations of the LogEI family of improvement-based acquisition functions.
"""
from __future__ import annotations
from copy import deepcopy
from functools import partial
from typing import Any, Callable, List, Optional, Tuple, TypeVar, Union
import torch
from botorch.acquisition.cached_cholesky import CachedCholeskyMCAcquisitionFunction
from botorch.acquisition.monte_carlo import SampleReducingMCAcquisitionFunction
from botorch.acquisition.objective import (
ConstrainedMCObjective,
MCAcquisitionObjective,
PosteriorTransform,
)
from botorch.acquisition.utils import (
compute_best_feasible_objective,
prune_inferior_points,
)
from botorch.exceptions.errors import BotorchError
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.utils.safe_math import (
fatmax,
log_fatplus,
log_softplus,
logmeanexp,
smooth_amax,
)
from botorch.utils.transforms import match_batch_shape
from torch import Tensor
"""
NOTE: On the default temperature parameters:
tau_relu: It is generally important to set `tau_relu` to be very small, in particular,
smaller than the expected improvement value. Otherwise, the optimization can stagnate.
With the default `tau_relu=1e-6`, stagnation due to the smooth ReLU approximation is
exceedingly unlikely to occur for practical applications of BO.
IDEA: We could consider shrinking `tau_relu` with the progression of the optimization.
tau_max: This is only relevant for the batch (`q > 1`) case, and `tau_max=1e-2` is
sufficient to get a good approximation to the maximum improvement in the batch of
candidates. If `fat=False`, the smooth approximation to the maximum can saturate
numerically. It is therefore recommended to use `fat=True` when optimizing batches
of `q > 1` points.
"""
TAU_RELU = 1e-6
TAU_MAX = 1e-2
FloatOrTensor = TypeVar("FloatOrTensor", float, Tensor)
class LogImprovementMCAcquisitionFunction(SampleReducingMCAcquisitionFunction):
r"""
Abstract base class for Monte-Carlo-based batch LogEI acquisition functions.
:meta private:
"""
_log: bool = True
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
fat: bool = True,
tau_max: float = TAU_MAX,
) -> None:
r"""
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. If not given,
a sampler is generated using `get_sampler`.
NOTE: For posteriors that do not support base samples,
a sampler compatible with intended use case must be provided.
See `ForkedRNGSampler` and `StochasticSampler` as examples.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
            X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
                that have been submitted for function evaluation but have not
                yet been evaluated.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are satisfied if `constraint(samples) < 0`.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. See the docs of
`compute_(log_)constraint_indicator` for more details on this parameter.
fat: Toggles the logarithmic / linear asymptotic behavior of the smooth
approximation to the ReLU.
tau_max: Temperature parameter controlling the sharpness of the
approximation to the `max` operator over the `q` candidate points.
"""
if isinstance(objective, ConstrainedMCObjective):
raise BotorchError(
"Log-Improvement should not be used with `ConstrainedMCObjective`."
"Please pass the `constraints` directly to the constructor of the "
"acquisition function."
)
q_reduction = partial(fatmax if fat else smooth_amax, tau=tau_max)
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
sample_reduction=logmeanexp,
q_reduction=q_reduction,
constraints=constraints,
eta=eta,
fat=fat,
)
self.tau_max = tau_max
class qLogExpectedImprovement(LogImprovementMCAcquisitionFunction):
r"""MC-based batch Log Expected Improvement.
This computes qLogEI by
(1) sampling the joint posterior over q points,
(2) evaluating the smoothed log improvement over the current best for each sample,
(3) smoothly maximizing over q, and
(4) averaging over the samples in log space.
`qLogEI(X) ~ log(qEI(X)) = log(E(max(max Y - best_f, 0)))`,
where `Y ~ f(X)`, and `X = (x_1,...,x_q)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
        >>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1024)
>>> qLogEI = qLogExpectedImprovement(model, best_f, sampler)
>>> qei = qLogEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
fat: bool = True,
tau_max: float = TAU_MAX,
tau_relu: float = TAU_RELU,
) -> None:
r"""q-Log Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless). Can be
a `batch_shape`-shaped tensor, which in case of a batched model
specifies potentially different values for each element of the batch.
            sampler: The sampler used to draw base samples. See `MCAcquisitionFunction`
                for more details.
objective: The MCAcquisitionObjective under which the samples are evaluated.
Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into `X` upon forward call. Copied and set to have no
gradient.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are satisfied if `constraint(samples) < 0`.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. See the docs of
`compute_(log_)smoothed_constraint_indicator` for details.
fat: Toggles the logarithmic / linear asymptotic behavior of the smooth
approximation to the ReLU.
tau_max: Temperature parameter controlling the sharpness of the smooth
approximations to max.
tau_relu: Temperature parameter controlling the sharpness of the smooth
approximations to ReLU.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
constraints=constraints,
eta=eta,
tau_max=check_tau(tau_max, name="tau_max"),
fat=fat,
)
self.register_buffer("best_f", torch.as_tensor(best_f))
self.tau_relu = check_tau(tau_relu, name="tau_relu")
def _sample_forward(self, obj: Tensor) -> Tensor:
r"""Evaluate qLogExpectedImprovement on the candidate set `X`.
Args:
obj: `mc_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `mc_shape x batch_shape x q`-dim Tensor of expected improvement values.
"""
li = _log_improvement(
Y=obj,
best_f=self.best_f,
tau=self.tau_relu,
fat=self._fat,
)
return li
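# Illustrative sketch (not part of the original module): hypothetical usage of
# `qLogExpectedImprovement` with an outcome constraint. It assumes a fitted
# two-output model where output 0 is the objective and output 1 is feasible
# iff its posterior samples are negative (`constraint(samples) < 0`).
def _example_qlogei_with_constraint(model, best_f, test_X):
    from botorch.acquisition.objective import GenericMCObjective
    acqf = qLogExpectedImprovement(
        model=model,
        best_f=best_f,
        objective=GenericMCObjective(lambda Y, X=None: Y[..., 0]),
        constraints=[lambda Y: Y[..., 1]],  # feasible iff Y[..., 1] < 0
    )
    return acqf(test_X)  # `batch_shape`-dim tensor of log-acquisition values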
class qLogNoisyExpectedImprovement(
LogImprovementMCAcquisitionFunction, CachedCholeskyMCAcquisitionFunction
):
r"""MC-based batch Log Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. A smooth approximation
to the canonical improvement over previously observed points is computed
for each sample and the logarithm of the average is returned.
`qLogNEI(X) ~ log(qNEI(X)) = Log E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1024)
>>> qLogNEI = qLogNoisyExpectedImprovement(model, train_X, sampler)
>>> acqval = qLogNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Union[Tensor, float] = 1e-3,
fat: bool = True,
prune_baseline: bool = False,
cache_root: bool = True,
tau_max: float = TAU_MAX,
tau_relu: float = TAU_RELU,
**kwargs: Any,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `batch_shape x r x d`-dim Tensor of `r` design points
that have already been observed. These points are considered as
the potential best design point.
            sampler: The sampler used to draw base samples. See `MCAcquisitionFunction`
                for more details.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
            X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
                that have been submitted for function evaluation but have not
                yet been evaluated. Concatenated into `X` upon
forward call. Copied and set to have no gradient.
constraints: A list of constraint callables which map a Tensor of posterior
samples of dimension `sample_shape x batch-shape x q x m`-dim to a
`sample_shape x batch-shape x q`-dim Tensor. The associated constraints
are satisfied if `constraint(samples) < 0`.
eta: Temperature parameter(s) governing the smoothness of the sigmoid
approximation to the constraint indicators. See the docs of
`compute_(log_)smoothed_constraint_indicator` for details.
fat: Toggles the logarithmic / linear asymptotic behavior of the smooth
approximation to the ReLU.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
cache_root: A boolean indicating whether to cache the root
decomposition over `X_baseline` and use low-rank updates.
tau_max: Temperature parameter controlling the sharpness of the smooth
approximations to max.
tau_relu: Temperature parameter controlling the sharpness of the smooth
approximations to ReLU.
            kwargs: Here for compatibility with qNEI.
TODO: similar to qNEHVI, when we are using sequential greedy candidate
selection, we could incorporate pending points X_baseline and compute
the incremental q(Log)NEI from the new point. This would greatly increase
efficiency for large batches.
"""
# TODO: separate out baseline variables initialization and other functions
# in qNEI to avoid duplication of both code and work at runtime.
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
constraints=constraints,
eta=eta,
fat=fat,
tau_max=tau_max,
)
self.tau_relu = tau_relu
self._init_baseline(
model=model,
X_baseline=X_baseline,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
prune_baseline=prune_baseline,
cache_root=cache_root,
**kwargs,
)
def _sample_forward(self, obj: Tensor) -> Tensor:
r"""Evaluate qLogNoisyExpectedImprovement per sample on the candidate set `X`.
Args:
obj: `mc_shape x batch_shape x q`-dim Tensor of MC objective values.
Returns:
A `sample_shape x batch_shape x q`-dim Tensor of log noisy expected smoothed
improvement values.
"""
return _log_improvement(
Y=obj,
best_f=self.compute_best_f(obj),
tau=self.tau_relu,
fat=self._fat,
)
def _init_baseline(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
prune_baseline: bool = False,
cache_root: bool = True,
**kwargs: Any,
) -> None:
# setup of CachedCholeskyMCAcquisitionFunction
self._setup(model=model, cache_root=cache_root)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model,
X=X_baseline,
objective=objective,
posterior_transform=posterior_transform,
marginalize_dim=kwargs.get("marginalize_dim"),
)
self.register_buffer("X_baseline", X_baseline)
# registering buffers for _get_samples_and_objectives in the next `if` block
self.register_buffer("baseline_samples", None)
self.register_buffer("baseline_obj", None)
if self._cache_root:
self.q_in = -1
# set baseline samples
with torch.no_grad(): # this is _get_samples_and_objectives(X_baseline)
posterior = self.model.posterior(
X_baseline, posterior_transform=self.posterior_transform
)
# Note: The root decomposition is cached in two different places. It
# may be confusing to have two different caches, but this is not
# trivial to change since each is needed for a different reason:
# - LinearOperator caching to `posterior.mvn` allows for reuse within
# this function, which may be helpful if the same root decomposition
# is produced by the calls to `self.base_sampler` and
# `self._cache_root_decomposition`.
# - self._baseline_L allows a root decomposition to be persisted outside
# this method.
self.baseline_samples = self.get_posterior_samples(posterior)
self.baseline_obj = self.objective(self.baseline_samples, X=X_baseline)
# We make a copy here because we will write an attribute `base_samples`
# to `self.base_sampler.base_samples`, and we don't want to mutate
# `self.sampler`.
self.base_sampler = deepcopy(self.sampler)
self.register_buffer(
"_baseline_best_f",
self._compute_best_feasible_objective(
samples=self.baseline_samples, obj=self.baseline_obj
),
)
self._baseline_L = self._compute_root_decomposition(posterior=posterior)
def compute_best_f(self, obj: Tensor) -> Tensor:
"""Computes the best (feasible) noisy objective value.
Args:
obj: `sample_shape x batch_shape x q`-dim Tensor of objectives in forward.
Returns:
A `sample_shape x batch_shape x 1`-dim Tensor of best feasible objectives.
"""
if self._cache_root:
val = self._baseline_best_f
else:
val = self._compute_best_feasible_objective(
samples=self.baseline_samples, obj=self.baseline_obj
)
# ensuring shape, dtype, device compatibility with obj
n_sample_dims = len(self.sample_shape)
view_shape = torch.Size(
[
*val.shape[:n_sample_dims], # sample dimensions
*(1,) * (obj.ndim - val.ndim), # pad to match obj
*val.shape[n_sample_dims:], # the rest
]
)
return val.view(view_shape).to(obj)
def _get_samples_and_objectives(self, X: Tensor) -> Tuple[Tensor, Tensor]:
r"""Compute samples at new points, using the cached root decomposition.
Args:
X: A `batch_shape x q x d`-dim tensor of inputs.
Returns:
A two-tuple `(samples, obj)`, where `samples` is a tensor of posterior
samples with shape `sample_shape x batch_shape x q x m`, and `obj` is a
tensor of MC objective values with shape `sample_shape x batch_shape x q`.
"""
n_baseline, q = self.X_baseline.shape[-2], X.shape[-2]
X_full = torch.cat([match_batch_shape(self.X_baseline, X), X], dim=-2)
# TODO: Implement more efficient way to compute posterior over both training and
# test points in GPyTorch (https://github.com/cornellius-gp/gpytorch/issues/567)
posterior = self.model.posterior(
X_full, posterior_transform=self.posterior_transform
)
if not self._cache_root:
samples_full = super().get_posterior_samples(posterior)
obj_full = self.objective(samples_full, X=X_full)
# assigning baseline buffers so `best_f` can be computed in _sample_forward
self.baseline_samples, samples = samples_full.split([n_baseline, q], dim=-2)
self.baseline_obj, obj = obj_full.split([n_baseline, q], dim=-1)
return samples, obj
# handle one-to-many input transforms
n_plus_q = X_full.shape[-2]
n_w = posterior._extended_shape()[-2] // n_plus_q
q_in = q * n_w
self._set_sampler(q_in=q_in, posterior=posterior)
samples = self._get_f_X_samples(posterior=posterior, q_in=q_in)
obj = self.objective(samples, X=X_full[..., -q:, :])
return samples, obj
def _compute_best_feasible_objective(self, samples: Tensor, obj: Tensor) -> Tensor:
return compute_best_feasible_objective(
samples=samples,
obj=obj,
constraints=self._constraints,
model=self.model,
objective=self.objective,
posterior_transform=self.posterior_transform,
X_baseline=self.X_baseline,
)
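# Illustrative sketch (not part of the original module): hypothetical usage of
# `qLogNoisyExpectedImprovement` with baseline pruning enabled, which the
# docstring above generally recommends; `model`, `train_X`, and `test_X` are
# assumed to be defined elsewhere.
def _example_qlognei_with_pruning(model, train_X, test_X):
    acqf = qLogNoisyExpectedImprovement(
        model=model, X_baseline=train_X, prune_baseline=True
    )
    return acqf(test_X)  # `batch_shape`-dim tensor of qLogNEI values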
"""
###################################### utils ##########################################
"""
def _log_improvement(
Y: Tensor,
best_f: Tensor,
tau: Union[float, Tensor],
fat: bool,
) -> Tensor:
"""Computes the logarithm of the softplus-smoothed improvement, i.e.
`log_softplus(Y - best_f, beta=(1 / tau))`.
Note that softplus is an approximation to the regular ReLU objective whose maximum
pointwise approximation error is linear with respect to tau as tau goes to zero.
Args:
        Y: `mc_samples x batch_shape x q`-dim Tensor of output samples.
        best_f: Best previously observed objective value(s), broadcastable with `Y`.
        tau: Temperature parameter for the smooth approximation of ReLU.
            As `tau -> 0`, the maximum pointwise approximation error is linear
            w.r.t. `tau`.
fat: Toggles the logarithmic / linear asymptotic behavior of the
smooth approximation to ReLU.
Returns:
A `mc_samples x batch_shape x q`-dim Tensor of improvement values.
"""
log_soft_clamp = log_fatplus if fat else log_softplus
Z = Y - best_f.to(Y)
return log_soft_clamp(Z, tau=tau) # ~ ((Y - best_f) / Y_std).clamp(0)
def check_tau(tau: FloatOrTensor, name: str) -> FloatOrTensor:
"""Checks the validity of the tau arguments of the functions below, and returns
`tau` if it is valid."""
if isinstance(tau, Tensor) and tau.numel() != 1:
raise ValueError(name + f" is not a scalar: {tau.numel() = }.")
if not (tau > 0):
raise ValueError(name + f" is non-positive: {tau = }.")
return tau
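# Illustrative sketch (not part of the original module): a quick numerical
# check of the smoothed improvement. For Y - best_f well above tau, the value
# is close to log(Y - best_f); for Y below best_f it decays smoothly rather
# than returning -inf, which is what keeps gradients informative.
def _example_log_improvement_behavior():
    Y = torch.tensor([[-2.0, 0.0, 2.0]])
    best_f = torch.tensor(0.5)
    smoothed = _log_improvement(Y=Y, best_f=best_f, tau=TAU_RELU, fat=True)
    hard = (Y - best_f).clamp_min(0.0).log()  # exact log-ReLU: -inf below best_f
    return smoothed, hard  # agree at Y = 2.0; smoothed stays finite where hard is -inf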
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Analytic Acquisition Functions that evaluate the posterior without performing
Monte-Carlo sampling.
"""
from __future__ import annotations
import math
from abc import ABC
from contextlib import nullcontext
from copy import deepcopy
from typing import Dict, Optional, Tuple, Union
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.objective import PosteriorTransform
from botorch.exceptions import UnsupportedError
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import Model
from botorch.utils.constants import get_constants_like
from botorch.utils.probability import MVNXPB
from botorch.utils.probability.utils import (
log_ndtr as log_Phi,
log_phi,
log_prob_normal_in,
ndtr as Phi,
phi,
)
from botorch.utils.safe_math import log1mexp, logmeanexp
from botorch.utils.transforms import convert_to_target_pre_hook, t_batch_mode_transform
from torch import Tensor
from torch.nn.functional import pad
# the following two numbers are needed for _log_ei_helper
_neg_inv_sqrt2 = -(2**-0.5)
_log_sqrt_pi_div_2 = math.log(math.pi / 2) / 2
class AnalyticAcquisitionFunction(AcquisitionFunction, ABC):
r"""
Base class for analytic acquisition functions.
:meta private:
"""
def __init__(
self,
model: Model,
posterior_transform: Optional[PosteriorTransform] = None,
) -> None:
r"""Base constructor for analytic acquisition functions.
Args:
model: A fitted single-outcome model.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
"""
super().__init__(model=model)
if posterior_transform is None:
if model.num_outputs != 1:
raise UnsupportedError(
"Must specify a posterior transform when using a "
"multi-output model."
)
else:
if not isinstance(posterior_transform, PosteriorTransform):
raise UnsupportedError(
"AnalyticAcquisitionFunctions only support PosteriorTransforms."
)
self.posterior_transform = posterior_transform
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
raise UnsupportedError(
"Analytic acquisition functions do not account for X_pending yet."
)
def _mean_and_sigma(
self, X: Tensor, compute_sigma: bool = True, min_var: float = 1e-12
) -> Tuple[Tensor, Optional[Tensor]]:
"""Computes the first and second moments of the model posterior.
Args:
X: `batch_shape x q x d`-dim Tensor of model inputs.
compute_sigma: Boolean indicating whether or not to compute the second
moment (default: True).
            min_var: The minimum value the variance is clamped to. Should be positive.
Returns:
A tuple of tensors containing the first and second moments of the model
            posterior. Removes the last two dimensions if they have size one. The
            second element is `None` if `compute_sigma` is False.
"""
self.to(device=X.device) # ensures buffers / parameters are on the same device
posterior = self.model.posterior(
X=X, posterior_transform=self.posterior_transform
)
mean = posterior.mean.squeeze(-2).squeeze(-1) # removing redundant dimensions
if not compute_sigma:
return mean, None
sigma = posterior.variance.clamp_min(min_var).sqrt().view(mean.shape)
return mean, sigma
class LogProbabilityOfImprovement(AnalyticAcquisitionFunction):
r"""Single-outcome Log Probability of Improvement.
Logarithm of the probability of improvement over the current best observed value,
computed using the analytic formula under a Normal posterior distribution. Only
supports the case of q=1. Requires the posterior to be Gaussian. The model must be
single-outcome.
The logarithm of the probability of improvement is numerically better behaved
than the original function, which can lead to significantly improved optimization
of the acquisition function. This is analogous to the common practice of optimizing
    the *log* likelihood of a probabilistic model - rather than the likelihood - for
    the sake of maximum likelihood estimation.
`logPI(x) = log(P(y >= best_f)), y ~ f(x)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> LogPI = LogProbabilityOfImprovement(model, best_f=0.2)
>>> log_pi = LogPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
):
r"""Single-outcome Probability of Improvement.
Args:
model: A fitted single-outcome model.
best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
the best function value observed so far (assumed noiseless).
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
self.register_buffer("best_f", torch.as_tensor(best_f))
self.maximize = maximize
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate the Log Probability of Improvement on the candidate set X.
Args:
X: A `(b1 x ... bk) x 1 x d`-dim batched tensor of `d`-dim design points.
Returns:
A `(b1 x ... bk)`-dim tensor of Log Probability of Improvement values at
the given design points `X`.
"""
mean, sigma = self._mean_and_sigma(X)
u = _scaled_improvement(mean, sigma, self.best_f, self.maximize)
return log_Phi(u)
class ProbabilityOfImprovement(AnalyticAcquisitionFunction):
r"""Single-outcome Probability of Improvement.
Probability of improvement over the current best observed value, computed
using the analytic formula under a Normal posterior distribution. Only
supports the case of q=1. Requires the posterior to be Gaussian. The model
must be single-outcome.
`PI(x) = P(y >= best_f), y ~ f(x)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> PI = ProbabilityOfImprovement(model, best_f=0.2)
>>> pi = PI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
):
r"""Single-outcome Probability of Improvement.
Args:
model: A fitted single-outcome model.
best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
the best function value observed so far (assumed noiseless).
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
self.register_buffer("best_f", torch.as_tensor(best_f))
self.maximize = maximize
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate the Probability of Improvement on the candidate set X.
Args:
X: A `(b1 x ... bk) x 1 x d`-dim batched tensor of `d`-dim design points.
Returns:
A `(b1 x ... bk)`-dim tensor of Probability of Improvement values at the
given design points `X`.
"""
mean, sigma = self._mean_and_sigma(X)
u = _scaled_improvement(mean, sigma, self.best_f, self.maximize)
return Phi(u)
class qAnalyticProbabilityOfImprovement(AnalyticAcquisitionFunction):
r"""Approximate, single-outcome batch Probability of Improvement using MVNXPB.
This implementation uses MVNXPB, a bivariate conditioning algorithm for
approximating P(a <= Y <= b) for multivariate normal Y.
See [Trinh2015bivariate]_. This (analytic) approximate q-PI is given by
`approx-qPI(X) = P(max Y >= best_f) = 1 - P(Y < best_f), Y ~ f(X),
X = (x_1,...,x_q)`, where `P(Y < best_f)` is estimated using MVNXPB.
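    Example (an illustrative sketch, assuming a fitted single-outcome model):
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> qPI = qAnalyticProbabilityOfImprovement(model, best_f=train_Y.max())
        >>> val = qPI(test_X)  # test_X is a `batch_shape x q x d`-dim Tensor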
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
) -> None:
"""qPI using an analytic approximation.
Args:
model: A fitted single-outcome model.
best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
the best function value observed so far (assumed noiseless).
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
self.maximize = maximize
if not torch.is_tensor(best_f):
best_f = torch.tensor(best_f)
self.register_buffer("best_f", best_f)
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
"""Evaluate approximate qPI on the candidate set X.
Args:
            X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim design
                points each.
        Returns:
            A `batch_shape'`-dim Tensor of approximate Probability of Improvement
            values at the given design points `X`, where `batch_shape'` is the
            broadcasted batch shape of model and input `X`.
"""
self.best_f = self.best_f.to(X)
posterior = self.model.posterior(
X=X, posterior_transform=self.posterior_transform
)
covariance = posterior.distribution.covariance_matrix
bounds = pad(
(self.best_f.unsqueeze(-1) - posterior.distribution.mean).unsqueeze(-1),
pad=(1, 0) if self.maximize else (0, 1),
value=-float("inf") if self.maximize else float("inf"),
)
# 1 - P(no improvement over best_f)
solver = MVNXPB(covariance_matrix=covariance, bounds=bounds)
return -solver.solve().expm1()
class ExpectedImprovement(AnalyticAcquisitionFunction):
r"""Single-outcome Expected Improvement (analytic).
Computes classic Expected Improvement over the current best observed value,
using the analytic formula for a Normal posterior distribution. Unlike the
    MC-based acquisition functions, this relies on the posterior at a single test
    point being Gaussian (and requires the posterior to implement `mean` and
`variance` properties). Only supports the case of `q=1`. The model must be
single-outcome.
`EI(x) = E(max(f(x) - best_f, 0)),`
where the expectation is taken over the value of stochastic function `f` at `x`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> EI = ExpectedImprovement(model, best_f=0.2)
>>> ei = EI(test_X)
NOTE: It is *strongly* recommended to use LogExpectedImprovement instead of regular
EI, because it solves the vanishing gradient problem by taking special care of
numerical computations and can lead to substantially improved BO performance.
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
):
r"""Single-outcome Expected Improvement (analytic).
Args:
model: A fitted single-outcome model.
best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
the best function value observed so far (assumed noiseless).
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
self.register_buffer("best_f", torch.as_tensor(best_f))
self.maximize = maximize
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate Expected Improvement on the candidate set X.
Args:
X: A `(b1 x ... bk) x 1 x d`-dim batched tensor of `d`-dim design points.
Expected Improvement is computed for each point individually,
i.e., what is considered are the marginal posteriors, not the
joint.
Returns:
A `(b1 x ... bk)`-dim tensor of Expected Improvement values at the
given design points `X`.
"""
mean, sigma = self._mean_and_sigma(X)
u = _scaled_improvement(mean, sigma, self.best_f, self.maximize)
return sigma * _ei_helper(u)
class LogExpectedImprovement(AnalyticAcquisitionFunction):
r"""Logarithm of single-outcome Expected Improvement (analytic).
Computes the logarithm of the classic Expected Improvement acquisition function, in
a numerically robust manner. In particular, the implementation takes special care
to avoid numerical issues in the computation of the acquisition value and its
gradient in regions where improvement is predicted to be virtually impossible.
`LogEI(x) = log(E(max(f(x) - best_f, 0))),`
where the expectation is taken over the value of stochastic function `f` at `x`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> LogEI = LogExpectedImprovement(model, best_f=0.2)
>>> ei = LogEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
):
r"""Logarithm of single-outcome Expected Improvement (analytic).
Args:
model: A fitted single-outcome model.
best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
the best function value observed so far (assumed noiseless).
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
self.register_buffer("best_f", torch.as_tensor(best_f))
self.maximize = maximize
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate logarithm of Expected Improvement on the candidate set X.
Args:
X: A `(b1 x ... bk) x 1 x d`-dim batched tensor of `d`-dim design points.
Expected Improvement is computed for each point individually,
i.e., what is considered are the marginal posteriors, not the
joint.
Returns:
A `(b1 x ... bk)`-dim tensor of the logarithm of the Expected Improvement
values at the given design points `X`.
"""
mean, sigma = self._mean_and_sigma(X)
u = _scaled_improvement(mean, sigma, self.best_f, self.maximize)
return _log_ei_helper(u) + sigma.log()
class LogConstrainedExpectedImprovement(AnalyticAcquisitionFunction):
r"""Log Constrained Expected Improvement (feasibility-weighted).
Computes the logarithm of the analytic expected improvement for a Normal posterior
distribution weighted by a probability of feasibility. The objective and
constraints are assumed to be independent and have Gaussian posterior
distributions. Only supports non-batch mode (i.e. `q=1`). The model should be
multi-outcome, with the index of the objective and constraints passed to
the constructor.
`LogConstrainedEI(x) = log(EI(x)) + Sum_i log(P(y_i \in [lower_i, upper_i]))`,
where `y_i ~ constraint_i(x)` and `lower_i`, `upper_i` are the lower and
upper bounds for the i-th constraint, respectively.
Example:
# example where the 0th output has a non-negativity constraint and
# the 1st output is the objective
>>> model = SingleTaskGP(train_X, train_Y)
>>> constraints = {0: (0.0, None)}
>>> LogCEI = LogConstrainedExpectedImprovement(model, 0.2, 1, constraints)
>>> cei = LogCEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
objective_index: int,
constraints: Dict[int, Tuple[Optional[float], Optional[float]]],
maximize: bool = True,
) -> None:
r"""Analytic Log Constrained Expected Improvement.
Args:
model: A fitted multi-output model.
best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
the best feasible function value observed so far (assumed noiseless).
objective_index: The index of the objective.
constraints: A dictionary of the form `{i: [lower, upper]}`, where
`i` is the output index, and `lower` and `upper` are lower and upper
bounds on that output (resp. interpreted as -Inf / Inf if None)
maximize: If True, consider the problem a maximization problem.
"""
# Use AcquisitionFunction constructor to avoid check for posterior transform.
super(AnalyticAcquisitionFunction, self).__init__(model=model)
self.posterior_transform = None
self.maximize = maximize
self.objective_index = objective_index
self.constraints = constraints
self.register_buffer("best_f", torch.as_tensor(best_f))
_preprocess_constraint_bounds(self, constraints=constraints)
self.register_forward_pre_hook(convert_to_target_pre_hook)
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate Constrained Log Expected Improvement on the candidate set X.
Args:
X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design
points each.
Returns:
A `(b)`-dim Tensor of Log Expected Improvement values at the given
design points `X`.
"""
means, sigmas = self._mean_and_sigma(X) # (b) x 1 + (m = num constraints)
ind = self.objective_index
mean_obj, sigma_obj = means[..., ind], sigmas[..., ind]
u = _scaled_improvement(mean_obj, sigma_obj, self.best_f, self.maximize)
log_ei = _log_ei_helper(u) + sigma_obj.log()
log_prob_feas = _compute_log_prob_feas(self, means=means, sigmas=sigmas)
return log_ei + log_prob_feas
class ConstrainedExpectedImprovement(AnalyticAcquisitionFunction):
r"""Constrained Expected Improvement (feasibility-weighted).
Computes the analytic expected improvement for a Normal posterior
distribution, weighted by a probability of feasibility. The objective and
constraints are assumed to be independent and have Gaussian posterior
distributions. Only supports non-batch mode (i.e. `q=1`). The model should be
multi-outcome, with the index of the objective and constraints passed to
the constructor.
`Constrained_EI(x) = EI(x) * Product_i P(y_i \in [lower_i, upper_i])`,
where `y_i ~ constraint_i(x)` and `lower_i`, `upper_i` are the lower and
upper bounds for the i-th constraint, respectively.
Example:
# example where the 0th output has a non-negativity constraint and
# 1st output is the objective
>>> model = SingleTaskGP(train_X, train_Y)
>>> constraints = {0: (0.0, None)}
>>> cEI = ConstrainedExpectedImprovement(model, 0.2, 1, constraints)
>>> cei = cEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
objective_index: int,
constraints: Dict[int, Tuple[Optional[float], Optional[float]]],
maximize: bool = True,
) -> None:
r"""Analytic Constrained Expected Improvement.
Args:
model: A fitted multi-output model.
best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
the best feasible function value observed so far (assumed noiseless).
objective_index: The index of the objective.
constraints: A dictionary of the form `{i: [lower, upper]}`, where
`i` is the output index, and `lower` and `upper` are lower and upper
bounds on that output (resp. interpreted as -Inf / Inf if None)
maximize: If True, consider the problem a maximization problem.
"""
# Use AcquisitionFunction constructor to avoid check for posterior transform.
super(AnalyticAcquisitionFunction, self).__init__(model=model)
self.posterior_transform = None
self.maximize = maximize
self.objective_index = objective_index
self.constraints = constraints
self.register_buffer("best_f", torch.as_tensor(best_f))
_preprocess_constraint_bounds(self, constraints=constraints)
self.register_forward_pre_hook(convert_to_target_pre_hook)
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate Constrained Expected Improvement on the candidate set X.
Args:
X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design
points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
means, sigmas = self._mean_and_sigma(X) # (b) x 1 + (m = num constraints)
ind = self.objective_index
mean_obj, sigma_obj = means[..., ind], sigmas[..., ind]
u = _scaled_improvement(mean_obj, sigma_obj, self.best_f, self.maximize)
ei = sigma_obj * _ei_helper(u)
log_prob_feas = _compute_log_prob_feas(self, means=means, sigmas=sigmas)
return ei.mul(log_prob_feas.exp())
class LogNoisyExpectedImprovement(AnalyticAcquisitionFunction):
r"""Single-outcome Log Noisy Expected Improvement (via fantasies).
This computes Log Noisy Expected Improvement by averaging over the Expected
Improvement values of a number of fantasy models. Only supports the case
`q=1`. Assumes that the posterior distribution of the model is Gaussian.
The model must be single-outcome.
    `LogNEI(x) = log(E(max(y - max Y_base, 0))), (y, Y_base) ~ f((x, X_base))`, where
where `X_base` are previously observed points.
Note: This acquisition function currently relies on using a FixedNoiseGP (required
for noiseless fantasies).
Example:
>>> model = FixedNoiseGP(train_X, train_Y, train_Yvar=train_Yvar)
>>> LogNEI = LogNoisyExpectedImprovement(model, train_X)
>>> nei = LogNEI(test_X)
"""
def __init__(
self,
model: GPyTorchModel,
X_observed: Tensor,
num_fantasies: int = 20,
maximize: bool = True,
posterior_transform: Optional[PosteriorTransform] = None,
) -> None:
r"""Single-outcome Noisy Log Expected Improvement (via fantasies).
Args:
model: A fitted single-outcome model.
X_observed: A `n x d` Tensor of observed points that are likely to
be the best observed points so far.
num_fantasies: The number of fantasies to generate. The higher this
number the more accurate the model (at the expense of model
complexity and performance).
            maximize: If True, consider the problem a maximization problem.
            posterior_transform: A PosteriorTransform (optional).
"""
if not isinstance(model, FixedNoiseGP):
raise UnsupportedError(
"Only FixedNoiseGPs are currently supported for fantasy LogNEI"
)
# sample fantasies
from botorch.sampling.normal import SobolQMCNormalSampler
# Drop gradients from model.posterior if X_observed does not require gradients
# as otherwise, gradients of the GP's kernel's hyper-parameters are tracked
# through the rsample_from_base_sample method of GPyTorchPosterior. These
# gradients are usually only required w.r.t. the marginal likelihood.
with nullcontext() if X_observed.requires_grad else torch.no_grad():
posterior = model.posterior(X=X_observed)
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([num_fantasies]))
Y_fantasized = sampler(posterior).squeeze(-1)
batch_X_observed = X_observed.expand(num_fantasies, *X_observed.shape)
# The fantasy model will operate in batch mode
fantasy_model = _get_noiseless_fantasy_model(
model=model, batch_X_observed=batch_X_observed, Y_fantasized=Y_fantasized
)
super().__init__(model=fantasy_model, posterior_transform=posterior_transform)
best_f, _ = Y_fantasized.max(dim=-1) if maximize else Y_fantasized.min(dim=-1)
self.best_f, self.maximize = best_f, maximize
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate logarithm of the mean Expected Improvement on the candidate set X.
Args:
X: A `b1 x ... bk x 1 x d`-dim batched tensor of `d`-dim design points.
Returns:
A `b1 x ... bk`-dim tensor of Log Noisy Expected Improvement values at
the given design points `X`.
"""
# add batch dimension for broadcasting to fantasy models
mean, sigma = self._mean_and_sigma(X.unsqueeze(-3))
u = _scaled_improvement(mean, sigma, self.best_f, self.maximize)
log_ei = _log_ei_helper(u) + sigma.log()
# this is mathematically - though not numerically - equivalent to log(mean(ei))
return logmeanexp(log_ei, dim=-1)
class NoisyExpectedImprovement(ExpectedImprovement):
r"""Single-outcome Noisy Expected Improvement (via fantasies).
This computes Noisy Expected Improvement by averaging over the Expected
Improvement values of a number of fantasy models. Only supports the case
`q=1`. Assumes that the posterior distribution of the model is Gaussian.
The model must be single-outcome.
    `NEI(x) = E(max(y - max Y_baseline, 0)), (y, Y_baseline) ~ f((x, X_baseline))`,
where `X_baseline` are previously observed points.
Note: This acquisition function currently relies on using a FixedNoiseGP (required
for noiseless fantasies).
Example:
>>> model = FixedNoiseGP(train_X, train_Y, train_Yvar=train_Yvar)
>>> NEI = NoisyExpectedImprovement(model, train_X)
>>> nei = NEI(test_X)
"""
def __init__(
self,
model: GPyTorchModel,
X_observed: Tensor,
num_fantasies: int = 20,
maximize: bool = True,
) -> None:
r"""Single-outcome Noisy Expected Improvement (via fantasies).
Args:
model: A fitted single-outcome model.
X_observed: A `n x d` Tensor of observed points that are likely to
be the best observed points so far.
num_fantasies: The number of fantasies to generate. The higher this
number the more accurate the model (at the expense of model
complexity and performance).
maximize: If True, consider the problem a maximization problem.
"""
if not isinstance(model, FixedNoiseGP):
raise UnsupportedError(
"Only FixedNoiseGPs are currently supported for fantasy NEI"
)
# sample fantasies
from botorch.sampling.normal import SobolQMCNormalSampler
# Drop gradients from model.posterior if X_observed does not require gradients
# as otherwise, gradients of the GP's kernel's hyper-parameters are tracked
# through the rsample_from_base_sample method of GPyTorchPosterior. These
# gradients are usually only required w.r.t. the marginal likelihood.
with nullcontext() if X_observed.requires_grad else torch.no_grad():
posterior = model.posterior(X=X_observed)
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([num_fantasies]))
Y_fantasized = sampler(posterior).squeeze(-1)
batch_X_observed = X_observed.expand(num_fantasies, *X_observed.shape)
# The fantasy model will operate in batch mode
fantasy_model = _get_noiseless_fantasy_model(
model=model, batch_X_observed=batch_X_observed, Y_fantasized=Y_fantasized
)
best_f, _ = Y_fantasized.max(dim=-1) if maximize else Y_fantasized.min(dim=-1)
super().__init__(model=fantasy_model, best_f=best_f, maximize=maximize)
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate Expected Improvement on the candidate set X.
Args:
X: A `b1 x ... bk x 1 x d`-dim batched tensor of `d`-dim design points.
Returns:
A `b1 x ... bk`-dim tensor of Noisy Expected Improvement values at
the given design points `X`.
"""
# add batch dimension for broadcasting to fantasy models
mean, sigma = self._mean_and_sigma(X.unsqueeze(-3))
u = _scaled_improvement(mean, sigma, self.best_f, self.maximize)
return (sigma * _ei_helper(u)).mean(dim=-1)
class UpperConfidenceBound(AnalyticAcquisitionFunction):
r"""Single-outcome Upper Confidence Bound (UCB).
    Analytic upper confidence bound that comprises the posterior mean plus an
additional term: the posterior standard deviation weighted by a trade-off
parameter, `beta`. Only supports the case of `q=1` (i.e. greedy, non-batch
selection of design points). The model must be single-outcome.
`UCB(x) = mu(x) + sqrt(beta) * sigma(x)`, where `mu` and `sigma` are the
posterior mean and standard deviation, respectively.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> UCB = UpperConfidenceBound(model, beta=0.2)
>>> ucb = UCB(test_X)
"""
def __init__(
self,
model: Model,
beta: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
) -> None:
r"""Single-outcome Upper Confidence Bound.
Args:
model: A fitted single-outcome GP model (must be in batch mode if
candidate sets X will be)
beta: Either a scalar or a one-dim tensor with `b` elements (batch mode)
representing the trade-off parameter between mean and covariance
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
self.register_buffer("beta", torch.as_tensor(beta))
self.maximize = maximize
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate the Upper Confidence Bound on the candidate set X.
Args:
X: A `(b1 x ... bk) x 1 x d`-dim batched tensor of `d`-dim design points.
Returns:
A `(b1 x ... bk)`-dim tensor of Upper Confidence Bound values at the
given design points `X`.
"""
mean, sigma = self._mean_and_sigma(X)
return (mean if self.maximize else -mean) + self.beta.sqrt() * sigma
class PosteriorMean(AnalyticAcquisitionFunction):
r"""Single-outcome Posterior Mean.
Only supports the case of q=1. Requires the model's posterior to have a
`mean` property. The model must be single-outcome.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> PM = PosteriorMean(model)
>>> pm = PM(test_X)
"""
def __init__(
self,
model: Model,
posterior_transform: Optional[PosteriorTransform] = None,
maximize: bool = True,
) -> None:
r"""Single-outcome Posterior Mean.
Args:
model: A fitted single-outcome GP model (must be in batch mode if
candidate sets X will be)
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
maximize: If True, consider the problem a maximization problem. Note
that if `maximize=False`, the posterior mean is negated. As a
consequence `optimize_acqf(PosteriorMean(gp, maximize=False))`
actually returns -1 * minimum of the posterior mean.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
self.maximize = maximize
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate the posterior mean on the candidate set X.
Args:
X: A `(b1 x ... bk) x 1 x d`-dim batched tensor of `d`-dim design points.
Returns:
A `(b1 x ... bk)`-dim tensor of Posterior Mean values at the
given design points `X`.
"""
mean, _ = self._mean_and_sigma(X, compute_sigma=False)
return mean if self.maximize else -mean
class ScalarizedPosteriorMean(AnalyticAcquisitionFunction):
r"""Scalarized Posterior Mean.
This acquisition function returns a scalarized (across the q-batch)
posterior mean given a vector of weights.
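    Example (an illustrative sketch, assuming a fitted single-outcome model):
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> SPM = ScalarizedPosteriorMean(model, weights=torch.ones(q))
        >>> spm = SPM(test_X)  # scalarizes the posterior mean across the q-batch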
"""
def __init__(
self,
model: Model,
weights: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
) -> None:
r"""Scalarized Posterior Mean.
Args:
model: A fitted single-outcome model.
weights: A tensor of shape `q` for scalarization. In order to minimize
the scalarized posterior mean, pass -weights.
posterior_transform: A PosteriorTransform. If using a multi-output model,
a PosteriorTransform that transforms the multi-output posterior into a
single-output posterior is required.
"""
super().__init__(model=model, posterior_transform=posterior_transform)
self.register_buffer("weights", weights)
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate the scalarized posterior mean on the candidate set X.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches of `d`-dim design
points each.
Returns:
A `(b)`-dim Tensor of Posterior Mean values at the given design
points `X`.
"""
return self._mean_and_sigma(X, compute_sigma=False)[0] @ self.weights
# --------------- Helper functions for analytic acquisition functions. ---------------
def _scaled_improvement(
mean: Tensor, sigma: Tensor, best_f: Tensor, maximize: bool
) -> Tensor:
"""Returns `u = (mean - best_f) / sigma`, -u if maximize == True."""
u = (mean - best_f) / sigma
return u if maximize else -u
def _ei_helper(u: Tensor) -> Tensor:
"""Computes phi(u) + u * Phi(u), where phi and Phi are the standard normal
pdf and cdf, respectively. This is used to compute Expected Improvement.
"""
return phi(u) + u * Phi(u)
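# Illustrative sketch (not part of the original module): a sanity check of the
# closed form EI = sigma * _ei_helper(u) against a plain Monte-Carlo estimate of
# E[max(y - best_f, 0)] with y ~ N(mean, sigma^2); the values below are made up.
def _example_ei_closed_form_vs_mc(n: int = 100_000):
    mean, sigma, best_f = torch.tensor(0.3), torch.tensor(0.7), torch.tensor(0.5)
    u = _scaled_improvement(mean, sigma, best_f, maximize=True)
    analytic = sigma * _ei_helper(u)
    y = mean + sigma * torch.randn(n)
    mc = (y - best_f).clamp_min(0.0).mean()
    return analytic, mc  # should agree up to MC error (~1e-2 here)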
def _log_ei_helper(u: Tensor) -> Tensor:
"""Accurately computes log(phi(u) + u * Phi(u)) in a differentiable manner for u in
[-10^100, 10^100] in double precision, and [-10^20, 10^20] in single precision.
Beyond these intervals, a basic squaring of u can lead to floating point overflow.
In contrast, the implementation in _ei_helper only yields usable gradients down to
u ~ -10. As a consequence, _log_ei_helper improves the range of inputs for which a
backward pass yields usable gradients by many orders of magnitude.
"""
if not (u.dtype == torch.float32 or u.dtype == torch.float64):
raise TypeError(
f"LogExpectedImprovement only supports torch.float32 and torch.float64 "
f"dtypes, but received {u.dtype = }."
)
# The function has two branching decisions. The first is u < bound, and in this
# case, just taking the logarithm of the naive _ei_helper implementation works.
bound = -1
u_upper = u.masked_fill(u < bound, bound) # mask u to avoid NaNs in gradients
log_ei_upper = _ei_helper(u_upper).log()
# When u <= bound, we need to be more careful and rearrange the EI formula as
# log(phi(u)) + log(1 - exp(w)), where w = log(abs(u) * Phi(u) / phi(u)).
# To this end, a second branch is necessary, depending on whether or not u is
# smaller than approximately the negative inverse square root of the machine
    # precision. Below this point, numerical issues in computing log(1 - exp(w)) occur
    # as w approaches zero from below, even though the relative contribution to log_ei
    # vanishes in machine precision at that point.
neg_inv_sqrt_eps = -1e6 if u.dtype == torch.float64 else -1e3
    # mask u to avoid NaNs in gradients in the first and second branch
u_lower = u.masked_fill(u > bound, bound)
u_eps = u_lower.masked_fill(u < neg_inv_sqrt_eps, neg_inv_sqrt_eps)
# compute the logarithm of abs(u) * Phi(u) / phi(u) for moderately large negative u
w = _log_abs_u_Phi_div_phi(u_eps)
# 1) Now, we use a special implementation of log(1 - exp(w)) for moderately
# large negative numbers, and
# 2) capture the leading order of log(1 - exp(w)) for very large negative numbers.
# The second special case is technically only required for single precision numbers
# but does "the right thing" regardless.
log_ei_lower = log_phi(u) + (
torch.where(
u > neg_inv_sqrt_eps,
log1mexp(w),
# The contribution of the next term relative to log_phi vanishes when
# w_lower << eps but captures the leading order of the log1mexp term.
-2 * u_lower.abs().log(),
)
)
return torch.where(u > bound, log_ei_upper, log_ei_lower)
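# Illustrative sketch (not part of the original module): for moderate u,
# `_log_ei_helper` agrees with the naive `_ei_helper(u).log()`, while for very
# negative u the naive version underflows to -inf and the robust version stays
# finite and differentiable.
def _example_log_ei_helper_ranges():
    u = torch.tensor([1.0, -5.0, -50.0], dtype=torch.float64)
    naive = _ei_helper(u).log()  # underflows to -inf at u = -50
    robust = _log_ei_helper(u)  # finite for the entire range
    return naive, robust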
def _log_abs_u_Phi_div_phi(u: Tensor) -> Tensor:
"""Computes log(abs(u) * Phi(u) / phi(u)), where phi and Phi are the normal pdf
and cdf, respectively. The function is valid for u < 0.
NOTE: In single precision arithmetic, the function becomes numerically unstable for
u < -1e3. For this reason, a second branch in _log_ei_helper is necessary to handle
this regime, where this function approaches -abs(u)^-2 asymptotically.
The implementation is based on the following implementation of the logarithm of
the scaled complementary error function (i.e. erfcx). Since we only require the
positive branch for _log_ei_helper, _log_abs_u_Phi_div_phi does not have a branch,
but is only valid for u < 0 (so that _neg_inv_sqrt2 * u > 0).
def logerfcx(x: Tensor) -> Tensor:
return torch.where(
x < 0,
torch.erfc(x.masked_fill(x > 0, 0)).log() + x**2,
torch.special.erfcx(x.masked_fill(x < 0, 0)).log(),
)
Further, it is important for numerical accuracy to move u.abs() into the
logarithm, rather than adding u.abs().log() to logerfcx. This is the reason
for the rather complex name of this function: _log_abs_u_Phi_div_phi.
"""
# get_constants_like allocates tensors with the appropriate dtype and device and
# caches the result, which improves efficiency.
a, b = get_constants_like(values=(_neg_inv_sqrt2, _log_sqrt_pi_div_2), ref=u)
return torch.log(torch.special.erfcx(a * u) * u.abs()) + b
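# Illustrative sketch (not part of the library): a quick numerical check of the
# extended input range discussed in the docstrings above. For moderately negative
# inputs both paths agree; for strongly negative inputs the naive log of
# `_ei_helper` underflows while `_log_ei_helper` stays finite. Wrapped in a
# function so nothing runs at import time; the `_example_` name is hypothetical.
def _example_log_ei_stability() -> None:
    u = torch.tensor([-1.0, -5.0, -40.0], dtype=torch.float64)
    naive = _ei_helper(u).log()  # -inf at u = -40 (phi and Phi underflow)
    stable = _log_ei_helper(u)  # finite and differentiable over the whole range
    print(naive)
    print(stable)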
def _get_noiseless_fantasy_model(
model: FixedNoiseGP, batch_X_observed: Tensor, Y_fantasized: Tensor
) -> FixedNoiseGP:
r"""Construct a fantasy model from a fitted model and provided fantasies.
The fantasy model uses the hyperparameters from the original fitted model and
assumes the fantasies are noiseless.
Args:
model: a fitted FixedNoiseGP
batch_X_observed: A `b x n x d` tensor of inputs where `b` is the number of
fantasies.
Y_fantasized: A `b x n` tensor of fantasized targets where `b` is the number of
fantasies.
Returns:
The fantasy model.
"""
# initialize a copy of FixedNoiseGP on the original training inputs
# this makes FixedNoiseGP a non-batch GP, so that the same hyperparameters
# are used across all batches (by default, a GP with batched training data
# uses independent hyperparameters for each batch).
fantasy_model = FixedNoiseGP(
train_X=model.train_inputs[0],
train_Y=model.train_targets.unsqueeze(-1),
train_Yvar=model.likelihood.noise_covar.noise.unsqueeze(-1),
)
# update training inputs/targets to be batch mode fantasies
fantasy_model.set_train_data(
inputs=batch_X_observed, targets=Y_fantasized, strict=False
)
# use noiseless fantasies
fantasy_model.likelihood.noise_covar.noise = torch.full_like(Y_fantasized, 1e-7)
# load hyperparameters from original model
state_dict = deepcopy(model.state_dict())
fantasy_model.load_state_dict(state_dict)
return fantasy_model
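# Illustrative sketch (not part of the library): constructs a noiseless fantasy
# model from a toy FixedNoiseGP. The data below is made up purely to show the
# expected shapes (`b = 3` fantasies over `n = 8` observed inputs); the
# `_example_` name is hypothetical and nothing runs at import time.
def _example_noiseless_fantasy_model() -> FixedNoiseGP:
    train_X = torch.rand(8, 2, dtype=torch.float64)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    train_Yvar = torch.full_like(train_Y, 1e-2)
    model = FixedNoiseGP(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
    batch_X_observed = train_X.expand(3, 8, 2)  # same inputs for each fantasy
    Y_fantasized = torch.randn(3, 8, dtype=torch.float64)
    return _get_noiseless_fantasy_model(
        model=model, batch_X_observed=batch_X_observed, Y_fantasized=Y_fantasized
    )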
def _preprocess_constraint_bounds(
acqf: Union[LogConstrainedExpectedImprovement, ConstrainedExpectedImprovement],
constraints: Dict[int, Tuple[Optional[float], Optional[float]]],
) -> None:
r"""Set up constraint bounds.
Args:
constraints: A dictionary of the form `{i: [lower, upper]}`, where
`i` is the output index, and `lower` and `upper` are lower and upper
bounds on that output (resp. interpreted as -Inf / Inf if None)
"""
con_lower, con_lower_inds = [], []
con_upper, con_upper_inds = [], []
con_both, con_both_inds = [], []
con_indices = list(constraints.keys())
if len(con_indices) == 0:
raise ValueError("There must be at least one constraint.")
if acqf.objective_index in con_indices:
raise ValueError(
"Output corresponding to objective should not be a constraint."
)
for k in con_indices:
if constraints[k][0] is not None and constraints[k][1] is not None:
if constraints[k][1] <= constraints[k][0]:
raise ValueError("Upper bound is less than the lower bound.")
con_both_inds.append(k)
con_both.append([constraints[k][0], constraints[k][1]])
elif constraints[k][0] is not None:
con_lower_inds.append(k)
con_lower.append(constraints[k][0])
elif constraints[k][1] is not None:
con_upper_inds.append(k)
con_upper.append(constraints[k][1])
# tensor-based indexing is much faster than list-based advanced indexing
for name, indices in [
("con_lower_inds", con_lower_inds),
("con_upper_inds", con_upper_inds),
("con_both_inds", con_both_inds),
("con_both", con_both),
("con_lower", con_lower),
("con_upper", con_upper),
]:
acqf.register_buffer(name, tensor=torch.as_tensor(indices))
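# Illustration of the case-work above: for
#   constraints = {1: (0.0, None), 2: (None, 1.0), 3: (-1.0, 1.0)}
# the buffers registered on `acqf` are
#   con_lower_inds = [1], con_lower = [0.0]          (lower bound only)
#   con_upper_inds = [2], con_upper = [1.0]          (upper bound only)
#   con_both_inds  = [3], con_both  = [[-1.0, 1.0]]  (two-sided bound)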
def _compute_log_prob_feas(
acqf: Union[LogConstrainedExpectedImprovement, ConstrainedExpectedImprovement],
means: Tensor,
sigmas: Tensor,
) -> Tensor:
r"""Compute logarithm of the feasibility probability for each batch of X.
Args:
means: A `(b) x m`-dim Tensor of means.
sigmas: A `(b) x m`-dim Tensor of standard deviations.
Returns:
A `b`-dim tensor of log feasibility probabilities
Note: This function does case-work for upper bound, lower bound, and both-sided
bounds. Another way to do it would be to use `inf` and `-inf` for the
one-sided bounds and use the logic for the both-sided case. But this
causes an issue with autograd since we get 0 * inf.
TODO: Investigate further.
"""
acqf.to(device=means.device)
log_prob = torch.zeros_like(means[..., 0])
if len(acqf.con_lower_inds) > 0:
i = acqf.con_lower_inds
dist_l = (acqf.con_lower - means[..., i]) / sigmas[..., i]
log_prob = log_prob + log_Phi(-dist_l).sum(dim=-1) # 1 - Phi(x) = Phi(-x)
if len(acqf.con_upper_inds) > 0:
i = acqf.con_upper_inds
dist_u = (acqf.con_upper - means[..., i]) / sigmas[..., i]
log_prob = log_prob + log_Phi(dist_u).sum(dim=-1)
if len(acqf.con_both_inds) > 0:
i = acqf.con_both_inds
con_lower, con_upper = acqf.con_both[:, 0], acqf.con_both[:, 1]
# scaled distance to lower and upper constraint boundary:
dist_l = (con_lower - means[..., i]) / sigmas[..., i]
dist_u = (con_upper - means[..., i]) / sigmas[..., i]
log_prob = log_prob + log_prob_normal_in(a=dist_l, b=dist_u).sum(dim=-1)
return log_prob
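# Illustrative sketch (not part of the library): the case-work above reduces to
# standard normal CDF evaluations. For a single outcome with mean `mu`, standard
# deviation `sigma`, and a two-sided bound (lower, upper), the log feasibility
# probability is log(Phi((upper - mu) / sigma) - Phi((lower - mu) / sigma)); the
# one-sided cases drop one of the terms. The direct subtraction below is only for
# illustration and is less numerically stable than `log_prob_normal_in` used above.
def _example_log_prob_feasible(
    mu: float, sigma: float, lower: float, upper: float
) -> Tensor:
    standard_normal = torch.distributions.Normal(0.0, 1.0)
    dist_l = torch.tensor((lower - mu) / sigma)
    dist_u = torch.tensor((upper - mu) / sigma)
    return torch.log(standard_normal.cdf(dist_u) - standard_normal.cdf(dist_l))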
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Prior-Guided Acquisition Functions
References
.. [Hvarfner2022]
C. Hvarfner, D. Stoll, A. Souza, M. Lindauer, F. Hutter, L. Nardi. PiBO:
Augmenting Acquisition Functions with User Beliefs for Bayesian Optimization.
ICLR 2022.
"""
from __future__ import annotations
from typing import Optional
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.monte_carlo import SampleReducingMCAcquisitionFunction
from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
from torch import Tensor
from torch.nn import Module
class PriorGuidedAcquisitionFunction(AcquisitionFunction):
r"""Class for weighting acquisition functions by a prior distribution.
Supports MC and batch acquisition functions via
`SampleReducingMCAcquisitionFunction`.
See [Hvarfner2022]_ for details.
"""
def __init__(
self,
acq_function: AcquisitionFunction,
prior_module: Module,
log: bool = False,
prior_exponent: float = 1.0,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Initialize the prior-guided acquisition function.
Args:
acq_function: The base acquisition function.
prior_module: A Module that computes the probability
(or log probability) for the provided inputs.
`prior_module.forward` should take a `batch_shape x q`-dim
tensor of inputs and return a `batch_shape x q`-dim tensor
of probabilities.
log: A boolean that should be true if the acquisition function emits a
log-transformed value and the prior module emits a log probability.
prior_exponent: The exponent applied to the prior. This can be used
for example to decay the effect the prior over time as in
[Hvarfner2022]_.
X_pending: `n x d` Tensor with `n` `d`-dim design points that have
been submitted for evaluation but have not yet been evaluated.
"""
super().__init__(model=acq_function.model)
self.acq_func = acq_function
self.prior_module = prior_module
self._log = log
self._prior_exponent = prior_exponent
self._is_sample_reducing_af = isinstance(
acq_function, SampleReducingMCAcquisitionFunction
)
self.set_X_pending(X_pending=X_pending)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Compute the acquisition function weighted by the prior."""
# batch_shape x q
prior = self.prior_module(X)
if self._is_sample_reducing_af:
# sample_shape x batch_shape x q
af_val = self.acq_func._non_reduced_forward(X)
else:
if prior.shape[-1] > 1:
raise NotImplementedError(
"q-batches with q>1 are only supported using "
"SampleReducingMCAcquisitionFunction."
)
# batch_shape x q
af_val = self.acq_func(X).unsqueeze(-1)
if self._log:
weighted_af_val = af_val + prior * self._prior_exponent
else:
weighted_af_val = af_val * prior.pow(self._prior_exponent)
if self._is_sample_reducing_af:
return self.acq_func._sample_reduction(
self.acq_func._q_reduction(weighted_af_val)
)
return weighted_af_val.squeeze(-1) # squeeze q-dim
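# Illustrative usage sketch (not part of the library): wraps an analytic EI
# acquisition function with a hypothetical Gaussian prior over the inputs. The
# `_Example*` names are made up for illustration; any `prior_module` whose
# `forward` returns a `batch_shape x q` tensor of (possibly unnormalized)
# densities would do. Nothing below runs at import time.
class _ExampleGaussianInputPrior(Module):
    def __init__(self, loc: Tensor, scale: Tensor) -> None:
        super().__init__()
        self.register_buffer("loc", loc)
        self.register_buffer("scale", scale)

    def forward(self, X: Tensor) -> Tensor:
        from torch.distributions import Normal

        # product of independent normal densities over the input dimensions
        return Normal(self.loc, self.scale).log_prob(X).sum(dim=-1).exp()


def _example_prior_guided_ei(model, best_f: float, d: int) -> Tensor:
    # `model` is assumed to be a fitted single-output BoTorch model.
    import torch

    from botorch.acquisition.analytic import ExpectedImprovement

    ei = ExpectedImprovement(model=model, best_f=best_f)
    prior = _ExampleGaussianInputPrior(loc=torch.zeros(d), scale=torch.ones(d))
    pi_ei = PriorGuidedAcquisitionFunction(acq_function=ei, prior_module=prior)
    return pi_ei(torch.rand(5, 1, d))  # 5 t-batches with q = 1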
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Abstract base module for decoupled acquisition functions."""
from __future__ import annotations
import warnings
from abc import ABC
from typing import Optional
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.exceptions import BotorchWarning
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.logging import shape_to_str
from botorch.models.model import ModelList
from torch import Tensor
class DecoupledAcquisitionFunction(AcquisitionFunction, ABC):
"""
Abstract base class for decoupled acquisition functions.
A decoupled acquisition function is one where a design may be
evaluated on only a subset of the outcomes. Typically this is
handled by fantasizing: one fantasizes what the partial
observation would be if the design were evaluated on that
subset of outcomes (i.e., one only fantasizes at those
outcomes). The
`X_evaluation_mask` specifies which outcomes should be
evaluated for each design. `X_evaluation_mask` is `q x m`,
where there are q design points in the batch and m outcomes.
In the asynchronous case, where there are n' pending points,
we need to track which outcomes each pending point should be
evaluated on. In this case, we concatenate
`X_pending_evaluation_mask` with `X_evaluation_mask` to obtain
the full evaluation_mask.
This abstract class handles generating and updating an evaluation mask,
which is a boolean tensor indicating which outcomes a given design is
being evaluated on. The evaluation mask has shape `(n' + q) x m`, where
n' is the number of pending points and q represents the new
candidates to be generated.
If `X(_pending)_evaluation_mask` is None, it is assumed that `X(_pending)`
will be evaluated on all outcomes.
"""
def __init__(
self, model: ModelList, X_evaluation_mask: Optional[Tensor] = None, **kwargs
) -> None:
r"""Initialize.
Args:
model: A model
X_evaluation_mask: A `q x m`-dim boolean tensor
indicating which outcomes the decoupled acquisition
function should generate new candidates for.
"""
if not isinstance(model, ModelList):
raise ValueError(f"{self.__class__.__name__} requires using a ModelList.")
super().__init__(model=model, **kwargs)
self.num_outputs = model.num_outputs
self.X_evaluation_mask = X_evaluation_mask
self.X_pending_evaluation_mask = None
self.X_pending = None
@property
def X_evaluation_mask(self) -> Optional[Tensor]:
r"""Get the evaluation indices for the new candidate."""
return self._X_evaluation_mask
@X_evaluation_mask.setter
def X_evaluation_mask(self, X_evaluation_mask: Optional[Tensor] = None) -> None:
r"""Set the evaluation indices for the new candidate."""
if X_evaluation_mask is not None:
# TODO: Add batch support
if (
X_evaluation_mask.ndim != 2
or X_evaluation_mask.shape[-1] != self.num_outputs
):
raise BotorchTensorDimensionError(
"Expected X_evaluation_mask to be `q x m`, but got shape"
f" {shape_to_str(X_evaluation_mask.shape)}."
)
self._X_evaluation_mask = X_evaluation_mask
def set_X_pending(
self,
X_pending: Optional[Tensor] = None,
X_pending_evaluation_mask: Optional[Tensor] = None,
) -> None:
r"""Informs the AF about pending design points for different outcomes.
Args:
X_pending: A `n' x d` Tensor with `n'` `d`-dim design points that have
been submitted for evaluation but have not yet been evaluated.
X_pending_evaluation_mask: A `n' x m`-dim tensor of booleans indicating
for which outputs the pending point is being evaluated on. If
`X_pending_evaluation_mask` is `None`, it is assumed that
`X_pending` will be evaluated on all outcomes.
"""
if X_pending is not None:
if X_pending.requires_grad:
warnings.warn(
"Pending points require a gradient but the acquisition function"
" will not provide a gradient to these points.",
BotorchWarning,
)
self.X_pending = X_pending.detach().clone()
if X_pending_evaluation_mask is not None:
if (
X_pending_evaluation_mask.ndim != 2
or X_pending_evaluation_mask.shape[0] != X_pending.shape[0]
or X_pending_evaluation_mask.shape[1] != self.num_outputs
):
raise BotorchTensorDimensionError(
f"Expected `X_pending_evaluation_mask` of shape "
f"`{X_pending.shape[0]} x {self.num_outputs}`, but "
f"got {shape_to_str(X_pending_evaluation_mask.shape)}."
)
self.X_pending_evaluation_mask = X_pending_evaluation_mask
elif self.X_evaluation_mask is not None:
raise ValueError(
"If `self.X_evaluation_mask` is not None, then "
"`X_pending_evaluation_mask` must be provided."
)
else:
self.X_pending = X_pending
self.X_pending_evaluation_mask = X_pending_evaluation_mask
def construct_evaluation_mask(self, X: Tensor) -> Optional[Tensor]:
r"""Construct the boolean evaluation mask for X and X_pending
Args:
X: A `batch_shape x n x d`-dim tensor of designs.
Returns:
A `n + n' x m`-dim tensor of booleans indicating
which outputs should be evaluated.
"""
if self.X_pending_evaluation_mask is not None:
X_evaluation_mask = self.X_evaluation_mask
if X_evaluation_mask is None:
# evaluate all objectives for X
X_evaluation_mask = torch.ones(
X.shape[-2], self.num_outputs, dtype=torch.bool, device=X.device
)
elif X_evaluation_mask.shape[0] != X.shape[-2]:
raise BotorchTensorDimensionError(
"Expected the -2 dimension of X and X_evaluation_mask to match."
)
# construct mask for X
return torch.cat(
[X_evaluation_mask, self.X_pending_evaluation_mask], dim=-2
)
return self.X_evaluation_mask
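# Illustrative sketch (not part of the library): evaluation masks for a decoupled
# setting with m = 3 outcomes, q = 2 new candidates, and n' = 1 pending point.
# Row i selects the outcomes on which design i is evaluated; concatenating the
# two masks along the point dimension mirrors `construct_evaluation_mask` above.
# The `_example_` name is hypothetical.
def _example_evaluation_masks() -> Tensor:
    # candidate 0 is evaluated on outcomes {0, 2}; candidate 1 on outcome {1}
    X_evaluation_mask = torch.tensor(
        [[True, False, True], [False, True, False]]
    )  # `q x m`
    X_pending_evaluation_mask = torch.tensor([[True, True, False]])  # `n' x m`
    return torch.cat([X_evaluation_mask, X_pending_evaluation_mask], dim=-2)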
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import warnings
from abc import abstractmethod
from typing import List, Optional
import torch
from botorch.acquisition.objective import GenericMCObjective, MCAcquisitionObjective
from botorch.exceptions.errors import BotorchError, BotorchTensorDimensionError
from botorch.models.model import Model
from botorch.posteriors import GPyTorchPosterior
from botorch.utils import apply_constraints
from botorch.utils.transforms import normalize_indices
from torch import Tensor
class MCMultiOutputObjective(MCAcquisitionObjective):
r"""Abstract base class for MC multi-output objectives.
Args:
_is_mo: A boolean denoting whether the objectives are multi-output.
"""
_is_mo: bool = True
@abstractmethod
def forward(self, samples: Tensor, X: Optional[Tensor] = None, **kwargs) -> Tensor:
r"""Evaluate the multi-output objective on the samples.
Args:
samples: A `sample_shape x batch_shape x q x m`-dim Tensor of samples from
a model posterior.
X: A `batch_shape x q x d`-dim Tensor of inputs.
Returns:
A `sample_shape x batch_shape x q x m'`-dim Tensor of objective values with
`m'` the output dimension. This assumes maximization in each output
dimension.
This method is usually not called directly, but via the objective's
`__call__` method.
Example:
>>> # `__call__` method:
>>> samples = sampler(posterior)
>>> outcomes = multi_obj(samples)
"""
pass # pragma: no cover
class GenericMCMultiOutputObjective(GenericMCObjective, MCMultiOutputObjective):
r"""Multi-output objective generated from a generic callable.
Allows to construct arbitrary MC-objective functions from a generic
callable. In order to be able to use gradient-based acquisition function
optimization it should be possible to backpropagate through the callable.
"""
pass
class IdentityMCMultiOutputObjective(MCMultiOutputObjective):
r"""Trivial objective that returns the unaltered samples.
Example:
>>> identity_objective = IdentityMCMultiOutputObjective()
>>> samples = sampler(posterior)
>>> objective = identity_objective(samples)
"""
def __init__(
self, outcomes: Optional[List[int]] = None, num_outcomes: Optional[int] = None
) -> None:
r"""Initialize Objective.
Args:
outcomes: A list of the `m'` output indices to select from the `m`
model outputs.
num_outcomes: The total number of outcomes `m`
"""
super().__init__()
if outcomes is not None:
if len(outcomes) < 2:
raise BotorchTensorDimensionError(
"Must specify at least two outcomes for MOO."
)
if any(i < 0 for i in outcomes):
if num_outcomes is None:
raise BotorchError(
"num_outcomes is required if any outcomes are less than 0."
)
outcomes = normalize_indices(outcomes, num_outcomes)
self.register_buffer("outcomes", torch.tensor(outcomes, dtype=torch.long))
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
if hasattr(self, "outcomes"):
return samples.index_select(-1, self.outcomes.to(device=samples.device))
return samples
class WeightedMCMultiOutputObjective(IdentityMCMultiOutputObjective):
r"""Objective that reweights samples by given weights vector.
Example:
>>> weights = torch.tensor([1.0, -1.0])
>>> weighted_objective = WeightedMCMultiOutputObjective(weights)
>>> samples = sampler(posterior)
>>> objective = weighted_objective(samples)
"""
def __init__(
self,
weights: Tensor,
outcomes: Optional[List[int]] = None,
num_outcomes: Optional[int] = None,
) -> None:
r"""Initialize Objective.
Args:
weights: `m'`-dim tensor of outcome weights.
outcomes: A list of the `m'` indices that the weights should be
applied to.
num_outcomes: the total number of outcomes `m`
"""
super().__init__(outcomes=outcomes, num_outcomes=num_outcomes)
if weights.ndim != 1:
raise BotorchTensorDimensionError(
f"weights must be an 1-D tensor, but got {weights.shape}."
)
elif outcomes is not None and weights.shape[0] != len(outcomes):
raise BotorchTensorDimensionError(
"weights must contain the same number of elements as outcomes, "
f"but got {weights.numel()} weights and {len(outcomes)} outcomes."
)
self.register_buffer("weights", weights)
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
samples = super().forward(samples=samples)
return samples * self.weights.to(samples)
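# Illustrative sketch (not part of the library): negating the second outcome lets
# a maximization-based multi-objective acquisition function handle an outcome
# that should be minimized. The sample tensor is made up purely to show the
# expected shapes; the `_example_` name is hypothetical.
def _example_weighted_objective() -> Tensor:
    weights = torch.tensor([1.0, -1.0])
    objective = WeightedMCMultiOutputObjective(weights=weights)
    samples = torch.rand(16, 4, 3, 2)  # `sample_shape x batch_shape x q x m`
    return objective(samples)  # same shape, with the second outcome negated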
class FeasibilityWeightedMCMultiOutputObjective(MCMultiOutputObjective):
def __init__(
self,
model: Model,
X_baseline: Tensor,
constraint_idcs: List[int],
objective: Optional[MCMultiOutputObjective] = None,
) -> None:
r"""Construct a feasibility weighted objective.
This applies feasibility weighting before calculating the objective value.
Defaults to identity if no constraints or objective is present.
NOTE: By passing in a single-output `MCAcquisitionObjective` as the `objective`,
this can be used as a single-output `MCAcquisitionObjective` as well.
Args:
model: A fitted Model.
X_baseline: An `n x d`-dim tensor of points already observed.
constraint_idcs: The outcome indices of the constraints. Constraints are
handled by weighting the samples according to a sigmoid approximation
of feasibility. A positive constraint outcome implies feasibility.
objective: An optional objective to apply after feasibility-weighting
the samples.
"""
super().__init__()
num_outputs = model.num_outputs
# Get the non-negative indices.
constraint_idcs = [
num_outputs + idx if idx < 0 else idx for idx in constraint_idcs
]
if len(constraint_idcs) != len(set(constraint_idcs)):
raise ValueError("Received duplicate entries for `constraint_idcs`.")
# Extract the indices for objective outcomes.
objective_idcs = [i for i in range(num_outputs) if i not in constraint_idcs]
if len(constraint_idcs) > 0:
# Import locally to avoid circular import.
from botorch.acquisition.utils import get_infeasible_cost
inf_cost = get_infeasible_cost(
X=X_baseline, model=model, objective=lambda y, X: y
)[objective_idcs]
def apply_feasibility_weights(
Y: Tensor, X: Optional[Tensor] = None
) -> Tensor:
return apply_constraints(
obj=Y[..., objective_idcs],
constraints=[lambda Y: -Y[..., i] for i in constraint_idcs],
samples=Y,
# This ensures that the dtype/device is set properly.
infeasible_cost=inf_cost.to(Y),
)
self.apply_feasibility_weights = apply_feasibility_weights
else:
self.apply_feasibility_weights = lambda Y: Y
if objective is None:
self.objective = lambda Y, X: Y
else:
self.objective = objective
self._verify_output_shape = objective._verify_output_shape
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
return self.objective(self.apply_feasibility_weights(samples), X=X)
class UnstandardizeMCMultiOutputObjective(IdentityMCMultiOutputObjective):
r"""Objective that unstandardizes the samples.
TODO: remove this when MultiTask models support outcome transforms.
Example:
>>> unstd_objective = UnstandardizeMCMultiOutputObjective(Y_mean, Y_std)
>>> samples = sampler(posterior)
>>> objective = unstd_objective(samples)
"""
def __init__(
self, Y_mean: Tensor, Y_std: Tensor, outcomes: Optional[List[int]] = None
) -> None:
r"""Initialize objective.
Args:
Y_mean: `m`-dim tensor of outcome means.
Y_std: `m`-dim tensor of outcome standard deviations.
outcomes: A list of `m' <= m` indices that specifies which of the `m` model
outputs should be considered as the outcomes for MOO. If omitted, use
all model outcomes. Typically used for constrained optimization.
"""
if Y_mean.ndim > 1 or Y_std.ndim > 1:
raise BotorchTensorDimensionError(
"Y_mean and Y_std must both be 1-dimensional, but got "
f"{Y_mean.ndim} and {Y_std.ndim}"
)
elif outcomes is not None and len(outcomes) > Y_mean.shape[-1]:
raise BotorchTensorDimensionError(
f"Cannot specify more ({len(outcomes)}) outcomes than are present in "
f"the normalization inputs ({Y_mean.shape[-1]})."
)
super().__init__(outcomes=outcomes, num_outcomes=Y_mean.shape[-1])
if outcomes is not None:
Y_mean = Y_mean.index_select(-1, self.outcomes.to(Y_mean.device))
Y_std = Y_std.index_select(-1, self.outcomes.to(Y_mean.device))
self.register_buffer("Y_mean", Y_mean)
self.register_buffer("Y_std", Y_std)
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
samples = super().forward(samples=samples)
return samples * self.Y_std + self.Y_mean
class AnalyticMultiOutputObjective(torch.nn.Module):
r"""Abstract base class for multi-output analyic objectives.
DEPRECATED - This will be removed in the next version.
"""
def __init__(self, *args, **kwargs) -> None:
"""Initialize objective."""
warnings.warn("AnalyticMultiOutputObjective is deprecated.", DeprecationWarning)
super().__init__(*args, **kwargs)
class IdentityAnalyticMultiOutputObjective(AnalyticMultiOutputObjective):
"""DEPRECATED - This will be removed in the next version."""
def __init__(self):
"""Initialize objective."""
super().__init__()
def forward(self, posterior: GPyTorchPosterior) -> GPyTorchPosterior:
return posterior
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.acquisition.multi_objective.analytic import (
ExpectedHypervolumeImprovement,
MultiObjectiveAnalyticAcquisitionFunction,
)
from botorch.acquisition.multi_objective.max_value_entropy_search import (
qMultiObjectiveMaxValueEntropy,
)
from botorch.acquisition.multi_objective.monte_carlo import (
MultiObjectiveMCAcquisitionFunction,
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.acquisition.multi_objective.multi_fidelity import MOMF
from botorch.acquisition.multi_objective.objective import (
AnalyticMultiOutputObjective,
IdentityAnalyticMultiOutputObjective,
IdentityMCMultiOutputObjective,
MCMultiOutputObjective,
UnstandardizeMCMultiOutputObjective,
WeightedMCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import (
get_default_partitioning_alpha,
prune_inferior_points_multi_objective,
)
__all__ = [
"get_default_partitioning_alpha",
"prune_inferior_points_multi_objective",
"qExpectedHypervolumeImprovement",
"qNoisyExpectedHypervolumeImprovement",
"MOMF",
"qMultiObjectiveMaxValueEntropy",
"AnalyticMultiOutputObjective",
"ExpectedHypervolumeImprovement",
"IdentityAnalyticMultiOutputObjective",
"IdentityMCMultiOutputObjective",
"MCMultiOutputObjective",
"MultiObjectiveAnalyticAcquisitionFunction",
"MultiObjectiveMCAcquisitionFunction",
"UnstandardizeMCMultiOutputObjective",
"WeightedMCMultiOutputObjective",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Monte-Carlo Acquisition Functions for Multi-objective Bayesian optimization.
References
.. [Daulton2020qehvi]
S. Daulton, M. Balandat, and E. Bakshy. Differentiable Expected Hypervolume
Improvement for Parallel Multi-Objective Bayesian Optimization. Advances in Neural
Information Processing Systems 33, 2020.
.. [Daulton2021nehvi]
S. Daulton, M. Balandat, and E. Bakshy. Parallel Bayesian Optimization of
Multiple Noisy Objectives with Expected Hypervolume Improvement. Advances
in Neural Information Processing Systems 34, 2021.
"""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from copy import deepcopy
from itertools import combinations
from typing import Callable, List, Optional, Union
import torch
from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin
from botorch.acquisition.cached_cholesky import CachedCholeskyMCAcquisitionFunction
from botorch.acquisition.multi_objective.objective import (
IdentityMCMultiOutputObjective,
MCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import (
prune_inferior_points_multi_objective,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.model import Model
from botorch.models.transforms.input import InputPerturbation
from botorch.sampling.base import MCSampler
from botorch.utils.multi_objective.box_decompositions.box_decomposition_list import (
BoxDecompositionList,
)
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.utils import (
_pad_batch_pareto_frontier,
)
from botorch.utils.objective import compute_smoothed_feasibility_indicator
from botorch.utils.torch import BufferDict
from botorch.utils.transforms import (
concatenate_pending_points,
is_fully_bayesian,
match_batch_shape,
t_batch_mode_transform,
)
from torch import Tensor
class MultiObjectiveMCAcquisitionFunction(AcquisitionFunction, MCSamplerMixin, ABC):
r"""Abstract base class for Multi-Objective batch acquisition functions.
NOTE: This does not inherit from `MCAcquisitionFunction` to avoid circular imports.
Args:
_default_sample_shape: The `sample_shape` for the default sampler.
"""
_default_sample_shape = torch.Size([128])
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCMultiOutputObjective] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Optional[Union[Tensor, float]] = 1e-3,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. If not given,
a sampler is generated using `get_sampler`.
NOTE: For posteriors that do not support base samples,
a sampler compatible with intended use case must be provided.
See `ForkedRNGSampler` and `StochasticSampler` as examples.
objective: The MCMultiOutputObjective under which the samples are
evaluated. Defaults to `IdentityMultiOutputObjective()`.
constraints: A list of callables, each mapping a Tensor of dimension
`sample_shape x batch-shape x q x m` to a Tensor of dimension
`sample_shape x batch-shape x q`, where negative values imply
feasibility.
eta: The temperature parameter for the sigmoid function used for the
differentiable approximation of the constraints. In case of a float the
same eta is used for every constraint in constraints. In case of a
tensor the length of the tensor must match the number of provided
constraints. The i-th constraint is then estimated with the i-th
eta value.
X_pending: A `m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation but have not yet been
evaluated.
"""
super().__init__(model=model)
MCSamplerMixin.__init__(self, sampler=sampler)
if objective is None:
objective = IdentityMCMultiOutputObjective()
elif not isinstance(objective, MCMultiOutputObjective):
raise UnsupportedError(
"Only objectives of type MCMultiOutputObjective are supported for "
"Multi-Objective MC acquisition functions."
)
if (
hasattr(model, "input_transform")
and isinstance(model.input_transform, InputPerturbation)
and constraints is not None
):
raise UnsupportedError(
"Constraints are not supported with input perturbations, due to"
"sample q-batch shape being different than that of the inputs."
"Use a composite objective that applies feasibility weighting to"
"samples before calculating the risk measure."
)
self.add_module("objective", objective)
self.constraints = constraints
if constraints:
if type(eta) is not Tensor:
eta = torch.full((len(constraints),), eta)
self.register_buffer("eta", eta)
self.X_pending = None
if X_pending is not None:
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `batch_shape x q x d` X Tensor of t-batches with `q` `d`-dim
design points each, and returns a Tensor with shape `batch_shape'`, where
`batch_shape'` is the broadcasted batch shape of model and input `X`. Should
utilize the result of `set_X_pending` as needed to account for pending function
evaluations.
"""
pass # pragma: no cover
class qExpectedHypervolumeImprovement(MultiObjectiveMCAcquisitionFunction):
def __init__(
self,
model: Model,
ref_point: Union[List[float], Tensor],
partitioning: NondominatedPartitioning,
sampler: Optional[MCSampler] = None,
objective: Optional[MCMultiOutputObjective] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
X_pending: Optional[Tensor] = None,
eta: Optional[Union[Tensor, float]] = 1e-3,
) -> None:
r"""q-Expected Hypervolume Improvement supporting m>=2 outcomes.
See [Daulton2020qehvi]_ for details.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> ref_point = [0.0, 0.0]
>>> qEHVI = qExpectedHypervolumeImprovement(model, ref_point, partitioning)
>>> qehvi = qEHVI(test_X)
Args:
model: A fitted model.
ref_point: A list or tensor with `m` elements representing the reference
point (in the outcome space) w.r.t. which the hypervolume is computed.
This is a reference point for the objective values (i.e. after
applying `objective` to the samples).
partitioning: A `NondominatedPartitioning` module that provides the non-
dominated front and a partitioning of the non-dominated space in hyper-
rectangles. If constraints are present, this partitioning must only
include feasible points.
sampler: The sampler used to draw base samples. If not given,
a sampler is generated using `get_sampler`.
objective: The MCMultiOutputObjective under which the samples are evaluated.
Defaults to `IdentityMultiOutputObjective()`.
constraints: A list of callables, each mapping a Tensor of dimension
`sample_shape x batch-shape x q x m` to a Tensor of dimension
`sample_shape x batch-shape x q`, where negative values imply
feasibility. The acquisition function will compute expected feasible
hypervolume.
X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation but have not yet
been evaluated. Concatenated into `X` upon forward call. Copied and set
to have no gradient.
eta: The temperature parameter for the sigmoid function used for the
differentiable approximation of the constraints. In case of a float the
same eta is used for every constraint in constraints. In case of a
tensor the length of the tensor must match the number of provided
constraints. The i-th constraint is then estimated with the i-th
eta value.
"""
if len(ref_point) != partitioning.num_outcomes:
raise ValueError(
"The length of the reference point must match the number of outcomes. "
f"Got ref_point with {len(ref_point)} elements, but expected "
f"{partitioning.num_outcomes}."
)
ref_point = torch.as_tensor(
ref_point,
dtype=partitioning.pareto_Y.dtype,
device=partitioning.pareto_Y.device,
)
super().__init__(
model=model,
sampler=sampler,
objective=objective,
constraints=constraints,
eta=eta,
X_pending=X_pending,
)
self.register_buffer("ref_point", ref_point)
cell_bounds = partitioning.get_hypercell_bounds()
self.register_buffer("cell_lower_bounds", cell_bounds[0])
self.register_buffer("cell_upper_bounds", cell_bounds[1])
self.q_out = -1
self.q_subset_indices = BufferDict()
def _cache_q_subset_indices(self, q_out: int) -> None:
r"""Cache indices corresponding to all subsets of `q_out`.
This means that consecutive calls to `forward` with the same
`q_out` will not recompute the indices for all (2^q_out - 1) subsets.
Note: this will use more memory than regenerating the indices
for each i and then deleting them, but it will be faster for
repeated evaluations (e.g. during optimization).
Args:
q_out: The batch size of the objectives. This is typically equal
to the q-batch size of `X`. However, if using a set valued
objective (e.g., MVaR) that produces `s` objective values for
each point on the q-batch of `X`, we need to properly account
for each objective while calculating the hypervolume contributions
by using `q_out = q * s`.
"""
if q_out != self.q_out:
indices = list(range(q_out))
tkwargs = {"dtype": torch.long, "device": self.ref_point.device}
self.q_subset_indices = BufferDict(
{
f"q_choose_{i}": torch.tensor(
list(combinations(indices, i)), **tkwargs
)
for i in range(1, q_out + 1)
}
)
self.q_out = q_out
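# Illustration: for `q_out = 3` the cached buffers enumerate every non-empty
# subset of the q-batch, grouped by size:
#   q_choose_1 -> [[0], [1], [2]]
#   q_choose_2 -> [[0, 1], [0, 2], [1, 2]]
#   q_choose_3 -> [[0, 1, 2]]
# These index tensors drive the inclusion-exclusion sum in `_compute_qehvi` below.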
def _compute_qehvi(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Compute the expected (feasible) hypervolume improvement given MC samples.
Args:
samples: A `n_samples x batch_shape x q' x m`-dim tensor of samples.
X: A `batch_shape x q x d`-dim tensor of inputs.
Returns:
A `batch_shape x (model_batch_shape)`-dim tensor of expected hypervolume
improvement for each batch.
"""
# Note that the objective may subset the outcomes (e.g. this will usually happen
# if there are constraints present).
obj = self.objective(samples, X=X)
q = obj.shape[-2]
if self.constraints is not None:
feas_weights = compute_smoothed_feasibility_indicator(
constraints=self.constraints, samples=samples, eta=self.eta
) # `sample_shape x batch-shape x q`
self._cache_q_subset_indices(q_out=q)
batch_shape = obj.shape[:-2]
# this is n_samples x input_batch_shape x num_cells
areas_per_segment = torch.zeros(
*batch_shape,
self.cell_lower_bounds.shape[-2],
dtype=obj.dtype,
device=obj.device,
)
cell_batch_ndim = self.cell_lower_bounds.ndim - 2
sample_batch_view_shape = torch.Size(
[
batch_shape[0] if cell_batch_ndim > 0 else 1,
*[1 for _ in range(len(batch_shape) - max(cell_batch_ndim, 1))],
*self.cell_lower_bounds.shape[1:-2],
]
)
view_shape = (
*sample_batch_view_shape,
self.cell_upper_bounds.shape[-2],
1,
self.cell_upper_bounds.shape[-1],
)
for i in range(1, self.q_out + 1):
# TODO: we could use batches to compute (q choose i) and (q choose q-i)
# simultaneously since subsets of size i and q-i have the same number of
# elements. This would decrease the number of iterations, but increase
# memory usage.
q_choose_i = self.q_subset_indices[f"q_choose_{i}"]
# this tensor is mc_samples x batch_shape x i x q_choose_i x m
obj_subsets = obj.index_select(dim=-2, index=q_choose_i.view(-1))
obj_subsets = obj_subsets.view(
obj.shape[:-2] + q_choose_i.shape + obj.shape[-1:]
)
# since all hyperrectangles share one vertex, the opposite vertex of the
# overlap is given by the component-wise minimum.
# take the minimum in each subset
overlap_vertices = obj_subsets.min(dim=-2).values
# add batch-dim to compute area for each segment (pseudo-pareto-vertex)
# this tensor is mc_samples x batch_shape x num_cells x q_choose_i x m
overlap_vertices = torch.min(
overlap_vertices.unsqueeze(-3), self.cell_upper_bounds.view(view_shape)
)
# subtract cell lower bounds, clamp min at zero
lengths_i = (
overlap_vertices - self.cell_lower_bounds.view(view_shape)
).clamp_min(0.0)
# take product over hyperrectangle side lengths to compute area
# sum over all subsets of size i
areas_i = lengths_i.prod(dim=-1)
# if constraints are present, apply a differentiable approximation of
# the indicator function
if self.constraints is not None:
feas_subsets = feas_weights.index_select(
dim=-1, index=q_choose_i.view(-1)
).view(feas_weights.shape[:-1] + q_choose_i.shape)
areas_i = areas_i * feas_subsets.unsqueeze(-3).prod(dim=-1)
areas_i = areas_i.sum(dim=-1)
# Using the inclusion-exclusion principle, set the sign to be positive
# for subsets of odd sizes and negative for subsets of even sizes
areas_per_segment += (-1) ** (i + 1) * areas_i
# sum over segments and average over MC samples
return areas_per_segment.sum(dim=-1).mean(dim=0)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
posterior = self.model.posterior(X)
samples = self.get_posterior_samples(posterior)
return self._compute_qehvi(samples=samples, X=X)
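# Illustrative usage sketch (not part of the library): assumes a fitted
# two-outcome model `model`, an `n x 2` tensor of observed objective values
# `train_Y`, and a `b x q x d` tensor of candidate designs `X_cand`; all three
# are placeholders, and the `_example_` name is hypothetical.
def _example_qehvi(model, train_Y: Tensor, X_cand: Tensor) -> Tensor:
    ref_point = train_Y.min(dim=0).values - 0.1  # must be dominated by the front
    partitioning = FastNondominatedPartitioning(ref_point=ref_point, Y=train_Y)
    acqf = qExpectedHypervolumeImprovement(
        model=model, ref_point=ref_point, partitioning=partitioning
    )
    return acqf(X_cand)  # `b`-dim tensor of qEHVI values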
class qNoisyExpectedHypervolumeImprovement(
qExpectedHypervolumeImprovement, CachedCholeskyMCAcquisitionFunction
):
def __init__(
self,
model: Model,
ref_point: Union[List[float], Tensor],
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCMultiOutputObjective] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
X_pending: Optional[Tensor] = None,
eta: Optional[Union[Tensor, float]] = 1e-3,
prune_baseline: bool = False,
alpha: float = 0.0,
cache_pending: bool = True,
max_iep: int = 0,
incremental_nehvi: bool = True,
cache_root: bool = True,
marginalize_dim: Optional[int] = None,
) -> None:
r"""q-Noisy Expected Hypervolume Improvement supporting m>=2 outcomes.
See [Daulton2021nehvi]_ for details.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> ref_point = [0.0, 0.0]
>>> qNEHVI = qNoisyExpectedHypervolumeImprovement(model, ref_point, train_X)
>>> qnehvi = qNEHVI(test_X)
Args:
model: A fitted model.
ref_point: A list or tensor with `m` elements representing the reference
point (in the outcome space) w.r.t. which the hypervolume is computed.
This is a reference point for the objective values (i.e. after
applying `objective` to the samples).
X_baseline: A `r x d`-dim Tensor of `r` design points that have already
been observed. These points are considered as potential approximate
pareto-optimal design points.
sampler: The sampler used to draw base samples. If not given,
a sampler is generated using `get_sampler`.
Note: a pareto front is created for each mc sample, which can be
computationally intensive for `m` > 2.
objective: The MCMultiOutputObjective under which the samples are
evaluated. Defaults to `IdentityMultiOutputObjective()`.
constraints: A list of callables, each mapping a Tensor of dimension
`sample_shape x batch-shape x q x m` to a Tensor of dimension
`sample_shape x batch-shape x q`, where negative values imply
feasibility. The acquisition function will compute expected feasible
hypervolume.
X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points that
have been submitted for function evaluation, but have not yet been
evaluated.
eta: The temperature parameter for the sigmoid function used for the
differentiable approximation of the constraints. In case of a float the
same eta is used for every constraint in constraints. In case of a
tensor the length of the tensor must match the number of provided
constraints. The i-th constraint is then estimated with the i-th
eta value. For more details, on this parameter, see the docs of
`compute_smoothed_feasibility_indicator`.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be Pareto optimal and better than the
reference point. This can significantly improve computation time and
is generally recommended. In order to customize pruning parameters,
instead manually call `prune_inferior_points_multi_objective` on
`X_baseline` before instantiating the acquisition function.
alpha: The hyperparameter controlling the approximate non-dominated
partitioning. The default value of 0.0 means an exact partitioning
is used. As the number of objectives `m` increases, consider increasing
this parameter in order to limit computational complexity.
cache_pending: A boolean indicating whether to use cached box
decompositions (CBD) for handling pending points. This is
generally recommended.
max_iep: The maximum number of pending points before the box
decompositions will be recomputed.
incremental_nehvi: A boolean indicating whether to compute the
incremental NEHVI from the `i`th point where `i=1, ..., q`
under sequential greedy optimization, or the full qNEHVI over
`q` points.
cache_root: A boolean indicating whether to cache the root
decomposition over `X_baseline` and use low-rank updates.
"""
if len(ref_point) < 2:
raise ValueError(
"qNoisyExpectedHypervolumeImprovement supports m>=2 outcomes "
f"but ref_point has length {len(ref_point)}, which is smaller than 2."
)
ref_point = torch.as_tensor(
ref_point, dtype=X_baseline.dtype, device=X_baseline.device
)
super(qExpectedHypervolumeImprovement, self).__init__(
model=model,
sampler=sampler,
objective=objective,
constraints=constraints,
eta=eta,
)
self._setup(model=model, cache_root=cache_root)
if X_baseline.ndim > 2:
raise UnsupportedError(
"qNoisyExpectedHypervolumeImprovement does not support batched "
f"X_baseline. Expected 2 dims, got {X_baseline.ndim}."
)
if prune_baseline:
X_baseline = prune_inferior_points_multi_objective(
model=model,
X=X_baseline,
objective=objective,
constraints=constraints,
ref_point=ref_point,
marginalize_dim=marginalize_dim,
)
self.register_buffer("ref_point", ref_point)
self.alpha = alpha
self.q_in = -1
self.q_out = -1
self.q_subset_indices = BufferDict()
self.partitioning = None
# set partitioning class and args
self.p_kwargs = {}
if self.alpha > 0:
self.p_kwargs["alpha"] = self.alpha
self.p_class = NondominatedPartitioning
else:
self.p_class = FastNondominatedPartitioning
self.register_buffer("_X_baseline", X_baseline)
self.register_buffer("_X_baseline_and_pending", X_baseline)
self.register_buffer(
"cache_pending",
torch.tensor(cache_pending, dtype=bool),
)
self.register_buffer(
"_prev_nehvi",
torch.tensor(0.0, dtype=ref_point.dtype, device=ref_point.device),
)
self.register_buffer(
"_max_iep",
torch.tensor(max_iep, dtype=torch.long),
)
self.register_buffer(
"incremental_nehvi",
torch.tensor(incremental_nehvi, dtype=torch.bool),
)
# Base sampler is initialized in _set_cell_bounds.
self.base_sampler = None
if X_pending is not None:
# This will call self._set_cell_bounds if the number of pending
# points is greater than self._max_iep.
self.set_X_pending(X_pending)
# In the case that X_pending is not None, but there are fewer than
# max_iep pending points, the box decompositions are not performed in
# set_X_pending. Therefore, we need to perform a box decomposition over
# f(X_baseline) here.
if X_pending is None or X_pending.shape[-2] <= self._max_iep:
self._set_cell_bounds(num_new_points=X_baseline.shape[0])
# Set q_in=-1 so that self.sampler is updated at the next forward call.
self.q_in = -1
@property
def X_baseline(self) -> Tensor:
r"""Return X_baseline augmented with pending points cached using CBD."""
return self._X_baseline_and_pending
def _compute_initial_hvs(self, obj: Tensor, feas: Optional[Tensor] = None) -> None:
r"""Compute hypervolume dominated by f(X_baseline) under each sample.
Args:
obj: A `sample_shape x batch_shape x n x m`-dim tensor of samples
of objectives.
feas: `sample_shape x batch_shape x n`-dim tensor of samples
of feasibility indicators.
"""
initial_hvs = []
for i, sample in enumerate(obj):
if self.constraints is not None:
sample = sample[feas[i]]
dominated_partitioning = DominatedPartitioning(
ref_point=self.ref_point,
Y=sample,
)
hv = dominated_partitioning.compute_hypervolume()
initial_hvs.append(hv)
self.register_buffer(
"_initial_hvs",
torch.tensor(initial_hvs, dtype=obj.dtype, device=obj.device).view(
self._batch_sample_shape, *obj.shape[-2:]
),
)
def _set_cell_bounds(self, num_new_points: int) -> None:
r"""Compute the box decomposition under each posterior sample.
Args:
num_new_points: The number of new points (beyond the points
in X_baseline) that were used in the previous box decomposition.
In the first box decomposition, this should be the number of points
in X_baseline.
"""
feas = None
if self.X_baseline.shape[0] > 0:
with torch.no_grad():
posterior = self.model.posterior(self.X_baseline)
# Reset sampler, accounting for possible one-to-many transform.
self.q_in = -1
if self.base_sampler is None:
# Initialize the base sampler if needed.
samples = self.get_posterior_samples(posterior)
self.base_sampler = deepcopy(self.sampler)
else:
samples = self.base_sampler(posterior)
n_w = posterior._extended_shape()[-2] // self.X_baseline.shape[-2]
self._set_sampler(q_in=num_new_points * n_w, posterior=posterior)
# cache posterior
if self._cache_root:
# Note that this implicitly uses LinearOperator's caching to check if
# the proper root decomposition has already been cached to
# `posterior.mvn.lazy_covariance_matrix`, which it may have been in
# the call to `self.base_sampler`, and computes it if not found
self._baseline_L = self._compute_root_decomposition(posterior=posterior)
obj = self.objective(samples, X=self.X_baseline)
if self.constraints is not None:
feas = torch.stack(
[c(samples) <= 0 for c in self.constraints], dim=0
).all(dim=0)
else:
sample_shape = (
self.sampler.sample_shape
if self.sampler is not None
else self._default_sample_shape
)
obj = torch.empty(
*sample_shape,
0,
self.ref_point.shape[-1],
dtype=self.ref_point.dtype,
device=self.ref_point.device,
)
self._batch_sample_shape = obj.shape[:-2]
# collapse batch dimensions
# use numel() rather than view(-1) to handle case of no baseline points
new_batch_shape = self._batch_sample_shape.numel()
obj = obj.view(new_batch_shape, *obj.shape[-2:])
if self.constraints is not None and feas is not None:
feas = feas.view(new_batch_shape, *feas.shape[-1:])
if self.partitioning is None and not self.incremental_nehvi:
self._compute_initial_hvs(obj=obj, feas=feas)
if self.ref_point.shape[-1] > 2:
# the partitioning algorithms run faster on the CPU
# due to advanced indexing
ref_point_cpu = self.ref_point.cpu()
obj_cpu = obj.cpu()
if self.constraints is not None and feas is not None:
feas_cpu = feas.cpu()
obj_cpu = [obj_cpu[i][feas_cpu[i]] for i in range(obj.shape[0])]
partitionings = []
for sample in obj_cpu:
partitioning = self.p_class(
ref_point=ref_point_cpu, Y=sample, **self.p_kwargs
)
partitionings.append(partitioning)
self.partitioning = BoxDecompositionList(*partitionings)
else:
# use batched partitioning
obj = _pad_batch_pareto_frontier(
Y=obj,
ref_point=self.ref_point.unsqueeze(0).expand(
obj.shape[0], self.ref_point.shape[-1]
),
feasibility_mask=feas,
)
self.partitioning = self.p_class(
ref_point=self.ref_point, Y=obj, **self.p_kwargs
)
cell_bounds = self.partitioning.get_hypercell_bounds().to(self.ref_point)
cell_bounds = cell_bounds.view(
2, *self._batch_sample_shape, *cell_bounds.shape[-2:]
)
self.register_buffer("cell_lower_bounds", cell_bounds[0])
self.register_buffer("cell_upper_bounds", cell_bounds[1])
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
r"""Informs the acquisition function about pending design points.
Args:
X_pending: `n x d` Tensor with `n` `d`-dim design points that have
been submitted for evaluation but have not yet been evaluated.
"""
if X_pending is None:
self.X_pending = None
else:
if X_pending.requires_grad:
warnings.warn(
"Pending points require a gradient but the acquisition function"
" will not provide a gradient to these points.",
BotorchWarning,
)
X_pending = X_pending.detach().clone()
if self.cache_pending:
X_baseline = torch.cat([self._X_baseline, X_pending], dim=-2)
# Number of new points is the total number of points minus
# (the number of previously cached pending points plus the
# number of baseline points).
num_new_points = X_baseline.shape[0] - self.X_baseline.shape[0]
if num_new_points > 0:
if num_new_points > self._max_iep:
# Set the new baseline points to include pending points.
self.register_buffer("_X_baseline_and_pending", X_baseline)
# Recompute box decompositions.
self._set_cell_bounds(num_new_points=num_new_points)
if not self.incremental_nehvi:
self._prev_nehvi = (
(self._hypervolumes - self._initial_hvs)
.clamp_min(0.0)
.mean()
)
# Set to None so that pending points are not concatenated in
# forward.
self.X_pending = None
# Set q_in=-1 so that self.sampler is updated at the next
# forward call.
self.q_in = -1
else:
self.X_pending = X_pending[-num_new_points:]
else:
self.X_pending = X_pending
@property
def _hypervolumes(self) -> Tensor:
r"""Compute hypervolume over X_baseline under each posterior sample.
Returns:
A `n_samples`-dim tensor of hypervolumes.
"""
return (
self.partitioning.compute_hypervolume()
.to(self.ref_point) # for m > 2, the partitioning is on the CPU
.view(self._batch_sample_shape)
)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
X_full = torch.cat([match_batch_shape(self.X_baseline, X), X], dim=-2)
# Note: it is important to compute the full posterior over `(X_baseline, X)`
# to ensure that we properly sample `f(X)` from the joint distribution
# `f(X_baseline, X) ~ P(f | D)`, given that we have already fixed the sampled
# function values for `f(X_baseline)`.
# TODO: improve efficiency by not recomputing baseline-baseline
# covariance matrix.
posterior = self.model.posterior(X_full)
# Account for possible one-to-many transform and the MCMC batch dimension in
# `SaasFullyBayesianSingleTaskGP`
event_shape_lag = 1 if is_fully_bayesian(self.model) else 2
n_w = (
posterior._extended_shape()[X_full.dim() - event_shape_lag]
// X_full.shape[-2]
)
q_in = X.shape[-2] * n_w
self._set_sampler(q_in=q_in, posterior=posterior)
samples = self._get_f_X_samples(posterior=posterior, q_in=q_in)
# Add previous nehvi from pending points.
return self._compute_qehvi(samples=samples, X=X) + self._prev_nehvi
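# Illustrative usage sketch (not part of the library): assumes a fitted
# two-outcome model `model`, an `n x d` tensor of observed designs `train_X`,
# and a `b x q x d` tensor of candidate designs `X_cand`; all three are
# placeholders, and the `_example_` name and reference point are hypothetical.
def _example_qnehvi(model, train_X: Tensor, X_cand: Tensor) -> Tensor:
    acqf = qNoisyExpectedHypervolumeImprovement(
        model=model,
        ref_point=[0.0, 0.0],  # problem-specific; both objectives assumed >= 0
        X_baseline=train_X,
        prune_baseline=True,  # generally recommended, see the docstring above
    )
    return acqf(X_cand)  # `b`-dim tensor of qNEHVI values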
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Acquisition functions for joint entropy search for Bayesian optimization (JES).
References:
.. [Tu2022]
B. Tu, A. Gandy, N. Kantas and B. Shafei. Joint Entropy Search for Multi-Objective
Bayesian Optimization. Advances in Neural Information Processing Systems, 35.
2022.
"""
from __future__ import annotations
from abc import abstractmethod
from math import pi
from typing import Any, Optional, Tuple, Union
import torch
from botorch import settings
from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin
from botorch.exceptions.errors import UnsupportedError
from botorch.models.model import Model
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.utils import fantasize as fantasize_flag
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
from torch import Tensor
from torch.distributions import Normal
class LowerBoundMultiObjectiveEntropySearch(AcquisitionFunction, MCSamplerMixin):
r"""Abstract base class for the lower bound multi-objective entropy search
acquisition functions.
"""
def __init__(
self,
model: Model,
pareto_sets: Tensor,
pareto_fronts: Tensor,
hypercell_bounds: Tensor,
X_pending: Optional[Tensor] = None,
estimation_type: str = "LB",
num_samples: int = 64,
**kwargs: Any,
) -> None:
r"""Lower bound multi-objective entropy search acquisition function.
Args:
model: A fitted batch model with 'M' number of outputs.
pareto_sets: A `num_pareto_samples x num_pareto_points x d`-dim Tensor
containing the sampled Pareto optimal sets of inputs.
pareto_fronts: A `num_pareto_samples x num_pareto_points x M`-dim Tensor
containing the sampled Pareto optimal sets of outputs.
hypercell_bounds: A `num_pareto_samples x 2 x J x M`-dim Tensor
containing the hyper-rectangle bounds for integration, where `J` is
the number of hyper-rectangles. In the unconstrained case, this gives
the partition of the dominated space. In the constrained case, this
gives the partition of the feasible dominated space union the
infeasible space.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation, but have not yet been evaluated.
estimation_type: A string to determine which entropy estimate is
computed: "0", "LB", "LB2", or "MC".
num_samples: The number of Monte Carlo samples for the Monte Carlo
estimate.
"""
super().__init__(model=model)
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([num_samples]))
MCSamplerMixin.__init__(self, sampler=sampler)
# Batch GP models (e.g. fantasized models) are not currently supported
if isinstance(model, ModelListGP):
train_X = model.models[0].train_inputs[0]
else:
train_X = model.train_inputs[0]
if (model.num_outputs > 1 and train_X.ndim > 3) or (
model.num_outputs == 1 and train_X.ndim > 2
):
raise NotImplementedError(
"Batch GP models (e.g. fantasized models) are not supported."
)
self.initial_model = model
if (pareto_sets is not None and pareto_sets.ndim != 3) or (
pareto_fronts is not None and pareto_fronts.ndim != 3
):
raise UnsupportedError(
"The Pareto set and front should have a shape of "
"`num_pareto_samples x num_pareto_points x input_dim` and "
"`num_pareto_samples x num_pareto_points x num_objectives`, "
"respectively"
)
else:
self.pareto_sets = pareto_sets
self.pareto_fronts = pareto_fronts
if hypercell_bounds.ndim != 4:
raise UnsupportedError(
"The hypercell_bounds should have a shape of "
"`num_pareto_samples x 2 x num_boxes x num_objectives`."
)
else:
self.hypercell_bounds = hypercell_bounds
self.num_pareto_samples = hypercell_bounds.shape[0]
self.estimation_type = estimation_type
estimation_types = ["0", "LB", "LB2", "MC"]
if estimation_type not in estimation_types:
raise NotImplementedError(
"Currently the only supported estimation type are: "
+ ", ".join(f'"{h}"' for h in estimation_types)
+ "."
)
self.set_X_pending(X_pending)
@abstractmethod
def _compute_posterior_statistics(
self, X: Tensor
) -> dict[str, Union[GPyTorchPosterior, Tensor]]:
r"""Compute the posterior statistics.
Args:
X: A `batch_shape x q x d`-dim Tensor of inputs.
Returns:
A dictionary containing the posterior variables used to estimate the
entropy.
- "initial_entropy": A `batch_shape`-dim Tensor containing the entropy of
the Gaussian random variable `p(Y| X, D_n)`.
- "posterior_mean": A `batch_shape x num_pareto_samples x q x 1 x M`-dim
Tensor containing the posterior mean at the input `X`.
- "posterior_variance": A `batch_shape x num_pareto_samples x q x 1 x M`
-dim Tensor containing the posterior variance at the input `X`
excluding the observation noise.
- "observation_noise": A `batch_shape x num_pareto_samples x q x 1 x M`
-dim Tensor containing the observation noise at the input `X`.
- "posterior_with_noise": The posterior distribution at `X` which
includes the observation noise. This is used to compute the marginal
log-probabilities with respect to `p(y| x, D_n)` for `x` in `X`.
"""
pass # pragma: no cover
@abstractmethod
def _compute_monte_carlo_variables(
self, posterior: GPyTorchPosterior
) -> Tuple[Tensor, Tensor]:
r"""Compute the samples and log-probability associated with a posterior
distribution.
Args:
posterior: A posterior distribution.
Returns:
A two-element tuple containing:
- samples: A `num_mc_samples x batch_shape x num_pareto_samples x q x 1
x M`-dim Tensor containing the Monte Carlo samples.
- samples_log_prob: A `num_mc_samples x batch_shape x num_pareto_samples
x q`-dim Tensor containing the log-probabilities of the Monte Carlo
samples.
"""
pass # pragma: no cover
def _compute_lower_bound_information_gain(self, X: Tensor) -> Tensor:
r"""Evaluates the lower bound information gain at the design points `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of `batch_shape` t-batches with `q`
`d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of acquisition values at the given design
points `X`.
"""
posterior_statistics = self._compute_posterior_statistics(X)
initial_entropy = posterior_statistics["initial_entropy"]
post_mean = posterior_statistics["posterior_mean"]
post_var = posterior_statistics["posterior_variance"]
obs_noise = posterior_statistics["observation_noise"]
# Estimate the expected conditional entropy.
# `batch_shape x q` dim Tensor of entropy estimates
if self.estimation_type == "0":
conditional_entropy = _compute_entropy_noiseless(
hypercell_bounds=self.hypercell_bounds,
mean=post_mean,
variance=post_var,
observation_noise=obs_noise,
)
elif self.estimation_type == "LB":
conditional_entropy = _compute_entropy_upper_bound(
hypercell_bounds=self.hypercell_bounds,
mean=post_mean,
variance=post_var,
observation_noise=obs_noise,
only_diagonal=False,
)
elif self.estimation_type == "LB2":
conditional_entropy = _compute_entropy_upper_bound(
hypercell_bounds=self.hypercell_bounds,
mean=post_mean,
variance=post_var,
observation_noise=obs_noise,
only_diagonal=True,
)
elif self.estimation_type == "MC":
posterior_with_noise = posterior_statistics["posterior_with_noise"]
samples, samples_log_prob = self._compute_monte_carlo_variables(
posterior_with_noise
)
conditional_entropy = _compute_entropy_monte_carlo(
hypercell_bounds=self.hypercell_bounds,
mean=post_mean,
variance=post_var,
observation_noise=obs_noise,
samples=samples,
samples_log_prob=samples_log_prob,
)
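        # The lower bound information gain is the entropy of `p(Y | X, D_n)`
        # minus the estimated expected entropy of `p(Y | X, D_n, PF)`, where
        # the expectation is over the sampled Pareto fronts; the per-point
        # conditional entropies are summed over the q-batch to obtain the
        # lower bound on the joint quantity.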
# Sum over the batch.
return initial_entropy - conditional_entropy.sum(dim=-1)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Compute lower bound multi-objective entropy search at the design points
`X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of `batch_shape` t-batches with `q`
`d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of acquisition values at the given design
points `X`.
"""
pass # pragma: no cover
class qLowerBoundMultiObjectiveJointEntropySearch(
LowerBoundMultiObjectiveEntropySearch
):
r"""The acquisition function for the multi-objective joint entropy search, where
the batches `q > 1` are supported through the lower bound formulation.
This acquisition function computes the mutual information between the observation
at a candidate point `X` and the Pareto optimal input-output pairs.
See [Tu2022]_ for a discussion on the estimation procedure.
NOTES:
(i) The estimated acquisition value could be negative.
(ii) The lower bound batch acquisition function might not be monotone in the
sense that adding more elements to the batch does not necessarily increase the
acquisition value. Specifically, the acquisition value can become smaller when
more inputs are added.
"""
def __init__(
self,
model: Model,
pareto_sets: Tensor,
pareto_fronts: Tensor,
hypercell_bounds: Tensor,
X_pending: Optional[Tensor] = None,
estimation_type: str = "LB",
num_samples: int = 64,
**kwargs: Any,
) -> None:
r"""Lower bound multi-objective joint entropy search acquisition function.
Args:
            model: A fitted batch model with `M` outputs.
pareto_sets: A `num_pareto_samples x num_pareto_points x d`-dim Tensor
containing the sampled Pareto optimal sets of inputs.
pareto_fronts: A `num_pareto_samples x num_pareto_points x M`-dim Tensor
containing the sampled Pareto optimal sets of outputs.
hypercell_bounds: A `num_pareto_samples x 2 x J x M`-dim Tensor
containing the hyper-rectangle bounds for integration. In the
unconstrained case, this gives the partition of the dominated space.
In the constrained case, this gives the partition of the feasible
dominated space union the infeasible space.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation, but have not yet been evaluated.
estimation_type: A string to determine which entropy estimate is
computed: "0", "LB", "LB2", or "MC".
num_samples: The number of Monte Carlo samples used for the Monte Carlo
estimate.
"""
super().__init__(
model=model,
pareto_sets=pareto_sets,
pareto_fronts=pareto_fronts,
hypercell_bounds=hypercell_bounds,
X_pending=X_pending,
estimation_type=estimation_type,
num_samples=num_samples,
)
# Condition the model on the sampled pareto optimal points.
# TODO: Apparently, we need to make a call to the posterior otherwise
# we run into a gpytorch runtime error:
# "Fantasy observations can only be added after making predictions with a
# model so that all test independent caches exist."
with fantasize_flag():
with settings.propagate_grads(False):
_ = self.initial_model.posterior(
self.pareto_sets, observation_noise=False
)
# Condition with observation noise.
self.conditional_model = self.initial_model.condition_on_observations(
X=self.initial_model.transform_inputs(self.pareto_sets),
Y=self.pareto_fronts,
)
def _compute_posterior_statistics(
self, X: Tensor
) -> dict[str, Union[Tensor, GPyTorchPosterior]]:
r"""Compute the posterior statistics.
Args:
X: A `batch_shape x q x d`-dim Tensor of inputs.
Returns:
A dictionary containing the posterior variables used to estimate the
entropy.
- "initial_entropy": A `batch_shape`-dim Tensor containing the entropy of
the Gaussian random variable `p(Y| X, D_n)`.
- "posterior_mean": A `batch_shape x num_pareto_samples x q x 1 x M`-dim
Tensor containing the posterior mean at the input `X`.
- "posterior_variance": A `batch_shape x num_pareto_samples x q x 1 x M`
-dim Tensor containing the posterior variance at the input `X`
excluding the observation noise.
- "observation_noise": A `batch_shape x num_pareto_samples x q x 1 x M`
-dim Tensor containing the observation noise at the input `X`.
- "posterior_with_noise": The posterior distribution at `X` which
includes the observation noise. This is used to compute the marginal
log-probabilities with respect to `p(y| x, D_n)` for `x` in `X`.
"""
tkwargs = {"dtype": X.dtype, "device": X.device}
CLAMP_LB = torch.finfo(tkwargs["dtype"]).eps
# Compute the prior entropy term depending on `X`.
initial_posterior_plus_noise = self.initial_model.posterior(
X, observation_noise=True
)
# Additional constant term.
add_term = (
0.5
* self.model.num_outputs
* (1 + torch.log(2 * pi * torch.ones(1, **tkwargs)))
)
# The variance initially has shape `batch_shape x (q*M) x (q*M)`
# prior_entropy has shape `batch_shape`.
initial_entropy = add_term + 0.5 * torch.logdet(
initial_posterior_plus_noise.mvn.covariance_matrix
)
posterior_statistics = {"initial_entropy": initial_entropy}
# Compute the posterior entropy term.
conditional_posterior_with_noise = self.conditional_model.posterior(
X.unsqueeze(-2).unsqueeze(-3), observation_noise=True
)
# `batch_shape x num_pareto_samples x q x 1 x M`
post_mean = conditional_posterior_with_noise.mean.swapaxes(-4, -3)
post_var_with_noise = conditional_posterior_with_noise.variance.clamp_min(
CLAMP_LB
).swapaxes(-4, -3)
# TODO: This computes the observation noise via a second evaluation of the
# posterior. This step could be done better.
conditional_posterior = self.conditional_model.posterior(
X.unsqueeze(-2).unsqueeze(-3), observation_noise=False
)
# `batch_shape x num_pareto_samples x q x 1 x M`
post_var = conditional_posterior.variance.clamp_min(CLAMP_LB).swapaxes(-4, -3)
obs_noise = (post_var_with_noise - post_var).clamp_min(CLAMP_LB)
posterior_statistics["posterior_mean"] = post_mean
posterior_statistics["posterior_variance"] = post_var
posterior_statistics["observation_noise"] = obs_noise
posterior_statistics["posterior_with_noise"] = conditional_posterior_with_noise
return posterior_statistics
def _compute_monte_carlo_variables(
self, posterior: GPyTorchPosterior
) -> Tuple[Tensor, Tensor]:
r"""Compute the samples and log-probability associated with the posterior
distribution that conditions on the Pareto optimal points.
Args:
posterior: The conditional posterior distribution at an input `X`, where
we have also conditioned over the `num_pareto_samples` of optimal
points. Note that this posterior includes the observation noise.
Returns:
A two-element tuple containing
- samples: A `num_mc_samples x batch_shape x num_pareto_samples x q x 1
x M`-dim Tensor containing the Monte Carlo samples.
- samples_log_probs: A `num_mc_samples x batch_shape x num_pareto_samples
x q`-dim Tensor containing the log-probabilities of the Monte Carlo
samples.
"""
# `num_mc_samples x batch_shape x q x num_pareto_samples x 1 x M`
samples = self.get_posterior_samples(posterior)
# `num_mc_samples x batch_shape x q x num_pareto_samples`
if self.model.num_outputs == 1:
samples_log_prob = posterior.mvn.log_prob(samples.squeeze(-1))
else:
samples_log_prob = posterior.mvn.log_prob(samples)
# Swap axes to get the correct shape:
# samples:`num_mc_samples x batch_shape x num_pareto_samples x q x 1 x M`
# log prob:`num_mc_samples x batch_shape x num_pareto_samples x q`
return samples.swapaxes(-4, -3), samples_log_prob.swapaxes(-2, -1)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluates qLowerBoundMultiObjectiveJointEntropySearch at the design
points `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of `batch_shape` t-batches with `q`
`d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of acquisition values at the given design
points `X`.
"""
return self._compute_lower_bound_information_gain(X)
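# Example (illustrative sketch): constructing and evaluating the acquisition
# function above, assuming a fitted multi-output GP `model` and Pareto
# sets/fronts and hypercell bounds obtained beforehand, e.g. via
# `sample_optimal_points` and `compute_sample_box_decomposition` from
# `botorch.acquisition.multi_objective.utils`:
#
#     jes_lb = qLowerBoundMultiObjectiveJointEntropySearch(
#         model=model,
#         pareto_sets=pareto_sets,  # `num_pareto_samples x P x d`
#         pareto_fronts=pareto_fronts,  # `num_pareto_samples x P x M`
#         hypercell_bounds=hypercell_bounds,  # `num_pareto_samples x 2 x J x M`
#         estimation_type="LB",
#     )
#     acq_values = jes_lb(test_X)  # `test_X` is a `batch_shape x q x d` Tensor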
def _compute_entropy_noiseless(
hypercell_bounds: Tensor,
mean: Tensor,
variance: Tensor,
observation_noise: Tensor,
) -> Tensor:
r"""Computes the entropy estimate at the design points `X` assuming noiseless
observations. This is used for the JES-0 and MES-0 estimate.
Args:
        hypercell_bounds: A `num_pareto_samples x 2 x J x M`-dim Tensor containing
the box decomposition bounds, where `J = max(num_boxes)`.
mean: A `batch_shape x num_pareto_samples x q x 1 x M`-dim Tensor containing
the posterior mean at X.
variance: A `batch_shape x num_pareto_samples x q x 1 x M`-dim Tensor
containing the posterior variance at X excluding observation noise.
observation_noise: A `batch_shape x num_pareto_samples x q x 1 x M`-dim
Tensor containing the observation noise at X.
Returns:
A `batch_shape x q`-dim Tensor of entropy estimate at the given design points
`X`.
"""
tkwargs = {"dtype": hypercell_bounds.dtype, "device": hypercell_bounds.device}
CLAMP_LB = torch.finfo(tkwargs["dtype"]).eps
variance_plus_noise = variance + observation_noise
# Standardize the box decomposition bounds and compute normal quantities.
# `batch_shape x num_pareto_samples x q x 2 x J x M`
g = (hypercell_bounds.unsqueeze(-4) - mean.unsqueeze(-2)) / torch.sqrt(
variance.unsqueeze(-2)
)
normal = Normal(torch.zeros_like(g), torch.ones_like(g))
gcdf = normal.cdf(g)
gpdf = torch.exp(normal.log_prob(g))
g_times_gpdf = g * gpdf
# Compute the differences between the upper and lower terms.
Wjm = (gcdf[..., 1, :, :] - gcdf[..., 0, :, :]).clamp_min(CLAMP_LB)
Vjm = g_times_gpdf[..., 1, :, :] - g_times_gpdf[..., 0, :, :]
# Compute W.
Wj = torch.exp(torch.sum(torch.log(Wjm), dim=-1, keepdims=True))
W = torch.sum(Wj, dim=-2, keepdims=True).clamp_max(1.0)
# Compute the sum of ratios.
ratios = 0.5 * (Wj * (Vjm / Wjm)) / W
# `batch_shape x num_pareto_samples x q x 1 x 1`
ratio_term = torch.sum(ratios, dim=(-2, -1), keepdims=True)
# Compute the logarithm of the variance.
log_term = 0.5 * torch.log(variance_plus_noise).sum(-1, keepdims=True)
# `batch_shape x num_pareto_samples x q x 1 x 1`
log_term = log_term + torch.log(W)
# Additional constant term.
M_plus_K = mean.shape[-1]
add_term = 0.5 * M_plus_K * (1 + torch.log(torch.ones(1, **tkwargs) * 2 * pi))
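    # Up to the constant term above, the estimate computed below is
    #   0.5 * sum_m log(var_m + noise_m) + log(W)
    #   - sum_{j, m} W_j * V_{jm} / (2 * W_{jm} * W),
    # averaged over the `num_pareto_samples` dimension at the end.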
# `batch_shape x num_pareto_samples x q`
entropy = add_term + (log_term - ratio_term).squeeze(-1).squeeze(-1)
return entropy.mean(-2)
def _compute_entropy_upper_bound(
hypercell_bounds: Tensor,
mean: Tensor,
variance: Tensor,
observation_noise: Tensor,
only_diagonal: bool = False,
) -> Tensor:
r"""Computes the entropy upper bound at the design points `X`. This is used for
the JES-LB and MES-LB estimate. If `only_diagonal` is True, then this computes
the entropy estimate for the JES-LB2 and MES-LB2.
Args:
        hypercell_bounds: A `num_pareto_samples x 2 x J x M`-dim Tensor containing
the box decomposition bounds, where `J` = max(num_boxes).
mean: A `batch_shape x num_pareto_samples x q x 1 x M`-dim Tensor containing
the posterior mean at X.
variance: A `batch_shape x num_pareto_samples x q x 1 x M`-dim Tensor
containing the posterior variance at X excluding observation noise.
observation_noise: A `batch_shape x num_pareto_samples x q x 1 x M`-dim
Tensor containing the observation noise at X.
only_diagonal: If true, we only compute the diagonal elements of the variance.
Returns:
A `batch_shape x q`-dim Tensor of entropy estimate at the given design points
`X`.
"""
tkwargs = {"dtype": hypercell_bounds.dtype, "device": hypercell_bounds.device}
CLAMP_LB = torch.finfo(tkwargs["dtype"]).eps
variance_plus_noise = variance + observation_noise
# Standardize the box decomposition bounds and compute normal quantities.
# `batch_shape x num_pareto_samples x q x 2 x J x M`
g = (hypercell_bounds.unsqueeze(-4) - mean.unsqueeze(-2)) / torch.sqrt(
variance.unsqueeze(-2)
)
normal = Normal(torch.zeros_like(g), torch.ones_like(g))
gcdf = normal.cdf(g)
gpdf = torch.exp(normal.log_prob(g))
g_times_gpdf = g * gpdf
# Compute the differences between the upper and lower terms.
Wjm = (gcdf[..., 1, :, :] - gcdf[..., 0, :, :]).clamp_min(CLAMP_LB)
Vjm = g_times_gpdf[..., 1, :, :] - g_times_gpdf[..., 0, :, :]
Gjm = gpdf[..., 1, :, :] - gpdf[..., 0, :, :]
# Compute W.
Wj = torch.exp(torch.sum(torch.log(Wjm), dim=-1, keepdims=True))
W = torch.sum(Wj, dim=-2, keepdims=True).clamp_max(1.0)
Cjm = Gjm / Wjm
# First moment:
Rjm = Cjm * Wj / W
    # `batch_shape x num_pareto_samples x q x 1 x M`
mom1 = mean - torch.sqrt(variance) * Rjm.sum(-2, keepdims=True)
# diagonal weighted sum
    # `batch_shape x num_pareto_samples x q x 1 x M`
diag_weighted_sum = (Wj * variance * Vjm / Wjm / W).sum(-2, keepdims=True)
if only_diagonal:
# `batch_shape x num_pareto_samples x q x 1 x M`
mean_squared = mean.pow(2)
cross_sum = -2 * (mean * torch.sqrt(variance) * Rjm).sum(-2, keepdims=True)
# `batch_shape x num_pareto_samples x q x 1 x M`
mom2 = variance_plus_noise - diag_weighted_sum + cross_sum + mean_squared
var = (mom2 - mom1.pow(2)).clamp_min(CLAMP_LB)
        # `batch_shape x num_pareto_samples x q`
log_det_term = 0.5 * torch.log(var).sum(dim=-1).squeeze(-1)
else:
# First moment x First moment
        # `batch_shape x num_pareto_samples x q x 1 x M x M`
cross_mom1 = torch.einsum("...i,...j->...ij", mom1, mom1)
# Second moment:
        # `batch_shape x num_pareto_samples x q x 1 x M x M`
# firstly compute the general terms
mom2_cross1 = -torch.einsum(
"...i,...j->...ij", mean, torch.sqrt(variance) * Cjm
)
mom2_cross2 = -torch.einsum(
"...i,...j->...ji", mean, torch.sqrt(variance) * Cjm
)
mom2_mean_squared = torch.einsum("...i,...j->...ij", mean, mean)
mom2_weighted_sum = (
(mom2_cross1 + mom2_cross2) * Wj.unsqueeze(-1) / W.unsqueeze(-1)
).sum(-3, keepdims=True)
mom2_weighted_sum = mom2_weighted_sum + mom2_mean_squared
# Compute the additional off-diagonal terms.
mom2_off_diag = torch.einsum(
"...i,...j->...ij", torch.sqrt(variance) * Cjm, torch.sqrt(variance) * Cjm
)
mom2_off_diag_sum = (mom2_off_diag * Wj.unsqueeze(-1) / W.unsqueeze(-1)).sum(
-3, keepdims=True
)
# Compute the diagonal terms and subtract the diagonal computed before.
init_diag = torch.diagonal(mom2_off_diag_sum, dim1=-2, dim2=-1)
diag_weighted_sum = torch.diag_embed(
variance_plus_noise - diag_weighted_sum - init_diag
)
mom2 = mom2_weighted_sum + mom2_off_diag_sum + diag_weighted_sum
# Compute the variance
var = (mom2 - cross_mom1).squeeze(-3)
# Jitter the diagonal.
# The jitter is probably not needed here at all.
jitter_diag = 1e-6 * torch.diag_embed(torch.ones(var.shape[:-1], **tkwargs))
log_det_term = 0.5 * torch.logdet(var + jitter_diag)
# Additional terms.
M_plus_K = mean.shape[-1]
add_term = 0.5 * M_plus_K * (1 + torch.log(torch.ones(1, **tkwargs) * 2 * pi))
    # `batch_shape x num_pareto_samples x q`
entropy = add_term + log_det_term
return entropy.mean(-2)
def _compute_entropy_monte_carlo(
hypercell_bounds: Tensor,
mean: Tensor,
variance: Tensor,
observation_noise: Tensor,
samples: Tensor,
samples_log_prob: Tensor,
) -> Tensor:
r"""Computes the Monte Carlo entropy at the design points `X`. This is used for
the JES-MC and MES-MC estimate.
Args:
hypercell_bounds: A `num_pareto_samples x 2 x J x M`-dim Tensor containing
the box decomposition bounds, where `J` = max(num_boxes).
mean: A `batch_shape x num_pareto_samples x q x 1 x M`-dim Tensor containing
the posterior mean at X.
variance: A `batch_shape x num_pareto_samples x q x 1 x M`-dim Tensor
containing the posterior variance at X excluding observation noise.
observation_noise: A `batch_shape x num_pareto_samples x q x 1 x M`-dim
Tensor containing the observation noise at X.
samples: A `num_mc_samples x batch_shape x num_pareto_samples x q x 1 x M`-dim
Tensor containing the noisy samples at `X` from the posterior conditioned
on the Pareto optimal points.
samples_log_prob: A `num_mc_samples x batch_shape x num_pareto_samples
x q`-dim Tensor containing the log probability densities of the samples.
Returns:
A `batch_shape x q`-dim Tensor of entropy estimate at the given design points
`X`.
"""
tkwargs = {"dtype": hypercell_bounds.dtype, "device": hypercell_bounds.device}
CLAMP_LB = torch.finfo(tkwargs["dtype"]).eps
variance_plus_noise = variance + observation_noise
####################################################################
# Standardize the box decomposition bounds and compute normal quantities.
# `batch_shape x num_pareto_samples x q x 2 x J x M`
g = (hypercell_bounds.unsqueeze(-4) - mean.unsqueeze(-2)) / torch.sqrt(
variance.unsqueeze(-2)
)
# `batch_shape x num_pareto_samples x q x 1 x M`
rho = torch.sqrt(variance / variance_plus_noise)
# Compute the initial normal quantities.
normal = Normal(torch.zeros_like(g), torch.ones_like(g))
gcdf = normal.cdf(g)
# Compute the differences between the upper and lower terms.
Wjm = (gcdf[..., 1, :, :] - gcdf[..., 0, :, :]).clamp_min(CLAMP_LB)
# Compute W.
Wj = torch.exp(torch.sum(torch.log(Wjm), dim=-1, keepdims=True))
# `batch_shape x num_pareto_samples x q x 1 x 1`
W = torch.sum(Wj, dim=-2, keepdims=True).clamp_max(1.0)
####################################################################
g = g.unsqueeze(0)
rho = rho.unsqueeze(0).unsqueeze(-2)
# `num_mc_samples x batch_shape x num_pareto_samples x q x 1 x 1 x M`
z = ((samples - mean) / torch.sqrt(variance_plus_noise)).unsqueeze(-2)
# `num_mc_samples x batch_shape x num_pareto_samples x q x 2 x J x M`
# Clamping here is important because `1 - rho^2 = 0` at an input where
# observation noise is zero.
g_new = (g - rho * z) / torch.sqrt((1 - rho * rho).clamp_min(CLAMP_LB))
# Compute the initial normal quantities.
normal_new = Normal(torch.zeros_like(g_new), torch.ones_like(g_new))
gcdf_new = normal_new.cdf(g_new)
# Compute the differences between the upper and lower terms.
Wjm_new = (gcdf_new[..., 1, :, :] - gcdf_new[..., 0, :, :]).clamp_min(CLAMP_LB)
# Compute W+.
Wj_new = torch.exp(torch.sum(torch.log(Wjm_new), dim=-1, keepdims=True))
# `num_mc_samples x batch_shape x num_pareto_samples x q x 1 x 1`
W_new = torch.sum(Wj_new, dim=-2, keepdims=True).clamp_max(1.0)
####################################################################
# W_ratio = W+ / W
W_ratio = torch.exp(torch.log(W_new) - torch.log(W).unsqueeze(0))
samples_log_prob = samples_log_prob.unsqueeze(-1).unsqueeze(-1)
# Compute the Monte Carlo average: - E[W_ratio * log(W+ p(y))] + log(W)
log_term = torch.log(W_new) + samples_log_prob
mc_estimate = -(W_ratio * log_term).mean(0)
    # `batch_shape x num_pareto_samples x q`
entropy = (mc_estimate + torch.log(W)).squeeze(-1).squeeze(-1)
# An alternative Monte Carlo estimate: - E[W_ratio * log(W_ratio p(y))]
# log_term = torch.log(W_ratio) + samples_log_prob
# mc_estimate = - (W_ratio * log_term).mean(0)
    # # `batch_shape x num_pareto_samples x q`
# entropy = mc_estimate.squeeze(-1).squeeze(-1)
return entropy.mean(-2)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for multi-objective acquisition functions.
"""
from __future__ import annotations
import math
import warnings
from math import ceil
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from botorch.acquisition import monte_carlo # noqa F401
from botorch.acquisition.multi_objective.objective import (
IdentityMCMultiOutputObjective,
MCMultiOutputObjective,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.fully_bayesian import MCMC_DIM
from botorch.models.model import Model
from botorch.sampling.get_sampler import get_sampler
from botorch.utils.gp_sampling import get_gp_samples
from botorch.utils.multi_objective.box_decompositions.box_decomposition import (
BoxDecomposition,
)
from botorch.utils.multi_objective.box_decompositions.box_decomposition_list import (
BoxDecompositionList,
)
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.sampling import draw_sobol_samples
from botorch.utils.transforms import is_fully_bayesian, normalize_indices
from torch import Tensor
def get_default_partitioning_alpha(num_objectives: int) -> float:
r"""Determines an approximation level based on the number of objectives.
If `alpha` is 0, FastNondominatedPartitioning should be used. Otherwise,
an approximate NondominatedPartitioning should be used with approximation
level `alpha`.
Args:
num_objectives: the number of objectives.
Returns:
The approximation level `alpha`.
"""
if num_objectives <= 4:
return 0.0
elif num_objectives > 6:
warnings.warn("EHVI works best for less than 7 objectives.", BotorchWarning)
return 10 ** (-8 + num_objectives)
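# Illustrative values for the helper above: with at most 4 objectives an exact
# partitioning is used (`alpha = 0`); for more objectives the approximation
# level grows with the number of objectives, e.g.
#     get_default_partitioning_alpha(5)  # -> 1e-3
#     get_default_partitioning_alpha(7)  # -> 0.1 (also emits a BotorchWarning)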
def prune_inferior_points_multi_objective(
model: Model,
X: Tensor,
ref_point: Tensor,
objective: Optional[MCMultiOutputObjective] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
num_samples: int = 2048,
max_frac: float = 1.0,
marginalize_dim: Optional[int] = None,
) -> Tensor:
r"""Prune points from an input tensor that are unlikely to be pareto optimal.
Given a model, an objective, and an input tensor `X`, this function returns
the subset of points in `X` that have some probability of being pareto
optimal, better than the reference point, and feasible. This function uses
    sampling to estimate the probabilities; the higher the number of points `n`
    in `X`, the higher the number of samples `num_samples` should be to obtain
accurate estimates.
Args:
model: A fitted model. Batched models are currently not supported.
X: An input tensor of shape `n x d`. Batched inputs are currently not
supported.
ref_point: The reference point.
objective: The objective under which to evaluate the posterior.
constraints: A list of callables, each mapping a Tensor of dimension
`sample_shape x batch-shape x q x m` to a Tensor of dimension
`sample_shape x batch-shape x q`, where negative values imply
feasibility.
num_samples: The number of samples used to compute empirical
probabilities of being the best point.
max_frac: The maximum fraction of points to retain. Must satisfy
`0 < max_frac <= 1`. Ensures that the number of elements in the
returned tensor does not exceed `ceil(max_frac * n)`.
marginalize_dim: A batch dimension that should be marginalized.
For example, this is useful when using a batched fully Bayesian
model.
Returns:
        A `n' x d`-dim Tensor containing the subset of points in `X`, where
        `n' = min(N_nz, ceil(max_frac * n))` and `N_nz` is the number of points
        in `X` that have a non-zero (empirical, under `num_samples` samples)
        probability of being pareto optimal.
"""
if marginalize_dim is None and is_fully_bayesian(model):
# TODO: Properly deal with marginalizing fully Bayesian models
marginalize_dim = MCMC_DIM
if X.ndim > 2:
# TODO: support batched inputs (req. dealing with ragged tensors)
raise UnsupportedError(
"Batched inputs `X` are currently unsupported by "
"prune_inferior_points_multi_objective"
)
max_points = math.ceil(max_frac * X.size(-2))
if max_points < 1 or max_points > X.size(-2):
raise ValueError(f"max_frac must take values in (0, 1], is {max_frac}")
with torch.no_grad():
posterior = model.posterior(X=X)
sampler = get_sampler(posterior, sample_shape=torch.Size([num_samples]))
samples = sampler(posterior)
if objective is None:
objective = IdentityMCMultiOutputObjective()
obj_vals = objective(samples, X=X)
if obj_vals.ndim > 3:
if obj_vals.ndim == 4 and marginalize_dim is not None:
obj_vals = obj_vals.mean(dim=marginalize_dim)
else:
# TODO: support batched inputs (req. dealing with ragged tensors)
raise UnsupportedError(
"Models with multiple batch dims are currently unsupported by"
" prune_inferior_points_multi_objective."
)
if constraints is not None:
infeas = torch.stack([c(samples) > 0 for c in constraints], dim=0).any(dim=0)
if infeas.ndim == 3 and marginalize_dim is not None:
# make sure marginalize_dim is not negative
if marginalize_dim < 0:
                # add 1 to the normalized marginalize_dim since we have already
                # removed the output dim
marginalize_dim = (
1 + normalize_indices([marginalize_dim], d=infeas.ndim)[0]
)
infeas = infeas.float().mean(dim=marginalize_dim).round().bool()
# set infeasible points to be the ref point
obj_vals[infeas] = ref_point
pareto_mask = is_non_dominated(obj_vals, deduplicate=False) & (
obj_vals > ref_point
).all(dim=-1)
probs = pareto_mask.to(dtype=X.dtype).mean(dim=0)
idcs = probs.nonzero().view(-1)
if idcs.shape[0] > max_points:
counts, order_idcs = torch.sort(probs, descending=True)
idcs = order_idcs[:max_points]
effective_n_w = obj_vals.shape[-2] // X.shape[-2]
idcs = (idcs / effective_n_w).long().unique()
return X[idcs]
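# Example (sketch): pruning a set of candidates before selecting a batch,
# assuming a fitted two-output model `model`, an `n x d`-dim candidate Tensor
# `X`, and a `2`-dim reference point `ref_point`:
#
#     X_pruned = prune_inferior_points_multi_objective(
#         model=model, X=X, ref_point=ref_point, num_samples=512, max_frac=0.25
#     )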
def compute_sample_box_decomposition(
pareto_fronts: Tensor,
partitioning: BoxDecomposition = DominatedPartitioning,
maximize: bool = True,
num_constraints: Optional[int] = 0,
) -> Tensor:
r"""Computes the box decomposition associated with some sampled optimal
objectives. This also supports the single-objective and constrained optimization
setting. An objective `y` is feasible if `y <= 0`.
To take advantage of batch computations, we pad the hypercell bounds with a
`2 x (M + K)`-dim Tensor of zeros `[0, 0]`.
Args:
pareto_fronts: A `num_pareto_samples x num_pareto_points x M` dim Tensor
containing the sampled optimal set of objectives.
partitioning: A `BoxDecomposition` module that is used to obtain the
hyper-rectangle bounds for integration. In the unconstrained case, this
gives the partition of the dominated space. In the constrained case, this
gives the partition of the feasible dominated space union the infeasible
space.
maximize: If true, the box-decomposition is computed assuming maximization.
num_constraints: The number of constraints `K`.
Returns:
A `num_pareto_samples x 2 x J x (M + K)`-dim Tensor containing the bounds for
the hyper-rectangles. The number `J` is the smallest number of boxes needed
to partition all the Pareto samples.
"""
tkwargs = {"dtype": pareto_fronts.dtype, "device": pareto_fronts.device}
    # We will later compute `norm.log_prob(NEG_INF)`, which is `-inf` if `NEG_INF`
    # is too small.
NEG_INF = -1e10
if pareto_fronts.ndim != 3:
raise UnsupportedError(
"Currently this only supports Pareto fronts of the shape "
"`num_pareto_samples x num_pareto_points x num_objectives`."
)
num_pareto_samples = pareto_fronts.shape[0]
M = pareto_fronts.shape[-1]
K = num_constraints
ref_point = torch.ones(M, **tkwargs) * NEG_INF
weight = 1.0 if maximize else -1.0
if M == 1:
# Only consider a Pareto front with one element.
extreme_values = weight * torch.max(weight * pareto_fronts, dim=-2).values
ref_point = weight * ref_point.expand(extreme_values.shape)
if maximize:
hypercell_bounds = torch.stack(
[ref_point, extreme_values], axis=-2
).unsqueeze(-1)
else:
hypercell_bounds = torch.stack(
[extreme_values, ref_point], axis=-2
).unsqueeze(-1)
else:
bd_list = []
for i in range(num_pareto_samples):
bd_list = bd_list + [
partitioning(ref_point=ref_point, Y=weight * pareto_fronts[i, :, :])
]
# `num_pareto_samples x 2 x J x (M + K)`
hypercell_bounds = (
BoxDecompositionList(*bd_list).get_hypercell_bounds().movedim(0, 1)
)
# If minimizing, then the bounds should be negated and flipped
if not maximize:
hypercell_bounds = weight * torch.flip(hypercell_bounds, dims=[1])
# Add an extra box for the inequality constraint.
if K > 0:
# `num_pareto_samples x 2 x (J - 1) x K`
feasible_boxes = torch.zeros(
hypercell_bounds.shape[:-1] + torch.Size([K]), **tkwargs
)
feasible_boxes[..., 0, :, :] = NEG_INF
# `num_pareto_samples x 2 x (J - 1) x (M + K)`
hypercell_bounds = torch.cat([hypercell_bounds, feasible_boxes], dim=-1)
# `num_pareto_samples x 2 x 1 x (M + K)`
infeasible_box = torch.zeros(
hypercell_bounds.shape[:-2] + torch.Size([1, M + K]), **tkwargs
)
infeasible_box[..., 1, :, M:] = -NEG_INF
infeasible_box[..., 0, :, 0:M] = NEG_INF
infeasible_box[..., 1, :, 0:M] = -NEG_INF
# `num_pareto_samples x 2 x J x (M + K)`
hypercell_bounds = torch.cat([hypercell_bounds, infeasible_box], dim=-2)
# `num_pareto_samples x 2 x J x (M + K)`
return hypercell_bounds
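# Example (sketch): given `pareto_fronts` of shape
# `num_pareto_samples x num_pareto_points x M`, e.g. obtained from
# `sample_optimal_points` below, the bounds used by the entropy-based
# acquisition functions can be computed as:
#
#     hypercell_bounds = compute_sample_box_decomposition(pareto_fronts)
#     # -> `num_pareto_samples x 2 x J x M`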
def random_search_optimizer(
model: GenericDeterministicModel,
bounds: Tensor,
num_points: int,
maximize: bool,
pop_size: int = 1024,
max_tries: int = 10,
) -> Tuple[Tensor, Tensor]:
r"""Optimize a function via random search.
Args:
model: The model.
bounds: A `2 x d`-dim Tensor containing the input bounds.
num_points: The number of optimal points to be outputted.
maximize: If true, we consider a maximization problem.
pop_size: The number of function evaluations per try.
max_tries: The maximum number of tries.
Returns:
A two-element tuple containing
- A `num_points x d`-dim Tensor containing the collection of optimal inputs.
- A `num_points x M`-dim Tensor containing the collection of optimal
objectives.
"""
tkwargs = {"dtype": bounds.dtype, "device": bounds.device}
weight = 1.0 if maximize else -1.0
optimal_inputs = torch.tensor([], **tkwargs)
optimal_outputs = torch.tensor([], **tkwargs)
num_tries = 0
ratio = 2
while ratio > 1 and num_tries < max_tries:
X = draw_sobol_samples(bounds=bounds, n=pop_size, q=1).squeeze(-2)
Y = model.posterior(X).mean
X_aug = torch.cat([optimal_inputs, X], dim=0)
Y_aug = torch.cat([optimal_outputs, Y], dim=0)
pareto_mask = is_non_dominated(weight * Y_aug)
optimal_inputs = X_aug[pareto_mask]
optimal_outputs = Y_aug[pareto_mask]
num_found = len(optimal_inputs)
ratio = ceil(num_points / num_found)
num_tries = num_tries + 1
    # If the maximum number of retries is exceeded, raise a RuntimeError.
if ratio > 1:
error_text = f"Only found {num_found} optimal points instead of {num_points}."
raise RuntimeError(error_text)
else:
return optimal_inputs[:num_points], optimal_outputs[:num_points]
def sample_optimal_points(
model: Model,
bounds: Tensor,
num_samples: int,
num_points: int,
optimizer: Callable[
[GenericDeterministicModel, Tensor, int, bool, Any], Tuple[Tensor, Tensor]
] = random_search_optimizer,
num_rff_features: int = 512,
maximize: bool = True,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[Tensor, Tensor]:
r"""Compute a collection of optimal inputs and outputs from samples of a Gaussian
Process (GP).
Steps:
(1) The samples are generated using random Fourier features (RFFs).
(2) The samples are optimized sequentially using an optimizer.
TODO: We can generalize the GP sampling step to accommodate for other sampling
strategies rather than restricting to RFFs e.g. decoupled sampling.
TODO: Currently this defaults to random search optimization, might want to
explore some other alternatives.
Args:
model: The model. This does not support models which include fantasy
observations.
bounds: A `2 x d`-dim Tensor containing the input bounds.
num_samples: The number of GP samples.
num_points: The number of optimal points to be outputted.
optimizer: A callable that solves the deterministic optimization problem.
num_rff_features: The number of random Fourier features.
maximize: If true, we consider a maximization problem.
optimizer_kwargs: The additional arguments for the optimizer.
Returns:
A two-element tuple containing
- A `num_samples x num_points x d`-dim Tensor containing the collection of
optimal inputs.
- A `num_samples x num_points x M`-dim Tensor containing the collection of
optimal objectives.
"""
tkwargs = {"dtype": bounds.dtype, "device": bounds.device}
M = model.num_outputs
d = bounds.shape[-1]
if M == 1:
if num_points > 1:
raise UnsupportedError(
"For single-objective optimization `num_points` should be 1."
)
if optimizer_kwargs is None:
optimizer_kwargs = {}
pareto_sets = torch.zeros((num_samples, num_points, d), **tkwargs)
pareto_fronts = torch.zeros((num_samples, num_points, M), **tkwargs)
for i in range(num_samples):
sample_i = get_gp_samples(
model=model, num_outputs=M, n_samples=1, num_rff_features=num_rff_features
)
ps_i, pf_i = optimizer(
model=sample_i,
bounds=bounds,
num_points=num_points,
maximize=maximize,
**optimizer_kwargs,
)
pareto_sets[i, ...] = ps_i
pareto_fronts[i, ...] = pf_i
return pareto_sets, pareto_fronts
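# Example (sketch): a typical pipeline for the entropy-based multi-objective
# acquisition functions, assuming a fitted two-output GP `model` and a
# `2 x d`-dim Tensor `bounds`:
#
#     ps, pf = sample_optimal_points(
#         model=model,
#         bounds=bounds,
#         num_samples=10,  # number of GP (Pareto) samples
#         num_points=10,  # number of Pareto points per sample
#         optimizer=random_search_optimizer,
#         optimizer_kwargs={"pop_size": 2000, "max_tries": 10},
#     )
#     hypercell_bounds = compute_sample_box_decomposition(pf)
#     # `ps`, `pf` and `hypercell_bounds` can then be passed to, e.g.,
#     # `qLowerBoundMultiObjectiveJointEntropySearch`.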
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Acquisition function for predictive entropy search for multi-objective Bayesian
optimization (PES). The code does not support constraint handling.
NOTE: The PES acquisition might not be differentiable. As a result, we recommend
optimizing the acquisition function using finite differences.
References:
.. [Garrido-Merchan2019]
E. Garrido-Merchan and D. Hernandez-Lobato. Predictive Entropy Search for
Multi-objective Bayesian Optimization with Constraints. Neurocomputing. 2019.
The computation follows the procedure described in the supplementary material:
https://www.sciencedirect.com/science/article/abs/pii/S0925231219308525
"""
from __future__ import annotations
from typing import Any, Optional, Tuple
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.exceptions import InputDataError
from botorch.exceptions.errors import UnsupportedError
from botorch.models.model import Model
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.utils import check_no_nans
from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
from torch import Tensor
from torch.distributions import Normal
class qMultiObjectivePredictiveEntropySearch(AcquisitionFunction):
r"""The acquisition function for Predictive Entropy Search. The code supports
both single and multiple objectives as well as batching.
This acquisition function approximates the mutual information between the
observation at a candidate point `X` and the Pareto optimal input using the
moment-matching procedure known as expectation propagation (EP).
See the Appendix of [Garrido-Merchan2019]_ for the description of the EP
procedure.
IMPORTANT NOTES:
    (i) The PES acquisition function estimated using EP is sometimes not
    differentiable, and therefore we advise using a finite-difference estimate of
    the gradient as opposed to the gradients identified using automatic
    differentiation, which occasionally output `nan` values.
    The source of this non-differentiability is the `_update_damping` function,
    which finds the damping factor `a` that is used to update the EP parameters
    `a * param_new + (1 - a) * param_old`. The damping factor has to ensure
    that the updated covariance matrix, `a * cov_f_new + (1 - a) * cov_f_old`,
    remains positive semi-definite. We follow the original paper, which
    identifies `a` via a successive halving scheme, i.e. we check `a=1`, then
    `a=0.5`, etc. This procedure means `a` is a function of the test input `X`,
    and this function is not differentiable in `X`.
(ii) EP could potentially fail for a number of reasons:
    (a) When the sampled Pareto optimal points `x_p` are poor compared to the
    training or testing data `x_n`.
    (b) When the training or testing data `x_n` is close to the Pareto optimal
    points `x_p`.
(c) When the convergence threshold is set too small.
Problem (a) occurs because we have to compute the variable:
`alpha = (mean(x_n) - mean(x_p)) / std(x_n - x_p)`, which becomes very
    large when `x_n` is better than `x_p` with high probability. This leads to a
    log(0) error when we compute `log(1 - cdf(alpha))`. We have pre-emptively
    clamped some values depending on `alpha` in order to mitigate this.
Problem (b) occurs because we have to compute matrix inverses for the
two-dimensional marginals (x_n, x_p). To address this we manually add jitter
to the diagonal of the covariance matrix i.e. `ep_jitter` when training and
`test_jitter` when testing. The default choice is not always appropriate
because the same jitter is used for the inversion of the covariance
and precision matrix, which are on different scales.
TODO: come up with strategy to adaptively update the jitter.
Problem (c) occurs because a smaller threshold usually means that more EP
iterations are required. Running too many EP iterations could lead to
invertibility problems such as in problem (b). Setting a larger threshold
or reducing the number of EP iterations could alleviate this.
(iii) The estimated acquisition value could be negative.
"""
def __init__(
self,
model: Model,
pareto_sets: Tensor,
maximize: bool = True,
X_pending: Optional[Tensor] = None,
max_ep_iterations: int = 250,
ep_jitter: float = 1e-4,
test_jitter: float = 1e-4,
threshold: float = 1e-2,
**kwargs: Any,
) -> None:
r"""Multi-objective predictive entropy search acquisition function.
Args:
            model: A fitted batched model with `M` outputs.
pareto_sets: A `num_pareto_samples x P x d`-dim tensor containing the
Pareto optimal set of inputs, where `P` is the number of pareto
optimal points. The points in each sample have to be discrete
otherwise expectation propagation will fail.
maximize: If true, we consider a maximization problem.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation, but have not yet been evaluated.
max_ep_iterations: The maximum number of expectation propagation
iterations. (The minimum number of iterations is set at 3.)
ep_jitter: The amount of jitter added for the matrix inversion that
occurs during the expectation propagation update during the training
phase.
test_jitter: The amount of jitter added for the matrix inversion that
occurs during the expectation propagation update in the testing
phase.
threshold: The convergence threshold for expectation propagation. This
assesses the relative change in the mean and covariance. We default
to one percent change i.e. `threshold = 1e-2`.
"""
super().__init__(model=model)
self.model = model
self.maximize = maximize
self.set_X_pending(X_pending)
if model.num_outputs > 1 or isinstance(model, ModelListGP):
train_X = self.model.train_inputs[0][0]
else:
train_X = self.model.train_inputs[0]
# Batch GP models (e.g. fantasized models) are not currently supported
if train_X.ndim > 2:
raise NotImplementedError(
"Batch GP models (e.g. fantasized models) are not supported."
)
if pareto_sets.ndim != 3 or pareto_sets.shape[-1] != train_X.shape[-1]:
raise UnsupportedError(
"The Pareto set should have a shape of "
"`num_pareto_samples x num_pareto_points x input_dim`."
)
else:
self.pareto_sets = pareto_sets
# add the pareto set to the existing training data
self.num_pareto_samples = pareto_sets.shape[0]
self.augmented_X = torch.cat(
[train_X.repeat(self.num_pareto_samples, 1, 1), self.pareto_sets], dim=-2
)
self.max_ep_iterations = max_ep_iterations
self.ep_jitter = ep_jitter
self.test_jitter = test_jitter
self.threshold = threshold
self._expectation_propagation()
def _expectation_propagation(self) -> None:
r"""Perform expectation propagation to obtain the covariance factors that
depend on the Pareto sets.
The updates are performed in the natural parameter space. For a multivariate
normal distribution with mean mu and covariance Sigma, we call Sigma^{-1}
the natural covariance and Sigma^{-1} mu the natural mean.
"""
###########################################################################
# INITIALIZATION
###########################################################################
M = self.model.num_outputs
if self.model.num_outputs > 1 or isinstance(self.model, ModelListGP):
train_X = self.model.train_inputs[0][0]
else:
train_X = self.model.train_inputs[0]
tkwargs = {"dtype": train_X.dtype, "device": train_X.device}
N = len(train_X)
num_pareto_samples = self.num_pareto_samples
P = self.pareto_sets.shape[-2]
# initialize the predictive natural mean and variances
(
pred_nat_mean,
pred_nat_cov,
pred_mean,
pred_cov,
) = _initialize_predictive_matrices(
X=self.augmented_X,
model=self.model,
observation_noise=False,
jitter=self.ep_jitter,
natural=True,
)
pred_f_mean = pred_mean[..., 0:M, :]
pred_f_nat_mean = pred_nat_mean[..., 0:M, :]
pred_f_cov = pred_cov[..., 0:M, :, :]
pred_f_nat_cov = pred_nat_cov[..., 0:M, :, :]
# initialize the marginals
# `num_pareto_samples x M x (N + P)`
mean_f = pred_f_mean.clone()
nat_mean_f = pred_f_nat_mean.clone()
# `num_pareto_samples x M x (N + P) x (N + P)`
cov_f = pred_f_cov.clone()
nat_cov_f = pred_f_nat_cov.clone()
        # initialize omega, the factor which encodes the fact that the Pareto
        # points are optimal in the feasible space, i.e. no point in the
        # feasible space should dominate the Pareto efficient points.
# `num_pareto_samples x M x (N + P) x P x 2`
omega_f_nat_mean = torch.zeros((num_pareto_samples, M, N + P, P, 2), **tkwargs)
# `num_pareto_samples x M x (N + P) x P x 2 x 2`
omega_f_nat_cov = torch.zeros(
(num_pareto_samples, M, N + P, P, 2, 2), **tkwargs
)
###########################################################################
# EXPECTATION PROPAGATION
###########################################################################
damping = torch.ones(num_pareto_samples, M, **tkwargs)
iteration = 0
while (torch.sum(damping) > 0) and (iteration < self.max_ep_iterations):
# Compute the new natural mean and covariance
####################################################################
# OBJECTIVE FUNCTION: OMEGA UPDATE
####################################################################
omega_f_nat_mean_new, omega_f_nat_cov_new = _safe_update_omega(
mean_f=mean_f,
cov_f=cov_f,
omega_f_nat_mean=omega_f_nat_mean,
omega_f_nat_cov=omega_f_nat_cov,
N=N,
P=P,
M=M,
maximize=self.maximize,
jitter=self.ep_jitter,
)
####################################################################
# OBJECTIVE FUNCTION: MARGINAL UPDATE
####################################################################
nat_mean_f_new, nat_cov_f_new = _update_marginals(
pred_f_nat_mean=pred_f_nat_mean,
pred_f_nat_cov=pred_f_nat_cov,
omega_f_nat_mean=omega_f_nat_mean_new,
omega_f_nat_cov=omega_f_nat_cov_new,
N=N,
P=P,
)
########################################################################
# OBJECTIVE FUNCTION: DAMPING UPDATE
########################################################################
# update damping of objectives
damping, cholesky_nat_cov_f = _update_damping(
nat_cov=nat_cov_f,
nat_cov_new=nat_cov_f_new,
damping_factor=damping,
jitter=self.ep_jitter,
)
check_no_nans(cholesky_nat_cov_f)
########################################################################
# OBJECTIVE FUNCTION: DAMPED UPDATE
########################################################################
# Damp update of omega
omega_f_nat_mean = _damped_update(
old_factor=omega_f_nat_mean,
new_factor=omega_f_nat_mean_new,
damping_factor=damping,
)
omega_f_nat_cov = _damped_update(
old_factor=omega_f_nat_cov,
new_factor=omega_f_nat_cov_new,
damping_factor=damping,
)
# update the mean and covariance
nat_mean_f = _damped_update(
old_factor=nat_mean_f, new_factor=nat_mean_f_new, damping_factor=damping
)
nat_cov_f = _damped_update(
old_factor=nat_cov_f, new_factor=nat_cov_f_new, damping_factor=damping
)
# compute cholesky inverse
cov_f_new = torch.cholesky_inverse(cholesky_nat_cov_f)
mean_f_new = torch.einsum("...ij,...j->...i", cov_f_new, nat_mean_f)
check_no_nans(cov_f_new)
########################################################################
# OBJECTIVE FUNCTION: CONVERGENCE UPDATE
########################################################################
# Set the damping to zero when the change in the mean and
# covariance is less than the threshold
damping, delta_mean_f, delta_cov_f = _update_damping_when_converged(
mean_old=mean_f,
mean_new=mean_f_new,
cov_old=cov_f,
cov_new=cov_f_new,
damping_factor=damping,
threshold=self.threshold,
iteration=iteration,
)
cov_f = cov_f_new
mean_f = mean_f_new
iteration = iteration + 1
############################################################################
# SAVE OMEGA AND PHI FACTORS
############################################################################
check_no_nans(omega_f_nat_mean)
check_no_nans(omega_f_nat_cov)
# save phi and omega for the forward
self._omega_f_nat_mean = omega_f_nat_mean
self._omega_f_nat_cov = omega_f_nat_cov
def _compute_information_gain(self, X: Tensor) -> Tensor:
r"""Evaluate qMultiObjectivePredictiveEntropySearch on the candidate set `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim
design points each.
Returns:
A `batch_shape'`-dim Tensor of Predictive Entropy Search values at the
given design points `X`.
"""
tkwargs = {"dtype": X.dtype, "device": X.device}
batch_shape = X.shape[0:-2]
q = X.shape[-2]
M = self.model.num_outputs
if M > 1 or isinstance(self.model, ModelListGP):
N = len(self.model.train_inputs[0][0])
else:
N = len(self.model.train_inputs[0])
P = self.pareto_sets.shape[-2]
num_pareto_samples = self.num_pareto_samples
###########################################################################
# AUGMENT X WITH THE SAMPLED PARETO SET
###########################################################################
new_shape = batch_shape + torch.Size([num_pareto_samples]) + X.shape[-2:]
expanded_X = X.unsqueeze(-3).expand(new_shape)
expanded_ps = self.pareto_sets.expand(X.shape[0:-2] + self.pareto_sets.shape)
# `batch_shape x num_pareto_samples x (q + P) x d`
aug_X = torch.cat([expanded_X, expanded_ps], dim=-2)
###########################################################################
# COMPUTE THE POSTERIORS AND OBSERVATION NOISE
###########################################################################
# compute predictive distribution without observation noise
(
pred_nat_mean,
pred_nat_cov,
pred_mean,
pred_cov,
) = _initialize_predictive_matrices(
X=aug_X,
model=self.model,
observation_noise=True,
jitter=self.test_jitter,
natural=True,
)
pred_f_mean = pred_mean[..., 0:M, :]
pred_f_nat_mean = pred_nat_mean[..., 0:M, :]
pred_f_cov = pred_cov[..., 0:M, :, :]
pred_f_nat_cov = pred_nat_cov[..., 0:M, :, :]
(_, _, _, pred_cov_noise) = _initialize_predictive_matrices(
X=aug_X,
model=self.model,
observation_noise=True,
jitter=self.test_jitter,
natural=False,
)
pred_f_cov_noise = pred_cov_noise[..., 0:M, :, :]
observation_noise = pred_f_cov_noise - pred_f_cov
###########################################################################
# INITIALIZE THE EP FACTORS
###########################################################################
# `batch_shape x num_pareto_samples x M x (q + P) x P x 2`
omega_f_nat_mean = torch.zeros(
batch_shape + torch.Size([num_pareto_samples, M, q + P, P, 2]), **tkwargs
)
# `batch_shape x num_pareto_samples x M x (q + P) x P x 2 x 2`
omega_f_nat_cov = torch.zeros(
batch_shape + torch.Size([num_pareto_samples, M, q + P, P, 2, 2]), **tkwargs
)
###########################################################################
# RUN EP ONCE
###########################################################################
# run update omega once
omega_f_nat_mean, omega_f_nat_cov = _safe_update_omega(
mean_f=pred_f_mean,
cov_f=pred_f_cov,
omega_f_nat_mean=omega_f_nat_mean,
omega_f_nat_cov=omega_f_nat_cov,
N=q,
P=P,
M=M,
maximize=self.maximize,
jitter=self.test_jitter,
)
###########################################################################
# ADD THE CACHE FACTORS BACK
###########################################################################
omega_f_nat_mean, omega_f_nat_cov = _augment_factors_with_cached_factors(
q=q,
N=N,
omega_f_nat_mean=omega_f_nat_mean,
cached_omega_f_nat_mean=self._omega_f_nat_mean,
omega_f_nat_cov=omega_f_nat_cov,
cached_omega_f_nat_cov=self._omega_f_nat_cov,
)
###########################################################################
# COMPUTE THE MARGINAL
###########################################################################
nat_mean_f, nat_cov_f = _update_marginals(
pred_f_nat_mean=pred_f_nat_mean,
pred_f_nat_cov=pred_f_nat_cov,
omega_f_nat_mean=omega_f_nat_mean,
omega_f_nat_cov=omega_f_nat_cov,
N=q,
P=P,
)
###########################################################################
# COMPUTE THE DAMPED UPDATE
###########################################################################
        # update damping of objectives
damping = torch.ones(
batch_shape + torch.Size([num_pareto_samples, M]), **tkwargs
)
damping, cholesky_nat_cov_f_new = _update_damping(
nat_cov=pred_f_nat_cov,
nat_cov_new=nat_cov_f,
damping_factor=damping,
jitter=self.test_jitter,
)
# invert matrix
cov_f_new = torch.cholesky_inverse(cholesky_nat_cov_f_new)
check_no_nans(cov_f_new)
###########################################################################
# COMPUTE THE LOG DETERMINANTS
###########################################################################
# compute the initial log determinant term
log_det_pred_f_cov_noise = _compute_log_determinant(cov=pred_f_cov_noise, q=q)
# compute the post log determinant term
log_det_cov_f = _compute_log_determinant(cov=cov_f_new + observation_noise, q=q)
###########################################################################
# COMPUTE THE ACQUISITION FUNCTION
###########################################################################
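        # The PES value approximates the mutual information between the
        # observation at `X` and the Pareto optimal inputs by a difference of
        # Gaussian entropies,
        #   0.5 * (logdet Cov[y | D_n] - logdet Cov[y | D_n, EP factors]),
        # where both covariance matrices include the observation noise.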
q_pes_f = log_det_pred_f_cov_noise - log_det_cov_f
check_no_nans(q_pes_f)
return 0.5 * q_pes_f
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qMultiObjectivePredictiveEntropySearch on the candidate set `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim
design points each.
Returns:
A `batch_shape'`-dim Tensor of acquisition values at the given design
points `X`.
"""
return self._compute_information_gain(X)
def log_cdf_robust(x: Tensor) -> Tensor:
r"""Computes the logarithm of the normal cumulative density robustly. This uses
the approximation log(1-z) ~ -z when z is small:
if x > 5:
log(cdf(x)) = log(1-cdf(-x)) approx -cdf(-x)
else:
log(cdf(x)).
Args:
x: a `x_shape`-dim Tensor.
    Returns:
A `x_shape`-dim Tensor.
"""
CLAMP_LB = torch.finfo(x.dtype).eps
NEG_INF = torch.finfo(x.dtype).min
normal = Normal(torch.zeros_like(x), torch.ones_like(x))
cdf_x = normal.cdf(x)
neg_cdf_neg_x = -normal.cdf(-x)
log_cdf_x = torch.where(x < 5, torch.log(cdf_x), neg_cdf_neg_x)
return log_cdf_x.clamp(NEG_INF, -CLAMP_LB)
def _initialize_predictive_matrices(
X: Tensor,
model: Model,
observation_noise: bool = True,
jitter: float = 1e-4,
natural: bool = True,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
r"""Initializes the natural predictive mean and covariance matrix. For a
multivariate normal distribution with mean mu and covariance Sigma, the natural
mean is Sigma^{-1} mu and the natural covariance is Sigma^{-1}.
Args:
X: A `batch_shape x R x d`-dim Tensor.
model: The fitted model.
observation_noise: If true, the posterior is computed with observation noise.
jitter: The jitter added to the covariance matrix.
natural: If true, we compute the natural statistics as well.
Return:
A four-element tuple containing
        - pred_nat_mean: A `batch_shape x num_outputs x R`-dim Tensor containing the
predictive natural mean vectors.
- pred_nat_cov: A `batch_shape x num_outputs x R x R`-dim Tensor containing
the predictive natural covariance matrices.
- pred_mean: A `batch_shape x num_outputs x R`-dim Tensor containing the
predictive mean vectors.
- pred_cov: A `batch_shape x num_outputs x R x R`-dim Tensor containing the
predictive covariance matrices.
"""
tkwargs = {"dtype": X.dtype, "device": X.device}
# compute the predictive mean and covariances at X
posterior = model.posterior(X, observation_noise=observation_noise)
# `batch_shape x (R * num_outputs) x (R * num_outputs)`
init_pred_cov = posterior.mvn.covariance_matrix
num_outputs = model.num_outputs
R = int(init_pred_cov.shape[-1] / num_outputs)
pred_cov = [
init_pred_cov[..., (m * R) : ((m + 1) * R), (m * R) : ((m + 1) * R)].unsqueeze(
-1
)
for m in range(num_outputs)
]
# `batch_shape x R x R x num_outputs` (before swap axes)
    # `batch_shape x num_outputs x R x R`
pred_cov = torch.cat(pred_cov, axis=-1).swapaxes(-2, -1).swapaxes(-3, -2)
identity = torch.diag_embed(torch.ones(pred_cov.shape[:-1], **tkwargs))
pred_cov = pred_cov + jitter * identity
# `batch_shape x num_outputs x R`
pred_mean = posterior.mean.swapaxes(-2, -1)
#############################################################
if natural:
# natural parameters
# `batch_shape x num_outputs x R x R`
cholesky_pred_cov, _ = torch.linalg.cholesky_ex(pred_cov)
pred_nat_cov = torch.cholesky_inverse(cholesky_pred_cov)
# `batch_shape x num_outputs x R`
pred_nat_mean = torch.einsum("...ij,...j->...i", pred_nat_cov, pred_mean)
return pred_nat_mean, pred_nat_cov, pred_mean, pred_cov
else:
return None, None, pred_mean, pred_cov
def _get_omega_f_contribution(
mean: Tensor, cov: Tensor, N: int, P: int, M: int
) -> Tuple[Tensor, Tensor]:
r"""Extract the mean vector and covariance matrix corresponding to the `2 x 2`
multivariate normal blocks in the objective model between the points in `X` and
the Pareto optimal set.
[There is likely a more efficient way to do this.]
Args:
        mean: A `batch_shape x M x (N + P)`-dim Tensor containing the mean
            vector for the objectives.
        cov: A `batch_shape x M x (N + P) x (N + P)`-dim Tensor containing
            the covariance matrix for the objectives.
N: The number of design points.
P: The number of Pareto optimal points.
M: The number of objectives.
Return:
A two-element tuple containing
- mean_fX_fS: A `batch_shape x M x (N + P) x P x 2`-dim Tensor containing the
means of the inputs and Pareto optimal points.
- cov_fX_fS: A `batch_shape x M x (N + P) x P x 2 x 2`-dim Tensor containing
the covariances between the inputs and Pareto optimal points.
"""
tkwargs = {"dtype": mean.dtype, "device": mean.device}
batch_shape = mean.shape[:-2]
# `batch_shape x M x (N + P) x P x 2 x 2`
cov_fX_fS = torch.zeros(batch_shape + torch.Size([M, N + P, P, 2, 2]), **tkwargs)
# `batch_shape x M x (N + P) x P x 2`
mean_fX_fS = torch.zeros(batch_shape + torch.Size([M, N + P, P, 2]), **tkwargs)
# `batch_shape x M x (N + P) x P`
mean_fX_fS[..., 0] = mean.unsqueeze(-1).expand(mean.shape + torch.Size([P]))
# `batch_shape x M x (N + P) x P`
mean_fX_fS[..., 1] = (
mean[..., N:].unsqueeze(-2).expand(mean.shape + torch.Size([P]))
)
# `batch_shape x M x (N + P) x P`
cov_fX_fS[..., 0, 0] = (
cov[..., range(N + P), range(N + P)]
.unsqueeze(-1)
.expand(batch_shape + torch.Size([M, N + P, P]))
)
# `batch_shape x M x (N + P) x P`
cov_fX_fS[..., 1, 1] = (
cov[..., range(N, N + P), range(N, N + P)]
.unsqueeze(-2)
.expand(batch_shape + torch.Size([M, N + P, P]))
)
for p in range(P):
# `batch_shape x M x (N + P)`
cov_p = cov[..., range(N + P), N + p]
cov_fX_fS[..., p, 0, 1] = cov_p
cov_fX_fS[..., p, 1, 0] = cov_p
return mean_fX_fS, cov_fX_fS
def _replace_pareto_diagonal(A: Tensor) -> Tensor:
"""Replace the pareto diagonal with identity matricx.
The Pareto diagonal of the omega factor shouldn't be updated because does not
contribute anything: `omega(x_p, x_p) = 1` for any pareto optimal input `x_p`.
Args:
A: a `batch_shape x M x (N + P) x P x 2 x 2`-dim Tensor.
Returns:
A `batch_shape x M x (N + P) x P x 2 x 2`-dim Tensor, where the Pareto
diagonal is padded with identity matrices.
"""
tkwargs = {"dtype": A.dtype, "device": A.device}
batch_shape = A.shape[:-5]
P = A.shape[-3]
N = A.shape[-4] - P
M = A.shape[-5]
identity = torch.diag_embed(torch.ones(batch_shape + torch.Size([M, 2]), **tkwargs))
for p in range(P):
A[..., N + p, p, :, :] = identity
return A
def _update_omega(
mean_f: Tensor,
cov_f: Tensor,
omega_f_nat_mean: Tensor,
omega_f_nat_cov: Tensor,
N: int,
P: int,
M: int,
maximize: bool = True,
jitter: float = 1e-6,
) -> Tuple[Tensor, Tensor]:
r"""Computes the new omega factors by matching the moments.
Args:
mean_f: A `batch_shape x M x (N + P)`-dim Tensor containing the mean vector
for the objectives.
cov_f: A `batch_shape x M x (N + P) x (N + P)`-dim Tensor containing the
covariance matrix for the objectives.
omega_f_nat_mean: A `batch_shape x M x (N + P) x P x 2`-dim Tensor containing
the omega natural mean factors for the objective matrix.
omega_f_nat_cov: A `batch_shape x M x (N + P) x P x 2 x 2`-dim Tensor
containing the omega natural covariance factors for the objective matrix.
N: The number of design points.
        P: The number of Pareto optimal points.
M: The number of objectives.
maximize: If true, we consider the Pareto maximum domination relation.
jitter: The jitter for the matrix inverse.
Return:
A two-element tuple containing
- omega_f_nat_mean_new: A `batch_shape x M x (N + P) x P x 2` containing the
new omega natural mean factors for the objective matrix.
- omega_f_nat_cov_new: A `batch_shape x M x (N + P) x P x 2 x 2` containing
the new omega natural covariance factors for the objective matrix.
"""
tkwargs = {"dtype": mean_f.dtype, "device": mean_f.device}
CLAMP_LB = torch.finfo(tkwargs["dtype"]).eps
NEG_INF = torch.finfo(tkwargs["dtype"]).min
weight = 1.0 if maximize else -1.0
###############################################################################
# EXTRACT THE NECESSARY COMPONENTS
###############################################################################
# `batch_shape x M x (N + P) x P x 2`-dim mean
# `batch_shape x M x (N + P) x P x 2 x 2`-dim covariance
mean_fX_fS, cov_fX_fS = _get_omega_f_contribution(mean_f, cov_f, N, P, M)
identity = torch.diag_embed(torch.ones(cov_fX_fS.shape[:-1], **tkwargs))
# remove the Pareto diagonal
cov_fX_fS = _replace_pareto_diagonal(cov_fX_fS + jitter * identity)
nat_cov_fX_fS = torch.inverse(cov_fX_fS)
nat_mean_fX_fS = torch.einsum("...ij,...j->...i", nat_cov_fX_fS, mean_fX_fS)
###############################################################################
# COMPUTE THE CAVITIES
###############################################################################
# cavity distribution
# natural parameters
cav_nat_mean_f = nat_mean_fX_fS - omega_f_nat_mean
cav_nat_cov_f = nat_cov_fX_fS - omega_f_nat_cov
# transform to standard parameters
# remove the Pareto diagonal
cav_nat_cov_f = _replace_pareto_diagonal(cav_nat_cov_f)
identity = torch.diag_embed(torch.ones(cav_nat_cov_f.shape[:-1], **tkwargs))
cav_cov_f = torch.inverse(cav_nat_cov_f + jitter * identity)
cav_mean_f = torch.einsum("...ij,...j->...i", cav_cov_f, cav_nat_mean_f)
###############################################################################
# COMPUTE THE NORMALIZATION CONSTANT
###############################################################################
# `batch_shape x M x (N + P) x P`
# Equation 29
cav_var_fX_minus_fS = (
cav_cov_f[..., 0, 0] + cav_cov_f[..., 1, 1] - 2 * cav_cov_f[..., 0, 1]
).clamp_min(CLAMP_LB)
cav_std_fX_minus_fS = torch.sqrt(cav_var_fX_minus_fS).clamp_min(CLAMP_LB)
# `batch_shape x M x (N + P) x P`
cav_mean_fX_minus_fS = weight * (cav_mean_f[..., 0] - cav_mean_f[..., 1])
# Equation 30
cav_alpha = cav_mean_fX_minus_fS / cav_std_fX_minus_fS
# compute alpha pdf and cdf
normal_alpha = Normal(torch.zeros_like(cav_alpha), torch.ones_like(cav_alpha))
# `batch_shape x M x (N + P) x P`
cav_alpha_log_cdf = log_cdf_robust(cav_alpha)
# `batch_shape x M x (N + P) x P`
cav_alpha_log_pdf = normal_alpha.log_prob(cav_alpha).clamp_min(NEG_INF)
# `batch_shape x (N + P) x P`
cav_sum_alpha_log_cdf = torch.sum(cav_alpha_log_cdf, dim=-3).clamp_min(NEG_INF)
# compute normalization constant Z
# Equation 35
cav_log_zeta = torch.log1p(-torch.exp(cav_sum_alpha_log_cdf)).clamp_min(NEG_INF)
# Need to clamp log values to prevent `exp(-inf) = nan`
cav_logZ = cav_log_zeta
# Equation 40 [first bit]
# `batch_shape x (N + P) x P`
cav_log_rho = -cav_logZ + cav_sum_alpha_log_cdf
# Equation 40 [second bit]
# `batch_shape x M x (N + P) x P`
cav_log_rho = cav_log_rho.unsqueeze(-3) - cav_alpha_log_cdf + cav_alpha_log_pdf
cav_rho = -torch.exp(cav_log_rho).clamp(NEG_INF, -NEG_INF)
###############################################################################
# COMPUTE THE PARTIAL DERIVATIVES
###############################################################################
# `batch_shape x M x (N + P) x P x 2`
# Final vector: `[1, -1]`
ones_mean = torch.ones(cav_mean_f.shape, **tkwargs)
ones_mean[..., 1] = -ones_mean[..., 1]
# `batch_shape x M x (N + P) x P x 2 x 2`
# Final matrix: `[[1, -1], [-1, 1]]`
ones_cov = torch.ones(cav_cov_f.shape, **tkwargs)
ones_cov[..., 0, 1] = -ones_cov[..., 0, 1]
ones_cov[..., 1, 0] = -ones_cov[..., 1, 0]
# first partial derivation of the log Z with respect to the mean
# assuming maximization (this is also where the sign will change)
# Equation 41
cav_dlogZ_dm = cav_rho / cav_std_fX_minus_fS
cav_dlogZ_dm = weight * cav_dlogZ_dm.unsqueeze(-1) * ones_mean
# second partial derivation of the log Z with respect to the mean
# Equation 42
cav_d2logZ_dm2 = -cav_rho * (cav_rho + cav_alpha) / cav_var_fX_minus_fS
cav_d2logZ_dm2 = cav_d2logZ_dm2.unsqueeze(-1).unsqueeze(-1) * ones_cov
###############################################################################
# COMPUTE THE NEW MEAN AND COVARIANCE
###############################################################################
# compute the new mean and covariance
cav_updated_mean_f = cav_mean_f + torch.einsum(
"...ij,...j->...i", cav_cov_f, cav_dlogZ_dm
)
cav_updated_cov_f = cav_cov_f + torch.einsum(
"...ij,...jk,...kl->...il", cav_cov_f, cav_d2logZ_dm2, cav_cov_f
)
# transform to natural parameters
# remove the Pareto diagonal
cav_updated_cov_f = _replace_pareto_diagonal(cav_updated_cov_f)
identity = torch.diag_embed(torch.ones(cav_updated_cov_f.shape[:-1], **tkwargs))
cav_updated_nat_cov_f = torch.inverse(cav_updated_cov_f + jitter * identity)
cav_updated_nat_mean_f = torch.einsum(
"...ij,...j->...i", cav_updated_nat_cov_f, cav_updated_mean_f
)
# match the moments to compute the gain
omega_f_nat_mean_new = cav_updated_nat_mean_f - cav_nat_mean_f
omega_f_nat_cov_new = cav_updated_nat_cov_f - cav_nat_cov_f
# it is also possible to calculate the update directly as in the original paper:
# identity = torch.diag_embed(torch.ones(cav_d2logZ_dm2.shape[:-1], **tkwargs))
# denominator = torch.inverse(cav_cov_f @ cav_d2logZ_dm2 + identity)
# omega_f_nat_cov_new = - cav_d2logZ_dm2 @ denominator
# omega_f_nat_mean_new = torch.einsum(
# '...ij,...j->...i', denominator,
# cav_dlogZ_dm - torch.einsum('...ij,...j->...i', cav_d2logZ_dm2, cav_mean_f)
# )
return omega_f_nat_mean_new, omega_f_nat_cov_new
def _safe_update_omega(
mean_f: Tensor,
cov_f: Tensor,
omega_f_nat_mean: Tensor,
omega_f_nat_cov: Tensor,
N: int,
P: int,
M: int,
maximize: bool = True,
jitter: float = 1e-6,
) -> Tuple[Tensor, Tensor]:
r"""Try to update the new omega factors by matching the moments. If the update
is not possible then this returns the initial omega factors.
Args:
mean_f: A `batch_shape x M x (N + P)`-dim Tensor containing the mean vector
for the objectives.
cov_f: A `batch_shape x M x (N + P) x (N + P)`-dim Tensor containing the
covariance matrix for the objectives.
omega_f_nat_mean: A `batch_shape x M x (N + P) x P x 2`-dim Tensor containing
the omega natural mean factors for the objective matrix.
omega_f_nat_cov: A `batch_shape x M x (N + P) x P x 2 x 2`-dim Tensor
containing the omega natural covariance factors for the objective
matrix.
N: The number of design points.
        P: The number of Pareto optimal points.
M: The number of objectives.
maximize: If true, we consider the Pareto maximum domination relation.
jitter: The jitter for the matrix inverse.
Return:
A two-element tuple containing
- omega_f_nat_mean_new: A `batch_shape x M x (N + P) x P x 2` containing the
new omega natural mean factors for the objective matrix.
- omega_f_nat_cov_new: A `batch_shape x M x (N + P) x P x 2 x 2` containing
the new omega natural covariance factors for the objective matrix.
"""
try:
omega_f_nat_mean_new, omega_f_nat_cov_new = _update_omega(
mean_f=mean_f,
cov_f=cov_f,
omega_f_nat_mean=omega_f_nat_mean,
omega_f_nat_cov=omega_f_nat_cov,
N=N,
P=P,
M=M,
maximize=maximize,
jitter=jitter,
)
check_no_nans(omega_f_nat_mean_new)
check_no_nans(omega_f_nat_cov_new)
return omega_f_nat_mean_new, omega_f_nat_cov_new
    except (RuntimeError, InputDataError):
return omega_f_nat_mean, omega_f_nat_cov
def _update_marginals(
pred_f_nat_mean: Tensor,
pred_f_nat_cov: Tensor,
omega_f_nat_mean: Tensor,
omega_f_nat_cov: Tensor,
N: int,
P: int,
) -> Tuple[Tensor, Tensor]:
r"""Computes the new marginal by summing up all the natural factors.
Args:
pred_f_nat_mean: A `batch_shape x M x (N + P)`-dim Tensor containing the
natural predictive mean matrix for the objectives.
pred_f_nat_cov: A `batch_shape x M x (N + P) x (N + P)`-dim Tensor containing
the natural predictive covariance matrix for the objectives.
omega_f_nat_mean: A `batch_shape x M x (N + P) x P x 2`-dim Tensor containing
the omega natural mean factors for the objective matrix.
omega_f_nat_cov: A `batch_shape x M x (N + P) x P x 2 x 2`-dim Tensor
containing the omega natural covariance factors for the objective matrix.
N: The number of design points.
P: The number of Pareto optimal points.
Returns:
A two-element tuple containing
- nat_mean_f: A `batch_shape x M x (N + P)`-dim Tensor containing the updated
natural mean matrix for the objectives.
- nat_cov_f: A `batch_shape x M x (N + P) x (N + P)`-dim Tensor containing
the updated natural predictive covariance matrix for the objectives.
"""
# `batch_shape x M x (N + P)`
nat_mean_f = pred_f_nat_mean.clone()
    # `batch_shape x M x (N + P) x (N + P)`
nat_cov_f = pred_f_nat_cov.clone()
################################################################################
# UPDATE THE OBJECTIVES
################################################################################
# remove Pareto diagonal
# zero out the diagonal
omega_f_nat_mean[..., range(N, N + P), range(P), :] = 0
omega_f_nat_cov[..., range(N, N + P), range(P), :, :] = 0
# `batch_shape x M x (N + P)`
# sum over the pareto dim
nat_mean_f = nat_mean_f + omega_f_nat_mean[..., 0].sum(dim=-1)
# `batch_shape x M x P`
# sum over the data dim
nat_mean_f[..., N:] = nat_mean_f[..., N:] + omega_f_nat_mean[..., 1].sum(dim=-2)
# `batch_shape x M x (N + P)`
nat_cov_f[..., range(N + P), range(N + P)] = nat_cov_f[
..., range(N + P), range(N + P)
] + omega_f_nat_cov[..., 0, 0].sum(dim=-1)
# `batch_shape x M x P`
nat_cov_f[..., range(N, N + P), range(N, N + P)] = nat_cov_f[
..., range(N, N + P), range(N, N + P)
] + omega_f_nat_cov[..., 1, 1].sum(dim=-2)
for p in range(P):
# `batch_shape x M x (N + P)`
nat_cov_f[..., range(N + P), N + p] = (
nat_cov_f[..., range(N + P), N + p] + omega_f_nat_cov[..., p, 0, 1]
)
# `batch_shape x M x (N + P)`
nat_cov_f[..., N + p, range(N + P)] = (
nat_cov_f[..., N + p, range(N + P)] + omega_f_nat_cov[..., p, 1, 0]
)
return nat_mean_f, nat_cov_f
def _damped_update(
old_factor: Tensor,
new_factor: Tensor,
damping_factor: Tensor,
) -> Tensor:
r"""Computes the damped updated for natural factor.
Args:
old_factor: A `batch_shape x param_shape`-dim Tensor containing the old
natural factor.
new_factor: A `batch_shape x param_shape`-dim Tensor containing the new
natural factor.
damping_factor: A `batch_shape`-dim Tensor containing the damping factor.
Returns:
A `batch_shape x param_shape`-dim Tensor containing the updated natural
factor.
"""
bs = damping_factor.shape
fs = old_factor.shape
df = damping_factor
for _ in range(len(fs[len(bs) :])):
df = df.unsqueeze(-1)
return df * new_factor + (1 - df) * old_factor
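# Illustrative sketch (not part of the library API): the damped update is the
# convex combination `df * new + (1 - df) * old`, with the damping factor
# broadcast over the parameter dimensions. Sizes below are assumptions.
def _example_damped_update() -> None:
    old = torch.zeros(4, 3, 3)
    new = torch.ones(4, 3, 3)
    # one damping factor per batch element
    df = torch.tensor([0.0, 0.25, 0.5, 1.0])
    updated = _damped_update(old_factor=old, new_factor=new, damping_factor=df)
    # each batch element is a constant matrix equal to its damping factor
    assert torch.allclose(updated, df.view(-1, 1, 1).expand_as(old))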
def _update_damping(
nat_cov: Tensor,
nat_cov_new: Tensor,
damping_factor: Tensor,
jitter: Tensor,
) -> Tuple[Tensor, Tensor]:
r"""Updates the damping factor whilst ensuring the covariance matrix is positive
definite by trying a Cholesky decomposition.
Args:
nat_cov: A `batch_shape x R x R`-dim Tensor containing the old natural
covariance matrix.
nat_cov_new: A `batch_shape x R x R`-dim Tensor containing the new natural
covariance matrix.
        damping_factor: A `batch_shape`-dim Tensor containing the damping factor.
jitter: The amount of jitter added before matrix inversion.
Returns:
A two-element tuple containing
        - A `batch_shape`-dim Tensor containing the updated damping factor.
- A `batch_shape x R x R`-dim Tensor containing the Cholesky factor.
"""
tkwargs = {"dtype": nat_cov.dtype, "device": nat_cov.device}
df = damping_factor
jitter = jitter * torch.diag_embed(torch.ones(nat_cov.shape[:-1], **tkwargs))
_, info = torch.linalg.cholesky_ex(nat_cov + jitter)
if torch.sum(info) > 1:
raise ValueError(
"The previous covariance is not positive semi-definite. "
"This usually happens if the predictive covariance is "
"ill-conditioned and the added jitter is insufficient."
)
damped_nat_cov = _damped_update(
old_factor=nat_cov, new_factor=nat_cov_new, damping_factor=df
)
cholesky_factor, info = torch.linalg.cholesky_ex(damped_nat_cov)
contains_nans = torch.any(torch.isnan(cholesky_factor)).item()
run = 0
while torch.sum(info) > 1 or contains_nans:
# propose an alternate damping factor which is half the original
df_alt = 0.5 * df
# hard threshold at 1e-3
df_alt = torch.where(
df_alt > 1e-3, df_alt, torch.zeros(df_alt.shape, **tkwargs)
)
# only change the damping factor where psd failure occurs
df_new = torch.where(info == 0, df, df_alt)
# new damped covariance
damped_nat_cov = _damped_update(nat_cov, nat_cov_new, df_new)
# try cholesky decomposition
cholesky_factor, info = torch.linalg.cholesky_ex(damped_nat_cov + jitter)
contains_nans = torch.any(torch.isnan(cholesky_factor)).item()
df = df_new
run = run + 1
return df, cholesky_factor
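# Illustrative sketch (not part of the library API): when both the old and the
# damped natural covariance admit a Cholesky factorization, the damping factor
# is returned unchanged. The toy matrices below are assumptions for illustration.
def _example_update_damping() -> None:
    nat_cov = torch.eye(3).expand(2, 3, 3)
    nat_cov_new = 2.0 * torch.eye(3).expand(2, 3, 3)
    df = torch.ones(2)
    df_new, chol = _update_damping(
        nat_cov=nat_cov,
        nat_cov_new=nat_cov_new,
        damping_factor=df,
        jitter=torch.tensor(1e-6),
    )
    # no PSD failure occurs, so the damping factor is unchanged
    assert torch.equal(df_new, df) and chol.shape == nat_cov.shape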
def _update_damping_when_converged(
mean_old: Tensor,
mean_new: Tensor,
cov_old: Tensor,
cov_new: Tensor,
damping_factor: Tensor,
iteration: Tensor,
threshold: float = 1e-3,
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Set the damping factor to 0 once converged. Convergence is determined by the
relative change in the entries of the mean and covariance matrix.
Args:
mean_old: A `batch_shape x R`-dim Tensor containing the old natural mean
matrix for the objective.
mean_new: A `batch_shape x R`-dim Tensor containing the new natural mean
matrix for the objective.
cov_old: A `batch_shape x R x R`-dim Tensor containing the old natural
covariance matrix for the objective.
cov_new: A `batch_shape x R x R`-dim Tensor containing the new natural
covariance matrix for the objective.
        damping_factor: A `batch_shape`-dim Tensor containing the damping factor.
        iteration: The current iteration number.
        threshold: The relative tolerance used to determine convergence.
    Returns:
        A three-element tuple containing
        - A `batch_shape`-dim Tensor containing the updated damping factor.
        - A `batch_shape x R`-dim Tensor containing the change in the mean.
        - A `batch_shape x R x R`-dim Tensor containing the change in the covariance.
"""
df = damping_factor.clone()
delta_mean = mean_new - mean_old
delta_cov = cov_new - cov_old
am = torch.amax(abs(mean_old), dim=-1)
ac = torch.amax(abs(cov_old), dim=(-2, -1))
if iteration > 2:
mask_mean = torch.amax(abs(delta_mean), dim=-1) < threshold * am
mask_cov = torch.amax(abs(delta_cov), dim=(-2, -1)) < threshold * ac
mask = torch.logical_and(mask_mean, mask_cov)
df[mask] = 0
return df, delta_mean, delta_cov
def _augment_factors_with_cached_factors(
q: int,
N: int,
omega_f_nat_mean: Tensor,
cached_omega_f_nat_mean: Tensor,
omega_f_nat_cov: Tensor,
cached_omega_f_nat_cov: Tensor,
) -> Tuple[Tensor, Tensor]:
r"""Incorporate the cached Pareto updated factors in the forward call and
augment them with the previously computed factors.
Args:
q: The batch size.
N: The number of training points.
omega_f_nat_mean: A `batch_shape x num_pareto_samples x M x (q + P) x P x 2`
-dim Tensor containing the omega natural mean for the objective at `X`.
cached_omega_f_nat_mean: A `num_pareto_samples x M x (N + P) x P x 2`-dim
Tensor containing the omega natural mean for the objective at `X`.
omega_f_nat_cov: A `batch_shape x num_pareto_samples x M x (q + P) x P x 2
x 2` -dim Tensor containing the omega natural covariance for the
objective at `X`.
cached_omega_f_nat_cov: A `num_pareto_samples x M x (N + P) x P x 2 x 2`-dim
            Tensor containing the omega natural covariance for the objective at `X`.
Returns:
A two-element tuple containing
- omega_f_nat_mean_new: A `batch_shape x num_pareto_samples x M x (q + P)
x P x 2`-dim Tensor containing the omega natural mean for the objective
at `X`.
- omega_f_nat_cov_new: A `batch_shape x num_pareto_samples x M x (q + P) x
P x 2 x 2`-dim Tensor containing the omega natural covariance for the
objective at `X`.
"""
##############################################################################
# omega_f_nat_mean
##############################################################################
# retrieve the natural mean contribution of the Pareto block omega(x_p, x_p) for
# the objective
exp_cached_omega_f_nat_mean = cached_omega_f_nat_mean[..., N:, :, :].expand(
omega_f_nat_mean[..., q:, :, :].shape
)
omega_f_nat_mean[..., q:, :, :] = exp_cached_omega_f_nat_mean
##############################################################################
# omega_f_nat_cov
##############################################################################
# retrieve the natural covariance contribution of the Pareto block
# omega(x_p, x_p) for the objective
exp_omega_f_nat_cov = cached_omega_f_nat_cov[..., N:, :, :, :].expand(
omega_f_nat_cov[..., q:, :, :, :].shape
)
omega_f_nat_cov[..., q:, :, :, :] = exp_omega_f_nat_cov
return omega_f_nat_mean, omega_f_nat_cov
def _compute_log_determinant(cov: Tensor, q: int) -> Tensor:
r"""Computes the sum of the log determinants of a block diagonal covariance
matrices averaged over the Pareto samples.
Args:
cov: A `batch_shape x num_pareto_samples x num_outputs x (q + P) x (q + P)`
-dim Tensor containing the covariance matrices.
q: The batch size.
Return:
        log_det_cov: A `batch_shape`-dim Tensor containing the sum over the outputs
            of the log determinants, averaged over the Pareto samples.
"""
log_det_cov = torch.logdet(cov[..., 0:q, 0:q])
check_no_nans(log_det_cov)
return log_det_cov.sum(dim=-1).mean(dim=-1)
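# Illustrative sketch (not part of the library API): the log-determinant is taken
# over the `q x q` upper-left block of each covariance, summed over the outputs
# and averaged over the Pareto samples. Sizes below are assumptions.
def _example_compute_log_determinant() -> None:
    num_pareto_samples, num_outputs, q, P = 3, 2, 2, 4
    # identity covariances have zero log-determinant
    cov = torch.eye(q + P).expand(num_pareto_samples, num_outputs, q + P, q + P)
    log_det = _compute_log_determinant(cov=cov, q=q)
    assert torch.allclose(log_det, torch.zeros(()))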
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Multi-output extensions of the risk measures, implemented as Monte-Carlo
objectives. Except for MVaR, the risk measures are computed over each
output dimension independently. In contrast, MVaR is computed using the
joint distribution of the outputs, and provides more accurate risk estimates.
References
.. [Prekopa2012MVaR]
A. Prekopa. Multivariate value at risk and related topics.
Annals of Operations Research, 2012.
.. [Cousin2013MVaR]
A. Cousin and E. Di Bernardino. On multivariate extensions of Value-at-Risk.
Journal of Multivariate Analysis, 2013.
.. [Daulton2022MARS]
    S. Daulton, S. Cakmak, M. Balandat, M. Osborne, E. Zhou, and E. Bakshy.
Robust multi-objective Bayesian optimization under input noise.
Proceedings of the 39th International Conference on Machine Learning, 2022.
"""
import warnings
from abc import ABC, abstractmethod
from math import ceil
from typing import Callable, List, Optional, Union
import torch
from botorch.acquisition.multi_objective.objective import (
IdentityMCMultiOutputObjective,
MCMultiOutputObjective,
)
from botorch.acquisition.risk_measures import CVaR, RiskMeasureMCObjective, VaR
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.model import Model
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.transforms import normalize
from torch import Tensor
class MultiOutputRiskMeasureMCObjective(
RiskMeasureMCObjective, MCMultiOutputObjective, ABC
):
r"""Objective transforming the multi-output posterior samples to samples
of a multi-output risk measure.
The risk measure is calculated over joint q-batch samples from the posterior.
If the q-batch includes samples corresponding to multiple inputs, it is assumed
    that the first `n_w` samples correspond to the first input, the second `n_w`
    samples correspond to the second input, etc.
:meta private:
"""
def __init__(
self,
n_w: int,
preprocessing_function: Optional[Callable[[Tensor], Tensor]] = None,
) -> None:
r"""Transform the posterior samples to samples of a risk measure.
Args:
n_w: The size of the `w_set` to calculate the risk measure over.
preprocessing_function: A preprocessing function to apply to the
samples before computing the risk measure. This can be used to
remove non-objective outcomes or to align all outcomes for
maximization. For constrained optimization, this should also
apply feasibility-weighting to samples. Given a `batch x m`-dim
tensor of samples, this should return a `batch x m'`-dim tensor.
"""
super().__init__(n_w=n_w, preprocessing_function=preprocessing_function)
def _prepare_samples(self, samples: Tensor) -> Tensor:
r"""Prepare samples for risk measure calculations by scaling and
separating out the q-batch dimension.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
Returns:
A `sample_shape x batch_shape x q x n_w x m'`-dim tensor of
prepared samples.
"""
samples = self.preprocessing_function(samples)
return samples.view(*samples.shape[:-2], -1, self.n_w, samples.shape[-1])
@abstractmethod
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the risk measure corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q x m'`-dim tensor of risk measure samples.
"""
pass # pragma: no cover
class MultiOutputExpectation(MultiOutputRiskMeasureMCObjective):
r"""A multi-output MC expectation risk measure.
For unconstrained problems, we recommend using the `ExpectationPosteriorTransform`
instead. `ExpectationPosteriorTransform` directly transforms the posterior
distribution over `q * n_w` to a posterior of `q` expectations, significantly
reducing the cost of posterior sampling as a result.
"""
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the expectation of the given samples. Expectation is
calculated over each `n_w` samples in the q-batch dimension.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q x m'`-dim tensor of expectation samples.
"""
prepared_samples = self._prepare_samples(samples)
return prepared_samples.mean(dim=-2)
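# Illustrative sketch (not part of the library API): computing the expectation
# risk measure over `n_w = 4` samples per design point. The sizes and the
# explicit identity preprocessing below are assumptions made for illustration.
def _example_multi_output_expectation() -> None:
    n_w, q, m = 4, 2, 3
    risk_measure = MultiOutputExpectation(
        n_w=n_w, preprocessing_function=IdentityMCMultiOutputObjective()
    )
    # `sample_shape x (q * n_w) x m` posterior samples
    samples = torch.randn(5, q * n_w, m)
    rm_samples = risk_measure(samples)
    # one expectation per sample, design point and output
    assert rm_samples.shape == torch.Size([5, q, m])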
class IndependentCVaR(CVaR, MultiOutputRiskMeasureMCObjective):
r"""The multi-output Conditional Value-at-Risk risk measure that operates on
each output dimension independently. Since this does not consider the joint
    distribution of the outputs (i.e., that the outputs were evaluated on the same
perturbed input and are not independent), the risk estimates provided by
`IndependentCVaR` in general are more optimistic than the definition of CVaR
would suggest.
The Conditional Value-at-Risk measures the expectation of the worst outcomes
(small rewards or large losses) with a total probability of `1 - alpha`. It
is commonly defined as the conditional expectation of the reward function,
with the condition that the reward is smaller than the corresponding
Value-at-Risk (also defined below).
NOTE: Due to the use of a discrete `w_set` of samples, the VaR and CVaR
calculated here are (possibly biased) Monte-Carlo approximations of the
true risk measures.
"""
def _get_sorted_prepared_samples(self, samples: Tensor) -> Tensor:
r"""Get the prepared samples that are sorted over the `n_w` dimension.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
Returns:
A `sample_shape x batch_shape x q x n_w x m'`-dim tensor of sorted samples.
"""
prepared_samples = self._prepare_samples(samples)
return prepared_samples.sort(dim=-2, descending=True).values
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the CVaR corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q x m'`-dim tensor of CVaR samples.
"""
sorted_samples = self._get_sorted_prepared_samples(samples)
return sorted_samples[..., self.alpha_idx :, :].mean(dim=-2)
class IndependentVaR(IndependentCVaR):
r"""The multi-output Value-at-Risk risk measure that operates on each output
dimension independently. For the same reasons as `IndependentCVaR`, the risk
estimates provided by this are in general more optimistic than the definition
of VaR would suggest.
Value-at-Risk measures the smallest possible reward (or largest possible loss)
after excluding the worst outcomes with a total probability of `1 - alpha`. It
is commonly used in financial risk management, and it corresponds to the
`1 - alpha` quantile of a given random variable.
"""
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the VaR corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q x m'`-dim tensor of VaR samples.
"""
sorted_samples = self._get_sorted_prepared_samples(samples)
return sorted_samples[..., self.alpha_idx, :]
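# Illustrative sketch (not part of the library API): per-output VaR over a small
# `w_set`. The sizes, the `alpha` level and the explicit identity preprocessing
# below are assumptions made purely for illustration.
def _example_independent_var() -> None:
    risk_measure = IndependentVaR(
        alpha=0.6, n_w=5, preprocessing_function=IdentityMCMultiOutputObjective()
    )
    # a single design point (q=1) with n_w=5 samples of m=2 outputs
    samples = torch.tensor(
        [[1.0, 10.0], [2.0, 20.0], [3.0, 30.0], [4.0, 40.0], [5.0, 50.0]]
    )
    var = risk_measure(samples)
    # one VaR value per design point and output
    assert var.shape == torch.Size([1, 2])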
class MultiOutputWorstCase(MultiOutputRiskMeasureMCObjective):
r"""The multi-output worst-case risk measure."""
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the worst-case measure corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q x m'`-dim tensor of worst-case samples.
"""
prepared_samples = self._prepare_samples(samples)
return prepared_samples.min(dim=-2).values
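# Illustrative sketch (not part of the library API): the worst-case risk measure
# is the per-output minimum over each `n_w` block. The sizes and the explicit
# identity preprocessing below are assumptions made purely for illustration.
def _example_multi_output_worst_case() -> None:
    risk_measure = MultiOutputWorstCase(
        n_w=3, preprocessing_function=IdentityMCMultiOutputObjective()
    )
    # a single design point (q=1) with n_w=3 samples of m=2 outputs
    samples = torch.tensor([[1.0, 6.0], [2.0, 5.0], [3.0, 4.0]])
    worst = risk_measure(samples)
    assert torch.equal(worst, torch.tensor([[1.0, 4.0]]))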
class MVaR(MultiOutputRiskMeasureMCObjective):
r"""The multivariate Value-at-Risk as introduced in [Prekopa2012MVaR]_.
MVaR is defined as the non-dominated set of points in the extended domain
of the random variable that have multivariate CDF greater than or equal to
`alpha`. Note that MVaR is set valued and the size of the set depends on the
particular realizations of the random variable. [Cousin2013MVaR]_ instead
propose to use the expectation of the set-valued MVaR as the multivariate
VaR. We support this alternative with an `expectation` flag.
"""
_verify_output_shape = False
def __init__(
self,
n_w: int,
alpha: float,
expectation: bool = False,
preprocessing_function: Optional[Callable[[Tensor], Tensor]] = None,
*,
pad_to_n_w: bool = False,
filter_dominated: bool = True,
) -> None:
r"""The multivariate Value-at-Risk.
Args:
n_w: The size of the `w_set` to calculate the risk measure over.
alpha: The risk level of MVaR, float in `(0.0, 1.0]`. Each MVaR value
dominates `alpha` fraction of all observations.
expectation: If True, returns the expectation of the MVaR set as is
done in [Cousin2013MVaR]_. Otherwise, it returns the union of all
values in the MVaR set. Default: False.
preprocessing_function: A preprocessing function to apply to the
samples before computing the risk measure. This can be used to
remove non-objective outcomes or to align all outcomes for
maximization. For constrained optimization, this should also
apply feasibility-weighting to samples. Given a `batch x m`-dim
tensor of samples, this should return a `batch x m'`-dim tensor.
pad_to_n_w: If True, instead of padding up to `k'`, which is the size of
the largest MVaR set across all batches, we pad the MVaR set up to
                `n_w`. This produces a return tensor of known size; however, it may
in general be much larger than the alternative. See `forward` for
more details on the return shape.
NOTE: this is only relevant if `expectation=False`.
filter_dominated: If True, returns the non-dominated subset of
alpha level points (this is MVaR as defined by [Prekopa2012MVaR]_).
Disabling this will make it faster, and may be preferable if
the dominated points will be filtered out later, e.g., while
calculating the hypervolume. Disabling this is not recommended
if `expectation=True`.
"""
super().__init__(n_w=n_w, preprocessing_function=preprocessing_function)
if not 0 < alpha <= 1:
raise ValueError("`alpha` must be in (0.0, 1.0]")
self.alpha = alpha
self.expectation = expectation
self.pad_to_n_w = pad_to_n_w
self.filter_dominated = filter_dominated
def get_mvar_set_cpu(self, Y: Tensor) -> Tensor:
r"""Find MVaR set based on the definition in [Prekopa2012MVaR]_.
NOTE: This is much faster on CPU for large `n_w` than the alternative but it
is significantly slower on GPU. Based on empirical evidence, this is recommended
when running on CPU with `n_w > 64`.
This first calculates the CDF for each point on the extended domain of the
random variable (the grid defined by the given samples), then takes the
values with CDF equal to (rounded if necessary) `alpha`. The non-dominated
subset of these form the MVaR set.
Args:
Y: A `batch x n_w x m`-dim tensor of outcomes. This is currently
restricted to `m = 2` objectives.
TODO: Support `m > 2` objectives.
Returns:
A `batch` length list of `k x m`-dim tensor of MVaR values, where `k`
depends on the corresponding batch inputs. Note that MVaR values in general
are not in-sample points.
"""
if Y.dim() == 3:
return [self.get_mvar_set_cpu(y_) for y_ in Y]
m = Y.shape[-1]
if m != 2: # pragma: no cover
raise ValueError("`get_mvar_set_cpu` only supports `m=2` outcomes!")
# Generate sets of all unique values in each output dimension.
# Note that points in MVaR are bounded from above by the
# independent VaR of each objective. Hence, we only need to
# consider the unique outcomes that are less than or equal to
# the VaR of the independent objectives
var_alpha_idx = ceil(self.alpha * self.n_w) - 1
Y_sorted = Y.topk(Y.shape[0] - var_alpha_idx, dim=0, largest=False).values
unique_outcomes_list = [
Y_sorted[:, i].unique().tolist()[::-1] for i in range(m)
]
# Convert this into a list of m dictionaries mapping values to indices.
unique_outcomes = [
dict(zip(outcomes, range(len(outcomes))))
for outcomes in unique_outcomes_list
]
# Initialize a tensor counting the number of points in Y that a given grid point
# is dominated by. This will essentially be a non-normalized CDF.
counter_tensor = torch.zeros(
[len(outcomes) for outcomes in unique_outcomes],
dtype=torch.long,
device=Y.device,
)
# populate the tensor, counting the dominated points.
# we only need to consider points in Y where at least one
# objective is less than the max objective value in
# unique_outcomes_list
max_vals = torch.tensor(
[o[0] for o in unique_outcomes_list], dtype=Y.dtype, device=Y.device
)
mask = (Y < max_vals).any(dim=-1)
counter_tensor += self.n_w - mask.sum()
Y_pruned = Y[mask]
for y_ in Y_pruned:
starting_idcs = [unique_outcomes[i].get(y_[i].item(), 0) for i in range(m)]
counter_tensor[starting_idcs[0] :, starting_idcs[1] :] += 1
# Get the count alpha-level points should have.
alpha_count = ceil(self.alpha * self.n_w)
# Get the alpha level indices.
alpha_level_indices = (counter_tensor == alpha_count).nonzero(as_tuple=False)
# If there are no exact alpha level points, get the smallest alpha' > alpha
# and find the corresponding alpha level indices.
if alpha_level_indices.numel() == 0:
min_greater_than_alpha = counter_tensor[counter_tensor > alpha_count].min()
alpha_level_indices = (counter_tensor == min_greater_than_alpha).nonzero(
as_tuple=False
)
unique_outcomes = [
torch.as_tensor(list(outcomes.keys()), device=Y.device, dtype=Y.dtype)
for outcomes in unique_outcomes
]
alpha_level_points = torch.stack(
[
unique_outcomes[i][alpha_level_indices[:, i]]
for i in range(len(unique_outcomes))
],
dim=-1,
)
# MVaR is simply the non-dominated subset of alpha level points.
if self.filter_dominated:
mask = is_non_dominated(alpha_level_points)
mvar = alpha_level_points[mask]
else:
mvar = alpha_level_points
return mvar
def get_mvar_set_gpu(self, Y: Tensor) -> Tensor:
r"""Find MVaR set based on the definition in [Prekopa2012MVaR]_.
NOTE: This is much faster on GPU than the alternative but it scales very poorly
on CPU as `n_w` increases. This should be preferred if a GPU is available or
when `n_w <= 64`. In addition, this supports `m >= 2` outcomes (vs `m = 2` for
the CPU version) and it should be used if `m > 2`.
This first calculates the CDF for each point on the extended domain of the
random variable (the grid defined by the given samples), then takes the
values with CDF equal to (rounded if necessary) `alpha`. The non-dominated
subset of these form the MVaR set.
Args:
Y: A `batch x n_w x m`-dim tensor of observations.
Returns:
A `batch` length list of `k x m`-dim tensor of MVaR values, where `k`
depends on the corresponding batch inputs. Note that MVaR values in general
are not in-sample points.
"""
if Y.dim() == 2:
Y = Y.unsqueeze(0)
batch, m = Y.shape[0], Y.shape[-1]
# Note that points in MVaR are bounded from above by the
# independent VaR of each objective. Hence, we only need to
# consider the unique outcomes that are less than or equal to
# the VaR of the independent objectives
var_alpha_idx = ceil(self.alpha * self.n_w) - 1
n_points = Y.shape[-2] - var_alpha_idx
Y_sorted = Y.topk(n_points, dim=-2, largest=False).values
# `y_grid` is the grid formed by all inputs in each batch.
if m == 2:
# This is significantly faster but only works with m=2.
y_grid = torch.stack(
[
Y_sorted[..., 0].repeat_interleave(repeats=n_points, dim=-1),
Y_sorted[..., 1].repeat(1, n_points),
],
dim=-1,
)
else:
y_grid = torch.stack(
[
torch.stack(
torch.meshgrid(
[Y_sorted[b, :, i] for i in range(m)], indexing=None
),
dim=-1,
).view(-1, m)
for b in range(batch)
],
dim=0,
)
# Get the non-normalized CDF.
cdf = (Y.unsqueeze(-2) >= y_grid.unsqueeze(-3)).all(dim=-1).sum(dim=-2)
# Get the alpha level points
alpha_count = ceil(self.alpha * self.n_w)
# NOTE: Need to loop here since mvar may have different shapes.
mvar = []
for b in range(batch):
alpha_level_points = y_grid[b][cdf[b] == alpha_count]
# If there are no exact alpha level points, get the smallest alpha' > alpha
# and find the corresponding alpha level indices.
if alpha_level_points.numel() == 0:
min_greater_than_alpha = cdf[b][cdf[b] > alpha_count].min()
alpha_level_points = y_grid[b][cdf[b] == min_greater_than_alpha]
# MVaR is the non-dominated subset of alpha level points.
if self.filter_dominated:
mask = is_non_dominated(alpha_level_points)
mvar.append(alpha_level_points[mask])
else:
mvar.append(alpha_level_points)
return mvar
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
r"""Calculate the MVaR corresponding to the given samples.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
X: A `batch_shape x q x d`-dim tensor of inputs. Ignored.
Returns:
A `sample_shape x batch_shape x q x m'`-dim tensor of MVaR values,
if `self.expectation=True`.
Otherwise, this returns a `sample_shape x batch_shape x (q * k') x m'`-dim
tensor, where `k'` is the maximum `k` across all batches that is returned
by `get_mvar_set_...`. Each `(q * k') x m'` corresponds to the `k` MVaR
values for each `q` batch of `n_w` inputs, padded up to `k'` by repeating
the last element. If `self.pad_to_n_w`, we set `k' = self.n_w`, producing
a deterministic return shape.
"""
batch_shape, m = samples.shape[:-2], samples.shape[-1]
prepared_samples = self._prepare_samples(samples)
# This is -1 x n_w x m.
prepared_samples = prepared_samples.reshape(-1, *prepared_samples.shape[-2:])
# Get the mvar set using the appropriate method based on device, m & n_w.
# NOTE: The `n_w <= 64` part is based on testing on a 24 core CPU.
# `get_mvar_set_gpu` heavily relies on parallelized batch computations and
# may scale worse on CPUs with fewer cores.
# Using `no_grad` here since `MVaR` is not differentiable.
with torch.no_grad():
if (
samples.device == torch.device("cpu")
and m == 2
                and prepared_samples.shape[-2] > 64
):
mvar_set = self.get_mvar_set_cpu(prepared_samples)
else:
mvar_set = self.get_mvar_set_gpu(prepared_samples)
if samples.requires_grad:
# TODO: Investigate differentiability of MVaR.
warnings.warn(
"Got `samples` that requires grad, but computing MVaR involves "
"non-differentable operations and the results will not be "
"differentiable. This may lead to errors down the line!",
RuntimeWarning,
)
# Set the `pad_size` to either `self.n_w` or the size of the largest MVaR set.
pad_size = self.n_w if self.pad_to_n_w else max([_.shape[0] for _ in mvar_set])
padded_mvar_list = []
for mvar_ in mvar_set:
if self.expectation:
padded_mvar_list.append(mvar_.mean(dim=0))
else:
# Repeat the last entry to make `mvar_set` `pad_size x m`.
repeats_needed = pad_size - mvar_.shape[0]
padded_mvar_list.append(
torch.cat([mvar_, mvar_[-1].expand(repeats_needed, m)], dim=0)
)
mvars = torch.stack(padded_mvar_list, dim=0)
return mvars.view(*batch_shape, -1, m)
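# Illustrative sketch (not part of the library API): computing the expected MVaR
# of a small two-objective sample. The sizes, the `expectation` flag and the
# explicit identity preprocessing below are assumptions made for illustration.
def _example_mvar() -> None:
    mvar = MVaR(
        n_w=8,
        alpha=0.5,
        expectation=True,
        preprocessing_function=IdentityMCMultiOutputObjective(),
    )
    # a single design point (q=1) with n_w=8 joint samples of m=2 outputs
    samples = torch.rand(8, 2)
    values = mvar(samples)
    # with `expectation=True` the MVaR set is averaged into a single point
    assert values.shape == torch.Size([1, 2])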
class MARS(VaR, MultiOutputRiskMeasureMCObjective):
r"""MVaR Approximation based on Random Scalarizations as introduced
in [Daulton2022MARS]_.
This approximates MVaR via VaR of Chebyshev scalarizations, where each
scalarization corresponds to a point in the MVaR set. As implemented,
this uses one set of scalarization weights to approximate a single MVaR value.
Note that due to the normalization within the Chebyshev scalarization,
the output of this risk measure may not be on the same scale as its inputs.
"""
_is_mo: bool = False
def __init__(
self,
alpha: float,
n_w: int,
chebyshev_weights: Union[Tensor, List[float]],
baseline_Y: Optional[Tensor] = None,
ref_point: Optional[Union[Tensor, List[float]]] = None,
preprocessing_function: Optional[Callable[[Tensor], Tensor]] = None,
) -> None:
r"""Transform the posterior samples to samples of a risk measure.
Args:
alpha: The risk level, float in `(0.0, 1.0]`.
n_w: The size of the perturbation set to calculate the risk measure over.
chebyshev_weights: The weights to use in the Chebyshev scalarization.
The Chebyshev scalarization is applied before computing VaR.
The weights must be non-negative. See `preprocessing_function` to
support minimization objectives.
            baseline_Y: An `n' x m`-dim tensor of baseline outcomes to use in
determining the normalization bounds for Chebyshev scalarization.
It is recommended to set this via `set_baseline_Y` helper.
ref_point: An optional MVaR reference point to use in determining
the normalization bounds for Chebyshev scalarization.
preprocessing_function: A preprocessing function to apply to the
samples before computing the risk measure. This can be used to
remove non-objective outcomes or to align all outcomes for
maximization. For constrained optimization, this should also
apply feasibility-weighting to samples.
"""
if preprocessing_function is None:
preprocessing_function = IdentityMCMultiOutputObjective()
super().__init__(
alpha=alpha,
n_w=n_w,
preprocessing_function=preprocessing_function,
)
self.chebyshev_weights = torch.as_tensor(chebyshev_weights)
self.baseline_Y = baseline_Y
self.register_buffer(
"ref_point", torch.as_tensor(ref_point) if ref_point is not None else None
)
self.mvar = MVaR(n_w=self.n_w, alpha=self.alpha)
self._chebyshev_objective = None
def set_baseline_Y(
self,
model: Optional[Model],
X_baseline: Optional[Tensor],
Y_samples: Optional[Tensor] = None,
) -> None:
r"""Set the `baseline_Y` based on the MVaR predictions of the `model`
for `X_baseline`.
Args:
model: The model being used for MARS optimization. Must have a compatible
`InputPerturbation` transform attached. Ignored if `Y_samples` is given.
X_baseline: An `n x d`-dim tensor of previously evaluated points.
Ignored if `Y_samples` is given.
            Y_samples: An optional `(n * n_w) x m`-dim tensor of predictions. If given,
instead of sampling from the model, these are used.
"""
if Y_samples is None:
with torch.no_grad():
Y = model.posterior(X_baseline.unsqueeze(-2)).mean.squeeze(-2)
else:
if model is not None or X_baseline is not None:
warnings.warn(
"`model` and `X_baseline` are ignored when `Y_samples` is "
"provided to `MARS.set_baseline_Y`.",
BotorchWarning,
)
Y = Y_samples
Y = self.preprocessing_function(Y)
Y = self.mvar(Y).view(-1, Y.shape[-1])
Y = Y[is_non_dominated(Y)]
self.baseline_Y = Y
@property
def chebyshev_weights(self) -> Tensor:
r"""The weights used in Chebyshev scalarization."""
return self._chebyshev_weights
@chebyshev_weights.setter
def chebyshev_weights(self, chebyshev_weights: Union[Tensor, List[float]]) -> None:
r"""Update the Chebyshev weights.
Invalidates the cached Chebyshev objective.
Args:
chebyshev_weights: The weights to use in the Chebyshev scalarization.
The Chebyshev scalarization is applied before computing VaR.
The weights must be non-negative. See `preprocessing_function` to
support minimization objectives.
"""
self._chebyshev_objective = None
chebyshev_weights = torch.as_tensor(chebyshev_weights)
if torch.any(chebyshev_weights < 0):
raise UnsupportedError("Negative weights are not supported in MARS.")
if chebyshev_weights.dim() != 1:
raise UnsupportedError("Batched weights are not supported in MARS.")
self.register_buffer("_chebyshev_weights", chebyshev_weights)
@property
def baseline_Y(self) -> Optional[Tensor]:
r"""Baseline outcomes used indetermining the normalization bounds."""
return self._baseline_Y
@baseline_Y.setter
def baseline_Y(self, baseline_Y: Optional[Tensor]) -> None:
r"""Update the baseline outcomes.
Invalidates the cached Chebyshev objective.
Args:
            baseline_Y: An `n' x m`-dim tensor of baseline outcomes to use in
determining the normalization bounds for Chebyshev scalarization.
It is recommended to set this via `set_baseline_Y` helper.
"""
self._chebyshev_objective = None
self.register_buffer("_baseline_Y", baseline_Y)
@property
def chebyshev_objective(self) -> Callable[[Tensor, Optional[Tensor]], Tensor]:
r"""The objective for applying the Chebyshev scalarization."""
if self._chebyshev_objective is None:
self._construct_chebyshev_objective()
return self._chebyshev_objective
def _construct_chebyshev_objective(self) -> None:
r"""Construct a Chebyshev scalarization. Outcomes are first normalized to [0,1],
then the Chebyshev scalarization is applied.
NOTE: This is a modified version of the `get_chebyshev_scalarization` helper.
It doesn't support negative weights. All objectives should be aligned for
maximization using `preprocessing_function`.
"""
if self.baseline_Y is None:
raise RuntimeError(
"baseline_Y must be set before constructing the Chebyshev objective."
)
ref_point = self.ref_point
if ref_point is not None:
ref_point = ref_point.to(self.baseline_Y)
Y_bounds = self._get_Y_normalization_bounds(
Y=self.baseline_Y, ref_point=ref_point
)
if ref_point is not None:
ref_point = normalize(ref_point.unsqueeze(0), bounds=Y_bounds).squeeze(0)
def chebyshev_obj(Y: Tensor, X: Optional[Tensor] = None) -> Tensor:
Y = self.preprocessing_function(Y)
Y = normalize(Y, bounds=Y_bounds)
if ref_point is not None:
Y = Y - ref_point
product = torch.einsum("...m,m->...m", Y, self.chebyshev_weights.to(Y))
return product.min(dim=-1).values
self._chebyshev_objective = chebyshev_obj
def _prepare_samples(self, samples: Tensor) -> Tensor:
r"""Prepare samples for VaR computation by applying the Chebyshev scalarization
and separating out the q-batch dimension.
Args:
samples: A `sample_shape x batch_shape x (q * n_w) x m`-dim tensor of
posterior samples. The q-batches should be ordered so that each
                `n_w` block of samples corresponds to the same input.
Returns:
A `sample_shape x batch_shape x q x n_w`-dim tensor of prepared samples.
"""
samples = self.chebyshev_objective(samples)
return samples.view(*samples.shape[:-1], -1, self.n_w)
@staticmethod
def _get_Y_normalization_bounds(
Y: Tensor,
ref_point: Optional[Tensor] = None,
) -> Tensor:
r"""Get normalization bounds for scalarizations.
Args:
Y: A `n x m`-dim tensor of outcomes.
ref_point: The reference point.
Returns:
A `2 x m`-dim tensor containing the normalization bounds.
"""
if ref_point is not None:
ref_point = ref_point.to(Y)
if Y.ndim != 2:
raise UnsupportedError("Batched Y is not supported.")
if Y.shape[-2] == 0:
# If there are no observations, return standard bounds.
Y_bounds = torch.zeros(2, Y.shape[-1], dtype=Y.dtype, device=Y.device)
Y_bounds[1] = 1.0
return Y_bounds
pareto_Y = Y[is_non_dominated(Y)]
if pareto_Y.shape[-2] == 1:
if ref_point is not None and (pareto_Y > ref_point).all():
Y_bounds = torch.cat([ref_point.unsqueeze(0), pareto_Y], dim=0)
else:
# If there is only one observation, set the bounds to be [Y_m, Y_m + 1]
# for each objective m. This ensures we do not divide by zero.
Y_bounds = torch.cat([pareto_Y, pareto_Y + 1], dim=0)
else:
if ref_point is None:
better_than_ref = torch.ones(
pareto_Y.shape[0], device=pareto_Y.device, dtype=torch.long
)
else:
better_than_ref = (pareto_Y > ref_point).all(dim=-1)
if ref_point is not None and better_than_ref.any():
nadir = ref_point
pareto_Y = pareto_Y[better_than_ref]
else:
nadir = pareto_Y.min(dim=-2).values
ideal = pareto_Y.max(dim=-2).values
Y_bounds = torch.stack([nadir, ideal])
# If any of the lower bounds is equal to the upper bound, increase the
# upper bound to prevent division by zero.
Y_range = Y_bounds.max(dim=0).values - Y_bounds.min(dim=0).values
mask = Y_range <= 0
Y_bounds[1, mask] = Y_bounds[1, mask] + 1.0
return Y_bounds
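# Illustrative sketch (not part of the library API): approximating MVaR via the
# VaR of a Chebyshev scalarization. The weights, sizes and baseline outcomes
# below are assumptions made purely for illustration.
def _example_mars() -> None:
    mars = MARS(
        alpha=0.8,
        n_w=4,
        chebyshev_weights=[0.5, 0.5],
        baseline_Y=torch.rand(10, 2),
    )
    # `(q * n_w) x m` posterior samples for q=3 design points
    samples = torch.rand(12, 2)
    scalarized_var = mars(samples)
    # one VaR value of the scalarization per design point
    assert scalarized_var.shape == torch.Size([3])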
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Acquisition functions for max-value entropy search for multi-objective
Bayesian optimization (MESMO).
References
.. [Belakaria2019]
S. Belakaria, A. Deshwal, J. R. Doppa. Max-value Entropy Search
for Multi-Objective Bayesian Optimization. Advances in Neural
Information Processing Systems, 32. 2019.
"""
from __future__ import annotations
from math import pi
from typing import Any, Callable, Optional, Tuple, Union
import torch
from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy
from botorch.acquisition.multi_objective.joint_entropy_search import (
LowerBoundMultiObjectiveEntropySearch,
)
from botorch.acquisition.multi_objective.monte_carlo import (
MultiObjectiveMCAcquisitionFunction,
)
from botorch.models.converter import (
batched_multi_output_to_single_output,
model_list_to_batched,
)
from botorch.models.model import Model
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
from torch import Tensor
class qMultiObjectiveMaxValueEntropy(
qMaxValueEntropy, MultiObjectiveMCAcquisitionFunction
):
r"""The acquisition function for MESMO.
This acquisition function computes the mutual information of
Pareto frontier and a candidate point. See [Belakaria2019]_ for
a detailed discussion.
q > 1 is supported through cyclic optimization and fantasies.
    Noisy observations are supported by computing the information gain with
observation noise as in Appendix C in [Takeno2020mfmves]_.
Note: this only supports maximization.
Attributes:
_default_sample_shape: The `sample_shape` for the default sampler.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> MESMO = qMultiObjectiveMaxValueEntropy(model, sample_pfs)
>>> mesmo = MESMO(test_X)
"""
_default_sample_shape = torch.Size([128])
def __init__(
self,
model: Model,
sample_pareto_frontiers: Callable[[Model], Tensor],
num_fantasies: int = 16,
X_pending: Optional[Tensor] = None,
sampler: Optional[MCSampler] = None,
**kwargs: Any,
) -> None:
r"""Multi-objective max-value entropy search acquisition function.
Args:
model: A fitted multi-output model.
sample_pareto_frontiers: A callable that takes a model and returns a
`num_samples x n' x m`-dim tensor of outcomes to use for constructing
`num_samples` sampled Pareto frontiers.
num_fantasies: Number of fantasies to generate. The higher this
number the more accurate the model (at the expense of model
complexity, wall time and memory). Ignored if `X_pending` is `None`.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation, but have not yet been evaluated.
"""
MultiObjectiveMCAcquisitionFunction.__init__(self, model=model, sampler=sampler)
# Batch GP models (e.g. fantasized models) are not currently supported
if isinstance(model, ModelListGP):
train_X = model.models[0].train_inputs[0]
else:
train_X = model.train_inputs[0]
if train_X.ndim > 3:
raise NotImplementedError(
"Batch GP models (e.g. fantasized models) "
"are not yet supported by qMultiObjectiveMaxValueEntropy"
)
# convert to batched MO model
batched_mo_model = (
model_list_to_batched(model) if isinstance(model, ModelListGP) else model
)
self._init_model = batched_mo_model
self.mo_model = batched_mo_model
self.model = batched_multi_output_to_single_output(
batch_mo_model=batched_mo_model
)
self.fantasies_sampler = SobolQMCNormalSampler(
sample_shape=torch.Size([num_fantasies])
)
self.num_fantasies = num_fantasies
# weight is used in _compute_information_gain
self.maximize = True
self.weight = 1.0
self.sample_pareto_frontiers = sample_pareto_frontiers
# this avoids unnecessary model conversion if X_pending is None
if X_pending is None:
self._sample_max_values()
else:
self.set_X_pending(X_pending)
# This avoids attribute errors in qMaxValueEntropy code.
self.posterior_transform = None
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
r"""Set pending points.
Informs the acquisition function about pending design points,
fantasizes the model on the pending points and draws max-value samples
from the fantasized model posterior.
Args:
X_pending: `m x d` Tensor with `m` `d`-dim design points that have
been submitted for evaluation but have not yet been evaluated.
"""
MultiObjectiveMCAcquisitionFunction.set_X_pending(self, X_pending=X_pending)
if X_pending is not None:
# fantasize the model
fantasy_model = self._init_model.fantasize(
X=X_pending, sampler=self.fantasies_sampler, observation_noise=True
)
self.mo_model = fantasy_model
# convert model to batched single outcome model.
self.model = batched_multi_output_to_single_output(
batch_mo_model=self.mo_model
)
self._sample_max_values()
else:
# This is mainly for setting the model to the original model
# after the sequential optimization at q > 1
self.mo_model = self._init_model
self.model = batched_multi_output_to_single_output(
batch_mo_model=self.mo_model
)
self._sample_max_values()
def _sample_max_values(self) -> None:
r"""Sample max values for MC approximation of the expectation in MES"""
with torch.no_grad():
# num_samples x (num_fantasies) x n_pareto_points x m
sampled_pfs = self.sample_pareto_frontiers(self.mo_model)
if sampled_pfs.ndim == 3:
# add fantasy dim
sampled_pfs = sampled_pfs.unsqueeze(-3)
# take component-wise max value
self.posterior_max_values = sampled_pfs.max(dim=-2).values
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Compute max-value entropy at the design points `X`.
Args:
X: A `batch_shape x 1 x d`-dim Tensor of `batch_shape` t-batches
with `1` `d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of MVE values at the given design points `X`.
"""
# `m` dim tensor of information gains
# unsqueeze X to add a batch-dim for the batched model
igs = qMaxValueEntropy.forward(self, X=X.unsqueeze(-3))
# sum over objectives
return igs.sum(dim=-1)
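# Illustrative sketch (not part of the library API): `sample_pareto_frontiers`
# only needs to map a model to a `num_samples x n' x m`-dim tensor of sampled
# Pareto frontier outcomes. The constant sizes below are assumptions; in
# practice the frontiers would be sampled from the model posterior.
def _example_sample_pareto_frontiers(model: Model) -> Tensor:
    num_samples, num_frontier_points, num_objectives = 16, 10, 2
    return torch.rand(num_samples, num_frontier_points, num_objectives)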
class qLowerBoundMultiObjectiveMaxValueEntropySearch(
LowerBoundMultiObjectiveEntropySearch
):
r"""The acquisition function for the multi-objective Max-value Entropy Search,
where the batches `q > 1` are supported through the lower bound formulation.
This acquisition function computes the mutual information between the observation
at a candidate point `X` and the Pareto optimal outputs.
See [Tu2022]_ for a discussion on the estimation procedure.
NOTES:
(i) The estimated acquisition value could be negative.
(ii) The lower bound batch acquisition function might not be monotone in the
sense that adding more elements to the batch does not necessarily increase the
acquisition value. Specifically, the acquisition value can become smaller when
more inputs are added.
"""
def __init__(
self,
model: Model,
hypercell_bounds: Tensor,
X_pending: Optional[Tensor] = None,
estimation_type: str = "LB",
num_samples: int = 64,
**kwargs: Any,
) -> None:
r"""Lower bound multi-objective max-value entropy search acquisition function.
Args:
model: A fitted batch model with 'M' number of outputs.
hypercell_bounds: A `num_pareto_samples x 2 x J x M`-dim Tensor
containing the hyper-rectangle bounds for integration, where `J` is
the number of hyper-rectangles. In the unconstrained case, this gives
the partition of the dominated space. In the constrained case, this
gives the partition of the feasible dominated space union the
infeasible space.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation, but have not yet been evaluated.
estimation_type: A string to determine which entropy estimate is
computed: "0", "LB", "LB2", or "MC".
num_samples: The number of Monte Carlo samples for the Monte Carlo
estimate.
"""
super().__init__(
model=model,
pareto_sets=None,
pareto_fronts=None,
hypercell_bounds=hypercell_bounds,
X_pending=X_pending,
estimation_type=estimation_type,
num_samples=num_samples,
)
def _compute_posterior_statistics(
self, X: Tensor
) -> dict[str, Union[GPyTorchPosterior, Tensor]]:
r"""Compute the posterior statistics.
Args:
X: A `batch_shape x q x d`-dim Tensor of inputs.
Returns:
A dictionary containing the posterior variables used to estimate the
entropy.
- "initial_entropy": A `batch_shape`-dim Tensor containing the entropy of
the Gaussian random variable `p(Y| X, D_n)`.
- "posterior_mean": A `batch_shape x num_pareto_samples x q x 1 x M`-dim
Tensor containing the posterior mean at the input `X`.
- "posterior_variance": A `batch_shape x num_pareto_samples x q x 1 x
M`-dim Tensor containing the posterior variance at the input `X`
excluding the observation noise.
- "observation_noise": A `batch_shape x num_pareto_samples x q x 1 x M`
-dim Tensor containing the observation noise at the input `X`.
- "posterior_with_noise": The posterior distribution at `X` which
includes the observation noise. This is used to compute the marginal
log-probabilities with respect to `p(y| x, D_n)` for `x` in `X`.
"""
tkwargs = {"dtype": X.dtype, "device": X.device}
CLAMP_LB = torch.finfo(tkwargs["dtype"]).eps
# Compute the initial entropy term depending on `X`.
# TODO: Below we compute posterior_plus_noise twice:
# (1) Firstly, we compute p(Y| X, D_n) when computing the initial entropy
# (2) Secondly, we compute p(y| x, D_n) for x in X in order to compute
        # log(p(y|x, D_n)) for x in X in the Monte Carlo estimate.
        # This could be simplified if we could evaluate log(p(y|x, D_n)) using
        # the posterior p(Y| X, D_n).
posterior_plus_noise = self.initial_model.posterior(X, observation_noise=True)
# Additional constant term.
add_term = (
0.5
* self.model.num_outputs
* (1 + torch.log(2 * pi * torch.ones(1, **tkwargs)))
)
# The variance initially has shape `batch_shape x (q*M) x (q*M)`
# prior_entropy has shape `batch_shape x num_fantasies`
initial_entropy = add_term + 0.5 * torch.logdet(
posterior_plus_noise.mvn.covariance_matrix
)
posterior_statistics = {"initial_entropy": initial_entropy}
# Compute the posterior entropy term.
posterior_plus_noise = self.model.posterior(
X.unsqueeze(-2), observation_noise=True
)
# `batch_shape x q x 1 x M`
mean = posterior_plus_noise.mean
var_plus_noise = posterior_plus_noise.variance.clamp_min(CLAMP_LB)
# Expand shapes to `batch_shape x num_pareto_samples x q x 1 x M`
new_shape = (
mean.shape[:-3] + torch.Size([self.num_pareto_samples]) + mean.shape[-3:]
)
mean = mean.unsqueeze(-4).expand(new_shape)
var_plus_noise = var_plus_noise.unsqueeze(-4).expand(new_shape)
# TODO: This computes the observation noise via a second evaluation of the
# posterior. This step could be done better.
posterior = self.model.posterior(X.unsqueeze(-2), observation_noise=False)
var = posterior.variance.clamp_min(CLAMP_LB)
var = var.unsqueeze(-4).expand(new_shape)
obs_noise = var_plus_noise - var
posterior_statistics["posterior_mean"] = mean
posterior_statistics["posterior_variance"] = var
posterior_statistics["observation_noise"] = obs_noise
posterior_statistics["posterior_with_noise"] = posterior_plus_noise
return posterior_statistics
def _compute_monte_carlo_variables(
self, posterior: GPyTorchPosterior
) -> Tuple[Tensor, Tensor]:
r"""Compute the samples and log-probability associated with a posterior
distribution.
Args:
posterior: The posterior distribution, which includes the observation
noise.
Returns:
A two-element tuple containing
- samples: A `num_mc_samples x batch_shape x num_pareto_samples x q x 1
x M`-dim Tensor containing the Monte Carlo samples.
- samples_log_prob: A `num_mc_samples x batch_shape x num_pareto_samples
x q`-dim Tensor containing the log-probabilities of the Monte Carlo
samples.
"""
# `num_mc_samples x batch_shape x q x 1 x M`
samples = self.get_posterior_samples(posterior)
# `num_mc_samples x batch_shape x q`
if self.model.num_outputs == 1:
samples_log_prob = posterior.mvn.log_prob(samples.squeeze(-1))
else:
samples_log_prob = posterior.mvn.log_prob(samples)
# Expand shape to `num_mc_samples x batch_shape x num_pareto_samples x
# q x 1 x M`
new_shape = (
samples.shape[:-3]
+ torch.Size([self.num_pareto_samples])
+ samples.shape[-3:]
)
samples = samples.unsqueeze(-4).expand(new_shape)
# Expand shape to `num_mc_samples x batch_shape x num_pareto_samples x q`
new_shape = (
samples_log_prob.shape[:-1]
+ torch.Size([self.num_pareto_samples])
+ samples_log_prob.shape[-1:]
)
samples_log_prob = samples_log_prob.unsqueeze(-2).expand(new_shape)
return samples, samples_log_prob
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluates qLowerBoundMultiObjectiveMaxValueEntropySearch at the design
points `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of `batch_shape` t-batches with `q`
`d`-dim design points each.
Returns:
A `batch_shape`-dim Tensor of acquisition values at the given design
points `X`.
"""
return self._compute_lower_bound_information_gain(X)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Analytic Acquisition Functions for Multi-objective Bayesian optimization.
References
.. [Yang2019]
Yang, K., Emmerich, M., Deutz, A. et al. Efficient computation of expected
hypervolume improvement using box decomposition algorithms. J Glob Optim 75,
3–34 (2019)
"""
from __future__ import annotations
from abc import abstractmethod
from itertools import product
from typing import List, Optional
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.objective import PosteriorTransform
from botorch.exceptions.errors import UnsupportedError
from botorch.models.model import Model
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
NondominatedPartitioning,
)
from botorch.utils.transforms import t_batch_mode_transform
from torch import Tensor
from torch.distributions import Normal
class MultiObjectiveAnalyticAcquisitionFunction(AcquisitionFunction):
r"""Abstract base class for Multi-Objective batch acquisition functions."""
def __init__(
self,
model: Model,
posterior_transform: Optional[PosteriorTransform] = None,
) -> None:
r"""Constructor for the MultiObjectiveAnalyticAcquisitionFunction base class.
Args:
model: A fitted model.
posterior_transform: A PosteriorTransform (optional).
"""
super().__init__(model=model)
if posterior_transform is None or isinstance(
posterior_transform, PosteriorTransform
):
self.posterior_transform = posterior_transform
else:
raise UnsupportedError(
"Only a posterior_transform of type PosteriorTransform is "
"supported for Multi-Objective analytic acquisition functions."
)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `batch_shape x 1 x d` X Tensor of t-batches with `1` `d`-dim
design point each, and returns a Tensor with shape `batch_shape'`, where
`batch_shape'` is the broadcasted batch shape of model and input `X`.
"""
pass # pragma: no cover
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
raise UnsupportedError(
"Analytic acquisition functions do not account for X_pending yet."
)
class ExpectedHypervolumeImprovement(MultiObjectiveAnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
ref_point: List[float],
partitioning: NondominatedPartitioning,
posterior_transform: Optional[PosteriorTransform] = None,
**kwargs,
) -> None:
r"""Expected Hypervolume Improvement supporting m>=2 outcomes.
        This computes EHVI using the algorithm from [Yang2019]_, but
additionally computes gradients via auto-differentiation as proposed by
[Daulton2020qehvi]_.
Note: this is currently inefficient in two ways due to the binary partitioning
algorithm that we use for the box decomposition:
- We have more boxes in our decomposition
- If we used a box decomposition that used `inf` as the upper bound for
the last dimension *in all hypercells*, then we could reduce the number
of terms we need to compute from 2^m to 2^(m-1). [Yang2019]_ do this
by using DKLV17 and LKF17 for the box decomposition.
TODO: Use DKLV17 and LKF17 for the box decomposition as in [Yang2019]_ for
greater efficiency.
TODO: Add support for outcome constraints.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> ref_point = [0.0, 0.0]
>>> EHVI = ExpectedHypervolumeImprovement(model, ref_point, partitioning)
>>> ehvi = EHVI(test_X)
Args:
model: A fitted model.
ref_point: A list with `m` elements representing the reference point (in the
                outcome space) w.r.t. which the hypervolume is computed. This is a
reference point for the objective values (i.e. after applying
`objective` to the samples).
partitioning: A `NondominatedPartitioning` module that provides the non-
dominated front and a partitioning of the non-dominated space in hyper-
rectangles.
posterior_transform: A `PosteriorTransform`.
"""
# TODO: we could refactor this __init__ logic into a
# HypervolumeAcquisitionFunction Mixin
if len(ref_point) != partitioning.num_outcomes:
raise ValueError(
"The length of the reference point must match the number of outcomes. "
f"Got ref_point with {len(ref_point)} elements, but expected "
f"{partitioning.num_outcomes}."
)
ref_point = torch.tensor(
ref_point,
dtype=partitioning.pareto_Y.dtype,
device=partitioning.pareto_Y.device,
)
better_than_ref = (partitioning.pareto_Y > ref_point).all(dim=1)
if not better_than_ref.any() and partitioning.pareto_Y.shape[0] > 0:
raise ValueError(
"At least one pareto point must be better than the reference point."
)
super().__init__(model=model, posterior_transform=posterior_transform, **kwargs)
self.register_buffer("ref_point", ref_point)
self.partitioning = partitioning
cell_bounds = self.partitioning.get_hypercell_bounds()
self.register_buffer("cell_lower_bounds", cell_bounds[0])
self.register_buffer("cell_upper_bounds", cell_bounds[1])
# create indexing tensor of shape `2^m x m`
self._cross_product_indices = torch.tensor(
list(product(*[[0, 1] for _ in range(ref_point.shape[0])])),
dtype=torch.long,
device=ref_point.device,
)
self.normal = Normal(0, 1)
def psi(self, lower: Tensor, upper: Tensor, mu: Tensor, sigma: Tensor) -> Tensor:
r"""Compute Psi function.
For each cell i and outcome k:
        Psi(lower_{i,k}, upper_{i,k}, mu_k, sigma_k) = (
            sigma_k * PDF((upper_{i,k} - mu_k) / sigma_k)
            + (mu_k - lower_{i,k}) * (1 - CDF((upper_{i,k} - mu_k) / sigma_k))
        )
See Equation 19 in [Yang2019]_ for more details.
Args:
lower: A `num_cells x m`-dim tensor of lower cell bounds
upper: A `num_cells x m`-dim tensor of upper cell bounds
mu: A `batch_shape x 1 x m`-dim tensor of means
sigma: A `batch_shape x 1 x m`-dim tensor of standard deviations (clamped).
Returns:
A `batch_shape x num_cells x m`-dim tensor of values.
"""
u = (upper - mu) / sigma
return sigma * self.normal.log_prob(u).exp() + (mu - lower) * (
1 - self.normal.cdf(u)
)
def nu(self, lower: Tensor, upper: Tensor, mu: Tensor, sigma: Tensor) -> Tensor:
r"""Compute Nu function.
For each cell i and outcome k:
nu(lower_{i,k}, upper_{i,k}, mu_k, sigma_k) = (
upper_{i,k} - lower_{i,k}
) * (1 - CDF((upper_{i,k} - mu_k) / sigma_k))
See Equation 25 in [Yang2019]_ for more details.
Args:
lower: A `num_cells x m`-dim tensor of lower cell bounds
upper: A `num_cells x m`-dim tensor of upper cell bounds
mu: A `batch_shape x 1 x m`-dim tensor of means
sigma: A `batch_shape x 1 x m`-dim tensor of standard deviations (clamped).
Returns:
A `batch_shape x num_cells x m`-dim tensor of values.
"""
return (upper - lower) * (1 - self.normal.cdf((upper - mu) / sigma))
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
posterior = self.model.posterior(
X, posterior_transform=self.posterior_transform
)
mu = posterior.mean
sigma = posterior.variance.clamp_min(1e-9).sqrt()
# clamp here, since upper_bounds will contain `inf`s, which
# are not differentiable
cell_upper_bounds = self.cell_upper_bounds.clamp_max(
1e10 if X.dtype == torch.double else 1e8
)
# Compute psi(lower_i, upper_i, mu_i, sigma_i) for i=0, ... m-2
psi_lu = self.psi(
lower=self.cell_lower_bounds, upper=cell_upper_bounds, mu=mu, sigma=sigma
)
# Compute psi(lower_m, lower_m, mu_m, sigma_m)
psi_ll = self.psi(
lower=self.cell_lower_bounds,
upper=self.cell_lower_bounds,
mu=mu,
sigma=sigma,
)
# Compute nu(lower_m, upper_m, mu_m, sigma_m)
nu = self.nu(
lower=self.cell_lower_bounds, upper=cell_upper_bounds, mu=mu, sigma=sigma
)
# compute the difference psi_ll - psi_lu
psi_diff = psi_ll - psi_lu
        # this is batch_shape x num_cells x 2 x m
stacked_factors = torch.stack([psi_diff, nu], dim=-2)
# Take the cross product of psi_diff and nu across all outcomes
# e.g. for m = 2
# for each batch and cell, compute
# [psi_diff_0, psi_diff_1]
# [nu_0, psi_diff_1]
# [psi_diff_0, nu_1]
# [nu_0, nu_1]
# this tensor has shape: `batch_shape x num_cells x 2^m x m`
all_factors_up_to_last = stacked_factors.gather(
dim=-2,
index=self._cross_product_indices.expand(
stacked_factors.shape[:-2] + self._cross_product_indices.shape
),
)
# compute product for all 2^m terms,
# sum across all terms and hypercells
return all_factors_up_to_last.prod(dim=-1).sum(dim=-1).sum(dim=-1)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Multi-Fidelity Acquisition Functions for Multi-objective Bayesian optimization.
References
.. [Irshad2021MOMF]
F. Irshad, S. Karsch, and A. Döpp. Expected hypervolume improvement for
simultaneous multi-objective and multi-fidelity optimization.
arXiv preprint arXiv:2112.13901, 2021.
"""
from __future__ import annotations
from typing import Any, Callable, List, Optional, Union
import torch
from botorch.acquisition.cost_aware import InverseCostWeightedUtility
from botorch.acquisition.multi_objective.monte_carlo import (
qExpectedHypervolumeImprovement,
)
from botorch.acquisition.multi_objective.objective import MCMultiOutputObjective
from botorch.models.cost import AffineFidelityCostModel
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
NondominatedPartitioning,
)
from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
from torch import Tensor
class MOMF(qExpectedHypervolumeImprovement):
def __init__(
self,
model: Model,
ref_point: Union[List[float], Tensor],
partitioning: NondominatedPartitioning,
sampler: Optional[MCSampler] = None,
objective: Optional[MCMultiOutputObjective] = None,
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
eta: Optional[Union[Tensor, float]] = 1e-3,
X_pending: Optional[Tensor] = None,
        cost_call: Optional[Callable[[Tensor], Tensor]] = None,
**kwargs: Any,
) -> None:
r"""MOMF acquisition function supporting m>=2 outcomes.
The model needs to have train_obj that has a fidelity
objective appended to its end.
In the following example we consider a 2-D output space
but the ref_point is 3D because of fidelity objective.
See [Irshad2021MOMF]_ for details.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> ref_point = [0.0, 0.0, 0.0]
>>> cost_func = lambda X: 5 + X[..., -1]
            >>> momf = MOMF(model, ref_point, partitioning, cost_call=cost_func)
>>> momf_val = momf(test_X)
Args:
model: A fitted model. There are two default assumptions in the training
data. `train_X` should have fidelity parameter `s` as the last dimension
of the input and `train_Y` contains a trust objective as its last
dimension.
ref_point: A list or tensor with `m+1` elements representing the reference
                point (in the outcome space) w.r.t. which the hypervolume is computed.
The '+1' takes care of the trust objective appended to `train_Y`.
This is a reference point for the objective values (i.e. after
                applying `objective` to the samples).
partitioning: A `NondominatedPartitioning` module that provides the non-
dominated front and a partitioning of the non-dominated space in hyper-
rectangles. If constraints are present, this partitioning must only
include feasible points.
sampler: The sampler used to draw base samples. If not given,
a sampler is generated using `get_sampler`.
objective: The MCMultiOutputObjective under which the samples are evaluated.
Defaults to `IdentityMultiOutputObjective()`.
constraints: A list of callables, each mapping a Tensor of dimension
`sample_shape x batch-shape x q x m` to a Tensor of dimension
`sample_shape x batch-shape x q`, where negative values imply
feasibility. The acquisition function will compute expected feasible
hypervolume.
            X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points that
                have been submitted for function evaluation but have not yet been
                evaluated. Concatenated into `X` upon forward call. Copied and set
to have no gradient.
cost_call: A callable cost function mapping a Tensor of dimension
`batch_shape x q x d` to a cost Tensor of dimension
                `batch_shape x q x m`. Defaults to an AffineFidelityCostModel with
`C(s) = 1 + s`.
eta: The temperature parameter for the sigmoid function used for the
differentiable approximation of the constraints. In case of a float the
same eta is used for every constraint in constraints. In case of a
tensor the length of the tensor must match the number of provided
constraints. The i-th constraint is then estimated with the i-th
eta value.
"""
if len(ref_point) != partitioning.num_outcomes:
raise ValueError(
"The length of the reference point must match the number of outcomes. "
f"Got ref_point with {len(ref_point)} elements, but expected "
f"{partitioning.num_outcomes}."
)
ref_point = torch.as_tensor(
ref_point,
dtype=partitioning.pareto_Y.dtype,
device=partitioning.pareto_Y.device,
)
super().__init__(
model=model,
ref_point=ref_point,
partitioning=partitioning,
sampler=sampler,
objective=objective,
constraints=constraints,
eta=eta,
X_pending=X_pending,
)
if cost_call is None:
cost_model = AffineFidelityCostModel(
fidelity_weights={-1: 1.0}, fixed_cost=1.0
)
else:
cost_model = GenericDeterministicModel(cost_call)
cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)
self.cost_aware_utility = cost_aware_utility
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
posterior = self.model.posterior(X)
samples = self.get_posterior_samples(posterior)
hv_gain = self._compute_qehvi(samples=samples, X=X)
cost_weighted_qehvi = self.cost_aware_utility(X=X, deltas=hv_gain)
return cost_weighted_qehvi
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Samplers to enable use cases that are not base sample driven, such as
stochastic optimization of acquisition functions.
"""
from __future__ import annotations
import torch
from botorch.posteriors import Posterior
from botorch.sampling.base import MCSampler
from torch import Tensor
class ForkedRNGSampler(MCSampler):
r"""A sampler using `torch.fork_rng` to enable replicable sampling
from a posterior that does not support base samples.
NOTE: This approach is not a one-to-one replacement for base sample
driven sampling. The main missing piece in this approach is that its
outputs are not replicable across the batch dimensions. As a result,
when an acquisition function is batch evaluated with repeated candidates,
each candidate will produce a different acquisition value, which is not
compatible with Sample Average Approximation.
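    Example:
        A minimal usage sketch (assumes a fitted model `model` and candidate
        points `test_X`, neither of which is defined here):
        >>> sampler = ForkedRNGSampler(sample_shape=torch.Size([64]), seed=1234)
        >>> posterior = model.posterior(test_X)
        >>> samples = sampler(posterior)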
"""
def forward(self, posterior: Posterior) -> Tensor:
r"""Draws MC samples from the posterior in a `fork_rng` context.
Args:
posterior: The posterior to sample from.
Returns:
The samples drawn from the posterior.
"""
with torch.random.fork_rng():
torch.manual_seed(self.seed)
return posterior.rsample(sample_shape=self.sample_shape)
class StochasticSampler(MCSampler):
r"""A sampler that simply calls `posterior.rsample` to generate the
samples. This should only be used for stochastic optimization of the
acquisition functions, e.g., via `gen_candidates_torch`. This should
not be used with `optimize_acqf`, which uses deterministic optimizers
under the hood.
NOTE: This ignores the `seed` option.
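    Example:
        A minimal usage sketch (assumes a fitted model `model` and candidate
        points `test_X`, neither of which is defined here):
        >>> sampler = StochasticSampler(sample_shape=torch.Size([128]))
        >>> posterior = model.posterior(test_X)
        >>> samples = sampler(posterior)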
"""
def forward(self, posterior: Posterior) -> Tensor:
r"""Draws MC samples from the posterior.
Args:
posterior: The posterior to sample from.
Returns:
The samples drawn from the posterior.
"""
return posterior.rsample(sample_shape=self.sample_shape)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
A dummy sampler for use with deterministic models.
"""
from __future__ import annotations
from botorch.posteriors.deterministic import DeterministicPosterior
from botorch.sampling.stochastic_samplers import StochasticSampler
class DeterministicSampler(StochasticSampler):
r"""A sampler that simply calls `posterior.rsample`, intended to be used with
`DeterministicModel` & `DeterministicPosterior`.
[DEPRECATED] - Use `IndexSampler` in conjunction with `EnsemblePosterior`
instead of `DeterministicSampler` with `DeterministicPosterior`.
    This effectively signals that `StochasticSampler` is safe to use with
deterministic models since their output is deterministic by definition.
"""
def _update_base_samples(
self, posterior: DeterministicPosterior, base_sampler: DeterministicSampler
) -> None:
r"""This is a no-op since there are no base samples to update.
Args:
posterior: The posterior for which the base samples are constructed.
base_sampler: The base sampler to retrieve the base samples from.
"""
return
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Sampler modules producing N(0,1) samples, to be used with MC-evaluated
acquisition functions and Gaussian posteriors.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
import torch
from botorch.exceptions import UnsupportedError
from botorch.posteriors import Posterior
from botorch.posteriors.higher_order import HigherOrderGPPosterior
from botorch.posteriors.multitask import MultitaskGPPosterior
from botorch.posteriors.transformed import TransformedPosterior
from botorch.sampling.base import MCSampler
from botorch.utils.sampling import draw_sobol_normal_samples, manual_seed
from torch import Tensor
from torch.quasirandom import SobolEngine
class NormalMCSampler(MCSampler, ABC):
r"""Base class for samplers producing (possibly QMC) N(0,1) samples.
Subclasses must implement the `_construct_base_samples` method.
"""
def forward(self, posterior: Posterior) -> Tensor:
r"""Draws MC samples from the posterior.
Args:
posterior: The posterior to sample from.
Returns:
The samples drawn from the posterior.
"""
self._construct_base_samples(posterior=posterior)
samples = posterior.rsample_from_base_samples(
sample_shape=self.sample_shape,
base_samples=self.base_samples.expand(
self._get_extended_base_sample_shape(posterior=posterior)
),
)
return samples
@abstractmethod
def _construct_base_samples(self, posterior: Posterior) -> None:
r"""Generate base samples (if necessary).
This function will generate a new set of base samples and register the
`base_samples` buffer if one of the following is true:
- the MCSampler has no `base_samples` attribute.
- the output of `_get_collapsed_shape` does not agree with the shape of
`self.base_samples`.
Args:
posterior: The Posterior for which to generate base samples.
"""
pass # pragma: no cover
def _update_base_samples(
self, posterior: Posterior, base_sampler: NormalMCSampler
) -> None:
r"""Update the sampler to use the original base samples for X_baseline.
This is used in CachedCholeskyAcquisitionFunctions to ensure consistency.
Args:
posterior: The posterior for which the base samples are constructed.
base_sampler: The base sampler to retrieve the base samples from.
"""
self._instance_check(base_sampler=base_sampler)
self._construct_base_samples(posterior=posterior)
if base_sampler.base_samples is not None:
current_base_samples = base_sampler.base_samples.detach().clone()
# This is the # of non-`sample_shape` dimensions.
base_ndims = current_base_samples.dim() - 1
# Unsqueeze as many dimensions as needed to match target_shape.
target_shape = self._get_collapsed_shape(posterior=posterior)
view_shape = (
self.sample_shape
+ torch.Size([1] * (len(target_shape) - current_base_samples.dim()))
+ current_base_samples.shape[-base_ndims:]
)
expanded_shape = (
target_shape[:-base_ndims] + current_base_samples.shape[-base_ndims:]
)
# Use stored base samples:
# Use all base_samples from the current sampler
# this includes the base_samples from the base_sampler
# and any base_samples for the new points in the sampler.
# For example, when using sequential greedy candidate generation
# then generate the new candidate point using last (-1) base_sample
# in sampler. This copies that base sample.
expanded_samples = current_base_samples.view(view_shape).expand(
expanded_shape
)
if isinstance(posterior, (HigherOrderGPPosterior, MultitaskGPPosterior)):
n_train_samples = current_base_samples.shape[-1] // 2
# The train base samples.
self.base_samples[..., :n_train_samples] = expanded_samples[
..., :n_train_samples
]
# The train noise base samples.
self.base_samples[..., -n_train_samples:] = expanded_samples[
..., -n_train_samples:
]
else:
batch_shape = (
posterior._posterior.batch_shape
if isinstance(posterior, TransformedPosterior)
else posterior.batch_shape
)
single_output = (
len(posterior.base_sample_shape) - len(batch_shape)
) == 1
if single_output:
self.base_samples[
..., : current_base_samples.shape[-1]
] = expanded_samples
else:
self.base_samples[
..., : current_base_samples.shape[-2], :
] = expanded_samples
class IIDNormalSampler(NormalMCSampler):
r"""Sampler for MC base samples using iid N(0,1) samples.
Example:
        >>> sampler = IIDNormalSampler(torch.Size([1000]), seed=1234)
>>> posterior = model.posterior(test_X)
>>> samples = sampler(posterior)
"""
def _construct_base_samples(self, posterior: Posterior) -> None:
r"""Generate iid `N(0,1)` base samples (if necessary).
This function will generate a new set of base samples and set the
`base_samples` buffer if one of the following is true:
- the MCSampler has no `base_samples` attribute.
- the output of `_get_collapsed_shape` does not agree with the shape of
`self.base_samples`.
Args:
posterior: The Posterior for which to generate base samples.
"""
target_shape = self._get_collapsed_shape(posterior=posterior)
if self.base_samples is None or self.base_samples.shape != target_shape:
with manual_seed(seed=self.seed):
base_samples = torch.randn(
target_shape, device=posterior.device, dtype=posterior.dtype
)
self.register_buffer("base_samples", base_samples)
if self.base_samples.device != posterior.device:
self.to(device=posterior.device) # pragma: nocover
if self.base_samples.dtype != posterior.dtype:
self.to(dtype=posterior.dtype)
class SobolQMCNormalSampler(NormalMCSampler):
r"""Sampler for quasi-MC N(0,1) base samples using Sobol sequences.
Example:
>>> sampler = SobolQMCNormalSampler(torch.Size([1024]), seed=1234)
>>> posterior = model.posterior(test_X)
>>> samples = sampler(posterior)
"""
def _construct_base_samples(self, posterior: Posterior) -> None:
r"""Generate quasi-random Normal base samples (if necessary).
This function will generate a new set of base samples and set the
`base_samples` buffer if one of the following is true:
- the MCSampler has no `base_samples` attribute.
- the output of `_get_collapsed_shape` does not agree with the shape of
`self.base_samples`.
Args:
posterior: The Posterior for which to generate base samples.
"""
target_shape = self._get_collapsed_shape(posterior=posterior)
if self.base_samples is None or self.base_samples.shape != target_shape:
base_collapsed_shape = target_shape[len(self.sample_shape) :]
output_dim = base_collapsed_shape.numel()
if output_dim > SobolEngine.MAXDIM:
raise UnsupportedError(
"SobolQMCSampler only supports dimensions "
f"`q * o <= {SobolEngine.MAXDIM}`. Requested: {output_dim}"
)
base_samples = draw_sobol_normal_samples(
d=output_dim,
n=self.sample_shape.numel(),
device=posterior.device,
dtype=posterior.dtype,
seed=self.seed,
)
base_samples = base_samples.view(target_shape)
self.register_buffer("base_samples", base_samples)
self.to(device=posterior.device, dtype=posterior.dtype)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.sampling.base import MCSampler
from botorch.sampling.deterministic import DeterministicSampler
from botorch.sampling.get_sampler import get_sampler
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.sampling.pairwise_samplers import (
PairwiseIIDNormalSampler,
PairwiseMCSampler,
PairwiseSobolQMCNormalSampler,
)
from botorch.sampling.qmc import MultivariateNormalQMCEngine, NormalQMCEngine
from botorch.sampling.stochastic_samplers import ForkedRNGSampler, StochasticSampler
from torch.quasirandom import SobolEngine
__all__ = [
"DeterministicSampler",
"ForkedRNGSampler",
"get_sampler",
"IIDNormalSampler",
"ListSampler",
"MCSampler",
"MultivariateNormalQMCEngine",
"NormalQMCEngine",
"PairwiseIIDNormalSampler",
"PairwiseMCSampler",
"PairwiseSobolQMCNormalSampler",
"SobolEngine",
"SobolQMCNormalSampler",
"StochasticSampler",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import combinations
from typing import Any, Optional
import numpy as np
import torch
from botorch.posteriors.posterior import Posterior
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from torch import Tensor
class PairwiseMCSampler(MCSampler):
r"""
Abstract class for Pairwise MC Sampler.
This sampler will sample pairwise comparisons. It is to be used together
with PairwiseGP and BoTorch acquisition functions (e.g., qKnowledgeGradient)
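    Example:
        A minimal usage sketch via the concrete `PairwiseSobolQMCNormalSampler`
        subclass (assumes a fitted `PairwiseGP` named `model` and candidate
        points `test_X`, neither of which is defined here):
        >>> sampler = PairwiseSobolQMCNormalSampler(torch.Size([64]))
        >>> posterior = model.posterior(test_X)
        >>> comparisons = sampler(posterior)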
"""
def __init__(self, max_num_comparisons: int = None, seed: int = None) -> None:
r"""
Args:
max_num_comparisons: Max number of comparisons drawn within samples.
If None, use all possible pairwise comparisons
seed: The seed for np.random.seed. If omitted, use a random seed.
May be overwritten by sibling classes or subclasses.
"""
self.max_num_comparisons = max_num_comparisons
self.seed = seed if seed is not None else torch.randint(0, 1000000, (1,)).item()
def forward(self, posterior: Posterior) -> Tensor:
r"""Draws MC samples from the posterior and make comparisons
Args:
posterior: The Posterior to sample from.
The returned samples are expected to have output dimension of 1.
Returns:
Posterior sample pairwise comparisons.
"""
samples = super().forward(posterior)
np.random.seed(self.seed)
s_n = samples.shape[-2] # candidate number per batch
if s_n < 2:
raise RuntimeError("Number of samples < 2, cannot make comparisons")
# TODO: Don't instantiate a generator
all_pairs = np.array(list(combinations(range(s_n), 2)))
if self.max_num_comparisons is None:
comp_n = len(all_pairs)
else:
comp_n = min(self.max_num_comparisons, len(all_pairs))
comp_pairs = all_pairs[
np.random.choice(range(len(all_pairs)), comp_n, replace=False)
]
s_comps_size = torch.Size((*samples.shape[:-2], comp_n, 2))
s_v = samples.view(-1, s_n)
idx1, idx2 = comp_pairs[:, 0], comp_pairs[:, 1]
prefs = (s_v[:, idx1] > s_v[:, idx2]).long().cpu()
cpt = comp_pairs.T
c1 = np.choose(prefs, cpt)
c2 = np.choose(1 - prefs, cpt)
s_comps = torch.stack([c1, c2], dim=-1).reshape(s_comps_size)
return s_comps
class PairwiseIIDNormalSampler(PairwiseMCSampler, IIDNormalSampler):
def __init__(
self,
sample_shape: torch.Size,
seed: Optional[int] = None,
max_num_comparisons: int = None,
**kwargs: Any,
) -> None:
r"""
Args:
sample_shape: The `sample_shape` of the samples to generate.
seed: The seed for the RNG. If omitted, use a random seed.
max_num_comparisons: Max number of comparisons drawn within samples.
If None, use all possible pairwise comparisons.
kwargs: Catch-all for deprecated arguments.
"""
PairwiseMCSampler.__init__(
self, max_num_comparisons=max_num_comparisons, seed=seed
)
IIDNormalSampler.__init__(self, sample_shape=sample_shape, seed=seed, **kwargs)
class PairwiseSobolQMCNormalSampler(PairwiseMCSampler, SobolQMCNormalSampler):
def __init__(
self,
sample_shape: torch.Size,
seed: Optional[int] = None,
max_num_comparisons: int = None,
**kwargs: Any,
) -> None:
r"""
Args:
sample_shape: The `sample_shape` of the samples to generate.
seed: The seed for the RNG. If omitted, use a random seed.
max_num_comparisons: Max number of comparisons drawn within samples.
If None, use all possible pairwise comparisons.
kwargs: Catch-all for deprecated arguments.
"""
PairwiseMCSampler.__init__(
self, max_num_comparisons=max_num_comparisons, seed=seed
)
SobolQMCNormalSampler.__init__(
self, sample_shape=sample_shape, seed=seed, **kwargs
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Type, Union
import torch
from botorch.logging import logger
from botorch.posteriors.deterministic import DeterministicPosterior
from botorch.posteriors.ensemble import EnsemblePosterior
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.posteriors.posterior import Posterior
from botorch.posteriors.posterior_list import PosteriorList
from botorch.posteriors.torch import TorchPosterior
from botorch.posteriors.transformed import TransformedPosterior
from botorch.sampling.base import MCSampler
from botorch.sampling.deterministic import DeterministicSampler
from botorch.sampling.index_sampler import IndexSampler
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import (
IIDNormalSampler,
NormalMCSampler,
SobolQMCNormalSampler,
)
from botorch.utils.dispatcher import Dispatcher
from gpytorch.distributions import MultivariateNormal
from torch.distributions import Distribution
from torch.quasirandom import SobolEngine
def _posterior_to_distribution_encoder(
posterior: Posterior,
) -> Union[Type[Distribution], Type[Posterior]]:
r"""An encoder returning the type of the distribution for `TorchPosterior`
and the type of the posterior for the rest.
"""
if isinstance(posterior, TorchPosterior):
return type(posterior.distribution)
return type(posterior)
GetSampler = Dispatcher("get_sampler", encoder=_posterior_to_distribution_encoder)
def get_sampler(
posterior: TorchPosterior, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the sampler for the given posterior.
The sampler can be used as `sampler(posterior)` to produce samples
suitable for use in acquisition function optimization via SAA.
Args:
posterior: A `Posterior` to get the sampler for.
sample_shape: The sample shape of the samples produced by the
given sampler. The full shape of the resulting samples is
given by `posterior._extended_shape(sample_shape)`.
kwargs: Optional kwargs, passed down to the samplers during construction.
Returns:
The `MCSampler` object for the given posterior.
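    Example:
        A minimal usage sketch (assumes a fitted GP model `model` and candidate
        points `test_X`, neither of which is defined here):
        >>> posterior = model.posterior(test_X)
        >>> sampler = get_sampler(posterior, sample_shape=torch.Size([256]))
        >>> samples = sampler(posterior)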
"""
kwargs["sample_shape"] = sample_shape
return GetSampler(posterior, **kwargs)
@GetSampler.register(MultivariateNormal)
def _get_sampler_mvn(
posterior: GPyTorchPosterior, sample_shape: torch.Size, **kwargs: Any
) -> NormalMCSampler:
r"""The Sobol normal sampler for the `MultivariateNormal` posterior.
If the output dim is too large, falls back to `IIDNormalSampler`.
"""
sampler = SobolQMCNormalSampler(sample_shape=sample_shape, **kwargs)
collapsed_shape = sampler._get_collapsed_shape(posterior=posterior)
base_collapsed_shape = collapsed_shape[len(sample_shape) :]
if base_collapsed_shape.numel() > SobolEngine.MAXDIM:
logger.warning(
f"Output dim {base_collapsed_shape.numel()} is too large for the "
"Sobol engine. Using IIDNormalSampler instead."
)
sampler = IIDNormalSampler(sample_shape=sample_shape, **kwargs)
return sampler
@GetSampler.register(TransformedPosterior)
def _get_sampler_derived(
posterior: TransformedPosterior, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the sampler for the underlying posterior."""
return get_sampler(
posterior=posterior._posterior, sample_shape=sample_shape, **kwargs
)
@GetSampler.register(PosteriorList)
def _get_sampler_list(
posterior: PosteriorList, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the `ListSampler` with the appropriate list of samplers."""
samplers = [
get_sampler(posterior=p, sample_shape=sample_shape, **kwargs)
for p in posterior.posteriors
]
return ListSampler(*samplers)
@GetSampler.register(DeterministicPosterior)
def _get_sampler_deterministic(
posterior: DeterministicPosterior, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the dummy `DeterministicSampler` for the `DeterministicPosterior`."""
return DeterministicSampler(sample_shape=sample_shape, **kwargs)
@GetSampler.register(EnsemblePosterior)
def _get_sampler_ensemble(
posterior: EnsemblePosterior, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the `IndexSampler` for the `EnsemblePosterior`."""
return IndexSampler(sample_shape=sample_shape, **kwargs)
@GetSampler.register(object)
def _not_found_error(
posterior: Posterior, sample_shape: torch.Size, **kwargs: Any
) -> None:
raise NotImplementedError(
f"A registered `MCSampler` for posterior {posterior} is not found. You can "
"implement and register one using `@GetSampler.register`."
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Sampler to be used with `EnsemblePosteriors` to enable
deterministic optimization of acquisition functions with ensemble models.
"""
from __future__ import annotations
import torch
from botorch.posteriors.ensemble import EnsemblePosterior
from botorch.sampling.base import MCSampler
from torch import Tensor
class IndexSampler(MCSampler):
r"""A sampler that calls `posterior.rsample_from_base_samples` to
generate the samples via index base samples."""
def forward(self, posterior: EnsemblePosterior) -> Tensor:
r"""Draws MC samples from the posterior.
Args:
posterior: The ensemble posterior to sample from.
Returns:
The samples drawn from the posterior.
"""
self._construct_base_samples(posterior=posterior)
samples = posterior.rsample_from_base_samples(
sample_shape=self.sample_shape, base_samples=self.base_samples
)
return samples
def _construct_base_samples(self, posterior: EnsemblePosterior) -> None:
r"""Constructs base samples as indices to sample with them from
the Posterior.
Args:
posterior: The ensemble posterior to construct the base samples
for.
"""
if self.base_samples is None or self.base_samples.shape != self.sample_shape:
with torch.random.fork_rng():
torch.manual_seed(self.seed)
base_samples = torch.multinomial(
posterior.weights,
num_samples=self.sample_shape.numel(),
replacement=True,
).reshape(self.sample_shape)
self.register_buffer("base_samples", base_samples)
if self.base_samples.device != posterior.device:
self.to(device=posterior.device) # pragma: nocover
def _update_base_samples(
self, posterior: EnsemblePosterior, base_sampler: IndexSampler
) -> None:
r"""Null operation just needed for compatibility with
`CachedCholeskyAcquisitionFunction`."""
pass
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Quasi Monte-Carlo sampling from Normal distributions.
References:
.. [Pages2018numprob]
G. Pages. Numerical Probability: An Introduction with Applications to
Finance. Universitext. Springer International Publishing, 2018.
"""
from __future__ import annotations
import math
from typing import Optional
import torch
from torch import Tensor
from torch.quasirandom import SobolEngine
class NormalQMCEngine:
r"""Engine for qMC sampling from a Multivariate Normal `N(0, I_d)`.
By default, this implementation uses Box-Muller transformed Sobol samples
following pg. 123 in [Pages2018numprob]_. To use the inverse transform
instead, set `inv_transform=True`.
Example:
>>> engine = NormalQMCEngine(3)
>>> samples = engine.draw(16)
"""
def __init__(
self, d: int, seed: Optional[int] = None, inv_transform: bool = False
) -> None:
r"""Engine for drawing qMC samples from a multivariate normal `N(0, I_d)`.
Args:
d: The dimension of the samples.
seed: The seed with which to seed the random number generator of the
underlying SobolEngine.
inv_transform: If True, use inverse transform instead of Box-Muller.
"""
self._d = d
self._seed = seed
self._inv_transform = inv_transform
if inv_transform:
sobol_dim = d
else:
# to apply Box-Muller, we need an even number of dimensions
sobol_dim = 2 * math.ceil(d / 2)
self._sobol_engine = SobolEngine(dimension=sobol_dim, scramble=True, seed=seed)
def draw(
self, n: int = 1, out: Optional[Tensor] = None, dtype: torch.dtype = torch.float
) -> Optional[Tensor]:
r"""Draw `n` qMC samples from the standard Normal.
Args:
n: The number of samples to draw. As a best practice, use powers of 2.
            out: An optional output tensor. If provided, draws are put into this
tensor, and the function returns None.
dtype: The desired torch data type (ignored if `out` is provided).
Returns:
A `n x d` tensor of samples if `out=None` and `None` otherwise.
"""
# get base samples
samples = self._sobol_engine.draw(n, dtype=dtype)
if self._inv_transform:
            # apply inverse transform (values too close to 0/1 result in inf values)
v = 0.5 + (1 - torch.finfo(samples.dtype).eps) * (samples - 0.5)
samples_tf = torch.erfinv(2 * v - 1) * math.sqrt(2)
else:
# apply Box-Muller transform (note: [1] indexes starting from 1)
even = torch.arange(0, samples.shape[-1], 2)
Rs = (-2 * torch.log(samples[:, even])).sqrt()
thetas = 2 * math.pi * samples[:, 1 + even]
cos = torch.cos(thetas)
sin = torch.sin(thetas)
samples_tf = torch.stack([Rs * cos, Rs * sin], -1).reshape(n, -1)
        # make sure we only return the requested number of dimensions
samples_tf = samples_tf[:, : self._d]
if out is None:
return samples_tf
else:
out.copy_(samples_tf)
class MultivariateNormalQMCEngine:
r"""Engine for qMC sampling from a multivariate Normal `N(\mu, \Sigma)`.
By default, this implementation uses Box-Muller transformed Sobol samples
following pg. 123 in [Pages2018numprob]_. To use the inverse transform
instead, set `inv_transform=True`.
Example:
>>> mean = torch.tensor([1.0, 2.0])
>>> cov = torch.tensor([[1.0, 0.25], [0.25, 2.0]])
>>> engine = MultivariateNormalQMCEngine(mean, cov)
>>> samples = engine.draw(16)
"""
def __init__(
self,
mean: Tensor,
cov: Tensor,
seed: Optional[int] = None,
inv_transform: bool = False,
) -> None:
r"""Engine for qMC sampling from a multivariate Normal `N(\mu, \Sigma)`.
Args:
mean: The mean vector.
cov: The covariance matrix.
seed: The seed with which to seed the random number generator of the
underlying SobolEngine.
inv_transform: If True, use inverse transform instead of Box-Muller.
"""
# validate inputs
if not cov.shape[0] == cov.shape[1]:
raise ValueError("Covariance matrix is not square.")
if not mean.shape[0] == cov.shape[0]:
raise ValueError("Dimension mismatch between mean and covariance.")
if not torch.allclose(cov, cov.transpose(-1, -2)):
raise ValueError("Covariance matrix is not symmetric.")
self._mean = mean
self._normal_engine = NormalQMCEngine(
d=mean.shape[0], seed=seed, inv_transform=inv_transform
)
# compute Cholesky decomp; if it fails, do the eigendecomposition
try:
self._corr_matrix = torch.linalg.cholesky(cov).transpose(-1, -2)
except RuntimeError:
eigval, eigvec = torch.linalg.eigh(cov)
tol = 1e-8 if eigval.dtype == torch.double else 1e-6
if torch.any(eigval < -tol):
raise ValueError("Covariance matrix not PSD.")
eigval_root = eigval.clamp_min(0.0).sqrt()
self._corr_matrix = (eigvec * eigval_root).transpose(-1, -2)
def draw(self, n: int = 1, out: Optional[Tensor] = None) -> Optional[Tensor]:
r"""Draw `n` qMC samples from the multivariate Normal.
Args:
n: The number of samples to draw. As a best practice, use powers of 2.
            out: An optional output tensor. If provided, draws are put into this
tensor, and the function returns None.
Returns:
A `n x d` tensor of samples if `out=None` and `None` otherwise.
"""
dtype = out.dtype if out is not None else self._mean.dtype
device = out.device if out is not None else self._mean.device
base_samples = self._normal_engine.draw(n, dtype=dtype).to(device=device)
corr_mat = self._corr_matrix.to(dtype=dtype, device=device)
mean = self._mean.to(dtype=dtype, device=device)
qmc_samples = base_samples @ corr_mat + mean
if out is None:
return qmc_samples
else:
out.copy_(qmc_samples)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
The base class for sampler modules to be used with MC-evaluated acquisition functions.
"""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from typing import Any, Optional, Tuple
import torch
from botorch.exceptions.errors import InputDataError
from botorch.posteriors import Posterior
from torch import Tensor
from torch.nn import Module
KWARGS_DEPRECATED_MSG = (
"The {} argument of `MCSampler`s has been deprecated and will raise an "
"error in a future version."
)
KWARG_ERR_MSG = (
"`MCSampler`s no longer support the `{}` argument. "
"Consider using `{}` for similar functionality."
)
class MCSampler(Module, ABC):
r"""Abstract base class for Samplers.
Subclasses must implement the `forward` method.
Example:
This method is usually not called directly, but via the sampler's
`__call__` method:
>>> posterior = model.posterior(test_X)
>>> samples = sampler(posterior)
:meta private:
"""
def __init__(
self,
sample_shape: torch.Size,
seed: Optional[int] = None,
**kwargs: Any,
) -> None:
r"""Abstract base class for samplers.
Args:
sample_shape: The `sample_shape` of the samples to generate. The full shape
of the samples is given by `posterior._extended_shape(sample_shape)`.
seed: An optional seed to use for sampling.
**kwargs: Catch-all for deprecated kwargs.
"""
super().__init__()
if not isinstance(sample_shape, torch.Size):
if isinstance(sample_shape, int):
sample_shape = torch.Size([sample_shape])
warnings.warn(
"The first positional argument of samplers, `num_samples`, has "
"been deprecated and replaced with `sample_shape`, which expects "
"a `torch.Size` object.",
DeprecationWarning,
)
else:
raise InputDataError(
"Expected `sample_shape` to be a `torch.Size` object, "
f"got {sample_shape}."
)
for k, v in kwargs.items():
if k == "resample":
if v is True:
raise RuntimeError(KWARG_ERR_MSG.format(k, "StochasticSampler"))
else:
warnings.warn(KWARGS_DEPRECATED_MSG.format(k), DeprecationWarning)
elif k == "collapse_batch_dims":
if v is False:
raise RuntimeError(KWARG_ERR_MSG.format(k, "ForkedRNGSampler"))
else:
warnings.warn(KWARGS_DEPRECATED_MSG.format(k), DeprecationWarning)
else:
                raise RuntimeError(f"Received an unknown argument {k}: {v}.")
self.sample_shape = sample_shape
self.seed = seed if seed is not None else torch.randint(0, 1000000, (1,)).item()
self.register_buffer("base_samples", None)
@abstractmethod
def forward(self, posterior: Posterior) -> Tensor:
r"""Draws MC samples from the posterior.
Args:
posterior: The posterior to sample from.
Returns:
The samples drawn from the posterior.
"""
pass # pragma no cover
def _get_batch_range(self, posterior: Posterior) -> Tuple[int, int]:
r"""Get the t-batch range of the posterior with an optional override.
In rare cases, e.g., in `qMultiStepLookahead`, we may want to override the
`batch_range` of the posterior. If this behavior is desired, one can set
        the `batch_range_override` attribute on the samplers.
Args:
posterior: The posterior to sample from.
Returns:
The t-batch range to use for collapsing the base samples.
"""
if hasattr(self, "batch_range_override"):
return self.batch_range_override
return posterior.batch_range
def _get_collapsed_shape(self, posterior: Posterior) -> torch.Size:
r"""Get the shape of the base samples with the t-batches collapsed.
Args:
posterior: The posterior to sample from.
Returns:
The collapsed shape of the base samples expected by the posterior. The
t-batch dimensions of the base samples are collapsed to size 1. This is
useful to prevent sampling variance across t-batches.
"""
base_sample_shape = posterior.base_sample_shape
batch_start, batch_end = self._get_batch_range(posterior)
base_sample_shape = (
base_sample_shape[:batch_start]
+ torch.Size([1 for _ in base_sample_shape[batch_start:batch_end]])
+ base_sample_shape[batch_end:]
)
return self.sample_shape + base_sample_shape
def _get_extended_base_sample_shape(self, posterior: Posterior) -> torch.Size:
r"""Get the shape of the base samples expected by the posterior.
Args:
posterior: The posterior to sample from.
Returns:
The extended shape of the base samples expected by the posterior.
"""
return self.sample_shape + posterior.base_sample_shape
def _update_base_samples(
self, posterior: Posterior, base_sampler: MCSampler
) -> None:
r"""Update the sampler to use the original base samples for X_baseline.
This is used in CachedCholeskyAcquisitionFunctions to ensure consistency.
Args:
posterior: The posterior for which the base samples are constructed.
base_sampler: The base sampler to retrieve the base samples from.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement `_update_base_samples`."
)
def _instance_check(self, base_sampler):
r"""Check that `base_sampler` is an instance of `self.__class__`."""
if not isinstance(base_sampler, self.__class__):
raise RuntimeError(
"Expected `base_sampler` to be an instance of "
f"{self.__class__.__name__}. Got {base_sampler}."
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
A `SamplerList` for sampling from a `PosteriorList`.
"""
from __future__ import annotations
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.posteriors.posterior_list import PosteriorList
from botorch.sampling.base import MCSampler
from torch import Tensor
from torch.nn import ModuleList
class ListSampler(MCSampler):
def __init__(self, *samplers: MCSampler) -> None:
r"""A list of samplers for sampling from a `PosteriorList`.
Args:
samplers: A variable number of samplers. This should include
a sampler for each posterior.
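        Example:
            A minimal usage sketch (assumes `posterior_list` is a `PosteriorList`
            of two Gaussian posteriors; names here are illustrative only):
            >>> from botorch.sampling.normal import IIDNormalSampler
            >>> sampler = ListSampler(
            ...     IIDNormalSampler(torch.Size([64])),
            ...     IIDNormalSampler(torch.Size([64])),
            ... )
            >>> samples = sampler(posterior_list)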
"""
super(MCSampler, self).__init__()
self.samplers = ModuleList(samplers)
self._validate_samplers()
def _validate_samplers(self) -> None:
r"""Checks that the samplers share the same sample shape."""
sample_shapes = [s.sample_shape for s in self.samplers]
if not all(sample_shapes[0] == ss for ss in sample_shapes):
raise UnsupportedError(
"ListSampler requires all samplers to have the same sample shape."
)
@property
def sample_shape(self) -> torch.Size:
r"""The sample shape of the underlying samplers."""
self._validate_samplers()
return self.samplers[0].sample_shape
def forward(self, posterior: PosteriorList) -> Tensor:
r"""Samples from the posteriors and concatenates the samples.
Args:
posterior: A `PosteriorList` to sample from.
Returns:
The samples drawn from the posterior.
"""
samples_list = [
s(posterior=p) for s, p in zip(self.samplers, posterior.posteriors)
]
return posterior._reshape_and_cat(tensors=samples_list)
def _update_base_samples(
self, posterior: PosteriorList, base_sampler: ListSampler
) -> None:
r"""Update the sampler to use the original base samples for X_baseline.
This is used in CachedCholeskyAcquisitionFunctions to ensure consistency.
Args:
posterior: The posterior for which the base samples are constructed.
base_sampler: The base sampler to retrieve the base samples from.
"""
self._instance_check(base_sampler=base_sampler)
for s, p, bs in zip(self.samplers, posterior.posteriors, base_sampler.samplers):
s._update_base_samples(posterior=p, base_sampler=bs)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Any, Callable, List, Optional
from botorch.models.approximate_gp import ApproximateGPyTorchModel
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.sampling.pathwise.features import gen_kernel_features
from botorch.sampling.pathwise.features.generators import TKernelFeatureMapGenerator
from botorch.sampling.pathwise.paths import GeneralizedLinearPath, PathList, SamplePath
from botorch.sampling.pathwise.utils import (
get_input_transform,
get_output_transform,
get_train_inputs,
TInputTransform,
TOutputTransform,
)
from botorch.utils.dispatcher import Dispatcher
from botorch.utils.sampling import draw_sobol_normal_samples
from gpytorch.kernels import Kernel
from gpytorch.models import ApproximateGP, ExactGP, GP
from gpytorch.variational import _VariationalStrategy
from torch import Size, Tensor
from torch.nn import Module
TPathwisePriorSampler = Callable[[GP, Size], SamplePath]
DrawKernelFeaturePaths = Dispatcher("draw_kernel_feature_paths")
def draw_kernel_feature_paths(
model: GP, sample_shape: Size, **kwargs: Any
) -> GeneralizedLinearPath:
r"""Draws functions from a Bayesian-linear-model-based approximation to a GP prior.
    When evaluated, sample paths produced by this method return Tensors with dimensions
`sample_dims x batch_dims x [joint_dim]`, where `joint_dim` denotes the penultimate
dimension of the input tensor. For multioutput models, outputs are returned as the
final batch dimension.
Args:
model: The prior over functions.
sample_shape: The shape of the sample paths to be drawn.
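    Example:
        A minimal usage sketch (assumes a fitted `SingleTaskGP` named `model`
        and a `batch x d`-dim tensor `test_X`, neither of which is defined here):
        >>> paths = draw_kernel_feature_paths(model, sample_shape=torch.Size([16]))
        >>> samples = paths(test_X)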
"""
return DrawKernelFeaturePaths(model, sample_shape=sample_shape, **kwargs)
def _draw_kernel_feature_paths_fallback(
num_inputs: int,
mean_module: Optional[Module],
covar_module: Kernel,
sample_shape: Size,
num_features: int = 1024,
map_generator: TKernelFeatureMapGenerator = gen_kernel_features,
input_transform: Optional[TInputTransform] = None,
output_transform: Optional[TOutputTransform] = None,
weight_generator: Optional[Callable[[Size], Tensor]] = None,
) -> GeneralizedLinearPath:
# Generate a kernel feature map
feature_map = map_generator(
kernel=covar_module,
num_inputs=num_inputs,
num_outputs=num_features,
)
# Sample random weights with which to combine kernel features
if weight_generator is None:
weight = draw_sobol_normal_samples(
n=sample_shape.numel() * covar_module.batch_shape.numel(),
d=feature_map.num_outputs,
device=covar_module.device,
dtype=covar_module.dtype,
).reshape(sample_shape + covar_module.batch_shape + (feature_map.num_outputs,))
else:
weight = weight_generator(
sample_shape + covar_module.batch_shape + (feature_map.num_outputs,)
).to(device=covar_module.device, dtype=covar_module.dtype)
# Return the sample paths
return GeneralizedLinearPath(
feature_map=feature_map,
weight=weight,
bias_module=mean_module,
input_transform=input_transform,
output_transform=output_transform,
)
@DrawKernelFeaturePaths.register(ExactGP)
def _draw_kernel_feature_paths_ExactGP(
model: ExactGP, **kwargs: Any
) -> GeneralizedLinearPath:
(train_X,) = get_train_inputs(model, transformed=False)
return _draw_kernel_feature_paths_fallback(
num_inputs=train_X.shape[-1],
mean_module=model.mean_module,
covar_module=model.covar_module,
input_transform=get_input_transform(model),
output_transform=get_output_transform(model),
**kwargs,
)
@DrawKernelFeaturePaths.register(ModelListGP)
def _draw_kernel_feature_paths_list(
model: ModelListGP,
join: Optional[Callable[[List[Tensor]], Tensor]] = None,
**kwargs: Any,
) -> PathList:
paths = [draw_kernel_feature_paths(m, **kwargs) for m in model.models]
return PathList(paths=paths, join=join)
@DrawKernelFeaturePaths.register(ApproximateGPyTorchModel)
def _draw_kernel_feature_paths_ApproximateGPyTorchModel(
model: ApproximateGPyTorchModel, **kwargs: Any
) -> GeneralizedLinearPath:
(train_X,) = get_train_inputs(model, transformed=False)
return DrawKernelFeaturePaths(
model.model,
num_inputs=train_X.shape[-1],
input_transform=get_input_transform(model),
output_transform=get_output_transform(model),
**kwargs,
)
@DrawKernelFeaturePaths.register(ApproximateGP)
def _draw_kernel_feature_paths_ApproximateGP(
model: ApproximateGP, **kwargs: Any
) -> GeneralizedLinearPath:
return DrawKernelFeaturePaths(model, model.variational_strategy, **kwargs)
@DrawKernelFeaturePaths.register(ApproximateGP, _VariationalStrategy)
def _draw_kernel_feature_paths_ApproximateGP_fallback(
model: ApproximateGP,
_: _VariationalStrategy,
*,
num_inputs: int,
**kwargs: Any,
) -> GeneralizedLinearPath:
return _draw_kernel_feature_paths_fallback(
num_inputs=num_inputs,
mean_module=model.mean_module,
covar_module=model.covar_module,
**kwargs,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from abc import ABC
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Tuple,
Union,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.sampling.pathwise.features import FeatureMap
from botorch.sampling.pathwise.utils import (
TInputTransform,
TOutputTransform,
TransformedModuleMixin,
)
from torch import Tensor
from torch.nn import Module, ModuleDict, ModuleList, Parameter
class SamplePath(ABC, TransformedModuleMixin, Module):
r"""Abstract base class for Botorch sample paths."""
class PathDict(SamplePath):
r"""A dictionary of SamplePaths."""
def __init__(
self,
paths: Optional[Mapping[str, SamplePath]] = None,
join: Optional[Callable[[List[Tensor]], Tensor]] = None,
input_transform: Optional[TInputTransform] = None,
output_transform: Optional[TOutputTransform] = None,
) -> None:
r"""Initializes a PathDict instance.
Args:
paths: An optional mapping of strings to sample paths.
join: An optional callable used to combine each path's outputs.
input_transform: An optional input transform for the module.
output_transform: An optional output transform for the module.
"""
if join is None and output_transform is not None:
raise UnsupportedError("Output transforms must be preceded by a join rule.")
super().__init__()
self.join = join
self.input_transform = input_transform
self.output_transform = output_transform
self.paths = (
paths
if isinstance(paths, ModuleDict)
else ModuleDict({} if paths is None else paths)
)
def forward(self, x: Tensor, **kwargs: Any) -> Union[Tensor, Dict[str, Tensor]]:
out = [path(x, **kwargs) for path in self.paths.values()]
return dict(zip(self.paths, out)) if self.join is None else self.join(out)
def items(self) -> Iterable[Tuple[str, SamplePath]]:
return self.paths.items()
def keys(self) -> Iterable[str]:
return self.paths.keys()
def values(self) -> Iterable[SamplePath]:
return self.paths.values()
def __len__(self) -> int:
return len(self.paths)
def __iter__(self) -> Iterator[SamplePath]:
yield from self.paths
def __delitem__(self, key: str) -> None:
del self.paths[key]
def __getitem__(self, key: str) -> SamplePath:
return self.paths[key]
def __setitem__(self, key: str, val: SamplePath) -> None:
self.paths[key] = val
class PathList(SamplePath):
r"""A list of SamplePaths."""
def __init__(
self,
paths: Optional[Iterable[SamplePath]] = None,
join: Optional[Callable[[List[Tensor]], Tensor]] = None,
input_transform: Optional[TInputTransform] = None,
output_transform: Optional[TOutputTransform] = None,
) -> None:
r"""Initializes a PathList instance.
Args:
paths: An optional iterable of sample paths.
join: An optional callable used to combine each path's outputs.
input_transform: An optional input transform for the module.
output_transform: An optional output transform for the module.
"""
if join is None and output_transform is not None:
raise UnsupportedError("Output transforms must be preceded by a join rule.")
super().__init__()
self.join = join
self.input_transform = input_transform
self.output_transform = output_transform
self.paths = (
paths
if isinstance(paths, ModuleList)
else ModuleList({} if paths is None else paths)
)
def forward(self, x: Tensor, **kwargs: Any) -> Union[Tensor, List[Tensor]]:
out = [path(x, **kwargs) for path in self.paths]
return out if self.join is None else self.join(out)
def __len__(self) -> int:
return len(self.paths)
def __iter__(self) -> Iterator[SamplePath]:
yield from self.paths
def __delitem__(self, key: int) -> None:
del self.paths[key]
def __getitem__(self, key: int) -> SamplePath:
return self.paths[key]
def __setitem__(self, key: int, val: SamplePath) -> None:
self.paths[key] = val
class GeneralizedLinearPath(SamplePath):
r"""A sample path in the form of a generalized linear model."""
def __init__(
self,
feature_map: FeatureMap,
weight: Union[Parameter, Tensor],
bias_module: Optional[Module] = None,
input_transform: Optional[TInputTransform] = None,
output_transform: Optional[TOutputTransform] = None,
):
r"""Initializes a GeneralizedLinearPath instance.
.. code-block:: text
path(x) = output_transform(bias_module(z) + feature_map(z)^T weight),
where z = input_transform(x).
Args:
feature_map: A map used to featurize the module's inputs.
weight: A tensor of weights used to combine input features.
bias_module: An optional module used to define additive offsets.
input_transform: An optional input transform for the module.
output_transform: An optional output transform for the module.
"""
super().__init__()
self.feature_map = feature_map
if not isinstance(weight, Parameter):
self.register_buffer("weight", weight)
self.weight = weight
self.bias_module = bias_module
self.input_transform = input_transform
self.output_transform = output_transform
def forward(self, x: Tensor, **kwargs) -> Tensor:
feat = self.feature_map(x, **kwargs)
out = (feat @ self.weight.unsqueeze(-1)).squeeze(-1)
return out if self.bias_module is None else out + self.bias_module(x)
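# Illustrative usage sketch (not part of the library API): how a
# GeneralizedLinearPath contracts features against weights. The identity
# feature map below is hypothetical and exists only for this example.
def _example_generalized_linear_path() -> None:
    import torch

    class _IdentityFeatures(Module):
        def forward(self, x: Tensor) -> Tensor:
            return x  # use the raw inputs as features

    path = GeneralizedLinearPath(feature_map=_IdentityFeatures(), weight=torch.ones(3))
    x = torch.rand(5, 3)
    # (feat @ weight) collapses the trailing feature dimension, leaving one value per input.
    assert path(x).shape == (5,)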
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
.. [wilson2020sampling]
J. Wilson, V. Borovitskiy, A. Terenin, P. Mostowsky, and M. Deisenroth. Efficiently
sampling functions from Gaussian process posteriors. International Conference on
Machine Learning (2020).
.. [wilson2021pathwise]
J. Wilson, V. Borovitskiy, A. Terenin, P. Mostowsky, and M. Deisenroth. Pathwise
Conditioning of Gaussian Processes. Journal of Machine Learning Research (2021).
"""
from __future__ import annotations
from typing import Any, Optional, Union
from botorch.models.approximate_gp import ApproximateGPyTorchModel
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.sampling.pathwise.paths import PathDict, PathList, SamplePath
from botorch.sampling.pathwise.prior_samplers import (
draw_kernel_feature_paths,
TPathwisePriorSampler,
)
from botorch.sampling.pathwise.update_strategies import gaussian_update, TPathwiseUpdate
from botorch.sampling.pathwise.utils import (
get_output_transform,
get_train_inputs,
get_train_targets,
TInputTransform,
TOutputTransform,
)
from botorch.utils.context_managers import delattr_ctx
from botorch.utils.dispatcher import Dispatcher
from gpytorch.models import ApproximateGP, ExactGP, GP
from torch import Size
DrawMatheronPaths = Dispatcher("draw_matheron_paths")
class MatheronPath(PathDict):
r"""Represents function draws from a GP posterior via Matheron's rule:
.. code-block:: text
"Prior path"
v
(f | y)(·) = f(·) + Cov(f(·), y) Cov(y, y)^{-1} (y - f(X) - ε),
\_______________________________________/
v
"Update path"
where `=` denotes equality in distribution, :math:`f \sim GP(0, k)`,
:math:`y \sim N(f(X), \Sigma)`, and :math:`\epsilon \sim N(0, \Sigma)`.
For more information, see [wilson2020sampling]_ and [wilson2021pathwise]_.
"""
def __init__(
self,
prior_paths: SamplePath,
update_paths: SamplePath,
input_transform: Optional[TInputTransform] = None,
output_transform: Optional[TOutputTransform] = None,
) -> None:
r"""Initializes a MatheronPath instance.
Args:
prior_paths: Sample paths used to represent the prior.
update_paths: Sample paths used to represent the data.
input_transform: An optional input transform for the module.
output_transform: An optional output transform for the module.
"""
super().__init__(
join=sum,
paths={"prior_paths": prior_paths, "update_paths": update_paths},
input_transform=input_transform,
output_transform=output_transform,
)
def draw_matheron_paths(
model: GP,
sample_shape: Size,
prior_sampler: TPathwisePriorSampler = draw_kernel_feature_paths,
update_strategy: TPathwiseUpdate = gaussian_update,
**kwargs: Any,
) -> MatheronPath:
r"""Generates function draws from (an approximate) Gaussian process prior.
When evaluted, sample paths produced by this method return Tensors with dimensions
`sample_dims x batch_dims x [joint_dim]`, where `joint_dim` denotes the penultimate
dimension of the input tensor. For multioutput models, outputs are returned as the
final batch dimension.
Args:
model: Gaussian process whose posterior is to be sampled.
sample_shape: Sizes of sample dimensions.
prior_sampler: A callable that takes a model and a sample shape and returns
a set of sample paths representing the prior.
update_strategy: A callable that takes a model and a tensor of prior process
values and returns a set of sample paths representing the data.
"""
return DrawMatheronPaths(
model,
sample_shape=sample_shape,
prior_sampler=prior_sampler,
update_strategy=update_strategy,
**kwargs,
)
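# Illustrative usage sketch (not part of the library API): posterior sample
# paths from a fitted single-output GP. The toy data below is hypothetical.
def _example_draw_matheron_paths() -> None:
    import torch
    from botorch.models import SingleTaskGP

    train_X = torch.rand(8, 2, dtype=torch.float64)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    paths = draw_matheron_paths(model, sample_shape=Size([16]))
    samples = paths(torch.rand(4, 2, dtype=torch.float64))  # shape: 16 x 4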
@DrawMatheronPaths.register(ModelListGP)
def _draw_matheron_paths_ModelListGP(model: ModelListGP, **kwargs: Any):
return PathList([draw_matheron_paths(m, **kwargs) for m in model.models])
@DrawMatheronPaths.register(ExactGP)
def _draw_matheron_paths_ExactGP(
model: ExactGP,
*,
sample_shape: Size,
prior_sampler: TPathwisePriorSampler,
update_strategy: TPathwiseUpdate,
) -> MatheronPath:
(train_X,) = get_train_inputs(model, transformed=True)
train_Y = get_train_targets(model, transformed=True)
with delattr_ctx(model, "outcome_transform"):
# Generate draws from the prior
prior_paths = prior_sampler(model=model, sample_shape=sample_shape)
sample_values = prior_paths.forward(train_X)
# Compute pathwise updates
update_paths = update_strategy(
model=model,
sample_values=sample_values,
train_targets=train_Y,
)
return MatheronPath(
prior_paths=prior_paths,
update_paths=update_paths,
output_transform=get_output_transform(model),
)
@DrawMatheronPaths.register((ApproximateGP, ApproximateGPyTorchModel))
def _draw_matheron_paths_ApproximateGP(
model: Union[ApproximateGP, ApproximateGPyTorchModel],
*,
sample_shape: Size,
prior_sampler: TPathwisePriorSampler,
update_strategy: TPathwiseUpdate,
**kwargs: Any,
) -> MatheronPath:
# Note: Inducing points are assumed to be pre-transformed
Z = (
model.model.variational_strategy.inducing_points
if isinstance(model, ApproximateGPyTorchModel)
else model.variational_strategy.inducing_points
)
with delattr_ctx(model, "outcome_transform"):
# Generate draws from the prior
prior_paths = prior_sampler(model=model, sample_shape=sample_shape)
sample_values = prior_paths.forward(Z) # `forward` bypasses transforms
# Compute pathwise updates
update_paths = update_strategy(model=model, sample_values=sample_values)
return MatheronPath(
prior_paths=prior_paths,
update_paths=update_paths,
output_transform=get_output_transform(model),
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.sampling.pathwise.features import (
gen_kernel_features,
KernelEvaluationMap,
KernelFeatureMap,
)
from botorch.sampling.pathwise.paths import (
GeneralizedLinearPath,
PathDict,
PathList,
SamplePath,
)
from botorch.sampling.pathwise.posterior_samplers import (
draw_matheron_paths,
MatheronPath,
)
from botorch.sampling.pathwise.prior_samplers import draw_kernel_feature_paths
from botorch.sampling.pathwise.update_strategies import gaussian_update
__all__ = [
"draw_matheron_paths",
"draw_kernel_feature_paths",
"gen_kernel_features",
"gaussian_update",
"GeneralizedLinearPath",
"KernelEvaluationMap",
"KernelFeatureMap",
"MatheronPath",
"SamplePath",
"PathDict",
"PathList",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Any, Callable, Optional, Union
import torch
from botorch.models.approximate_gp import ApproximateGPyTorchModel
from botorch.models.transforms.input import InputTransform
from botorch.sampling.pathwise.features import KernelEvaluationMap
from botorch.sampling.pathwise.paths import GeneralizedLinearPath, SamplePath
from botorch.sampling.pathwise.utils import (
get_input_transform,
get_train_inputs,
get_train_targets,
TInputTransform,
)
from botorch.utils.dispatcher import Dispatcher
from botorch.utils.types import DEFAULT, NoneType
from gpytorch.kernels.kernel import Kernel
from gpytorch.likelihoods import _GaussianLikelihoodBase, Likelihood
from gpytorch.models import ApproximateGP, ExactGP, GP
from gpytorch.variational import VariationalStrategy
from linear_operator.operators import (
LinearOperator,
SumLinearOperator,
ZeroLinearOperator,
)
from torch import Tensor
TPathwiseUpdate = Callable[[GP, Tensor], SamplePath]
GaussianUpdate = Dispatcher("gaussian_update")
def gaussian_update(
model: GP,
sample_values: Tensor,
likelihood: Optional[Likelihood] = DEFAULT,
**kwargs: Any,
) -> GeneralizedLinearPath:
r"""Computes a Gaussian pathwise update in exact arithmetic:
.. code-block:: text
(f | y)(·) = f(·) + Cov(f(·), y) Cov(y, y)^{-1} (y - f(X) - ε),
in which the second term, Cov(f(·), y) Cov(y, y)^{-1} (y - f(X) - ε), is the "Gaussian pathwise update",
where `=` denotes equality in distribution, :math:`f \sim GP(0, k)`,
:math:`y \sim N(f(X), \Sigma)`, and :math:`\epsilon \sim N(0, \Sigma)`.
For more information, see [wilson2020sampling]_ and [wilson2021pathwise]_.
Args:
model: A Gaussian process prior together with a likelihood.
sample_values: Assumed values for :math:`f(X)`.
likelihood: An optional likelihood used to help define the desired
update. Defaults to `model.likelihood` if it exists else None.
"""
if likelihood is DEFAULT:
likelihood = getattr(model, "likelihood", None)
return GaussianUpdate(model, likelihood, sample_values=sample_values, **kwargs)
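# Illustrative usage sketch (not part of the library API): building the
# data-dependent "update" path by hand from prior sample values, mirroring how
# `draw_matheron_paths` uses this function. The toy model below is hypothetical.
def _example_gaussian_update() -> None:
    from botorch.models import SingleTaskGP
    from botorch.sampling.pathwise.prior_samplers import draw_kernel_feature_paths

    train_X = torch.rand(8, 2, dtype=torch.float64)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    prior_paths = draw_kernel_feature_paths(model, sample_shape=torch.Size([32]))
    sample_values = prior_paths.forward(get_train_inputs(model, transformed=True)[0])
    update_paths = gaussian_update(model, sample_values=sample_values)
    # Posterior draws may then be formed as `prior_paths(x) + update_paths(x)`.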
def _gaussian_update_exact(
kernel: Kernel,
points: Tensor,
target_values: Tensor,
sample_values: Tensor,
noise_covariance: Optional[Union[Tensor, LinearOperator]] = None,
scale_tril: Optional[Union[Tensor, LinearOperator]] = None,
input_transform: Optional[TInputTransform] = None,
) -> GeneralizedLinearPath:
# Prepare Cholesky factor of `Cov(y, y)` and noise sample values as needed
if isinstance(noise_covariance, (NoneType, ZeroLinearOperator)):
scale_tril = kernel(points).cholesky() if scale_tril is None else scale_tril
else:
noise_values = torch.randn_like(sample_values).unsqueeze(-1)
noise_values = noise_covariance.cholesky() @ noise_values
sample_values = sample_values + noise_values.squeeze(-1)
scale_tril = (
SumLinearOperator(kernel(points), noise_covariance).cholesky()
if scale_tril is None
else scale_tril
)
# Solve for `Cov(y, y)^{-1}(Y - f(X) - ε)`
errors = target_values - sample_values
weight = torch.cholesky_solve(errors.unsqueeze(-1), scale_tril.to_dense())
# Define update feature map and paths
feature_map = KernelEvaluationMap(
kernel=kernel,
points=points,
input_transform=input_transform,
)
return GeneralizedLinearPath(feature_map=feature_map, weight=weight.squeeze(-1))
@GaussianUpdate.register(ExactGP, _GaussianLikelihoodBase)
def _gaussian_update_ExactGP(
model: ExactGP,
likelihood: _GaussianLikelihoodBase,
*,
sample_values: Tensor,
target_values: Optional[Tensor] = None,
points: Optional[Tensor] = None,
noise_covariance: Optional[Union[Tensor, LinearOperator]] = None,
scale_tril: Optional[Union[Tensor, LinearOperator]] = None,
**ignore: Any,
) -> GeneralizedLinearPath:
if points is None:
(points,) = get_train_inputs(model, transformed=True)
if target_values is None:
target_values = get_train_targets(model, transformed=True)
if noise_covariance is None:
noise_covariance = likelihood.noise_covar(shape=points.shape[:-1])
return _gaussian_update_exact(
kernel=model.covar_module,
points=points,
target_values=target_values,
sample_values=sample_values,
noise_covariance=noise_covariance,
scale_tril=scale_tril,
input_transform=get_input_transform(model),
)
@GaussianUpdate.register(ApproximateGPyTorchModel, (Likelihood, NoneType))
def _gaussian_update_ApproximateGPyTorchModel(
model: ApproximateGPyTorchModel,
likelihood: Union[Likelihood, NoneType],
**kwargs: Any,
) -> GeneralizedLinearPath:
return GaussianUpdate(
model.model, likelihood, input_transform=get_input_transform(model), **kwargs
)
@GaussianUpdate.register(ApproximateGP, (Likelihood, NoneType))
def _gaussian_update_ApproximateGP(
model: ApproximateGP, likelihood: Union[Likelihood, NoneType], **kwargs: Any
) -> GeneralizedLinearPath:
return GaussianUpdate(model, model.variational_strategy, **kwargs)
@GaussianUpdate.register(ApproximateGP, VariationalStrategy)
def _gaussian_update_ApproximateGP_VariationalStrategy(
model: ApproximateGP,
_: VariationalStrategy,
*,
sample_values: Tensor,
target_values: Optional[Tensor] = None,
noise_covariance: Optional[Union[Tensor, LinearOperator]] = None,
input_transform: Optional[InputTransform] = None,
**ignore: Any,
) -> GeneralizedLinearPath:
# TODO: Account for jitter added by `psd_safe_cholesky`
if not isinstance(noise_covariance, (NoneType, ZeroLinearOperator)):
raise NotImplementedError(
f"`noise_covariance` argument not yet supported for {type(model)}."
)
# Inducing points `Z` are assumed to live in transformed space
batch_shape = model.covar_module.batch_shape
v = model.variational_strategy
Z = v.inducing_points
L = v._cholesky_factor(v(Z, prior=True).lazy_covariance_matrix).to(
dtype=sample_values.dtype
)
# Generate whitened inducing variables `u`, then location-scale transform
if target_values is None:
u = v.variational_distribution.rsample(
sample_values.shape[: sample_values.ndim - len(batch_shape) - 1],
)
target_values = model.mean_module(Z) + (u @ L.transpose(-1, -2))
return _gaussian_update_exact(
kernel=model.covar_module,
points=Z,
target_values=target_values,
sample_values=sample_values,
scale_tril=L,
input_transform=input_transform,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Callable, Iterable, List, Optional, overload, Tuple, Union
import torch
from botorch.models.approximate_gp import SingleTaskVariationalGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import Model, ModelList
from botorch.models.transforms.input import InputTransform
from botorch.models.transforms.outcome import OutcomeTransform
from botorch.utils.dispatcher import Dispatcher
from gpytorch.kernels import ScaleKernel
from gpytorch.kernels.kernel import Kernel
from torch import LongTensor, Tensor
from torch.nn import Module, ModuleList
TInputTransform = Union[InputTransform, Callable[[Tensor], Tensor]]
TOutputTransform = Union[OutcomeTransform, Callable[[Tensor], Tensor]]
GetTrainInputs = Dispatcher("get_train_inputs")
GetTrainTargets = Dispatcher("get_train_targets")
class TransformedModuleMixin:
r"""Mixin that wraps a module's __call__ method with optional transforms."""
input_transform: Optional[TInputTransform]
output_transform: Optional[TOutputTransform]
def __call__(self, values: Tensor, *args: Any, **kwargs: Any) -> Tensor:
input_transform = getattr(self, "input_transform", None)
if input_transform is not None:
values = (
input_transform.forward(values)
if isinstance(input_transform, InputTransform)
else input_transform(values)
)
output = super().__call__(values, *args, **kwargs)
output_transform = getattr(self, "output_transform", None)
if output_transform is None:
return output
return (
output_transform.untransform(output)[0]
if isinstance(output_transform, OutcomeTransform)
else output_transform(output)
)
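# Illustrative usage sketch (not part of the library API): plain callables may
# serve as transforms. The `_Double` module below is hypothetical and defined
# only for this example.
def _example_transformed_module_mixin() -> None:
    class _Double(TransformedModuleMixin, Module):
        def forward(self, values: Tensor) -> Tensor:
            return 2 * values

    module = _Double()
    module.input_transform = lambda x: x + 1  # applied before `forward`
    module.output_transform = lambda y: y - 1  # applied after `forward`
    assert torch.equal(module(torch.zeros(3)), torch.ones(3))  # 2 * (0 + 1) - 1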
class TensorTransform(ABC, Module):
r"""Abstract base class for transforms that map tensor to tensor."""
@abstractmethod
def forward(self, values: Tensor, **kwargs: Any) -> Tensor:
pass # pragma: no cover
class ChainedTransform(TensorTransform):
r"""A composition of TensorTransforms."""
def __init__(self, *transforms: TensorTransform):
r"""Initializes a ChainedTransform instance.
Args:
transforms: A set of transforms to be applied from right to left.
"""
super().__init__()
self.transforms = ModuleList(transforms)
def forward(self, values: Tensor) -> Tensor:
for transform in reversed(self.transforms):
values = transform(values)
return values
class SineCosineTransform(TensorTransform):
r"""A transform that returns concatenated sine and cosine features."""
def __init__(self, scale: Optional[Tensor] = None):
r"""Initializes a SineCosineTransform instance.
Args:
scale: An optional tensor used to rescale the module's outputs.
"""
super().__init__()
self.scale = scale
def forward(self, values: Tensor) -> Tensor:
sincos = torch.concat([values.sin(), values.cos()], dim=-1)
return sincos if self.scale is None else self.scale * sincos
class InverseLengthscaleTransform(TensorTransform):
r"""A transform that divides its inputs by a kernels lengthscales."""
def __init__(self, kernel: Kernel):
r"""Initializes an InverseLengthscaleTransform instance.
Args:
kernel: The kernel whose lengthscales are to be used.
"""
if not kernel.has_lengthscale:
raise RuntimeError(f"{type(kernel)} does not implement `lengthscale`.")
super().__init__()
self.kernel = kernel
def forward(self, values: Tensor) -> Tensor:
return self.kernel.lengthscale.reciprocal() * values
class OutputscaleTransform(TensorTransform):
r"""A transform that multiplies its inputs by the square root of a
kernel's outputscale."""
def __init__(self, kernel: ScaleKernel):
r"""Initializes an OutputscaleTransform instance.
Args:
kernel: A ScaleKernel whose `outputscale` is to be used.
"""
super().__init__()
self.kernel = kernel
def forward(self, values: Tensor) -> Tensor:
outputscale = (
self.kernel.outputscale[..., None, None]
if self.kernel.batch_shape
else self.kernel.outputscale
)
return outputscale.sqrt() * values
class FeatureSelector(TensorTransform):
r"""A transform that returns a subset of its input's features.
along a given tensor dimension."""
def __init__(self, indices: Iterable[int], dim: Union[int, LongTensor] = -1):
r"""Initializes a FeatureSelector instance.
Args:
indices: A LongTensor of feature indices.
dim: The dimension along which to index features.
"""
super().__init__()
self.register_buffer("dim", dim if torch.is_tensor(dim) else torch.tensor(dim))
self.register_buffer(
"indices", indices if torch.is_tensor(indices) else torch.tensor(indices)
)
def forward(self, values: Tensor) -> Tensor:
return values.index_select(dim=self.dim, index=self.indices)
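# Illustrative usage sketch (not part of the library API): ChainedTransform
# applies its transforms from right to left, so the FeatureSelector runs first
# and the SineCosineTransform second.
def _example_chained_transform() -> None:
    transform = ChainedTransform(SineCosineTransform(), FeatureSelector(indices=[0, 2]))
    values = torch.rand(5, 3)
    out = transform(values)  # two selected features -> their sine and cosine features
    assert out.shape == (5, 4)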
class OutcomeUntransformer(TensorTransform):
r"""Module acting as a bridge for `OutcomeTransform.untransform`."""
def __init__(
self,
transform: OutcomeTransform,
num_outputs: Union[int, LongTensor],
):
r"""Initializes an OutcomeUntransformer instance.
Args:
transform: The wrapped OutcomeTransform instance.
num_outputs: The number of outcome features that the
OutcomeTransform transforms.
"""
super().__init__()
self.transform = transform
self.register_buffer(
"num_outputs",
num_outputs if torch.is_tensor(num_outputs) else torch.tensor(num_outputs),
)
def forward(self, values: Tensor) -> Tensor:
# OutcomeTransforms expect an explicit output dimension in the final position.
if self.num_outputs == 1: # BoTorch has suppressed the output dimension
output_values, _ = self.transform.untransform(values.unsqueeze(-1))
return output_values.squeeze(-1)
# BoTorch has moved the output dimension inside as the final batch dimension.
output_values, _ = self.transform.untransform(values.transpose(-2, -1))
return output_values.transpose(-2, -1)
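# Illustrative usage sketch (not part of the library API): undoing a Standardize
# outcome transform on sample values that carry no explicit output dimension.
def _example_outcome_untransformer() -> None:
    from botorch.models.transforms.outcome import Standardize

    transform = Standardize(m=1)
    transform(torch.randn(10, 1))  # fit the transform's mean and standard deviation
    untransformer = OutcomeUntransformer(transform=transform, num_outputs=1)
    assert untransformer(torch.randn(4, 10)).shape == (4, 10)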
def get_input_transform(model: GPyTorchModel) -> Optional[InputTransform]:
r"""Returns a model's input_transform or None."""
return getattr(model, "input_transform", None)
def get_output_transform(model: GPyTorchModel) -> Optional[OutcomeUntransformer]:
r"""Returns a wrapped version of a model's outcome_transform or None."""
transform = getattr(model, "outcome_transform", None)
if transform is None:
return None
return OutcomeUntransformer(transform=transform, num_outputs=model.num_outputs)
@overload
def get_train_inputs(model: Model, transformed: bool = False) -> Tuple[Tensor, ...]:
pass # pragma: no cover
@overload
def get_train_inputs(model: ModelList, transformed: bool = False) -> List[...]:
pass # pragma: no cover
def get_train_inputs(model: Model, transformed: bool = False):
return GetTrainInputs(model, transformed=transformed)
@GetTrainInputs.register(Model)
def _get_train_inputs_Model(model: Model, transformed: bool = False) -> Tuple[Tensor]:
if not transformed:
original_train_input = getattr(model, "_original_train_inputs", None)
if torch.is_tensor(original_train_input):
return (original_train_input,)
(X,) = model.train_inputs
transform = get_input_transform(model)
if transform is None:
return (X,)
if model.training:
return (transform.forward(X) if transformed else X,)
return (X if transformed else transform.untransform(X),)
@GetTrainInputs.register(SingleTaskVariationalGP)
def _get_train_inputs_SingleTaskVariationalGP(
model: SingleTaskVariationalGP, transformed: bool = False
) -> Tuple[Tensor]:
(X,) = model.model.train_inputs
if model.training != transformed:
return (X,)
transform = get_input_transform(model)
if transform is None:
return (X,)
return (transform.forward(X) if model.training else transform.untransform(X),)
@GetTrainInputs.register(ModelList)
def _get_train_inputs_ModelList(
model: ModelList, transformed: bool = False
) -> List[...]:
return [get_train_inputs(m, transformed=transformed) for m in model.models]
@overload
def get_train_targets(model: Model, transformed: bool = False) -> Tensor:
pass # pragma: no cover
@overload
def get_train_targets(model: ModelList, transformed: bool = False) -> List[...]:
pass # pragma: no cover
def get_train_targets(model: Model, transformed: bool = False):
return GetTrainTargets(model, transformed=transformed)
@GetTrainTargets.register(Model)
def _get_train_targets_Model(model: Model, transformed: bool = False) -> Tensor:
Y = model.train_targets
# Note: Avoid using `get_output_transform` here since it creates a Module
transform = getattr(model, "outcome_transform", None)
if transformed or transform is None:
return Y
if model.num_outputs == 1:
return transform.untransform(Y.unsqueeze(-1))[0].squeeze(-1)
return transform.untransform(Y.transpose(-2, -1))[0].transpose(-2, -1)
@GetTrainTargets.register(SingleTaskVariationalGP)
def _get_train_targets_SingleTaskVariationalGP(
model: Model, transformed: bool = False
) -> Tensor:
Y = model.model.train_targets
transform = getattr(model, "outcome_transform", None)
if transformed or transform is None:
return Y
if model.num_outputs == 1:
return transform.untransform(Y.unsqueeze(-1))[0].squeeze(-1)
# SingleTaskVariationalGP.__init__ doesn't bring the multioutput dimension inside
return transform.untransform(Y)[0]
@GetTrainTargets.register(ModelList)
def _get_train_targets_ModelList(
model: ModelList, transformed: bool = False
) -> List[...]:
return [get_train_targets(m, transformed=transformed) for m in model.models]
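# Illustrative usage sketch (not part of the library API): recovering training
# data in the original (untransformed) space from a model fitted with input and
# outcome transforms. The toy data below is hypothetical.
def _example_get_train_data() -> None:
    from botorch.models import SingleTaskGP
    from botorch.models.transforms.input import Normalize
    from botorch.models.transforms.outcome import Standardize

    X = 2 * torch.rand(8, 2, dtype=torch.float64)
    Y = X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(
        X, Y, input_transform=Normalize(d=2), outcome_transform=Standardize(m=1)
    )
    (X_original,) = get_train_inputs(model, transformed=False)  # ≈ X
    Y_original = get_train_targets(model, transformed=False)  # ≈ Y.squeeze(-1)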
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional, Union
import torch
from botorch.sampling.pathwise.utils import (
TInputTransform,
TOutputTransform,
TransformedModuleMixin,
)
from gpytorch.kernels import Kernel
from linear_operator.operators import LinearOperator
from torch import Size, Tensor
from torch.nn import Module
class FeatureMap(TransformedModuleMixin, Module):
num_outputs: int
batch_shape: Size
input_transform: Optional[TInputTransform]
output_transform: Optional[TOutputTransform]
class KernelEvaluationMap(FeatureMap):
r"""A feature map defined by centering a kernel at a set of points."""
def __init__(
self,
kernel: Kernel,
points: Tensor,
input_transform: Optional[TInputTransform] = None,
output_transform: Optional[TOutputTransform] = None,
) -> None:
r"""Initializes a KernelEvaluationMap instance:
.. code-block:: text
feature_map(x) = output_transform(kernel(input_transform(x), points)).
Args:
kernel: The kernel :math:`k` used to define the feature map.
points: A tensor passed as the kernel's second argument.
input_transform: An optional input transform for the module.
output_transform: An optional output transform for the module.
"""
try:
torch.broadcast_shapes(points.shape[:-2], kernel.batch_shape)
except RuntimeError:
raise RuntimeError(
f"Shape mismatch: {points.shape=}, but {kernel.batch_shape=}."
)
super().__init__()
self.kernel = kernel
self.points = points
self.input_transform = input_transform
self.output_transform = output_transform
def forward(self, x: Tensor) -> Union[Tensor, LinearOperator]:
return self.kernel(x, self.points)
@property
def num_outputs(self) -> int:
if self.output_transform is None:
return self.points.shape[-1]
canary = torch.empty(
1, self.points.shape[-1], device=self.points.device, dtype=self.points.dtype
)
return self.output_transform(canary).shape[-1]
@property
def batch_shape(self) -> Size:
return self.kernel.batch_shape
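# Illustrative usage sketch (not part of the library API): each input is mapped
# to its kernel values against a fixed set of three centering points.
def _example_kernel_evaluation_map() -> None:
    from gpytorch.kernels import RBFKernel

    feature_map = KernelEvaluationMap(kernel=RBFKernel(), points=torch.rand(3, 2))
    features = feature_map(torch.rand(5, 2))  # a 5 x 3 Tensor or LinearOperator
    assert features.shape == (5, 3)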
class KernelFeatureMap(FeatureMap):
r"""Representation of a kernel :math:`k: \mathcal{X}^2 \to \mathbb{R}` as an
n-dimensional feature map :math:`\phi: \mathcal{X} \to \mathbb{R}^n` satisfying:
:math:`k(x, x') ≈ \phi(x)^\top \phi(x')`.
"""
def __init__(
self,
kernel: Kernel,
weight: Tensor,
bias: Optional[Tensor] = None,
input_transform: Optional[TInputTransform] = None,
output_transform: Optional[TOutputTransform] = None,
) -> None:
r"""Initializes a KernelFeatureMap instance:
.. code-block:: text
feature_map(x) = output_transform(input_transform(x)^{T} weight + bias).
Args:
kernel: The kernel :math:`k` used to define the feature map.
weight: A tensor of weights used to linearly combine the module's inputs.
bias: A tensor of biases to be added to the linearly combined inputs.
input_transform: An optional input transform for the module.
output_transform: An optional output transform for the module.
"""
super().__init__()
self.kernel = kernel
self.register_buffer("weight", weight)
self.register_buffer("bias", bias)
self.weight = weight
self.bias = bias
self.input_transform = input_transform
self.output_transform = output_transform
def forward(self, x: Tensor) -> Tensor:
out = x @ self.weight.transpose(-2, -1)
return out if self.bias is None else out + self.bias
@property
def num_outputs(self) -> int:
if self.output_transform is None:
return self.weight.shape[-2]
canary = torch.empty(
self.weight.shape[-2], device=self.weight.device, dtype=self.weight.dtype
)
return self.output_transform(canary).shape[-1]
@property
def batch_shape(self) -> Size:
return self.kernel.batch_shape
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.sampling.pathwise.features.generators import gen_kernel_features
from botorch.sampling.pathwise.features.maps import (
FeatureMap,
KernelEvaluationMap,
KernelFeatureMap,
)
__all__ = [
"FeatureMap",
"gen_kernel_features",
"KernelEvaluationMap",
"KernelFeatureMap",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
.. [rahimi2007random]
A. Rahimi and B. Recht. Random features for large-scale kernel machines.
Advances in Neural Information Processing Systems 20 (2007).
.. [sutherland2015error]
D. J. Sutherland and J. Schneider. On the error of random Fourier features.
arXiv preprint arXiv:1506.02785 (2015).
"""
from __future__ import annotations
from typing import Any, Callable
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.sampling.pathwise.features.maps import KernelFeatureMap
from botorch.sampling.pathwise.utils import (
ChainedTransform,
FeatureSelector,
InverseLengthscaleTransform,
OutputscaleTransform,
SineCosineTransform,
)
from botorch.utils.dispatcher import Dispatcher
from botorch.utils.sampling import draw_sobol_normal_samples
from gpytorch import kernels
from gpytorch.kernels.kernel import Kernel
from torch import Size, Tensor
from torch.distributions import Gamma
TKernelFeatureMapGenerator = Callable[[Kernel, int, int], KernelFeatureMap]
GenKernelFeatures = Dispatcher("gen_kernel_features")
def gen_kernel_features(
kernel: kernels.Kernel,
num_inputs: int,
num_outputs: int,
**kwargs: Any,
) -> KernelFeatureMap:
r"""Generates a feature map :math:`\phi: \mathcal{X} \to \mathbb{R}^{n}` such that
:math:`k(x, x') ≈ \phi(x)^{T} \phi(x')`. For stationary kernels :math:`k`, defaults
to the method of random Fourier features. For more details, see [rahimi2007random]_
and [sutherland2015error]_.
Args:
kernel: The kernel :math:`k` to be represented via a finite-dim basis.
num_inputs: The number of input features.
num_outputs: The number of kernel features.
"""
return GenKernelFeatures(
kernel,
num_inputs=num_inputs,
num_outputs=num_outputs,
**kwargs,
)
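# Illustrative usage sketch (not part of the library API): a random Fourier
# feature map for a Matern kernel, whose inner products approximate the kernel
# with accuracy improving as `num_outputs` grows.
def _example_gen_kernel_features() -> None:
    kernel = kernels.MaternKernel(nu=2.5, ard_num_dims=2).to(torch.float64)
    feature_map = gen_kernel_features(kernel, num_inputs=2, num_outputs=1024)
    X = torch.rand(5, 2, dtype=torch.float64)
    approx = feature_map(X) @ feature_map(X).transpose(-2, -1)
    exact = kernel(X).to_dense()  # approx ≈ exact up to Monte Carlo error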
def _gen_fourier_features(
kernel: kernels.Kernel,
weight_generator: Callable[[Size], Tensor],
num_inputs: int,
num_outputs: int,
) -> KernelFeatureMap:
r"""Generate a feature map :math:`\phi: \mathcal{X} \to \mathbb{R}^{2l}` that
approximates a stationary kernel so that :math:`k(x, x') ≈ \phi(x)^\top \phi(x')`.
Following [sutherland2015error]_, we represent complex exponentials by pairs of
basis functions :math:`\phi_{i}(x) = \sin(x^\top w_{i})` and
:math:`\phi_{i + l}(x) = \cos(x^\top w_{i})`.
Args:
kernel: A stationary kernel :math:`k(x, x') = k(x - x')`.
weight_generator: A callable used to generate weight vectors :math:`w`.
num_inputs: The number of input features.
num_outputs: The number of Fourier features.
"""
if num_outputs % 2:
raise UnsupportedError(
f"Expected an even number of output features, but received {num_outputs=}."
)
input_transform = InverseLengthscaleTransform(kernel)
if kernel.active_dims is not None:
num_inputs = len(kernel.active_dims)
input_transform = ChainedTransform(
input_transform, FeatureSelector(indices=kernel.active_dims)
)
weight = weight_generator(
Size([kernel.batch_shape.numel() * num_outputs // 2, num_inputs])
).reshape(*kernel.batch_shape, num_outputs // 2, num_inputs)
output_transform = SineCosineTransform(
torch.tensor((2 / num_outputs) ** 0.5, device=kernel.device, dtype=kernel.dtype)
)
return KernelFeatureMap(
kernel=kernel,
weight=weight,
input_transform=input_transform,
output_transform=output_transform,
)
@GenKernelFeatures.register(kernels.RBFKernel)
def _gen_kernel_features_rbf(
kernel: kernels.RBFKernel,
*,
num_inputs: int,
num_outputs: int,
) -> KernelFeatureMap:
def _weight_generator(shape: Size) -> Tensor:
try:
n, d = shape
except ValueError:
raise UnsupportedError(
f"Expected `shape` to be 2-dimensional, but {len(shape)=}."
)
return draw_sobol_normal_samples(
n=n,
d=d,
device=kernel.lengthscale.device,
dtype=kernel.lengthscale.dtype,
)
return _gen_fourier_features(
kernel=kernel,
weight_generator=_weight_generator,
num_inputs=num_inputs,
num_outputs=num_outputs,
)
@GenKernelFeatures.register(kernels.MaternKernel)
def _gen_kernel_features_matern(
kernel: kernels.MaternKernel,
*,
num_inputs: int,
num_outputs: int,
) -> KernelFeatureMap:
def _weight_generator(shape: Size) -> Tensor:
try:
n, d = shape
except ValueError:
raise UnsupportedError(
f"Expected `shape` to be 2-dimensional, but {len(shape)=}."
)
dtype = kernel.lengthscale.dtype
device = kernel.lengthscale.device
nu = torch.tensor(kernel.nu, device=device, dtype=dtype)
normals = draw_sobol_normal_samples(n=n, d=d, device=device, dtype=dtype)
return Gamma(nu, nu).rsample((n, 1)).rsqrt() * normals
return _gen_fourier_features(
kernel=kernel,
weight_generator=_weight_generator,
num_inputs=num_inputs,
num_outputs=num_outputs,
)
@GenKernelFeatures.register(kernels.ScaleKernel)
def _gen_kernel_features_scale(
kernel: kernels.ScaleKernel,
*,
num_inputs: int,
num_outputs: int,
) -> KernelFeatureMap:
active_dims = kernel.active_dims
feature_map = gen_kernel_features(
kernel.base_kernel,
num_inputs=num_inputs if active_dims is None else len(active_dims),
num_outputs=num_outputs,
)
if active_dims is not None and active_dims is not kernel.base_kernel.active_dims:
feature_map.input_transform = ChainedTransform(
feature_map.input_transform, FeatureSelector(indices=active_dims)
)
feature_map.output_transform = ChainedTransform(
OutputscaleTransform(kernel), feature_map.output_transform
)
return feature_map
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Methods for optimizing acquisition functions.
"""
from __future__ import annotations
import dataclasses
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from botorch.acquisition.acquisition import (
AcquisitionFunction,
OneShotAcquisitionFunction,
)
from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
from botorch.exceptions import InputDataError, UnsupportedError
from botorch.exceptions.warnings import OptimizationWarning
from botorch.generation.gen import gen_candidates_scipy, TGenCandidates
from botorch.logging import logger
from botorch.optim.initializers import (
gen_batch_initial_conditions,
gen_one_shot_kg_initial_conditions,
TGenInitialConditions,
)
from botorch.optim.stopping import ExpMAStoppingCriterion
from botorch.optim.utils import _filter_kwargs
from torch import Tensor
INIT_OPTION_KEYS = {
# set of options for initialization that we should
# not pass to scipy.optimize.minimize to avoid
# warnings
"alpha",
"batch_limit",
"eta",
"init_batch_limit",
"nonnegative",
"n_burnin",
"sample_around_best",
"sample_around_best_sigma",
"sample_around_best_prob_perturb",
"seed",
"thinning",
}
@dataclasses.dataclass(frozen=True)
class OptimizeAcqfInputs:
"""
Container for inputs to `optimize_acqf`.
See docstring for `optimize_acqf` for explanation of parameters.
"""
acq_function: AcquisitionFunction
bounds: Tensor
q: int
num_restarts: int
raw_samples: Optional[int]
options: Optional[Dict[str, Union[bool, float, int, str]]]
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]]
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]]
nonlinear_inequality_constraints: Optional[List[Callable]]
fixed_features: Optional[Dict[int, float]]
post_processing_func: Optional[Callable[[Tensor], Tensor]]
batch_initial_conditions: Optional[Tensor]
return_best_only: bool
gen_candidates: TGenCandidates
sequential: bool
ic_generator: Optional[TGenInitialConditions] = None
timeout_sec: Optional[float] = None
return_full_tree: bool = False
retry_on_optimization_warning: bool = True
ic_gen_kwargs: Dict = dataclasses.field(default_factory=dict)
@property
def full_tree(self) -> bool:
return self.return_full_tree or (
not isinstance(self.acq_function, OneShotAcquisitionFunction)
)
def __post_init__(self) -> None:
if self.inequality_constraints is None and not (
self.bounds.ndim == 2 and self.bounds.shape[0] == 2
):
raise ValueError(
"bounds should be a `2 x d` tensor, current shape: "
f"{list(self.bounds.shape)}."
)
d = self.bounds.shape[1]
if self.batch_initial_conditions is not None:
batch_initial_conditions_shape = self.batch_initial_conditions.shape
if len(batch_initial_conditions_shape) not in (2, 3):
raise ValueError(
"batch_initial_conditions must be 2-dimensional or "
"3-dimensional. Its shape is "
f"{batch_initial_conditions_shape}."
)
if batch_initial_conditions_shape[-1] != d:
raise ValueError(
f"batch_initial_conditions.shape[-1] must be {d}. The "
f"shape is {batch_initial_conditions_shape}."
)
elif self.ic_generator is None:
if self.nonlinear_inequality_constraints is not None:
raise RuntimeError(
"`ic_generator` must be given if "
"there are non-linear inequality constraints."
)
if self.raw_samples is None:
raise ValueError(
"Must specify `raw_samples` when "
"`batch_initial_conditions` is None`."
)
def get_ic_generator(self) -> TGenInitialConditions:
if self.ic_generator is not None:
return self.ic_generator
elif isinstance(self.acq_function, qKnowledgeGradient):
return gen_one_shot_kg_initial_conditions
return gen_batch_initial_conditions
def _raise_deprecation_warning_if_kwargs(fn_name: str, kwargs: Dict[str, Any]) -> None:
"""
Raise a warning if kwargs are provided.
Some functions used to support **kwargs. The applicable parameters have now been
refactored to be named arguments, so no warning will be raised for users passing
the expected arguments. However, if a user had been passing an inapplicable
keyword argument, this will now raise a warning whereas in the past it did
nothing.
"""
if len(kwargs) > 0:
warnings.warn(
f"`{fn_name}` does not support arguments {list(kwargs.keys())}. In "
"the future, this will become an error.",
DeprecationWarning,
)
def _optimize_acqf_all_features_fixed(
*,
bounds: Tensor,
fixed_features: Dict[int, float],
q: int,
acq_function: AcquisitionFunction,
) -> Tuple[Tensor, Tensor]:
"""
Helper function for `optimize_acqf` for the trivial case where
all features are fixed.
"""
X = torch.tensor(
[fixed_features[i] for i in range(bounds.shape[-1])],
device=bounds.device,
dtype=bounds.dtype,
)
X = X.expand(q, *X.shape)
with torch.no_grad():
acq_value = acq_function(X)
return X, acq_value
def _validate_sequential_inputs(opt_inputs: OptimizeAcqfInputs) -> None:
# validate that linear constraints across the q-dim and
# self.sequential are not present together
if opt_inputs.inequality_constraints is not None:
for constraint in opt_inputs.inequality_constraints:
if len(constraint[0].shape) > 1:
raise UnsupportedError(
"Linear inequality constraints across the q-dimension are not "
"supported for sequential optimization."
)
if opt_inputs.equality_constraints is not None:
for constraint in opt_inputs.equality_constraints:
if len(constraint[0].shape) > 1:
raise UnsupportedError(
"Linear equality constraints across the q-dimension are not "
"supported for sequential optimization."
)
# TODO: Validate constraints if provided:
# https://github.com/pytorch/botorch/pull/1231
if opt_inputs.batch_initial_conditions is not None:
raise UnsupportedError(
"`batch_initial_conditions` is not supported for sequential "
"optimization. Either avoid specifying "
"`batch_initial_conditions` to use the custom initializer or "
"use the `ic_generator` kwarg to generate initial conditions "
"for the case of nonlinear inequality constraints."
)
if not opt_inputs.return_best_only:
raise NotImplementedError(
"`return_best_only=False` only supported for joint optimization."
)
if isinstance(opt_inputs.acq_function, OneShotAcquisitionFunction):
raise NotImplementedError(
"sequential optimization currently not supported for one-shot "
"acquisition functions. Must have `sequential=False`."
)
def _optimize_acqf_sequential_q(
opt_inputs: OptimizeAcqfInputs,
) -> Tuple[Tensor, Tensor]:
"""
Helper function for `optimize_acqf` when sequential=True and q > 1.
For each of `q` times, generate a single candidate greedily, then add it to
the list of pending points.
"""
_validate_sequential_inputs(opt_inputs)
# When using sequential optimization, we allocate the total timeout
# evenly across the individual acquisition optimizations.
timeout_sec = (
opt_inputs.timeout_sec / opt_inputs.q
if opt_inputs.timeout_sec is not None
else None
)
candidate_list, acq_value_list = [], []
base_X_pending = opt_inputs.acq_function.X_pending
new_inputs = dataclasses.replace(
opt_inputs,
q=1,
batch_initial_conditions=None,
return_best_only=True,
sequential=False,
timeout_sec=timeout_sec,
)
for i in range(opt_inputs.q):
candidate, acq_value = _optimize_acqf_batch(new_inputs)
candidate_list.append(candidate)
acq_value_list.append(acq_value)
candidates = torch.cat(candidate_list, dim=-2)
new_inputs.acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
logger.info(f"Generated sequential candidate {i+1} of {opt_inputs.q}")
opt_inputs.acq_function.set_X_pending(base_X_pending)
return candidates, torch.stack(acq_value_list)
def _optimize_acqf_batch(opt_inputs: OptimizeAcqfInputs) -> Tuple[Tensor, Tensor]:
options = opt_inputs.options or {}
initial_conditions_provided = opt_inputs.batch_initial_conditions is not None
if initial_conditions_provided:
batch_initial_conditions = opt_inputs.batch_initial_conditions
else:
# pyre-ignore[28]: Unexpected keyword argument `acq_function` to anonymous call.
batch_initial_conditions = opt_inputs.get_ic_generator()(
acq_function=opt_inputs.acq_function,
bounds=opt_inputs.bounds,
q=opt_inputs.q,
num_restarts=opt_inputs.num_restarts,
raw_samples=opt_inputs.raw_samples,
fixed_features=opt_inputs.fixed_features,
options=options,
inequality_constraints=opt_inputs.inequality_constraints,
equality_constraints=opt_inputs.equality_constraints,
**opt_inputs.ic_gen_kwargs,
)
batch_limit: int = options.get(
"batch_limit",
opt_inputs.num_restarts
if not opt_inputs.nonlinear_inequality_constraints
else 1,
)
def _optimize_batch_candidates() -> Tuple[Tensor, Tensor, List[Warning]]:
batch_candidates_list: List[Tensor] = []
batch_acq_values_list: List[Tensor] = []
batched_ics = batch_initial_conditions.split(batch_limit)
opt_warnings = []
timeout_sec = (
opt_inputs.timeout_sec / len(batched_ics)
if opt_inputs.timeout_sec is not None
else None
)
bounds = opt_inputs.bounds
gen_kwargs: Dict[str, Any] = {
"lower_bounds": None if bounds[0].isinf().all() else bounds[0],
"upper_bounds": None if bounds[1].isinf().all() else bounds[1],
"options": {k: v for k, v in options.items() if k not in INIT_OPTION_KEYS},
"fixed_features": opt_inputs.fixed_features,
"timeout_sec": timeout_sec,
}
# only add parameter constraints to gen_kwargs if they are specified
# to avoid unnecessary warnings in _filter_kwargs
for constraint_name in [
"inequality_constraints",
"equality_constraints",
"nonlinear_inequality_constraints",
]:
if (constraint := getattr(opt_inputs, constraint_name)) is not None:
gen_kwargs[constraint_name] = constraint
filtered_gen_kwargs = _filter_kwargs(opt_inputs.gen_candidates, **gen_kwargs)
for i, batched_ics_ in enumerate(batched_ics):
# optimize using random restart optimization
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always", category=OptimizationWarning)
(
batch_candidates_curr,
batch_acq_values_curr,
) = opt_inputs.gen_candidates(
batched_ics_, opt_inputs.acq_function, **filtered_gen_kwargs
)
opt_warnings += ws
batch_candidates_list.append(batch_candidates_curr)
batch_acq_values_list.append(batch_acq_values_curr)
logger.info(f"Generated candidate batch {i+1} of {len(batched_ics)}.")
batch_candidates = torch.cat(batch_candidates_list)
has_scalars = batch_acq_values_list[0].ndim == 0
if has_scalars:
batch_acq_values = torch.stack(batch_acq_values_list)
else:
batch_acq_values = torch.cat(batch_acq_values_list).flatten()
return batch_candidates, batch_acq_values, opt_warnings
batch_candidates, batch_acq_values, ws = _optimize_batch_candidates()
optimization_warning_raised = any(
(issubclass(w.category, OptimizationWarning) for w in ws)
)
if optimization_warning_raised and opt_inputs.retry_on_optimization_warning:
first_warn_msg = (
"Optimization failed in `gen_candidates_scipy` with the following "
f"warning(s):\n{[w.message for w in ws]}\nBecause you specified "
"`batch_initial_conditions`, optimization will not be retried with "
"new initial conditions and will proceed with the current solution."
" Suggested remediation: Try again with different "
"`batch_initial_conditions`, or don't provide `batch_initial_conditions.`"
if initial_conditions_provided
else "Optimization failed in `gen_candidates_scipy` with the following "
f"warning(s):\n{[w.message for w in ws]}\nTrying again with a new "
"set of initial conditions."
)
warnings.warn(first_warn_msg, RuntimeWarning)
if not initial_conditions_provided:
batch_initial_conditions = opt_inputs.get_ic_generator()(
acq_function=opt_inputs.acq_function,
bounds=opt_inputs.bounds,
q=opt_inputs.q,
num_restarts=opt_inputs.num_restarts,
raw_samples=opt_inputs.raw_samples,
fixed_features=opt_inputs.fixed_features,
options=options,
inequality_constraints=opt_inputs.inequality_constraints,
equality_constraints=opt_inputs.equality_constraints,
**opt_inputs.ic_gen_kwargs,
)
batch_candidates, batch_acq_values, ws = _optimize_batch_candidates()
optimization_warning_raised = any(
(issubclass(w.category, OptimizationWarning) for w in ws)
)
if optimization_warning_raised:
warnings.warn(
"Optimization failed on the second try, after generating a "
"new set of initial conditions.",
RuntimeWarning,
)
if opt_inputs.post_processing_func is not None:
batch_candidates = opt_inputs.post_processing_func(batch_candidates)
with torch.no_grad():
acq_values_list = [
opt_inputs.acq_function(cand)
for cand in batch_candidates.split(batch_limit, dim=0)
]
batch_acq_values = torch.cat(acq_values_list, dim=0)
if opt_inputs.return_best_only:
best = torch.argmax(batch_acq_values.view(-1), dim=0)
batch_candidates = batch_candidates[best]
batch_acq_values = batch_acq_values[best]
if not opt_inputs.full_tree:
batch_candidates = opt_inputs.acq_function.extract_candidates(
X_full=batch_candidates
)
return batch_candidates, batch_acq_values
def optimize_acqf(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
num_restarts: int,
raw_samples: Optional[int] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
nonlinear_inequality_constraints: Optional[List[Callable]] = None,
fixed_features: Optional[Dict[int, float]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
batch_initial_conditions: Optional[Tensor] = None,
return_best_only: bool = True,
gen_candidates: Optional[TGenCandidates] = None,
sequential: bool = False,
*,
ic_generator: Optional[TGenInitialConditions] = None,
timeout_sec: Optional[float] = None,
return_full_tree: bool = False,
retry_on_optimization_warning: bool = True,
**ic_gen_kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Generate a set of candidates via multi-start optimization.
Args:
acq_function: An AcquisitionFunction.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`
(if inequality_constraints is provided, these bounds can be -inf and
+inf, respectively).
q: The number of candidates.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
options: Options for candidate generation.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`. `indices` and
`coefficients` should be torch tensors. See the docstring of
`make_scipy_linear_constraints` for an example. When q=1, or when
applying the same constraint to each candidate in the batch,
`indices` should be a 1-d tensor. For inter-point constraints,
`indices` must be a 2-d Tensor, where in each row `indices[i] =
(k_i, l_i)` the first index `k_i` corresponds to the `k_i`-th
element of the `q`-batch and the second index `l_i` corresponds to
the `l_i`-th feature of that element.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`. See the docstring of
`make_scipy_linear_constraints` for an example.
nonlinear_inequality_constraints: A list of callables that represent
non-linear inequality constraints of the form `callable(x) >= 0`. Each
callable is expected to take a `(num_restarts) x q x d`-dim tensor as an
input and return a `(num_restarts) x q`-dim tensor with the constraint
values. The constraints will later be passed to SLSQP. You need to pass in
`batch_initial_conditions` in this case. Using non-linear inequality
constraints also requires that `batch_limit` is set to 1, which will be
done automatically if not specified in `options`.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
post_processing_func: A function that post-processes an optimization
result appropriately (i.e., according to `round-trip`
transformations).
batch_initial_conditions: A tensor to specify the initial conditions. Set
this if you do not want to use default initialization strategy.
return_best_only: If False, outputs the solutions corresponding to all
random restart initializations of the optimization.
gen_candidates: A callable for generating candidates (and their associated
acquisition values) given a tensor of initial conditions and an
acquisition function. Other common inputs include lower and upper bounds
and a dictionary of options, but refer to the documentation of specific
generation functions (e.g., `gen_candidates_scipy` and `gen_candidates_torch`)
for method-specific inputs. Default: `gen_candidates_scipy`
sequential: If False, uses joint optimization, otherwise uses sequential
optimization.
ic_generator: Function for generating initial conditions. Not needed when
`batch_initial_conditions` are provided. Defaults to
`gen_one_shot_kg_initial_conditions` for `qKnowledgeGradient` acquisition
functions and `gen_batch_initial_conditions` otherwise. Must be specified
for nonlinear inequality constraints.
timeout_sec: Max amount of time optimization can run for.
return_full_tree: If True, return the full optimization solution (e.g., including
fantasy points for one-shot acquisition functions) rather than only the
extracted candidates.
retry_on_optimization_warning: Whether to retry candidate generation with a new
set of initial conditions when it fails with an `OptimizationWarning`.
ic_gen_kwargs: Additional keyword arguments passed to function specified by
`ic_generator`
Returns:
A two-element tuple containing
- A tensor of generated candidates. The shape is
-- `q x d` if `return_best_only` is True (default)
-- `num_restarts x q x d` if `return_best_only` is False
- a tensor of associated acquisition values. If `sequential=False`,
this is a `(num_restarts)`-dim tensor of joint acquisition values
(with explicit restart dimension if `return_best_only=False`). If
`sequential=True`, this is a `q`-dim tensor of expected acquisition
values conditional on having observed candidates `0,1,...,i-1`.
Example:
>>> # generate `q=2` candidates jointly using 20 random restarts
>>> # and 512 raw samples
>>> candidates, acq_value = optimize_acqf(qEI, bounds, 2, 20, 512)
>>> # generate `q=3` candidates sequentially using 15 random restarts
>>> # and 256 raw samples
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0.], [1.]])
>>> candidates, acq_value_list = optimize_acqf(
>>> qEI, bounds, 3, 15, 256, sequential=True
>>> )
"""
# using a default of None simplifies unit testing
if gen_candidates is None:
gen_candidates = gen_candidates_scipy
opt_acqf_inputs = OptimizeAcqfInputs(
acq_function=acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
fixed_features=fixed_features,
post_processing_func=post_processing_func,
batch_initial_conditions=batch_initial_conditions,
return_best_only=return_best_only,
gen_candidates=gen_candidates,
sequential=sequential,
ic_generator=ic_generator,
timeout_sec=timeout_sec,
return_full_tree=return_full_tree,
retry_on_optimization_warning=retry_on_optimization_warning,
ic_gen_kwargs=ic_gen_kwargs,
)
return _optimize_acqf(opt_acqf_inputs)
def _optimize_acqf(opt_inputs: OptimizeAcqfInputs) -> Tuple[Tensor, Tensor]:
# Handle the trivial case when all features are fixed
if (
opt_inputs.fixed_features is not None
and len(opt_inputs.fixed_features) == opt_inputs.bounds.shape[-1]
):
return _optimize_acqf_all_features_fixed(
bounds=opt_inputs.bounds,
fixed_features=opt_inputs.fixed_features,
q=opt_inputs.q,
acq_function=opt_inputs.acq_function,
)
# Perform sequential optimization via successive conditioning on pending points
if opt_inputs.sequential and opt_inputs.q > 1:
return _optimize_acqf_sequential_q(opt_inputs=opt_inputs)
# Batch optimization (including the case q=1)
return _optimize_acqf_batch(opt_inputs=opt_inputs)
def optimize_acqf_cyclic(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
num_restarts: int,
raw_samples: Optional[int] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
fixed_features: Optional[Dict[int, float]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
batch_initial_conditions: Optional[Tensor] = None,
cyclic_options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
*,
ic_generator: Optional[TGenInitialConditions] = None,
timeout_sec: Optional[float] = None,
return_full_tree: bool = False,
retry_on_optimization_warning: bool = True,
**ic_gen_kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Generate a set of `q` candidates via cyclic optimization.
Args:
acq_function: An AcquisitionFunction
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`
(if inequality_constraints is provided, these bounds can be -inf and
+inf, respectively).
q: The number of candidates.
num_restarts: Number of starting points for multistart acquisition
function optimization.
raw_samples: Number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
options: Options for candidate generation.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
post_processing_func: A function that post-processes an optimization
result appropriately (i.e., according to `round-trip`
transformations).
batch_initial_conditions: A tensor to specify the initial conditions.
If no initial conditions are provided, the default initialization will
be used.
cyclic_options: Options for stopping criterion for outer cyclic optimization.
ic_generator: Function for generating initial conditions. Not needed when
`batch_initial_conditions` are provided. Defaults to
`gen_one_shot_kg_initial_conditions` for `qKnowledgeGradient` acquisition
functions and `gen_batch_initial_conditions` otherwise. Must be specified
for nonlinear inequality constraints.
timeout_sec: Max amount of time optimization can run for.
return_full_tree: If True, return the full optimization solution (e.g., including
fantasy points for one-shot acquisition functions) rather than only the
extracted candidates.
retry_on_optimization_warning: Whether to retry candidate generation with a new
set of initial conditions when it fails with an `OptimizationWarning`.
ic_gen_kwargs: Additional keyword arguments passed to function specified by
`ic_generator`
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- a `q`-dim tensor of expected acquisition values, where the value at
index `i` is the acquisition value conditional on having observed
all candidates except candidate `i`.
Example:
>>> # generate `q=3` candidates cyclically using 15 random restarts
>>> # 256 raw samples, and 4 cycles
>>>
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0.], [1.]])
>>> candidates, acq_value_list = optimize_acqf_cyclic(
>>> qEI, bounds, 3, 15, 256, cyclic_options={"maxiter": 4}
>>> )
"""
opt_inputs = OptimizeAcqfInputs(
acq_function=acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=None,
fixed_features=fixed_features,
post_processing_func=post_processing_func,
batch_initial_conditions=batch_initial_conditions,
return_best_only=True,
gen_candidates=gen_candidates_scipy,
sequential=True,
ic_generator=ic_generator,
timeout_sec=timeout_sec,
return_full_tree=return_full_tree,
retry_on_optimization_warning=retry_on_optimization_warning,
ic_gen_kwargs=ic_gen_kwargs,
)
# for the first cycle, optimize the q candidates sequentially
candidates, acq_vals = _optimize_acqf(opt_inputs)
q = opt_inputs.q
opt_inputs = dataclasses.replace(opt_inputs, q=1)
acq_function = opt_inputs.acq_function
if q > 1:
cyclic_options = cyclic_options or {}
stopping_criterion = ExpMAStoppingCriterion(**cyclic_options)
stop = stopping_criterion.evaluate(fvals=acq_vals)
base_X_pending = acq_function.X_pending
idxr = torch.ones(q, dtype=torch.bool, device=opt_inputs.bounds.device)
while not stop:
for i in range(q):
# optimize only candidate i
idxr[i] = 0
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates[idxr]], dim=-2)
if base_X_pending is not None
else candidates[idxr]
)
opt_inputs = dataclasses.replace(
opt_inputs,
batch_initial_conditions=candidates[i].unsqueeze(0),
sequential=False,
)
candidate_i, acq_val_i = _optimize_acqf(opt_inputs)
candidates[i] = candidate_i
acq_vals[i] = acq_val_i
idxr[i] = 1
stop = stopping_criterion.evaluate(fvals=acq_vals)
acq_function.set_X_pending(base_X_pending)
return candidates, acq_vals
def optimize_acqf_list(
acq_function_list: List[AcquisitionFunction],
bounds: Tensor,
num_restarts: int,
raw_samples: Optional[int] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
nonlinear_inequality_constraints: Optional[List[Callable]] = None,
fixed_features: Optional[Dict[int, float]] = None,
fixed_features_list: Optional[List[Dict[int, float]]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
ic_generator: Optional[TGenInitialConditions] = None,
ic_gen_kwargs: Optional[Dict] = None,
) -> Tuple[Tensor, Tensor]:
r"""Generate a list of candidates from a list of acquisition functions.
The acquisition functions are optimized in sequence, with previous candidates
set as `X_pending`. This is also known as sequential greedy optimization.
Args:
acq_function_list: A list of acquisition functions.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`
(if inequality_constraints is provided, these bounds can be -inf and
+inf, respectively).
num_restarts: Number of starting points for multistart acquisition
function optimization.
raw_samples: Number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
options: Options for candidate generation.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`
        nonlinear_inequality_constraints: A list of callables that represent
non-linear inequality constraints of the form `callable(x) >= 0`. Each
callable is expected to take a `(num_restarts) x q x d`-dim tensor as an
input and return a `(num_restarts) x q`-dim tensor with the constraint
values. The constraints will later be passed to SLSQP. You need to pass in
`batch_initial_conditions` in this case. Using non-linear inequality
constraints also requires that `batch_limit` is set to 1, which will be
done automatically if not specified in `options`.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
fixed_features_list: A list of maps `{feature_index: value}`. The i-th
item represents the fixed_feature for the i-th optimization. If
`fixed_features_list` is provided, `optimize_acqf_mixed` is invoked.
post_processing_func: A function that post-processes an optimization
result appropriately (i.e., according to `round-trip`
transformations).
ic_generator: Function for generating initial conditions. Not needed when
`batch_initial_conditions` are provided. Defaults to
`gen_one_shot_kg_initial_conditions` for `qKnowledgeGradient` acquisition
functions and `gen_batch_initial_conditions` otherwise. Must be specified
for nonlinear inequality constraints.
ic_gen_kwargs: Additional keyword arguments passed to function specified by
`ic_generator`
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- a `q`-dim tensor of expected acquisition values, where the value at
index `i` is the acquisition value conditional on having observed
all candidates except candidate `i`.
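    Example:
        >>> # Illustrative sketch (not from the original source): assumes `qEI` and
        >>> # `qNEI` are fitted acquisition functions over a 2-d unit cube.
        >>> bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
        >>> candidates, acq_values = optimize_acqf_list(
        >>>     acq_function_list=[qEI, qNEI],
        >>>     bounds=bounds,
        >>>     num_restarts=20,
        >>>     raw_samples=512,
        >>> )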
"""
if fixed_features and fixed_features_list:
raise ValueError(
"Èither `fixed_feature` or `fixed_features_list` can be provided, not both."
)
if not acq_function_list:
raise ValueError("acq_function_list must be non-empty.")
candidate_list, acq_value_list = [], []
candidates = torch.tensor([], device=bounds.device, dtype=bounds.dtype)
base_X_pending = acq_function_list[0].X_pending
for acq_function in acq_function_list:
if candidate_list:
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
if fixed_features_list:
candidate, acq_value = optimize_acqf_mixed(
acq_function=acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options or {},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
fixed_features_list=fixed_features_list,
post_processing_func=post_processing_func,
ic_generator=ic_generator,
ic_gen_kwargs=ic_gen_kwargs,
)
else:
ic_gen_kwargs = ic_gen_kwargs or {}
candidate, acq_value = optimize_acqf(
acq_function=acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options or {},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
fixed_features=fixed_features,
post_processing_func=post_processing_func,
return_best_only=True,
sequential=False,
ic_generator=ic_generator,
**ic_gen_kwargs,
)
candidate_list.append(candidate)
acq_value_list.append(acq_value)
candidates = torch.cat(candidate_list, dim=-2)
return candidates, torch.stack(acq_value_list)
def optimize_acqf_mixed(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
num_restarts: int,
fixed_features_list: List[Dict[int, float]],
raw_samples: Optional[int] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
nonlinear_inequality_constraints: Optional[List[Callable]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
batch_initial_conditions: Optional[Tensor] = None,
ic_generator: Optional[TGenInitialConditions] = None,
ic_gen_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Optimize over a list of fixed_features and returns the best solution.
This is useful for optimizing over mixed continuous and discrete domains.
For q > 1 this function always performs sequential greedy optimization (with
proper conditioning on generated candidates).
Args:
acq_function: An AcquisitionFunction
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`
(if inequality_constraints is provided, these bounds can be -inf and
+inf, respectively).
q: The number of candidates.
num_restarts: Number of starting points for multistart acquisition
function optimization.
raw_samples: Number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
fixed_features_list: A list of maps `{feature_index: value}`. The i-th
item represents the fixed_feature for the i-th optimization.
options: Options for candidate generation.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`
        nonlinear_inequality_constraints: A list of callables that represent
non-linear inequality constraints of the form `callable(x) >= 0`. Each
callable is expected to take a `(num_restarts) x q x d`-dim tensor as an
input and return a `(num_restarts) x q`-dim tensor with the constraint
values. The constraints will later be passed to SLSQP. You need to pass in
`batch_initial_conditions` in this case. Using non-linear inequality
constraints also requires that `batch_limit` is set to 1, which will be
done automatically if not specified in `options`.
post_processing_func: A function that post-processes an optimization
result appropriately (i.e., according to `round-trip`
transformations).
batch_initial_conditions: A tensor to specify the initial conditions. Set
this if you do not want to use default initialization strategy.
ic_generator: Function for generating initial conditions. Not needed when
`batch_initial_conditions` are provided. Defaults to
`gen_one_shot_kg_initial_conditions` for `qKnowledgeGradient` acquisition
functions and `gen_batch_initial_conditions` otherwise. Must be specified
for nonlinear inequality constraints.
ic_gen_kwargs: Additional keyword arguments passed to function specified by
`ic_generator`
kwargs: kwargs do nothing. This is provided so that the same arguments can
be passed to different acquisition functions without raising an error.
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- an associated acquisition value.
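    Example:
        >>> # Illustrative sketch (not from the original source): assumes `qEI` is a
        >>> # fitted acquisition function over a 3-d space whose last feature is binary.
        >>> bounds = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
        >>> candidates, acq_value = optimize_acqf_mixed(
        >>>     acq_function=qEI,
        >>>     bounds=bounds,
        >>>     q=2,
        >>>     num_restarts=20,
        >>>     raw_samples=512,
        >>>     fixed_features_list=[{2: 0.0}, {2: 1.0}],
        >>> )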
"""
if not fixed_features_list:
raise ValueError("fixed_features_list must be non-empty.")
if isinstance(acq_function, OneShotAcquisitionFunction):
if not hasattr(acq_function, "evaluate") and q > 1:
raise ValueError(
"`OneShotAcquisitionFunction`s that do not implement `evaluate` "
"are currently not supported when `q > 1`. This is needed to "
"compute the joint acquisition value."
)
_raise_deprecation_warning_if_kwargs("optimize_acqf_mixed", kwargs)
ic_gen_kwargs = ic_gen_kwargs or {}
if q == 1:
ff_candidate_list, ff_acq_value_list = [], []
for fixed_features in fixed_features_list:
candidate, acq_value = optimize_acqf(
acq_function=acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options or {},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
fixed_features=fixed_features,
post_processing_func=post_processing_func,
batch_initial_conditions=batch_initial_conditions,
ic_generator=ic_generator,
return_best_only=True,
**ic_gen_kwargs,
)
ff_candidate_list.append(candidate)
ff_acq_value_list.append(acq_value)
ff_acq_values = torch.stack(ff_acq_value_list)
best = torch.argmax(ff_acq_values)
return ff_candidate_list[best], ff_acq_values[best]
# For batch optimization with q > 1 we do not want to enumerate all n_combos^n
# possible combinations of discrete choices. Instead, we use sequential greedy
# optimization.
base_X_pending = acq_function.X_pending
candidates = torch.tensor([], device=bounds.device, dtype=bounds.dtype)
for _ in range(q):
candidate, acq_value = optimize_acqf_mixed(
acq_function=acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
fixed_features_list=fixed_features_list,
options=options or {},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
post_processing_func=post_processing_func,
batch_initial_conditions=batch_initial_conditions,
ic_generator=ic_generator,
ic_gen_kwargs=ic_gen_kwargs,
)
candidates = torch.cat([candidates, candidate], dim=-2)
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
acq_function.set_X_pending(base_X_pending)
# compute joint acquisition value
if isinstance(acq_function, OneShotAcquisitionFunction):
acq_value = acq_function.evaluate(X=candidates, bounds=bounds)
else:
acq_value = acq_function(candidates)
return candidates, acq_value
def optimize_acqf_discrete(
acq_function: AcquisitionFunction,
q: int,
choices: Tensor,
max_batch_size: int = 2048,
unique: bool = True,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Optimize over a discrete set of points using batch evaluation.
For `q > 1` this function generates candidates by means of sequential
conditioning (rather than joint optimization), since for all but the
    smallest number of choices the set `choices^q` of discrete points to
evaluate quickly explodes.
Args:
acq_function: An AcquisitionFunction.
q: The number of candidates.
choices: A `num_choices x d` tensor of possible choices.
max_batch_size: The maximum number of choices to evaluate in batch.
A large limit can cause excessive memory usage if the model has
a large training set.
unique: If True return unique choices, o/w choices may be repeated
(only relevant if `q > 1`).
kwargs: kwargs do nothing. This is provided so that the same arguments can
be passed to different acquisition functions without raising an error.
Returns:
        A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- an associated acquisition value.
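    Example:
        >>> # Illustrative sketch (not from the original source): assumes `qEI` is a
        >>> # fitted acquisition function and `choices` is a `num_choices x d` tensor.
        >>> candidates, acq_value = optimize_acqf_discrete(
        >>>     acq_function=qEI, q=2, choices=choices
        >>> )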
"""
if isinstance(acq_function, OneShotAcquisitionFunction):
raise UnsupportedError(
"Discrete optimization is not supported for"
"one-shot acquisition functions."
)
if choices.numel() == 0:
        raise InputDataError("`choices` must be non-empty.")
_raise_deprecation_warning_if_kwargs("optimize_acqf_discrete", kwargs)
choices_batched = choices.unsqueeze(-2)
if q > 1:
candidate_list, acq_value_list = [], []
base_X_pending = acq_function.X_pending
for _ in range(q):
with torch.no_grad():
acq_values = _split_batch_eval_acqf(
acq_function=acq_function,
X=choices_batched,
max_batch_size=max_batch_size,
)
best_idx = torch.argmax(acq_values)
candidate_list.append(choices_batched[best_idx])
acq_value_list.append(acq_values[best_idx])
# set pending points
candidates = torch.cat(candidate_list, dim=-2)
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
# need to remove choice from choice set if enforcing uniqueness
if unique:
choices_batched = torch.cat(
[choices_batched[:best_idx], choices_batched[best_idx + 1 :]]
)
# Reset acq_func to previous X_pending state
acq_function.set_X_pending(base_X_pending)
return candidates, torch.stack(acq_value_list)
with torch.no_grad():
acq_values = _split_batch_eval_acqf(
acq_function=acq_function, X=choices_batched, max_batch_size=max_batch_size
)
best_idx = torch.argmax(acq_values)
return choices_batched[best_idx], acq_values[best_idx]
def _split_batch_eval_acqf(
acq_function: AcquisitionFunction, X: Tensor, max_batch_size: int
) -> Tensor:
return torch.cat([acq_function(X_) for X_ in X.split(max_batch_size)])
def _generate_neighbors(
x: Tensor,
discrete_choices: List[Tensor],
X_avoid: Tensor,
inequality_constraints: List[Tuple[Tensor, Tensor, float]],
):
# generate all 1D perturbations
npts = sum([len(c) for c in discrete_choices])
X_loc = x.repeat(npts, 1)
j = 0
for i, c in enumerate(discrete_choices):
X_loc[j : j + len(c), i] = c
j += len(c)
# remove invalid and infeasible points (also remove x)
X_loc = _filter_invalid(X=X_loc, X_avoid=torch.cat((X_avoid, x)))
X_loc = _filter_infeasible(X=X_loc, inequality_constraints=inequality_constraints)
return X_loc
def _filter_infeasible(
X: Tensor, inequality_constraints: List[Tuple[Tensor, Tensor, float]]
):
"""Remove all points from `X` that don't satisfy the constraints."""
is_feasible = torch.ones(X.shape[0], dtype=torch.bool, device=X.device)
for (inds, weights, bound) in inequality_constraints:
is_feasible &= (X[..., inds] * weights).sum(dim=-1) >= bound
return X[is_feasible]
def _filter_invalid(X: Tensor, X_avoid: Tensor):
"""Remove all occurences of `X_avoid` from `X`."""
return X[~(X == X_avoid.unsqueeze(-2)).all(dim=-1).any(dim=-2)]
def _gen_batch_initial_conditions_local_search(
discrete_choices: List[Tensor],
raw_samples: int,
X_avoid: Tensor,
inequality_constraints: List[Tuple[Tensor, Tensor, float]],
min_points: int,
max_tries: int = 100,
):
"""Generate initial conditions for local search."""
device = discrete_choices[0].device
dtype = discrete_choices[0].dtype
dim = len(discrete_choices)
X = torch.zeros(0, dim, device=device, dtype=dtype)
for _ in range(max_tries):
X_new = torch.zeros(raw_samples, dim, device=device, dtype=dtype)
for i, c in enumerate(discrete_choices):
X_new[:, i] = c[
torch.randint(low=0, high=len(c), size=(raw_samples,), device=c.device)
]
X = torch.unique(torch.cat((X, X_new)), dim=0)
X = _filter_invalid(X=X, X_avoid=X_avoid)
X = _filter_infeasible(X=X, inequality_constraints=inequality_constraints)
if len(X) >= min_points:
return X
raise RuntimeError(f"Failed to generate at least {min_points} initial conditions")
def optimize_acqf_discrete_local_search(
acq_function: AcquisitionFunction,
discrete_choices: List[Tensor],
q: int,
num_restarts: int = 20,
raw_samples: int = 4096,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
X_avoid: Optional[Tensor] = None,
batch_initial_conditions: Optional[Tensor] = None,
max_batch_size: int = 2048,
unique: bool = True,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Optimize acquisition function over a lattice.
This is useful when d is large and enumeration of the search space
isn't possible. For q > 1 this function always performs sequential
greedy optimization (with proper conditioning on generated candidates).
NOTE: While this method supports arbitrary lattices, it has only been
thoroughly tested for {0, 1}^d. Consider it to be in alpha stage for
the more general case.
Args:
acq_function: An AcquisitionFunction
discrete_choices: A list of possible discrete choices for each dimension.
Each element in the list is expected to be a torch tensor.
q: The number of candidates.
num_restarts: Number of starting points for multistart acquisition
function optimization.
raw_samples: Number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
X_avoid: An `n x d` tensor of candidates that we aren't allowed to pick.
batch_initial_conditions: A tensor of size `n x 1 x d` to specify the
initial conditions. Set this if you do not want to use default
initialization strategy.
max_batch_size: The maximum number of choices to evaluate in batch.
A large limit can cause excessive memory usage if the model has
a large training set.
unique: If True return unique choices, o/w choices may be repeated
(only relevant if `q > 1`).
kwargs: kwargs do nothing. This is provided so that the same arguments can
be passed to different acquisition functions without raising an error.
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- an associated acquisition value.
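    Example:
        >>> # Illustrative sketch (not from the original source): assumes `qEI` is a
        >>> # fitted acquisition function over the binary lattice {0, 1}^10.
        >>> discrete_choices = [torch.tensor([0.0, 1.0]) for _ in range(10)]
        >>> candidates, acq_value = optimize_acqf_discrete_local_search(
        >>>     acq_function=qEI,
        >>>     discrete_choices=discrete_choices,
        >>>     q=2,
        >>> )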
"""
_raise_deprecation_warning_if_kwargs("optimize_acqf_discrete_local_search", kwargs)
candidate_list = []
base_X_pending = acq_function.X_pending if q > 1 else None
base_X_avoid = X_avoid
device = discrete_choices[0].device
dtype = discrete_choices[0].dtype
dim = len(discrete_choices)
if X_avoid is None:
X_avoid = torch.zeros(0, dim, device=device, dtype=dtype)
inequality_constraints = inequality_constraints or []
for i in range(q):
# generate some starting points
if i == 0 and batch_initial_conditions is not None:
X0 = _filter_invalid(X=batch_initial_conditions.squeeze(1), X_avoid=X_avoid)
X0 = _filter_infeasible(
X=X0, inequality_constraints=inequality_constraints
).unsqueeze(1)
else:
X_init = _gen_batch_initial_conditions_local_search(
discrete_choices=discrete_choices,
raw_samples=raw_samples,
X_avoid=X_avoid,
inequality_constraints=inequality_constraints,
min_points=num_restarts,
)
# pick the best starting points
with torch.no_grad():
acqvals_init = _split_batch_eval_acqf(
acq_function=acq_function,
X=X_init.unsqueeze(1),
max_batch_size=max_batch_size,
).unsqueeze(-1)
X0 = X_init[acqvals_init.topk(k=num_restarts, largest=True, dim=0).indices]
# optimize from the best starting points
best_xs = torch.zeros(len(X0), dim, device=device, dtype=dtype)
best_acqvals = torch.zeros(len(X0), 1, device=device, dtype=dtype)
for j, x in enumerate(X0):
curr_x, curr_acqval = x.clone(), acq_function(x.unsqueeze(1))
while True:
# this generates all feasible neighbors that are one bit away
X_loc = _generate_neighbors(
x=curr_x,
discrete_choices=discrete_choices,
X_avoid=X_avoid,
inequality_constraints=inequality_constraints,
)
# there may not be any neighbors
if len(X_loc) == 0:
break
with torch.no_grad():
acqval_loc = acq_function(X_loc.unsqueeze(1))
# break if no neighbor is better than the current point (local optimum)
if acqval_loc.max() <= curr_acqval:
break
best_ind = acqval_loc.argmax().item()
curr_x, curr_acqval = X_loc[best_ind].unsqueeze(0), acqval_loc[best_ind]
best_xs[j, :], best_acqvals[j] = curr_x, curr_acqval
# pick the best
best_idx = best_acqvals.argmax()
candidate_list.append(best_xs[best_idx].unsqueeze(0))
# set pending points
candidates = torch.cat(candidate_list, dim=-2)
if q > 1:
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
# Update points to avoid if unique is True
if unique:
X_avoid = (
torch.cat([base_X_avoid, candidates], dim=-2)
if base_X_avoid is not None
else candidates
)
# Reset acq_func to original X_pending state
if q > 1:
acq_function.set_X_pending(base_X_pending)
with torch.no_grad():
acq_value = acq_function(candidates) # compute joint acquisition value
return candidates, acq_value
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Tools for model fitting."""
from __future__ import annotations
from functools import partial
from itertools import filterfalse
from time import monotonic
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Pattern,
Sequence,
Set,
Tuple,
Union,
)
from warnings import warn
from botorch.exceptions.warnings import OptimizationWarning
from botorch.optim.closures import get_loss_closure_with_grads
from botorch.optim.core import (
OptimizationResult,
OptimizationStatus,
scipy_minimize,
torch_minimize,
)
from botorch.optim.numpy_converter import (
_scipy_objective_and_grad,
module_to_array,
set_params_with_array,
)
from botorch.optim.stopping import ExpMAStoppingCriterion
from botorch.optim.utils import (
_filter_kwargs,
_get_extra_mll_args,
get_name_filter,
get_parameters_and_bounds,
TorchAttr,
)
from botorch.optim.utils.model_utils import get_parameters
from botorch.utils.types import DEFAULT
from gpytorch.mlls.marginal_log_likelihood import MarginalLogLikelihood
from gpytorch.settings import fast_computations
from numpy import ndarray
from scipy.optimize import Bounds, minimize
from torch import Tensor
from torch.nn import Module
from torch.optim.adam import Adam
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
TBoundsDict = Dict[str, Tuple[Optional[float], Optional[float]]]
TScipyObjective = Callable[
[ndarray, MarginalLogLikelihood, Dict[str, TorchAttr]], Tuple[float, ndarray]
]
TModToArray = Callable[
[Module, Optional[TBoundsDict], Optional[Set[str]]],
Tuple[ndarray, Dict[str, TorchAttr], Optional[ndarray]],
]
TArrayToMod = Callable[[Module, ndarray, Dict[str, TorchAttr]], Module]
def fit_gpytorch_mll_scipy(
mll: MarginalLogLikelihood,
parameters: Optional[Dict[str, Tensor]] = None,
bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None,
closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None,
closure_kwargs: Optional[Dict[str, Any]] = None,
method: str = "L-BFGS-B",
options: Optional[Dict[str, Any]] = None,
callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None,
timeout_sec: Optional[float] = None,
) -> OptimizationResult:
r"""Generic scipy.optimized-based fitting routine for GPyTorch MLLs.
The model and likelihood in mll must already be in train mode.
Args:
mll: MarginalLogLikelihood to be maximized.
parameters: Optional dictionary of parameters to be optimized. Defaults
to all parameters of `mll` that require gradients.
bounds: A dictionary of user-specified bounds for `parameters`. Used to update
default parameter bounds obtained from `mll`.
closure: Callable that returns a tensor and an iterable of gradient tensors.
Responsible for setting the `grad` attributes of `parameters`. If no closure
is provided, one will be obtained by calling `get_loss_closure_with_grads`.
closure_kwargs: Keyword arguments passed to `closure`.
method: Solver type, passed along to scipy.minimize.
options: Dictionary of solver options, passed along to scipy.minimize.
callback: Optional callback taking `parameters` and an OptimizationResult as its
sole arguments.
timeout_sec: Timeout in seconds after which to terminate the fitting loop
(note that timing out can result in bad fits!).
Returns:
The final OptimizationResult.
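    Example:
        >>> # Illustrative sketch (not from the original source): assumes `train_X` and
        >>> # `train_Y` are existing training tensors.
        >>> gp = SingleTaskGP(train_X, train_Y)
        >>> mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
        >>> mll.train()
        >>> result = fit_gpytorch_mll_scipy(mll)
        >>> mll.eval()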
"""
# Resolve `parameters` and update default bounds
_parameters, _bounds = get_parameters_and_bounds(mll)
bounds = _bounds if bounds is None else {**_bounds, **bounds}
if parameters is None:
parameters = {n: p for n, p in _parameters.items() if p.requires_grad}
if closure is None:
closure = get_loss_closure_with_grads(mll, parameters=parameters)
if closure_kwargs is not None:
closure = partial(closure, **closure_kwargs)
result = scipy_minimize(
closure=closure,
parameters=parameters,
bounds=bounds,
method=method,
options=options,
callback=callback,
timeout_sec=timeout_sec,
)
if result.status != OptimizationStatus.SUCCESS:
warn(
f"`scipy_minimize` terminated with status {result.status}, displaying"
f" original message from `scipy.optimize.minimize`: {result.message}",
OptimizationWarning,
)
return result
def fit_gpytorch_mll_torch(
mll: MarginalLogLikelihood,
parameters: Optional[Dict[str, Tensor]] = None,
bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None,
closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None,
closure_kwargs: Optional[Dict[str, Any]] = None,
step_limit: Optional[int] = None,
stopping_criterion: Optional[Callable[[Tensor], bool]] = DEFAULT, # pyre-ignore [9]
optimizer: Union[Optimizer, Callable[..., Optimizer]] = Adam,
scheduler: Optional[Union[_LRScheduler, Callable[..., _LRScheduler]]] = None,
callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None,
timeout_sec: Optional[float] = None,
) -> OptimizationResult:
r"""Generic torch.optim-based fitting routine for GPyTorch MLLs.
Args:
mll: MarginalLogLikelihood to be maximized.
parameters: Optional dictionary of parameters to be optimized. Defaults
to all parameters of `mll` that require gradients.
bounds: A dictionary of user-specified bounds for `parameters`. Used to update
default parameter bounds obtained from `mll`.
closure: Callable that returns a tensor and an iterable of gradient tensors.
Responsible for setting the `grad` attributes of `parameters`. If no closure
is provided, one will be obtained by calling `get_loss_closure_with_grads`.
closure_kwargs: Keyword arguments passed to `closure`.
step_limit: Optional upper bound on the number of optimization steps.
stopping_criterion: A StoppingCriterion for the optimization loop.
optimizer: A `torch.optim.Optimizer` instance or a factory that takes
a list of parameters and returns an `Optimizer` instance.
scheduler: A `torch.optim.lr_scheduler._LRScheduler` instance or a factory
            that takes an `Optimizer` instance and returns an `_LRScheduler`.
callback: Optional callback taking `parameters` and an OptimizationResult as its
sole arguments.
timeout_sec: Timeout in seconds after which to terminate the fitting loop
(note that timing out can result in bad fits!).
Returns:
The final OptimizationResult.
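    Example:
        >>> # Illustrative sketch (not from the original source): assumes `train_X` and
        >>> # `train_Y` are existing training tensors.
        >>> gp = SingleTaskGP(train_X, train_Y)
        >>> mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
        >>> mll.train()
        >>> result = fit_gpytorch_mll_torch(mll)
        >>> mll.eval()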
"""
if stopping_criterion == DEFAULT:
stopping_criterion = ExpMAStoppingCriterion()
# Resolve `parameters` and update default bounds
param_dict, bounds_dict = get_parameters_and_bounds(mll)
if parameters is None:
parameters = {n: p for n, p in param_dict.items() if p.requires_grad}
if closure is None:
closure = get_loss_closure_with_grads(mll, parameters)
if closure_kwargs is not None:
closure = partial(closure, **closure_kwargs)
return torch_minimize(
closure=closure,
parameters=parameters,
bounds=bounds_dict if bounds is None else {**bounds_dict, **bounds},
optimizer=optimizer,
scheduler=scheduler,
step_limit=step_limit,
stopping_criterion=stopping_criterion,
callback=callback,
timeout_sec=timeout_sec,
)
def fit_gpytorch_scipy(
mll: MarginalLogLikelihood,
bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None,
method: str = "L-BFGS-B",
options: Optional[Dict[str, Any]] = None,
track_iterations: bool = False,
approx_mll: bool = False,
scipy_objective: TScipyObjective = _scipy_objective_and_grad,
module_to_array_func: TModToArray = module_to_array,
module_from_array_func: TArrayToMod = set_params_with_array,
**kwargs: Any,
) -> Tuple[MarginalLogLikelihood, Dict[str, Union[float, List[OptimizationResult]]]]:
r"""Legacy method for scipy-based fitting of gpytorch models.
The model and likelihood in mll must already be in train mode. This method requires
that the model has `train_inputs` and `train_targets`.
Args:
mll: MarginalLogLikelihood to be maximized.
bounds: A dictionary mapping parameter names to tuples of lower and upper
bounds.
method: Solver type, passed along to scipy.optimize.minimize.
options: Dictionary of solver options, passed along to scipy.optimize.minimize.
        approx_mll: If True, use gpytorch's approximate MLL computation. This is
            disabled by default since the stochasticity is an issue for
            deterministic optimizers. Enabling this is only recommended when
            working with large training data sets (n>2000).
Returns:
2-element tuple containing
- MarginalLogLikelihood with parameters optimized in-place.
- Dictionary with the following key/values:
"fopt": Best mll value.
"wall_time": Wall time of fitting.
"iterations": List of OptimizationResult objects with information on each
iteration. If track_iterations is False, will be empty.
"OptimizeResult": The result returned by `scipy.optim.minimize`.
"""
warn(
"`fit_gpytorch_scipy` is marked for deprecation, consider using "
"`scipy_minimize` or its model fitting helper `fit_gpytorch_mll_scipy`.",
DeprecationWarning,
)
start_time = monotonic()
iterations: List[OptimizationResult] = []
options = {} if options is None else options.copy()
exclude: Iterator[Union[Pattern, str]] = options.pop("exclude", None)
if exclude:
exclude, _ = zip( # get the qualified names of excluded parameters
*filterfalse(get_name_filter(exclude), mll.named_parameters())
)
x0, property_dict, bounds = module_to_array_func(
module=mll, exclude=exclude, bounds=bounds
)
if bounds is not None:
bounds = Bounds(lb=bounds[0], ub=bounds[1], keep_feasible=True)
def wrapper(x: ndarray) -> Tuple[float, ndarray]:
with fast_computations(log_prob=approx_mll):
return scipy_objective(x=x, mll=mll, property_dict=property_dict)
def store_iteration(xk):
iterations.append(
OptimizationResult(
step=len(iterations),
fval=float(wrapper(xk)[0]),
status=OptimizationStatus.RUNNING,
runtime=monotonic() - start_time,
)
)
result = minimize(
wrapper,
x0,
bounds=bounds,
method=method,
jac=True,
options=options,
callback=store_iteration if track_iterations else None,
)
info_dict = {
"fopt": float(result.fun),
"wall_time": monotonic() - start_time,
"iterations": iterations,
"OptimizeResult": result,
}
if not result.success:
try:
# Some result.message are bytes
msg = result.message.decode("ascii")
except AttributeError:
# Others are str
msg = result.message
warn(
f"Fitting failed with the optimizer reporting '{msg}'", OptimizationWarning
)
# Set to optimum
mll = module_from_array_func(mll, result.x, property_dict)
return mll, info_dict
def fit_gpytorch_torch(
mll: MarginalLogLikelihood,
bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None,
optimizer_cls: Optimizer = Adam,
options: Optional[Dict[str, Any]] = None,
track_iterations: bool = False,
approx_mll: bool = False,
) -> Tuple[MarginalLogLikelihood, Dict[str, Union[float, List[OptimizationResult]]]]:
r"""Legacy method for torch-based fitting of gpytorch models.
The model and likelihood in mll must already be in train mode.
Note: this method requires that the model has `train_inputs` and `train_targets`.
Args:
mll: MarginalLogLikelihood to be maximized.
bounds: An optional dictionary mapping parameter names to tuples
of lower and upper bounds. Bounds specified here take precedence
over bounds on the same parameters specified in the constraints
registered with the module.
optimizer_cls: Torch optimizer to use. Must not require a closure.
options: options for model fitting. Relevant options will be passed to
the `optimizer_cls`. Additionally, options can include: "disp"
to specify whether to display model fitting diagnostics and "maxiter"
to specify the maximum number of iterations.
Returns:
2-element tuple containing
- mll with parameters optimized in-place.
- Dictionary with the following key/values:
"fopt": Best mll value.
"wall_time": Wall time of fitting.
"iterations": List of OptimizationResult objects with information on each
iteration. If track_iterations is False, will be empty.
Example:
>>> gp = SingleTaskGP(train_X, train_Y)
>>> mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
>>> mll.train()
>>> fit_gpytorch_torch(mll)
>>> mll.eval()
"""
warn(
"`fit_gpytorch_torch` is marked for deprecation, consider using "
"`torch_minimize` or its model fitting helper `fit_gpytorch_mll_torch`.",
DeprecationWarning,
)
_options = {"maxiter": 100, "disp": True, "lr": 0.05}
_options.update(options or {})
exclude = _options.pop("exclude", None)
parameters = get_parameters(
mll,
requires_grad=True,
name_filter=None if exclude is None else get_name_filter(exclude),
)
optimizer = optimizer_cls(
params=list(parameters.values()), **_filter_kwargs(optimizer_cls, **_options)
)
iterations: List[OptimizationResult] = []
stopping_criterion = ExpMAStoppingCriterion(
**_filter_kwargs(ExpMAStoppingCriterion, **_options)
)
def closure() -> Tuple[Tensor, Tuple[Tensor, ...]]:
optimizer.zero_grad()
with fast_computations(log_prob=approx_mll):
out = mll.model(*mll.model.train_inputs)
loss = -mll(out, mll.model.train_targets, *_get_extra_mll_args(mll)).sum()
loss.backward()
return loss, tuple(param.grad for param in parameters.values())
def store_iteration(parameters: Dict[str, Tensor], result: OptimizationResult):
iterations.append(result)
result = fit_gpytorch_mll_torch(
mll=mll,
closure=closure,
bounds=bounds,
parameters=parameters,
optimizer=optimizer,
stopping_criterion=stopping_criterion,
callback=store_iteration if track_iterations else None,
)
return mll, {
"fopt": result.fval,
"wall_time": result.runtime,
"iterations": iterations,
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Callable, List, Optional, Union
import torch
from torch import Tensor
from torch.nn import Parameter
class HomotopySchedule(ABC):
@property
@abstractmethod
def num_steps(self) -> int:
"""Number of steps in the schedule."""
@property
@abstractmethod
def value(self) -> Any:
"""Current value in the schedule."""
@property
@abstractmethod
def should_stop(self) -> bool:
"""Return true if we have incremented past the end of the schedule."""
@abstractmethod
def restart(self) -> None:
"""Restart the schedule to start from the beginning."""
@abstractmethod
def step(self) -> None:
"""Move to solving the next problem."""
class FixedHomotopySchedule(HomotopySchedule):
"""Homotopy schedule with a fixed list of values."""
def __init__(self, values: List[Any]) -> None:
r"""Initialize FixedHomotopySchedule.
Args:
values: A list of values used in homotopy
"""
self._values = values
self.idx = 0
@property
def num_steps(self) -> int:
return len(self._values)
@property
def value(self) -> Any:
return self._values[self.idx]
@property
def should_stop(self) -> bool:
return self.idx == len(self._values)
def restart(self) -> None:
self.idx = 0
def step(self) -> None:
self.idx += 1
class LinearHomotopySchedule(FixedHomotopySchedule):
"""Linear homotopy schedule."""
def __init__(self, start: float, end: float, num_steps: int) -> None:
r"""Initialize LinearHomotopySchedule.
Args:
start: start value of homotopy
end: end value of homotopy
num_steps: number of steps in the homotopy schedule.
"""
super().__init__(
values=torch.linspace(start, end, num_steps, dtype=torch.double).tolist()
)
class LogLinearHomotopySchedule(FixedHomotopySchedule):
"""Log-linear homotopy schedule."""
def __init__(self, start: float, end: float, num_steps: int):
r"""Initialize LogLinearHomotopySchedule.
Args:
start: start value of homotopy
end: end value of homotopy
num_steps: number of steps in the homotopy schedule.
"""
super().__init__(
values=torch.logspace(
math.log10(start), math.log10(end), num_steps, dtype=torch.double
).tolist()
)
@dataclass
class HomotopyParameter:
r"""Homotopy parameter.
The parameter is expected to either be a torch parameter or a torch tensor which may
correspond to a buffer of a module. The parameter has a corresponding schedule.
"""
parameter: Union[Parameter, Tensor]
schedule: HomotopySchedule
class Homotopy:
"""Generic homotopy class.
This class is designed to be used in `optimize_acqf_homotopy`. Given a set of
homotopy parameters and corresponding schedules we step through the homotopies
until we have solved the final problem. We additionally support passing in a list
of callbacks that will be executed each time `step`, `reset`, and `restart` are
called.
"""
def __init__(
self,
homotopy_parameters: List[HomotopyParameter],
callbacks: Optional[List[Callable]] = None,
) -> None:
r"""Initialize the homotopy.
Args:
homotopy_parameters: List of homotopy parameters
callbacks: Optional list of callbacks that are executed each time
`restart`, `reset`, or `step` are called. These may be used to, e.g.,
reinitialize the acquisition function which is needed when using qNEHVI.
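        Example:
            >>> # Illustrative sketch (not from the original source): anneal a scalar
            >>> # tensor `p` (e.g., a module buffer) from 1.0 down to 0.1 in 5 steps.
            >>> p = torch.tensor(1.0)
            >>> schedule = LogLinearHomotopySchedule(start=1.0, end=0.1, num_steps=5)
            >>> homotopy = Homotopy(
            >>>     homotopy_parameters=[HomotopyParameter(parameter=p, schedule=schedule)]
            >>> )
            >>> while not homotopy.should_stop:
            >>>     ...  # solve the problem for the current value of `p`
            >>>     homotopy.step()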
"""
self._homotopy_parameters = homotopy_parameters
self._callbacks = callbacks or []
self._original_values = [
hp.parameter.item() for hp in self._homotopy_parameters
]
assert all(
isinstance(hp.parameter, Parameter) or isinstance(hp.parameter, Tensor)
for hp in self._homotopy_parameters
)
# Assume the same number of steps for now
assert len({h.schedule.num_steps for h in self._homotopy_parameters}) == 1
# Initialize the homotopy parameters
self.restart()
def _execute_callbacks(self) -> None:
"""Execute the callbacks."""
for callback in self._callbacks:
callback()
@property
def should_stop(self) -> bool:
"""Returns true if all schedules have reached the end."""
return all(h.schedule.should_stop for h in self._homotopy_parameters)
def restart(self) -> None:
"""Restart the homotopy to use the initial value in the schedule."""
for hp in self._homotopy_parameters:
hp.schedule.restart()
hp.parameter.data.fill_(hp.schedule.value)
self._execute_callbacks()
def reset(self) -> None:
"""Reset the homotopy parameter to their original values."""
for hp, val in zip(self._homotopy_parameters, self._original_values):
hp.parameter.data.fill_(val)
self._execute_callbacks()
def step(self) -> None:
"""Take a step according to the schedules."""
for hp in self._homotopy_parameters:
hp.schedule.step()
if not hp.schedule.should_stop:
hp.parameter.data.fill_(hp.schedule.value)
self._execute_callbacks()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.optim.closures import (
ForwardBackwardClosure,
get_loss_closure,
get_loss_closure_with_grads,
)
from botorch.optim.core import (
OptimizationResult,
OptimizationStatus,
scipy_minimize,
torch_minimize,
)
from botorch.optim.homotopy import (
FixedHomotopySchedule,
Homotopy,
HomotopyParameter,
LinearHomotopySchedule,
LogLinearHomotopySchedule,
)
from botorch.optim.initializers import initialize_q_batch, initialize_q_batch_nonneg
from botorch.optim.numpy_converter import module_to_array, set_params_with_array
from botorch.optim.optimize import (
gen_batch_initial_conditions,
optimize_acqf,
optimize_acqf_cyclic,
optimize_acqf_discrete,
optimize_acqf_discrete_local_search,
optimize_acqf_mixed,
)
from botorch.optim.optimize_homotopy import optimize_acqf_homotopy
from botorch.optim.stopping import ExpMAStoppingCriterion
__all__ = [
"ForwardBackwardClosure",
"get_loss_closure",
"get_loss_closure_with_grads",
"gen_batch_initial_conditions",
"initialize_q_batch",
"initialize_q_batch_nonneg",
"OptimizationResult",
"OptimizationStatus",
"optimize_acqf",
"optimize_acqf_cyclic",
"optimize_acqf_discrete",
"optimize_acqf_discrete_local_search",
"optimize_acqf_mixed",
"optimize_acqf_homotopy",
"module_to_array",
"scipy_minimize",
"set_params_with_array",
"torch_minimize",
"ExpMAStoppingCriterion",
"FixedHomotopySchedule",
"Homotopy",
"HomotopyParameter",
"LinearHomotopySchedule",
"LogLinearHomotopySchedule",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Core abstractions and generic optimizers."""
from __future__ import annotations
import re
from dataclasses import dataclass, replace
from enum import auto, Enum
from itertools import count
from sys import maxsize
from time import monotonic
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from botorch.optim.closures import NdarrayOptimizationClosure
from botorch.optim.utils.numpy_utils import get_bounds_as_ndarray
from botorch.optim.utils.timeout import minimize_with_timeout
from numpy import asarray, float64 as np_float64, ndarray
from torch import Tensor
from torch.optim.adam import Adam
from torch.optim.optimizer import Optimizer
try:
from torch.optim.lr_scheduler import LRScheduler
except ImportError: # pragma: no cover
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # pragma: no cover
_LBFGSB_MAXITER_MAXFUN_REGEX = re.compile( # regex for maxiter and maxfun messages
"TOTAL NO. of (ITERATIONS REACHED LIMIT|f AND g EVALUATIONS EXCEEDS LIMIT)"
)
class OptimizationStatus(int, Enum):
RUNNING = auto() # incomplete
SUCCESS = auto() # optimizer converged
FAILURE = auto() # terminated abnormally
STOPPED = auto() # stopped due to user provided criterion
@dataclass
class OptimizationResult:
step: int
fval: Union[float, int]
status: OptimizationStatus
runtime: Optional[float] = None
message: Optional[str] = None
def scipy_minimize(
closure: Union[
Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]],
NdarrayOptimizationClosure,
],
parameters: Dict[str, Tensor],
bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None,
callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None,
x0: Optional[ndarray] = None,
method: str = "L-BFGS-B",
options: Optional[Dict[str, Any]] = None,
timeout_sec: Optional[float] = None,
) -> OptimizationResult:
r"""Generic scipy.optimize.minimize-based optimization routine.
Args:
closure: Callable that returns a tensor and an iterable of gradient tensors or
NdarrayOptimizationClosure instance.
parameters: A dictionary of tensors to be optimized.
bounds: A dictionary mapping parameter names to lower and upper bounds.
callback: A callable taking `parameters` and an OptimizationResult as arguments.
x0: An optional initialization vector passed to scipy.optimize.minimize.
method: Solver type, passed along to scipy.minimize.
options: Dictionary of solver options, passed along to scipy.minimize.
timeout_sec: Timeout in seconds to wait before aborting the optimization loop
if not converged (will return the best found solution thus far).
Returns:
An OptimizationResult summarizing the final state of the run.
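    Example:
        >>> # Illustrative sketch (not from the original source): minimize (x - 1)^2
        >>> # for a single parameter tensor via L-BFGS-B.
        >>> x = torch.tensor([0.0], requires_grad=True)
        >>> def closure():
        >>>     if x.grad is not None:
        >>>         x.grad.zero_()
        >>>     loss = ((x - 1.0) ** 2).sum()
        >>>     loss.backward()
        >>>     return loss, [x.grad]
        >>> result = scipy_minimize(closure, parameters={"x": x})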
"""
start_time = monotonic()
wrapped_closure = (
closure
if isinstance(closure, NdarrayOptimizationClosure)
else NdarrayOptimizationClosure(closure, parameters)
)
if bounds is None:
bounds_np = None
else:
bounds_np = get_bounds_as_ndarray(parameters, bounds)
if callback is None:
wrapped_callback = None
else:
call_counter = count(1) # callbacks are typically made at the end of each iter
def wrapped_callback(x: ndarray):
result = OptimizationResult(
step=next(call_counter),
fval=float(wrapped_closure(x)[0]),
status=OptimizationStatus.RUNNING,
runtime=monotonic() - start_time,
)
return callback(parameters, result) # pyre-ignore [29]
raw = minimize_with_timeout(
wrapped_closure,
wrapped_closure.state if x0 is None else x0.astype(np_float64, copy=False),
jac=True,
bounds=bounds_np,
method=method,
options=options,
callback=wrapped_callback,
timeout_sec=timeout_sec,
)
# Post-processing and outcome handling
wrapped_closure.state = asarray(raw.x) # set parameter state to optimal values
msg = raw.message if isinstance(raw.message, str) else raw.message.decode("ascii")
if raw.success:
status = OptimizationStatus.SUCCESS
else:
status = ( # Check whether we stopped due to reaching maxfun or maxiter
OptimizationStatus.STOPPED
if _LBFGSB_MAXITER_MAXFUN_REGEX.search(msg)
or "Optimization timed out after" in msg
else OptimizationStatus.FAILURE
)
return OptimizationResult(
fval=raw.fun,
step=raw.nit,
status=status,
message=msg,
runtime=monotonic() - start_time,
)
def torch_minimize(
closure: Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]],
parameters: Dict[str, Tensor],
bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None,
callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None,
optimizer: Union[Optimizer, Callable[[List[Tensor]], Optimizer]] = Adam,
scheduler: Optional[Union[LRScheduler, Callable[[Optimizer], LRScheduler]]] = None,
step_limit: Optional[int] = None,
timeout_sec: Optional[float] = None,
stopping_criterion: Optional[Callable[[Tensor], bool]] = None,
) -> OptimizationResult:
r"""Generic torch.optim-based optimization routine.
Args:
closure: Callable that returns a tensor and an iterable of gradient tensors.
Responsible for setting relevant parameters' `grad` attributes.
parameters: A dictionary of tensors to be optimized.
bounds: An optional dictionary of bounds for elements of `parameters`.
callback: A callable taking `parameters` and an OptimizationResult as arguments.
optimizer: A `torch.optim.Optimizer` instance or a factory that takes
a list of parameters and returns an `Optimizer` instance.
scheduler: A `torch.optim.lr_scheduler._LRScheduler` instance or a factory
            that takes an `Optimizer` instance and returns an `_LRScheduler` instance.
step_limit: Integer specifying a maximum number of optimization steps.
One of `step_limit`, `stopping_criterion`, or `timeout_sec` must be passed.
timeout_sec: Timeout in seconds before terminating the optimization loop.
One of `step_limit`, `stopping_criterion`, or `timeout_sec` must be passed.
stopping_criterion: A StoppingCriterion for the optimization loop.
Returns:
An OptimizationResult summarizing the final state of the run.
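    Example:
        >>> # Illustrative sketch (not from the original source): minimize (x - 1)^2
        >>> # with Adam for at most 100 steps.
        >>> x = torch.tensor([0.0], requires_grad=True)
        >>> def closure():
        >>>     if x.grad is not None:
        >>>         x.grad.zero_()
        >>>     loss = ((x - 1.0) ** 2).sum()
        >>>     loss.backward()
        >>>     return loss, [x.grad]
        >>> result = torch_minimize(closure, parameters={"x": x}, step_limit=100)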
"""
result: OptimizationResult
start_time = monotonic()
if step_limit is None:
if stopping_criterion is None and timeout_sec is None:
raise RuntimeError("No termination conditions were given.")
step_limit = maxsize
if not isinstance(optimizer, Optimizer):
optimizer = optimizer(list(parameters.values()))
if not (scheduler is None or isinstance(scheduler, LRScheduler)):
scheduler = scheduler(optimizer)
_bounds = (
{}
if bounds is None
else {name: limits for name, limits in bounds.items() if name in parameters}
)
for step in range(1, step_limit + 1):
fval, _ = closure()
runtime = monotonic() - start_time
result = OptimizationResult(
step=step,
fval=fval.detach().cpu().item(),
status=OptimizationStatus.RUNNING,
runtime=runtime,
)
# TODO: Update stopping_criterion API to return a message.
if stopping_criterion and stopping_criterion(fval):
result.status = OptimizationStatus.STOPPED
result.message = "`torch_minimize` stopped due to `stopping_criterion`."
if timeout_sec is not None and runtime >= timeout_sec:
result.status = OptimizationStatus.STOPPED
result.message = (
f"`torch_minimize` stopped due to timeout after {runtime} seconds."
)
if callback:
callback(parameters, result)
if result.status != OptimizationStatus.RUNNING:
break
optimizer.step()
for name, (lower, upper) in _bounds.items():
parameters[name].data = parameters[name].clamp(min=lower, max=upper)
if scheduler:
scheduler.step()
if result.status != OptimizationStatus.RUNNING:
return replace(result, runtime=monotonic() - start_time)
# Account for final parameter update when stopping due to step_limit
return OptimizationResult(
step=step,
fval=closure()[0].detach().cpu().item(),
status=OptimizationStatus.STOPPED,
runtime=monotonic() - start_time,
message=f"`torch_minimize` stopped after reaching step_limit={step_limit}.",
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
A converter that simplifies using numpy-based optimizers with generic torch
`nn.Module` classes. This enables using a `scipy.optimize.minimize` optimizer
for optimizing module parameters.
"""
from __future__ import annotations
from collections import OrderedDict
from math import inf
from numbers import Number
from typing import Dict, List, Optional, Set, Tuple
from warnings import warn
import numpy as np
import torch
from botorch.optim.utils import (
_get_extra_mll_args,
_handle_numerical_errors,
get_name_filter,
get_parameters_and_bounds,
TorchAttr,
)
from gpytorch.mlls import MarginalLogLikelihood
from torch.nn import Module
def module_to_array(
module: Module,
bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None,
exclude: Optional[Set[str]] = None,
) -> Tuple[np.ndarray, Dict[str, TorchAttr], Optional[np.ndarray]]:
r"""Extract named parameters from a module into a numpy array.
Only extracts parameters with requires_grad, since it is meant for optimizing.
Args:
module: A module with parameters. May specify parameter constraints in
a `named_parameters_and_constraints` method.
        bounds: A dictionary mapping parameter names to tuples of lower and upper
            bounds. Bounds specified here take precedence
over bounds on the same parameters specified in the constraints
registered with the module.
exclude: A list of parameter names that are to be excluded from extraction.
Returns:
3-element tuple containing
- The parameter values as a numpy array.
- An ordered dictionary with the name and tensor attributes of each
parameter.
- A `2 x n_params` numpy array with lower and upper bounds if at least
one constraint is finite, and None otherwise.
Example:
>>> mll = ExactMarginalLogLikelihood(model.likelihood, model)
>>> parameter_array, property_dict, bounds_out = module_to_array(mll)
"""
warn(
"`module_to_array` is marked for deprecation, consider using "
"`get_parameters_and_bounds`, `get_parameters_as_ndarray_1d`, or "
"`get_bounds_as_ndarray` instead.",
DeprecationWarning,
)
param_dict, bounds_dict = get_parameters_and_bounds(
module=module,
name_filter=None if exclude is None else get_name_filter(exclude),
requires_grad=True,
)
if bounds is not None:
bounds_dict.update(bounds)
# Record tensor metadata and read parameter values to the tape
param_tape: List[Number] = []
property_dict = OrderedDict()
with torch.no_grad():
for name, param in param_dict.items():
property_dict[name] = TorchAttr(param.shape, param.dtype, param.device)
param_tape.extend(param.view(-1).cpu().double().tolist())
# Extract lower and upper bounds
start = 0
bounds_np = None
params_np = np.asarray(param_tape)
for name, param in param_dict.items():
numel = param.numel()
if name in bounds_dict:
for row, bound in enumerate(bounds_dict[name]):
if bound is None:
continue
if torch.is_tensor(bound):
if (bound == (2 * row - 1) * inf).all():
continue
bound = bound.detach().cpu()
elif bound == (2 * row - 1) * inf:
continue
if bounds_np is None:
bounds_np = np.full((2, len(params_np)), ((-inf,), (inf,)))
bounds_np[row, start : start + numel] = bound
start += numel
return params_np, property_dict, bounds_np
def set_params_with_array(
module: Module, x: np.ndarray, property_dict: Dict[str, TorchAttr]
) -> Module:
r"""Set module parameters with values from numpy array.
Args:
module: Module with parameters to be set
x: Numpy array with parameter values
property_dict: Dictionary of parameter names and torch attributes as
returned by module_to_array.
Returns:
Module: module with parameters updated in-place.
Example:
>>> mll = ExactMarginalLogLikelihood(model.likelihood, model)
>>> parameter_array, property_dict, bounds_out = module_to_array(mll)
>>> parameter_array += 0.1 # perturb parameters (for example only)
>>> mll = set_params_with_array(mll, parameter_array, property_dict)
"""
warn(
"`_set_params_with_array` is marked for deprecation, consider using "
"`set_parameters_from_ndarray_1d` instead.",
DeprecationWarning,
)
param_dict = OrderedDict(module.named_parameters())
start_idx = 0
for p_name, attrs in property_dict.items():
# Construct the new tensor
if len(attrs.shape) == 0: # deal with scalar tensors
end_idx = start_idx + 1
new_data = torch.tensor(
x[start_idx], dtype=attrs.dtype, device=attrs.device
)
else:
end_idx = start_idx + np.prod(attrs.shape)
new_data = torch.tensor(
x[start_idx:end_idx], dtype=attrs.dtype, device=attrs.device
).view(*attrs.shape)
start_idx = end_idx
# Update corresponding parameter in-place. Disable autograd to update.
param_dict[p_name].requires_grad_(False)
param_dict[p_name].copy_(new_data)
param_dict[p_name].requires_grad_(True)
return module
def _scipy_objective_and_grad(
x: np.ndarray, mll: MarginalLogLikelihood, property_dict: Dict[str, TorchAttr]
) -> Tuple[float, np.ndarray]:
r"""Get objective and gradient in format that scipy expects.
Args:
x: The (flattened) input parameters.
mll: The MarginalLogLikelihood module to evaluate.
property_dict: The property dictionary required to "unflatten" the input
parameter vector, as generated by `module_to_array`.
Returns:
2-element tuple containing
- The objective value.
- The gradient of the objective.
"""
warn("`_scipy_objective_and_grad` is marked for deprecation.", DeprecationWarning)
mll = set_params_with_array(mll, x, property_dict)
train_inputs, train_targets = mll.model.train_inputs, mll.model.train_targets
mll.zero_grad()
try: # catch linear algebra errors in gpytorch
output = mll.model(*train_inputs)
args = [output, train_targets] + _get_extra_mll_args(mll)
loss = -mll(*args).sum()
except RuntimeError as e:
return _handle_numerical_errors(error=e, x=x)
loss.backward()
i = 0
param_dict = OrderedDict(mll.named_parameters())
grad = np.zeros(sum([tattr.shape.numel() for tattr in property_dict.values()]))
for p_name in property_dict:
t = param_dict[p_name]
size = t.numel()
if t.requires_grad and t.grad is not None:
grad[i : i + size] = t.grad.detach().view(-1).cpu().double().clone().numpy()
i += size
mll.zero_grad()
return loss.item(), grad
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from abc import ABC, abstractmethod
import torch
from torch import Tensor
class StoppingCriterion(ABC):
r"""Base class for evaluating optimization convergence.
Stopping criteria are implemented as objects rather than functions, so that they
can keep track of past function values between optimization steps.
:meta private:
"""
@abstractmethod
def evaluate(self, fvals: Tensor) -> bool:
r"""Evaluate the stopping criterion.
Args:
fvals: tensor containing function values for the current iteration. If
`fvals` contains more than one element, then the stopping criterion is
evaluated element-wise and True is returned if the stopping criterion is
true for all elements.
Returns:
Stopping indicator (if True, stop the optimization).
"""
pass # pragma: no cover
def __call__(self, fvals: Tensor) -> bool:
return self.evaluate(fvals)
class ExpMAStoppingCriterion(StoppingCriterion):
r"""Exponential moving average stopping criterion.
Computes an exponentially weighted moving average over window length `n_window`
and checks whether the relative decrease in this moving average between steps
is less than a provided tolerance level. That is, in iteration `i`, it computes
v[i,j] := fvals[i - n_window + j] * w[j]
for all `j = 0, ..., n_window - 1`, where the (normalized) weights are
`w[j] = exp(-eta * (1 - j / (n_window - 1)))`.
Letting `ma[i] := sum_j(v[i,j])`, the criterion evaluates to `True` whenever
(ma[i-1] - ma[i]) / abs(ma[i-1]) < rel_tol (if minimize=True)
(ma[i] - ma[i-1]) / abs(ma[i-1]) < rel_tol (if minimize=False)
"""
def __init__(
self,
maxiter: int = 10000,
minimize: bool = True,
n_window: int = 10,
eta: float = 1.0,
rel_tol: float = 1e-5,
) -> None:
r"""Exponential moving average stopping criterion.
Args:
maxiter: Maximum number of iterations.
minimize: If True, assume minimization.
n_window: The size of the exponential moving average window.
eta: The exponential decay factor in the weights.
rel_tol: Relative tolerance for termination.
"""
self.maxiter = maxiter
self.minimize = minimize
self.n_window = n_window
self.rel_tol = rel_tol
self.iter = 0
weights = torch.exp(torch.linspace(-eta, 0, self.n_window))
self.weights = weights / weights.sum()
self._prev_fvals = None
def evaluate(self, fvals: Tensor) -> bool:
r"""Evaluate the stopping criterion.
Args:
fvals: tensor containing function values for the current iteration. If
`fvals` contains more than one element, then the stopping criterion is
evaluated element-wise and True is returned if the stopping criterion is
true for all elements.
TODO: add support for utilizing gradient information
Returns:
Stopping indicator (if True, stop the optimization).
"""
self.iter += 1
if self.iter == self.maxiter:
return True
if self._prev_fvals is None:
self._prev_fvals = fvals.unsqueeze(0)
else:
self._prev_fvals = torch.cat(
[self._prev_fvals[-self.n_window :], fvals.unsqueeze(0)]
)
if self._prev_fvals.size(0) < self.n_window + 1:
return False
weights = self.weights
weights = weights.to(fvals)
if self._prev_fvals.ndim > 1:
weights = weights.unsqueeze(-1)
# TODO: Update the exp moving average efficiently
prev_ma = (self._prev_fvals[:-1] * weights).sum(dim=0)
ma = (self._prev_fvals[1:] * weights).sum(dim=0)
# TODO: Handle approx. zero losses (normalize by min/max loss range)
rel_delta = (prev_ma - ma) / prev_ma.abs()
if not self.minimize:
rel_delta = -rel_delta
if torch.max(rel_delta) < self.rel_tol:
return True
return False
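# Hedged usage sketch (added for illustration, not part of the module API):
# `optimizer` and `closure` below are hypothetical stand-ins for a generic
# torch training loop in which the criterion is queried once per step.
#
# >>> stop = ExpMAStoppingCriterion(maxiter=1000, n_window=10, rel_tol=1e-5)
# >>> for _ in range(1000):
# >>>     optimizer.zero_grad()
# >>>     loss = closure()  # scalar tensor, e.g. the negative MLL
# >>>     loss.backward()
# >>>     optimizer.step()
# >>>     if stop.evaluate(fvals=loss.detach()):
# >>>         break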
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
References
.. [Regis]
R. G. Regis, C. A. Shoemaker. Combining radial basis function
surrogates and dynamic coordinate search in high-dimensional
expensive black-box optimization, Engineering Optimization, 2013.
"""
from __future__ import annotations
import warnings
from math import ceil
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from botorch import settings
from botorch.acquisition import analytic, monte_carlo, multi_objective
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.knowledge_gradient import (
_get_value_function,
qKnowledgeGradient,
)
from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError
from botorch.exceptions.warnings import (
BadInitialCandidatesWarning,
BotorchWarning,
SamplingWarning,
)
from botorch.models.model import Model
from botorch.optim.utils import fix_features, get_X_baseline
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.sampling import (
batched_multinomial,
draw_sobol_samples,
get_polytope_samples,
manual_seed,
)
from botorch.utils.transforms import normalize, standardize, unnormalize
from torch import Tensor
from torch.distributions import Normal
from torch.quasirandom import SobolEngine
TGenInitialConditions = Callable[
[
# reasoning behind this annotation: contravariance
qKnowledgeGradient,
Tensor,
int,
int,
int,
Optional[Dict[int, float]],
Optional[Dict[str, Union[bool, float, int]]],
Optional[List[Tuple[Tensor, Tensor, float]]],
Optional[List[Tuple[Tensor, Tensor, float]]],
],
Optional[Tensor],
]
def transform_constraints(
constraints: Union[List[Tuple[Tensor, Tensor, float]], None], q: int, d: int
) -> List[Tuple[Tensor, Tensor, float]]:
r"""Transform constraints to sample from a d*q-dimensional space instead of a
d-dimensional space.
This function assumes that constraints are the same for each input batch,
and broadcasts the constraints accordingly to the input batch shape.
Args:
constraints: A list of tuples (indices, coefficients, rhs), with each tuple
encoding an (in-)equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) (>)= rhs`.
If `indices` is a 2-d Tensor, this supports specifying constraints across
the points in the `q`-batch (inter-point constraints). If `None`, this
function is a no-op and simply returns `None`.
q: Size of the `q`-batch.
d: Dimensionality of the problem.
Returns:
List[Tuple[Tensor, Tensor, float]]: List of transformed constraints.
"""
if constraints is None:
return None
transformed = []
for constraint in constraints:
if len(constraint[0].shape) == 1:
transformed += transform_intra_point_constraint(constraint, d, q)
else:
transformed.append(transform_inter_point_constraint(constraint, d))
return transformed
def transform_intra_point_constraint(
constraint: Tuple[Tensor, Tensor, float], d: int, q: int
) -> List[Tuple[Tensor, Tensor, float]]:
r"""Transforms an intra-point/pointwise constraint from
d-dimensional space to a d*q-dimensional space.
Args:
constraint: A tuple (indices, coefficients, rhs) encoding an (in-)equality
constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) (>)= rhs`. Here `indices` must
be one-dimensional, and the constraint is applied to all points within the
`q`-batch.
d: Dimensionality of the problem.
q: Size of the `q`-batch.
Raises:
ValueError: If indices in the constraints are larger than the
dimensionality d of the problem.
Returns:
List[Tuple[Tensor, Tensor, float]]: List of transformed constraints.
"""
indices, coefficients, rhs = constraint
if indices.max() >= d:
raise ValueError(
f"Constraint indices cannot exceed the problem dimension {d=}."
)
return [
(
torch.tensor(
[i * d + j for j in indices], dtype=torch.int64, device=indices.device
),
coefficients,
rhs,
)
for i in range(q)
]
def transform_inter_point_constraint(
constraint: Tuple[Tensor, Tensor, float], d: int
) -> Tuple[Tensor, Tensor, float]:
r"""Transforms an inter-point constraint from
d-dimensional space to a d*q-dimensional space.
Args:
constraint: A tuple (indices, coefficients, rhs) encoding an (in-)equality
constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) (>)= rhs`. `indices` must be a
2-d Tensor, where in each row `indices[i] = (k_i, l_i)` the first index
`k_i` corresponds to the `k_i`-th element of the `q`-batch and the second
index `l_i` corresponds to the `l_i`-th feature of that element.
d: Dimensionality of the problem.
Raises:
ValueError: If indices in the constraints are larger than the
dimensionality d of the problem.
Returns:
List[Tuple[Tensor, Tensor, float]]: Transformed constraint.
"""
indices, coefficients, rhs = constraint
if indices[:, 1].max() >= d:
raise ValueError(
f"Constraint indices cannot exceed the problem dimension {d=}."
)
return (
torch.tensor(
[r[0] * d + r[1] for r in indices], dtype=torch.int64, device=indices.device
),
coefficients,
rhs,
)
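# Hedged example (illustrative only): for `d=3` and `q=2`, an intra-point
# constraint `x[0] + x[1] >= 1` is duplicated for every point in the q-batch,
# while an inter-point constraint couples specific points of the q-batch.
#
# >>> intra = (torch.tensor([0, 1]), torch.tensor([1.0, 1.0]), 1.0)
# >>> transform_intra_point_constraint(intra, d=3, q=2)
# [(tensor([0, 1]), tensor([1., 1.]), 1.0), (tensor([3, 4]), tensor([1., 1.]), 1.0)]
# >>> inter = (torch.tensor([[0, 0], [1, 2]]), torch.tensor([1.0, -1.0]), 0.0)
# >>> transform_inter_point_constraint(inter, d=3)
# (tensor([0, 5]), tensor([ 1., -1.]), 0.0)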
def sample_q_batches_from_polytope(
n: int,
q: int,
bounds: Tensor,
n_burnin: int,
thinning: int,
seed: int,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
) -> Tensor:
r"""Samples `n` q-baches from a polytope of dimension `d`.
Args:
n: Number of q-batches to sample.
q: Number of samples per q-batch.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
n_burnin: The number of burn-in samples for the Markov chain sampler.
thinning: The amount of thinning (number of steps to take between
returning samples).
seed: The random seed.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
Returns:
A `n x q x d`-dim tensor of samples.
"""
# check if inter-point constraints are present
inter_point = any(
len(indices.shape) > 1
for constraints in (inequality_constraints or [], equality_constraints or [])
for indices, _, _ in constraints
)
if inter_point:
samples = get_polytope_samples(
n=n,
bounds=torch.hstack([bounds for _ in range(q)]),
inequality_constraints=transform_constraints(
constraints=inequality_constraints, q=q, d=bounds.shape[1]
),
equality_constraints=transform_constraints(
constraints=equality_constraints, q=q, d=bounds.shape[1]
),
seed=seed,
n_burnin=n_burnin,
thinning=thinning * q,
)
else:
samples = get_polytope_samples(
n=n * q,
bounds=bounds,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
seed=seed,
n_burnin=n_burnin,
thinning=thinning,
)
return samples.view(n, q, -1).cpu()
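# Hedged example (illustrative only; the underlying polytope sampler is
# stochastic, so only the output shape is shown):
#
# >>> bounds = torch.stack([torch.zeros(3), torch.ones(3)])
# >>> X = sample_q_batches_from_polytope(
# >>>     n=4,
# >>>     q=2,
# >>>     bounds=bounds,
# >>>     n_burnin=200,
# >>>     thinning=16,
# >>>     seed=1234,
# >>>     inequality_constraints=[
# >>>         (torch.tensor([0, 1]), torch.tensor([1.0, 1.0]), 0.5)
# >>>     ],
# >>> )
# >>> X.shape
# torch.Size([4, 2, 3])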
def gen_batch_initial_conditions(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
num_restarts: int,
raw_samples: int,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[Dict[str, Union[bool, float, int]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
generator: Optional[Callable[[int, int, int], Tensor]] = None,
) -> Tensor:
r"""Generate a batch of initial conditions for random-restart optimziation.
TODO: Support t-batches of initial conditions.
Args:
acq_function: The acquisition function to be optimized.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
q: The number of candidates to consider.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of raw samples to consider in the initialization
heuristic. Note: if `sample_around_best` is True (the default is False),
then `2 * raw_samples` samples are used.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
options: Options for initial condition generation. For valid options see
`initialize_q_batch` and `initialize_q_batch_nonneg`. If `options`
contains a `nonnegative=True` entry, then `acq_function` is
assumed to be non-negative (useful when using custom acquisition
functions). In addition, an "init_batch_limit" option can be passed
to specify the batch limit for the initialization. This is useful
for avoiding memory limits when computing the batch posterior over
raw samples.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
generator: Callable for generating samples that are then further
processed. It receives `n`, `q` and `seed` as arguments and
returns a tensor of shape `n x q x d`.
Returns:
A `num_restarts x q x d` tensor of initial conditions.
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0.], [1.]])
>>> Xinit = gen_batch_initial_conditions(
>>> qEI, bounds, q=3, num_restarts=25, raw_samples=500
>>> )
"""
if bounds.isinf().any():
raise NotImplementedError(
"Currently only finite values in `bounds` are supported "
"for generating initial conditions for optimization."
)
options = options or {}
sample_around_best = options.get("sample_around_best", False)
if sample_around_best and equality_constraints:
raise UnsupportedError(
"Option 'sample_around_best' is not supported when equality"
"constraints are present."
)
if sample_around_best and generator:
raise UnsupportedError(
"Option 'sample_around_best' is not supported when custom "
"generator is be used."
)
seed: Optional[int] = options.get("seed")
batch_limit: Optional[int] = options.get(
"init_batch_limit", options.get("batch_limit")
)
factor, max_factor = 1, 5
init_kwargs = {}
device = bounds.device
bounds_cpu = bounds.cpu()
if "eta" in options:
init_kwargs["eta"] = options.get("eta")
if options.get("nonnegative") or is_nonnegative(acq_function):
init_func = initialize_q_batch_nonneg
if "alpha" in options:
init_kwargs["alpha"] = options.get("alpha")
else:
init_func = initialize_q_batch
q = 1 if q is None else q
# the dimension the samples are drawn from
effective_dim = bounds.shape[-1] * q
if effective_dim > SobolEngine.MAXDIM and settings.debug.on():
warnings.warn(
f"Sample dimension q*d={effective_dim} exceeding Sobol max dimension "
f"({SobolEngine.MAXDIM}). Using iid samples instead.",
SamplingWarning,
)
while factor < max_factor:
with warnings.catch_warnings(record=True) as ws:
n = raw_samples * factor
if generator is not None:
X_rnd = generator(n, q, seed)
elif inequality_constraints is None and equality_constraints is None:
if effective_dim <= SobolEngine.MAXDIM:
X_rnd = draw_sobol_samples(bounds=bounds_cpu, n=n, q=q, seed=seed)
else:
with manual_seed(seed):
# load on cpu
X_rnd_nlzd = torch.rand(
n, q, bounds_cpu.shape[-1], dtype=bounds.dtype
)
X_rnd = bounds_cpu[0] + (bounds_cpu[1] - bounds_cpu[0]) * X_rnd_nlzd
else:
X_rnd = sample_q_batches_from_polytope(
n=n,
q=q,
bounds=bounds,
n_burnin=options.get("n_burnin", 10000),
thinning=options.get("thinning", 32),
seed=seed,
equality_constraints=equality_constraints,
inequality_constraints=inequality_constraints,
)
# sample points around best
if sample_around_best:
X_best_rnd = sample_points_around_best(
acq_function=acq_function,
n_discrete_points=n * q,
sigma=options.get("sample_around_best_sigma", 1e-3),
bounds=bounds,
subset_sigma=options.get("sample_around_best_subset_sigma", 1e-1),
prob_perturb=options.get("sample_around_best_prob_perturb"),
)
if X_best_rnd is not None:
X_rnd = torch.cat(
[
X_rnd,
X_best_rnd.view(n, q, bounds.shape[-1]).cpu(),
],
dim=0,
)
X_rnd = fix_features(X_rnd, fixed_features=fixed_features)
with torch.no_grad():
if batch_limit is None:
batch_limit = X_rnd.shape[0]
Y_rnd_list = []
start_idx = 0
while start_idx < X_rnd.shape[0]:
end_idx = min(start_idx + batch_limit, X_rnd.shape[0])
Y_rnd_curr = acq_function(
X_rnd[start_idx:end_idx].to(device=device)
).cpu()
Y_rnd_list.append(Y_rnd_curr)
start_idx += batch_limit
Y_rnd = torch.cat(Y_rnd_list)
batch_initial_conditions = init_func(
X=X_rnd, Y=Y_rnd, n=num_restarts, **init_kwargs
).to(device=device)
if not any(issubclass(w.category, BadInitialCandidatesWarning) for w in ws):
return batch_initial_conditions
if factor < max_factor:
factor += 1
if seed is not None:
seed += 1 # make sure to sample different X_rnd
warnings.warn(
"Unable to find non-zero acquisition function values - initial conditions "
"are being selected randomly.",
BadInitialCandidatesWarning,
)
return batch_initial_conditions
def gen_one_shot_kg_initial_conditions(
acq_function: qKnowledgeGradient,
bounds: Tensor,
q: int,
num_restarts: int,
raw_samples: int,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[Dict[str, Union[bool, float, int]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
) -> Optional[Tensor]:
r"""Generate a batch of smart initializations for qKnowledgeGradient.
This function generates initial conditions for optimizing one-shot KG using
the maximizer of the posterior objective. Intuitively, the maximizer of the
fantasized posterior will often be close to a maximizer of the current
posterior. This function uses that fact to generate the initial conditions
for the fantasy points. Specifically, a fraction of `1 - frac_random` (see
options) is generated by sampling from the set of maximizers of the
posterior objective (obtained via random restart optimization) according to
a softmax transformation of their respective values. This means that this
initialization strategy internally solves an acquisition function
maximization problem. The remaining `frac_random` fantasy points as well as
all `q` candidate points are chosen according to the standard initialization
strategy in `gen_batch_initial_conditions`.
Args:
acq_function: The qKnowledgeGradient instance to be optimized.
bounds: A `2 x d` tensor of lower and upper bounds for each column of
task features.
q: The number of candidates to consider.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of raw samples to consider in the initialization
heuristic.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
options: Options for initial condition generation. These contain all
settings for the standard heuristic initialization from
`gen_batch_initial_conditions`. In addition, they contain
`frac_random` (the fraction of fully random fantasy points),
`num_inner_restarts` and `raw_inner_samples` (the number of random
restarts and raw samples for solving the posterior objective
maximization problem, respectively) and `eta` (temperature parameter
for sampling heuristic from posterior objective maximizers).
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
Returns:
A `num_restarts x q' x d` tensor that can be used as initial conditions
for `optimize_acqf()`. Here `q' = q + num_fantasies` is the total number
of points (candidate points plus fantasy points).
Example:
>>> qKG = qKnowledgeGradient(model, num_fantasies=64)
>>> bounds = torch.tensor([[0., 0.], [1., 1.]])
>>> Xinit = gen_one_shot_kg_initial_conditions(
>>> qKG, bounds, q=3, num_restarts=10, raw_samples=512,
>>> options={"frac_random": 0.25},
>>> )
"""
options = options or {}
frac_random: float = options.get("frac_random", 0.1)
if not 0 < frac_random < 1:
raise ValueError(
f"frac_random must take on values in (0,1). Value: {frac_random}"
)
q_aug = acq_function.get_augmented_q_batch_size(q=q)
# TODO: Avoid unnecessary computation by not generating all candidates
ics = gen_batch_initial_conditions(
acq_function=acq_function,
bounds=bounds,
q=q_aug,
num_restarts=num_restarts,
raw_samples=raw_samples,
fixed_features=fixed_features,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
# compute maximizer of the value function
value_function = _get_value_function(
model=acq_function.model,
objective=acq_function.objective,
posterior_transform=acq_function.posterior_transform,
sampler=acq_function.inner_sampler,
project=getattr(acq_function, "project", None),
)
from botorch.optim.optimize import optimize_acqf
fantasy_cands, fantasy_vals = optimize_acqf(
acq_function=value_function,
bounds=bounds,
q=1,
num_restarts=options.get("num_inner_restarts", 20),
raw_samples=options.get("raw_inner_samples", 1024),
fixed_features=fixed_features,
return_best_only=False,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
# sampling from the optimizers
n_value = int((1 - frac_random) * (q_aug - q)) # number of non-random ICs
eta = options.get("eta", 2.0)
weights = torch.exp(eta * standardize(fantasy_vals))
idx = torch.multinomial(weights, num_restarts * n_value, replacement=True)
# set the respective initial conditions to the sampled optimizers
ics[..., -n_value:, :] = fantasy_cands[idx, 0].view(num_restarts, n_value, -1)
return ics
def gen_value_function_initial_conditions(
acq_function: AcquisitionFunction,
bounds: Tensor,
num_restarts: int,
raw_samples: int,
current_model: Model,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[Dict[str, Union[bool, float, int]]] = None,
) -> Tensor:
r"""Generate a batch of smart initializations for optimizing
the value function of qKnowledgeGradient.
This function generates initial conditions for optimizing the inner problem of
KG, i.e. its value function, using the maximizer of the posterior objective.
Intuitively, the maximizer of the fantasized posterior will often be close to a
maximizer of the current posterior. This function uses that fact to generate the
initial conditions for the fantasy points. Specifically, a fraction of `1 -
frac_random` (see options) of raw samples is generated by sampling from the set of
maximizers of the posterior objective (obtained via random restart optimization)
according to a softmax transformation of their respective values. This means that
this initialization strategy internally solves an acquisition function
maximization problem. The remaining raw samples are generated using
`draw_sobol_samples`. All raw samples are then evaluated, and the initial
conditions are selected according to the standard initialization strategy in
'initialize_q_batch' individually for each inner problem.
Args:
acq_function: The value function instance to be optimized.
bounds: A `2 x d` tensor of lower and upper bounds for each column of
task features.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of raw samples to consider in the initialization
heuristic.
current_model: The model of the KG acquisition function that was used to
generate the fantasy model of the value function.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
options: Options for initial condition generation. These contain all
settings for the standard heuristic initialization from
`gen_batch_initial_conditions`. In addition, they contain
`frac_random` (the fraction of fully random fantasy points),
`num_inner_restarts` and `raw_inner_samples` (the number of random
restarts and raw samples for solving the posterior objective
maximization problem, respectively) and `eta` (temperature parameter
for sampling heuristic from posterior objective maximizers).
Returns:
A `num_restarts x batch_shape x q x d` tensor that can be used as initial
conditions for `optimize_acqf()`. Here `batch_shape` is the batch shape
of the value function model.
Example:
>>> fant_X = torch.rand(5, 1, 2)
>>> fantasy_model = model.fantasize(fant_X, SobolQMCNormalSampler(16))
>>> value_function = PosteriorMean(fantasy_model)
>>> bounds = torch.tensor([[0., 0.], [1., 1.]])
>>> Xinit = gen_value_function_initial_conditions(
>>> value_function, bounds, num_restarts=10, raw_samples=512,
>>> options={"frac_random": 0.25},
>>> )
"""
options = options or {}
seed: Optional[int] = options.get("seed")
frac_random: float = options.get("frac_random", 0.6)
if not 0 < frac_random < 1:
raise ValueError(
f"frac_random must take on values in (0,1). Value: {frac_random}"
)
# compute maximizer of the current value function
value_function = _get_value_function(
model=current_model,
objective=getattr(acq_function, "objective", None),
posterior_transform=acq_function.posterior_transform,
sampler=getattr(acq_function, "sampler", None),
project=getattr(acq_function, "project", None),
)
from botorch.optim.optimize import optimize_acqf
fantasy_cands, fantasy_vals = optimize_acqf(
acq_function=value_function,
bounds=bounds,
q=1,
num_restarts=options.get("num_inner_restarts", 20),
raw_samples=options.get("raw_inner_samples", 1024),
fixed_features=fixed_features,
return_best_only=False,
options={
k: v
for k, v in options.items()
if k
not in ("frac_random", "num_inner_restarts", "raw_inner_samples", "eta")
},
)
batch_shape = acq_function.model.batch_shape
# sampling from the optimizers
n_value = int((1 - frac_random) * raw_samples) # number of non-random ICs
if n_value > 0:
eta = options.get("eta", 2.0)
weights = torch.exp(eta * standardize(fantasy_vals))
idx = batched_multinomial(
weights=weights.expand(*batch_shape, -1),
num_samples=n_value,
replacement=True,
).permute(-1, *range(len(batch_shape)))
resampled = fantasy_cands[idx]
else:
resampled = torch.empty(
0,
*batch_shape,
1,
bounds.shape[-1],
dtype=fantasy_cands.dtype,
device=fantasy_cands.device,
)
# add qMC samples
randomized = draw_sobol_samples(
bounds=bounds, n=raw_samples - n_value, q=1, batch_shape=batch_shape, seed=seed
).to(resampled)
# full set of raw samples
X_rnd = torch.cat([resampled, randomized], dim=0)
X_rnd = fix_features(X_rnd, fixed_features=fixed_features)
# evaluate the raw samples
with torch.no_grad():
Y_rnd = acq_function(X_rnd)
# select the restart points using the heuristic
return initialize_q_batch(
X=X_rnd, Y=Y_rnd, n=num_restarts, eta=options.get("eta", 2.0)
)
def initialize_q_batch(X: Tensor, Y: Tensor, n: int, eta: float = 1.0) -> Tensor:
r"""Heuristic for selecting initial conditions for candidate generation.
This heuristic selects points from `X` (without replacement) with probability
proportional to `exp(eta * Z)`, where `Z = (Y - mean(Y)) / std(Y)` and `eta`
is a temperature parameter.
When using an acquisition function that is non-negative and possibly zero
over large areas of the feature space (e.g. qEI), you should use
`initialize_q_batch_nonneg` instead.
Args:
X: A `b x batch_shape x q x d` tensor of `b` - `batch_shape` samples of
`q`-batches from a `d`-dim feature space. Typically, these are generated
using qMC sampling.
Y: A tensor of `b x batch_shape` outcomes associated with the samples.
Typically, this is the value of the batch acquisition function to be
maximized.
n: The number of initial conditions to be generated. Must be less than `b`.
eta: Temperature parameter for weighting samples.
Returns:
A `n x batch_shape x q x d` tensor of `n` - `batch_shape` `q`-batch initial
conditions, where each batch of `n x q x d` samples is selected independently.
Example:
>>> # To get `n=10` starting points of q-batch size `q=3`
>>> # for model with `d=6`:
>>> qUCB = qUpperConfidenceBound(model, beta=0.1)
>>> Xrnd = torch.rand(500, 3, 6)
>>> Xinit = initialize_q_batch(Xrnd, qUCB(Xrnd), 10)
"""
n_samples = X.shape[0]
batch_shape = X.shape[1:-2] or torch.Size()
if n > n_samples:
raise RuntimeError(
f"n ({n}) cannot be larger than the number of "
f"provided samples ({n_samples})"
)
elif n == n_samples:
return X
Ystd = Y.std(dim=0)
if torch.any(Ystd == 0):
warnings.warn(
"All acquisition values for raw samples points are the same for "
"at least one batch. Choosing initial conditions at random.",
BadInitialCandidatesWarning,
)
return X[torch.randperm(n=n_samples, device=X.device)][:n]
max_val, max_idx = torch.max(Y, dim=0)
Z = (Y - Y.mean(dim=0)) / Ystd
etaZ = eta * Z
weights = torch.exp(etaZ)
while torch.isinf(weights).any():
etaZ *= 0.5
weights = torch.exp(etaZ)
if batch_shape == torch.Size():
idcs = torch.multinomial(weights, n)
else:
idcs = batched_multinomial(
weights=weights.permute(*range(1, len(batch_shape) + 1), 0), num_samples=n
).permute(-1, *range(len(batch_shape)))
# make sure we get the maximum
if max_idx not in idcs:
idcs[-1] = max_idx
if batch_shape == torch.Size():
return X[idcs]
else:
return X.gather(
dim=0, index=idcs.view(*idcs.shape, 1, 1).expand(n, *X.shape[1:])
)
def initialize_q_batch_nonneg(
X: Tensor, Y: Tensor, n: int, eta: float = 1.0, alpha: float = 1e-4
) -> Tensor:
r"""Heuristic for selecting initial conditions for non-neg. acquisition functions.
This function is similar to `initialize_q_batch`, but designed specifically
for acquisition functions that are non-negative and possibly zero over
large areas of the feature space (e.g. qEI). All samples for which
`Y < alpha * max(Y)` will be ignored (assuming that `Y` contains at least
one positive value).
Args:
X: A `b x q x d` tensor of `b` samples of `q`-batches from a `d`-dim.
feature space. Typically, these are generated using qMC.
Y: A tensor of `b` outcomes associated with the samples. Typically, this
is the value of the batch acquisition function to be maximized.
n: The number of initial conditions to be generated. Must be less than `b`.
eta: Temperature parameter for weighting samples.
alpha: The threshold (as a fraction of the maximum observed value) under
which to ignore samples. All input samples for which
`Y < alpha * max(Y)` will be ignored.
Returns:
A `n x q x d` tensor of `n` `q`-batch initial conditions.
Example:
>>> # To get `n=10` starting points of q-batch size `q=3`
>>> # for model with `d=6`:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> Xrnd = torch.rand(500, 3, 6)
>>> Xinit = initialize_q_batch_nonneg(Xrnd, qEI(Xrnd), 10)
"""
n_samples = X.shape[0]
if n > n_samples:
raise RuntimeError("n cannot be larger than the number of provided samples")
elif n == n_samples:
return X
max_val, max_idx = torch.max(Y, dim=0)
if torch.any(max_val <= 0):
warnings.warn(
"All acquisition values for raw sampled points are nonpositive, so "
"initial conditions are being selected randomly.",
BadInitialCandidatesWarning,
)
return X[torch.randperm(n=n_samples, device=X.device)][:n]
# make sure there are at least `n` points with positive acquisition values
pos = Y > 0
num_pos = pos.sum().item()
if num_pos < n:
# select all positive points and then fill remaining quota with randomly
# selected points
remaining_indices = (~pos).nonzero(as_tuple=False).view(-1)
rand_indices = torch.randperm(remaining_indices.shape[0], device=Y.device)
sampled_remaining_indices = remaining_indices[rand_indices[: n - num_pos]]
pos[sampled_remaining_indices] = 1
return X[pos]
# select points within alpha of max_val, iteratively decreasing alpha by a
# factor of 10 as necessary
alpha_pos = Y >= alpha * max_val
while alpha_pos.sum() < n:
alpha = 0.1 * alpha
alpha_pos = Y >= alpha * max_val
alpha_pos_idcs = torch.arange(len(Y), device=Y.device)[alpha_pos]
weights = torch.exp(eta * (Y[alpha_pos] / max_val - 1))
idcs = alpha_pos_idcs[torch.multinomial(weights, n)]
if max_idx not in idcs:
idcs[-1] = max_idx
return X[idcs]
def sample_points_around_best(
acq_function: AcquisitionFunction,
n_discrete_points: int,
sigma: float,
bounds: Tensor,
best_pct: float = 5.0,
subset_sigma: float = 1e-1,
prob_perturb: Optional[float] = None,
) -> Optional[Tensor]:
r"""Find best points and sample nearby points.
Args:
acq_function: The acquisition function.
n_discrete_points: The number of points to sample.
sigma: The standard deviation of the additive gaussian noise for
perturbing the best points.
bounds: A `2 x d`-dim tensor containing the bounds.
best_pct: The percentage of best points to perturb.
subset_sigma: The standard deviation of the additive gaussian
noise for perturbing a subset of dimensions of the best points.
prob_perturb: The probability of perturbing each dimension.
Returns:
An optional `n_discrete_points x d`-dim tensor containing the
sampled points. This is None if no baseline points are found.
"""
X = get_X_baseline(acq_function=acq_function)
if X is None:
return
with torch.no_grad():
try:
posterior = acq_function.model.posterior(X)
except AttributeError:
warnings.warn(
"Failed to sample around previous best points.",
BotorchWarning,
)
return
mean = posterior.mean
while mean.ndim > 2:
# take average over batch dims
mean = mean.mean(dim=0)
try:
f_pred = acq_function.objective(mean)
# Some acquisition functions do not have an objective
# and for some acquisition functions the objective is None
except (AttributeError, TypeError):
f_pred = mean
if hasattr(acq_function, "maximize"):
# make sure that the optimization direction is set properly
if not acq_function.maximize:
f_pred = -f_pred
try:
# handle constraints for EHVI-based acquisition functions
constraints = acq_function.constraints
if constraints is not None:
neg_violation = -torch.stack(
[c(mean).clamp_min(0.0) for c in constraints], dim=-1
).sum(dim=-1)
feas = neg_violation == 0
if feas.any():
f_pred[~feas] = float("-inf")
else:
# set objective equal to negative violation
f_pred = neg_violation
except AttributeError:
pass
if f_pred.ndim == mean.ndim and f_pred.shape[-1] > 1:
# multi-objective
# find pareto set
is_pareto = is_non_dominated(f_pred)
best_X = X[is_pareto]
else:
if f_pred.shape[-1] == 1:
f_pred = f_pred.squeeze(-1)
n_best = max(1, round(X.shape[0] * best_pct / 100))
# the view() is to ensure that best_idcs is not a scalar tensor
best_idcs = torch.topk(f_pred, n_best).indices.view(-1)
best_X = X[best_idcs]
use_perturbed_sampling = best_X.shape[-1] >= 20 or prob_perturb is not None
n_trunc_normal_points = (
n_discrete_points // 2 if use_perturbed_sampling else n_discrete_points
)
perturbed_X = sample_truncated_normal_perturbations(
X=best_X,
n_discrete_points=n_trunc_normal_points,
sigma=sigma,
bounds=bounds,
)
if use_perturbed_sampling:
perturbed_subset_dims_X = sample_perturbed_subset_dims(
X=best_X,
bounds=bounds,
# ensure that we return n_discrete_points
n_discrete_points=n_discrete_points - n_trunc_normal_points,
sigma=sigma,
prob_perturb=prob_perturb,
)
perturbed_X = torch.cat([perturbed_X, perturbed_subset_dims_X], dim=0)
# shuffle points
perm = torch.randperm(perturbed_X.shape[0], device=X.device)
perturbed_X = perturbed_X[perm]
return perturbed_X
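# Hedged usage sketch (illustrative; assumes a fitted `model` with training
# data that `get_X_baseline` can recover from the acquisition function):
#
# >>> qEI = qExpectedImprovement(model, best_f=0.2)
# >>> bounds = torch.stack([torch.zeros(6), torch.ones(6)])
# >>> X_near_best = sample_points_around_best(
# >>>     acq_function=qEI, n_discrete_points=100, sigma=1e-3, bounds=bounds
# >>> )
# >>> X_near_best.shape  # None is returned if no baseline points are found
# torch.Size([100, 6])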
def sample_truncated_normal_perturbations(
X: Tensor,
n_discrete_points: int,
sigma: float,
bounds: Tensor,
qmc: bool = True,
) -> Tensor:
r"""Sample points around `X`.
Sample perturbed points around `X` such that the added perturbations
are sampled from N(0, sigma^2 I) and truncated to be within [0,1]^d.
Args:
X: A `n x d`-dim tensor of starting points.
n_discrete_points: The number of points to sample.
sigma: The standard deviation of the additive gaussian noise for
perturbing the points.
bounds: A `2 x d`-dim tensor containing the bounds.
qmc: A boolean indicating whether to use qmc.
Returns:
A `n_discrete_points x d`-dim tensor containing the sampled points.
"""
X = normalize(X, bounds=bounds)
d = X.shape[1]
# sample points from N(X_center, sigma^2 I), truncated to be within
# [0, 1]^d.
if X.shape[0] > 1:
rand_indices = torch.randint(X.shape[0], (n_discrete_points,), device=X.device)
X = X[rand_indices]
if qmc:
std_bounds = torch.zeros(2, d, dtype=X.dtype, device=X.device)
std_bounds[1] = 1
u = draw_sobol_samples(bounds=std_bounds, n=n_discrete_points, q=1).squeeze(1)
else:
u = torch.rand((n_discrete_points, d), dtype=X.dtype, device=X.device)
# compute bounds to sample from
a = -X
b = 1 - X
# compute z-score of bounds
alpha = a / sigma
beta = b / sigma
normal = Normal(0, 1)
cdf_alpha = normal.cdf(alpha)
# use inverse transform
perturbation = normal.icdf(cdf_alpha + u * (normal.cdf(beta) - cdf_alpha)) * sigma
# add perturbation and clip points that are still outside
perturbed_X = (X + perturbation).clamp(0.0, 1.0)
return unnormalize(perturbed_X, bounds=bounds)
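# Hedged example (illustrative only; outputs are random but always lie within
# the provided bounds):
#
# >>> X = torch.rand(5, 3)
# >>> bounds = torch.stack([torch.zeros(3), torch.ones(3)])
# >>> pert = sample_truncated_normal_perturbations(
# >>>     X=X, n_discrete_points=20, sigma=0.05, bounds=bounds
# >>> )
# >>> pert.shape
# torch.Size([20, 3])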
def sample_perturbed_subset_dims(
X: Tensor,
bounds: Tensor,
n_discrete_points: int,
sigma: float = 1e-1,
qmc: bool = True,
prob_perturb: Optional[float] = None,
) -> Tensor:
r"""Sample around `X` by perturbing a subset of the dimensions.
By default, dimensions are perturbed with probability equal to
`min(20 / d, 1)`. As shown in [Regis]_, perturbing a small number
of dimensions can be beneficial. The perturbations are sampled
from N(0, sigma^2 I) and truncated to be within [0,1]^d.
Args:
X: A `n x d`-dim tensor of starting points. `X`
must be normalized to be within `[0, 1]^d`.
bounds: The bounds to sample perturbed values from.
n_discrete_points: The number of points to sample.
sigma: The standard deviation of the additive gaussian noise for
perturbing the points.
qmc: A boolean indicating whether to use qmc.
prob_perturb: The probability of perturbing each dimension. If omitted,
defaults to `min(20 / d, 1)`.
Returns:
A `n_discrete_points x d`-dim tensor containing the sampled points.
"""
if bounds.ndim != 2:
raise BotorchTensorDimensionError("bounds must be a `2 x d`-dim tensor.")
elif X.ndim != 2:
raise BotorchTensorDimensionError("X must be a `n x d`-dim tensor.")
d = bounds.shape[-1]
if prob_perturb is None:
# Only perturb a subset of the features
prob_perturb = min(20.0 / d, 1.0)
if X.shape[0] == 1:
X_cand = X.repeat(n_discrete_points, 1)
else:
rand_indices = torch.randint(X.shape[0], (n_discrete_points,), device=X.device)
X_cand = X[rand_indices]
pert = sample_truncated_normal_perturbations(
X=X_cand,
n_discrete_points=n_discrete_points,
sigma=sigma,
bounds=bounds,
qmc=qmc,
)
# find cases where we are not perturbing any dimensions
mask = (
torch.rand(
n_discrete_points,
d,
dtype=bounds.dtype,
device=bounds.device,
)
<= prob_perturb
)
ind = (~mask).all(dim=-1).nonzero()
# perturb `n_perturb` of the dimensions
n_perturb = ceil(d * prob_perturb)
perturb_mask = torch.zeros(d, dtype=mask.dtype, device=mask.device)
perturb_mask[:n_perturb].fill_(1)
# TODO: use batched `torch.randperm` when available:
# https://github.com/pytorch/pytorch/issues/42502
for idx in ind:
mask[idx] = perturb_mask[torch.randperm(d, device=bounds.device)]
# Create candidate points
X_cand[mask] = pert[mask]
return X_cand
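# Hedged example (illustrative only): with `d=30` and the default
# `prob_perturb = min(20 / d, 1)`, each sampled point differs from its base
# point in roughly 20 of the 30 dimensions.
#
# >>> X = torch.rand(5, 30)  # must already lie in [0, 1]^d
# >>> bounds = torch.stack([torch.zeros(30), torch.ones(30)])
# >>> pert = sample_perturbed_subset_dims(
# >>>     X=X, bounds=bounds, n_discrete_points=50, sigma=0.1
# >>> )
# >>> pert.shape
# torch.Size([50, 30])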
def is_nonnegative(acq_function: AcquisitionFunction) -> bool:
r"""Determine whether a given acquisition function is non-negative.
Args:
acq_function: The `AcquisitionFunction` instance.
Returns:
True if `acq_function` is non-negative, False if not, or if the behavior
is unknown (for custom acquisition functions).
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.1)
>>> is_nonnegative(qEI) # returns True
"""
return isinstance(
acq_function,
(
analytic.ExpectedImprovement,
analytic.ConstrainedExpectedImprovement,
analytic.ProbabilityOfImprovement,
analytic.NoisyExpectedImprovement,
monte_carlo.qExpectedImprovement,
monte_carlo.qNoisyExpectedImprovement,
monte_carlo.qProbabilityOfImprovement,
multi_objective.analytic.ExpectedHypervolumeImprovement,
multi_objective.monte_carlo.qExpectedHypervolumeImprovement,
multi_objective.monte_carlo.qNoisyExpectedHypervolumeImprovement,
),
)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, Optional, Tuple, Union
import torch
from botorch.acquisition import AcquisitionFunction
from botorch.optim.homotopy import Homotopy
from botorch.optim.optimize import optimize_acqf
from torch import Tensor
def prune_candidates(
candidates: Tensor, acq_values: Tensor, prune_tolerance: float
) -> Tensor:
r"""Prune candidates based on their distance to other candidates.
Args:
candidates: An `n x d` tensor of candidates.
acq_values: An `n`-dim tensor of acquisition values for the candidates.
prune_tolerance: The minimum distance to all retained candidates that a
candidate must have in order to be kept.
Returns:
An `m x d` tensor of pruned candidates.
"""
if candidates.ndim != 2:
raise ValueError("`candidates` must be of size `n x d`.")
if acq_values.ndim != 1 or len(acq_values) != candidates.shape[0]:
raise ValueError("`acq_values` must be of size `n`.")
if prune_tolerance < 0:
raise ValueError("`prune_tolerance` must be >= 0.")
sorted_inds = acq_values.argsort(descending=True)
candidates = candidates[sorted_inds]
candidates_new = candidates[:1, :]
for i in range(1, candidates.shape[0]):
if (
torch.cdist(candidates[i : i + 1, :], candidates_new).min()
> prune_tolerance
):
candidates_new = torch.cat(
[candidates_new, candidates[i : i + 1, :]], dim=-2
)
return candidates_new
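# Hedged example (illustrative only): the second candidate is within
# `prune_tolerance` of the best candidate and is therefore dropped.
#
# >>> candidates = torch.tensor([[0.0, 0.0], [5e-5, 0.0], [1.0, 1.0]])
# >>> acq_values = torch.tensor([1.0, 0.5, 0.8])
# >>> prune_candidates(candidates, acq_values, prune_tolerance=1e-4)
# tensor([[0., 0.],
#         [1., 1.]])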
def optimize_acqf_homotopy(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
homotopy: Homotopy,
num_restarts: int,
raw_samples: Optional[int] = None,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
final_options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
batch_initial_conditions: Optional[Tensor] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
prune_tolerance: float = 1e-4,
) -> Tuple[Tensor, Tensor]:
r"""Generate a set of candidates via multi-start optimization.
Args:
acq_function: An AcquisitionFunction.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
q: The number of candidates.
homotopy: Homotopy object that will make the necessary modifications to the
problem when calling `step()`.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
options: Options for candidate generation.
final_options: Options for candidate generation in the last homotopy step.
batch_initial_conditions: A tensor to specify the initial conditions. Set
this if you do not want to use default initialization strategy.
post_processing_func: Post processing function (such as rounding or clamping)
that is applied before choosing the final candidate.
Returns:
A two-element tuple containing the `q x d`-dim tensor of generated candidates
and the associated `q`-dim tensor of acquisition values.
"""
candidate_list, acq_value_list = [], []
if q > 1:
base_X_pending = acq_function.X_pending
for _ in range(q):
candidates = batch_initial_conditions
homotopy.restart()
while not homotopy.should_stop:
candidates, acq_values = optimize_acqf(
q=1,
acq_function=acq_function,
bounds=bounds,
num_restarts=num_restarts,
batch_initial_conditions=candidates,
raw_samples=raw_samples,
fixed_features=fixed_features,
return_best_only=False,
options=options,
)
homotopy.step()
# Prune candidates
candidates = prune_candidates(
candidates=candidates.squeeze(1),
acq_values=acq_values,
prune_tolerance=prune_tolerance,
).unsqueeze(1)
# Optimize one more time with the final options
candidates, acq_values = optimize_acqf(
q=1,
acq_function=acq_function,
bounds=bounds,
num_restarts=num_restarts,
batch_initial_conditions=candidates,
return_best_only=False,
options=final_options,
)
# Post-process the candidates and grab the best candidate
if post_processing_func is not None:
candidates = post_processing_func(candidates)
acq_values = acq_function(candidates)
best = torch.argmax(acq_values.view(-1), dim=0)
candidate, acq_value = candidates[best], acq_values[best]
# Keep the new candidate and update the pending points
candidate_list.append(candidate)
acq_value_list.append(acq_value)
selected_candidates = torch.cat(candidate_list, dim=-2)
if q > 1:
acq_function.set_X_pending(
torch.cat([base_X_pending, selected_candidates], dim=-2)
if base_X_pending is not None
else selected_candidates
)
if q > 1: # Reset acq_function to previous X_pending state
acq_function.set_X_pending(base_X_pending)
homotopy.reset() # Reset the homotopy parameters
return selected_candidates, torch.stack(acq_value_list)
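# Hedged usage sketch (illustrative; `acq_function` is an already-constructed
# acquisition function and `homotopy` a `botorch.optim.homotopy.Homotopy`
# object describing the parameters to anneal between calls to `step()`):
#
# >>> bounds = torch.stack([torch.zeros(2), torch.ones(2)])
# >>> candidates, acq_values = optimize_acqf_homotopy(
# >>>     acq_function=acq_function,
# >>>     bounds=bounds,
# >>>     q=1,
# >>>     homotopy=homotopy,
# >>>     num_restarts=10,
# >>>     raw_samples=256,
# >>> )
# >>> candidates.shape
# torch.Size([1, 2])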
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utility functions for constrained optimization.
"""
from __future__ import annotations
from functools import partial
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError
from scipy.optimize import Bounds
from torch import Tensor
ScipyConstraintDict = Dict[
str, Union[str, Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]]
]
NLC_TOL = -1e-6
def make_scipy_bounds(
X: Tensor,
lower_bounds: Optional[Union[float, Tensor]] = None,
upper_bounds: Optional[Union[float, Tensor]] = None,
) -> Optional[Bounds]:
r"""Creates a scipy Bounds object for optimziation
Args:
X: `... x d` tensor
lower_bounds: Lower bounds on each column (last dimension) of `X`. If
this is a single float, then all columns have the same bound.
upper_bounds: Upper bounds on each column (last dimension) of `X`. If
this is a single float, then all columns have the same bound.
Returns:
A scipy `Bounds` object if either lower_bounds or upper_bounds is not
None, and None otherwise.
Example:
>>> X = torch.rand(5, 2)
>>> scipy_bounds = make_scipy_bounds(X, 0.1, 0.8)
"""
if lower_bounds is None and upper_bounds is None:
return None
def _expand(bounds: Union[float, Tensor], X: Tensor, lower: bool) -> Tensor:
if bounds is None:
ebounds = torch.full_like(X, float("-inf" if lower else "inf"))
else:
if not torch.is_tensor(bounds):
bounds = torch.tensor(bounds)
ebounds = bounds.expand_as(X)
return _arrayify(ebounds).flatten()
lb = _expand(bounds=lower_bounds, X=X, lower=True)
ub = _expand(bounds=upper_bounds, X=X, lower=False)
return Bounds(lb=lb, ub=ub, keep_feasible=True)
def make_scipy_linear_constraints(
shapeX: torch.Size,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
) -> List[ScipyConstraintDict]:
r"""Generate scipy constraints from torch representation.
Args:
shapeX: The shape of the torch.Tensor to optimize over (i.e. `(b) x q x d`)
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`, where
`indices` is a single-dimensional index tensor (long dtype) containing
indices into the last dimension of `X`, `coefficients` is a
single-dimensional tensor of coefficients of the same length, and
rhs is a scalar.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) == rhs` (with `indices`
and `coefficients` of the same form as in `inequality_constraints`).
Returns:
A list of dictionaries containing callables for constraint function
values and Jacobians and a string indicating the associated constraint
type ("eq", "ineq"), as expected by `scipy.minimize`.
This function assumes that constraints are the same for each input batch,
and broadcasts the constraints accordingly to the input batch shape. This
function does support constraints across elements of a q-batch if the
indices are a 2-d Tensor.
Example:
The following will enforce that `x[1] + 0.5 x[3] >= -0.1` for each `x`
in both elements of the q-batch, and each of the 3 t-batches:
>>> constraints = make_scipy_linear_constraints(
>>> torch.Size([3, 2, 4]),
>>> [(torch.tensor([1, 3]), torch.tensor([1.0, 0.5]), -0.1)],
>>> )
The following will enforce that `x[0, 1] + 0.5 x[1, 3] >= -0.1` where
x[0, :] is the first element of the q-batch and x[1, :] is the second
element of the q-batch, for each of the 3 t-batches:
>>> constraints = make_scipy_linear_constraints(
>>> torch.Size([3, 2, 4]),
>>> [(torch.tensor([[0, 1], [1, 3]]), torch.tensor([1.0, 0.5]), -0.1)],
>>> )
"""
constraints = []
if inequality_constraints is not None:
for indcs, coeffs, rhs in inequality_constraints:
constraints += _make_linear_constraints(
indices=indcs, coefficients=coeffs, rhs=rhs, shapeX=shapeX, eq=False
)
if equality_constraints is not None:
for indcs, coeffs, rhs in equality_constraints:
constraints += _make_linear_constraints(
indices=indcs, coefficients=coeffs, rhs=rhs, shapeX=shapeX, eq=True
)
return constraints
def eval_lin_constraint(
x: np.ndarray, flat_idxr: List[int], coeffs: np.ndarray, rhs: float
) -> np.float64:
r"""Evaluate a single linear constraint.
Args:
x: The input array.
flat_idxr: The indices in `x` to consider.
coeffs: The coefficients corresponding to the indices.
rhs: The right-hand-side of the constraint.
Returns:
The evaluated constraint: `\sum_i (coeffs[i] * x[i]) - rhs`
"""
return np.sum(x[flat_idxr] * coeffs, -1) - rhs
def lin_constraint_jac(
x: np.ndarray, flat_idxr: List[int], coeffs: np.ndarray, n: int
) -> np.ndarray:
r"""Return the Jacobian associated with a linear constraint.
Args:
x: The input array.
flat_idxr: The indices for the elements of x that appear in the constraint.
coeffs: The coefficients corresponding to the indices.
n: The number of elements of `x` (i.e., the length of the Jacobian).
Returns:
The Jacobian.
"""
# TODO: Use sparse representation (not sure if scipy optim supports that)
jac = np.zeros(n)
jac[flat_idxr] = coeffs
return jac
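# Hedged example (illustrative only): evaluating the constraint
# `1.0 * x[1] + 0.5 * x[3] >= 2.0` and its Jacobian on a flattened input.
#
# >>> x = np.arange(6.0)
# >>> eval_lin_constraint(x, flat_idxr=[1, 3], coeffs=np.array([1.0, 0.5]), rhs=2.0)
# 0.5  # 1.0 * 1 + 0.5 * 3 - 2.0
# >>> lin_constraint_jac(x, flat_idxr=[1, 3], coeffs=np.array([1.0, 0.5]), n=6)
# array([0. , 1. , 0. , 0.5, 0. , 0. ])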
def _arrayify(X: Tensor) -> np.ndarray:
r"""Convert a torch.Tensor (any dtype or device) to a numpy (double) array.
Args:
X: The input tensor.
Returns:
A numpy array of double dtype with the same shape and data as `X`.
"""
return X.cpu().detach().contiguous().double().clone().numpy()
def _validate_linear_constraints_shape_input(shapeX: torch.Size) -> torch.Size:
"""
Validate `shapeX` input to `_make_linear_constraints`.
Check that it has either 2 or 3 dimensions, and add a scalar batch
dimension if it is only 2d.
"""
if len(shapeX) not in (2, 3):
raise UnsupportedError(
f"`shapeX` must be `(b) x q x d` (at least two-dimensional). It is "
f"{shapeX}."
)
if len(shapeX) == 2:
shapeX = torch.Size([1, *shapeX])
return shapeX
def _validate_linear_constraints_indices_input(indices: Tensor, q: int, d: int) -> None:
if indices.dim() > 2:
raise UnsupportedError(
"Linear constraints supported only on individual candidates and "
"across q-batches, not across general batch shapes."
)
elif indices.dim() == 2:
if indices[:, 0].max() > q - 1:
raise RuntimeError(f"Index out of bounds for {q}-batch")
if indices[:, 1].max() > d - 1:
raise RuntimeError(f"Index out of bounds for {d}-dim parameter tensor")
elif indices.dim() == 1:
if indices.max() > d - 1:
raise RuntimeError(f"Index out of bounds for {d}-dim parameter tensor")
else:
raise ValueError("`indices` must be at least one-dimensional")
def _make_linear_constraints(
indices: Tensor,
coefficients: Tensor,
rhs: float,
shapeX: torch.Size,
eq: bool = False,
) -> List[ScipyConstraintDict]:
r"""Create linear constraints to be used by `scipy.minimize`.
Encodes constraints of the form
`\sum_i (coefficients[i] * X[..., indices[i]]) ? rhs`
where `?` can be designated either as `>=` by setting `eq=False`, or as
`=` by setting `eq=True`.
If indices is one-dimensional, the constraints are broadcasted across
all elements of the q-batch. If indices is two-dimensional, then
constraints are applied across elements of a q-batch. In either case,
constraints are created for all t-batches.
Args:
indices: A tensor of shape `c` or `c x 2`, where c is the number of terms
in the constraint. If single-dimensional, contains the indices of
the dimensions of the feature space that occur in the linear
constraint. If two-dimensional, contains pairs of indices of the
q-batch (0) and the feature space (1) that occur in the linear
constraint.
coefficients: A single-dimensional tensor of coefficients with the same
number of elements as `indices`.
rhs: The right hand side of the constraint.
shapeX: The shape of the torch tensor to construct the constraints for
(i.e. `(b) x q x d`). Must have two or three dimensions.
eq: If True, return an equality constraint, o/w return an inequality
constraint (indicated by "eq" / "ineq" value of the `type` key).
Returns:
A list of constraint dictionaries with the following keys
- "type": Indicates the type of the constraint ("eq" if `eq=True`, "ineq" o/w)
- "fun": A callable evaluating the constraint value on `x`, a flattened
version of the input tensor `X`, returning a scalar.
- "jac": A callable evaluating the constraint's Jacobian on `x`, a flattened
version of the input tensor `X`, returning a numpy array.
>>> shapeX = torch.Size([3, 5, 4])
>>> constraints = _make_linear_constraints(
... indices=torch.tensor([1., 2.]),
... coefficients=torch.tensor([-0.5, 1.3]),
... rhs=0.49,
... shapeX=shapeX,
... eq=True
... )
>>> len(constraints)
15
>>> constraints[0].keys()
dict_keys(['type', 'fun', 'jac'])
>>> x = np.arange(60).reshape(shapeX)
>>> constraints[0]["fun"](x)
1.61 # 1 * -0.5 + 2 * 1.3 - 0.49
>>> constraints[0]["jac"](x)
[0., -0.5, 1.3, 0., 0., ...]
>>> constraints[1]["fun"](x) #
4.81
"""
shapeX = _validate_linear_constraints_shape_input(shapeX)
b, q, d = shapeX
_validate_linear_constraints_indices_input(indices, q, d)
n = shapeX.numel()
constraints: List[ScipyConstraintDict] = []
coeffs = _arrayify(coefficients)
ctype = "eq" if eq else "ineq"
offsets = [q * d, d]
if indices.dim() == 2:
# indices has two dimensions (potential constraints across q-batch elements)
# rule is [i, j, k] is at
# i * offsets[0] + j * offsets[1] + k
for i in range(b):
list_ind = (idx.tolist() for idx in indices)
idxr = [i * offsets[0] + idx[0] * offsets[1] + idx[1] for idx in list_ind]
fun = partial(
eval_lin_constraint, flat_idxr=idxr, coeffs=coeffs, rhs=float(rhs)
)
jac = partial(lin_constraint_jac, flat_idxr=idxr, coeffs=coeffs, n=n)
constraints.append({"type": ctype, "fun": fun, "jac": jac})
elif indices.dim() == 1:
# indices is one-dim - broadcast constraints across q-batches and t-batches
for i in range(b):
for j in range(q):
idxr = (i * offsets[0] + j * offsets[1] + indices).tolist()
fun = partial(
eval_lin_constraint, flat_idxr=idxr, coeffs=coeffs, rhs=float(rhs)
)
jac = partial(lin_constraint_jac, flat_idxr=idxr, coeffs=coeffs, n=n)
constraints.append({"type": ctype, "fun": fun, "jac": jac})
return constraints
def _generate_unfixed_nonlin_constraints(
constraints: Optional[List[Callable[[Tensor], Tensor]]],
fixed_features: Dict[int, float],
dimension: int,
) -> Optional[List[Callable[[Tensor], Tensor]]]:
"""Given a dictionary of fixed features, returns a list of callables for
nonlinear inequality constraints expecting only a tensor with the non-fixed
features as input.
"""
if not constraints:
return constraints
selector = []
idx_X, idx_f = 0, dimension - len(fixed_features)
for i in range(dimension):
if i in fixed_features.keys():
selector.append(idx_f)
idx_f += 1
else:
selector.append(idx_X)
idx_X += 1
values = torch.tensor(list(fixed_features.values()), dtype=torch.double)
def _wrap_nonlin_constraint(
constraint: Callable[[Tensor], Tensor]
) -> Callable[[Tensor], Tensor]:
def new_nonlin_constraint(X: Tensor) -> Tensor:
ivalues = values.to(X).expand(*X.shape[:-1], len(fixed_features))
X_perm = torch.cat([X, ivalues], dim=-1)
return constraint(X_perm[..., selector])
return new_nonlin_constraint
return [
_wrap_nonlin_constraint(constraint=constraint) for constraint in constraints
]
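# Hedged example (illustrative only): a nonlinear constraint defined on the
# full 3-dimensional space is wrapped so that it can be evaluated on a tensor
# containing only the two unfixed features (feature 1 is fixed to 0.5).
#
# >>> nlc = lambda X: X[..., 0] + X[..., 2] - 1.0
# >>> wrapped = _generate_unfixed_nonlin_constraints(
# >>>     constraints=[nlc], fixed_features={1: 0.5}, dimension=3
# >>> )
# >>> wrapped[0](torch.tensor([0.3, 0.9]))
# tensor(0.2000)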
def _generate_unfixed_lin_constraints(
constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
fixed_features: Dict[int, float],
dimension: int,
eq: bool,
) -> Optional[List[Tuple[Tensor, Tensor, float]]]:
# If constraints is None or an empty list, then return itself
if not constraints:
return constraints
# replace_index generates the new indices for the unfixed dimensions
# after eliminating the fixed dimensions.
# Example: dimension = 5, ff.keys() = [1, 3], replace_index = {0: 0, 2: 1, 4: 2}
unfixed_keys = sorted(set(range(dimension)) - set(fixed_features))
unfixed_keys = torch.tensor(unfixed_keys).to(constraints[0][0])
replace_index = torch.arange(dimension - len(fixed_features)).to(constraints[0][0])
new_constraints = []
# parse constraints one-by-one
for constraint_id, (indices, coefficients, rhs) in enumerate(constraints):
new_rhs = rhs
new_indices = []
new_coefficients = []
# the following unsqueeze is done to facilitate a simpler for-loop.
indices_2dim = indices if indices.ndim == 2 else indices.unsqueeze(-1)
for coefficient, index in zip(coefficients, indices_2dim):
ffval_or_None = fixed_features.get(index[-1].item())
# if ffval_or_None is None, then the index is not fixed
if ffval_or_None is None:
new_indices.append(index)
new_coefficients.append(coefficient)
# otherwise, we "remove" the constraints corresponding to that index
else:
new_rhs = new_rhs - coefficient.item() * ffval_or_None
# all indices were fixed, so the constraint is gone.
if len(new_indices) == 0:
if (eq and new_rhs != 0) or (not eq and new_rhs > 0):
prefix = "Eq" if eq else "Ineq"
raise CandidateGenerationError(
f"{prefix}uality constraint {constraint_id} not met "
"with fixed_features."
)
else:
# Note: new_indices is expressed with respect to the original (full) domain,
# so it must be remapped to the reduced domain using replace_index.
new_indices = torch.stack(new_indices, dim=0)
# generate new index location after the removal of fixed_features indices
new_indices_dim_d = new_indices[:, -1].unsqueeze(-1)
new_indices_dim_d = replace_index[
torch.nonzero(new_indices_dim_d == unfixed_keys, as_tuple=True)[1]
]
new_indices[:, -1] = new_indices_dim_d
# squeeze(-1) is a no-op if dim -1 is not singleton
new_indices.squeeze_(-1)
# convert new_coefficients to Tensor
new_coefficients = torch.stack(new_coefficients)
new_constraints.append((new_indices, new_coefficients, new_rhs))
return new_constraints
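# Illustrative usage sketch (not part of the module): fixing feature 2 of a
# 4-dim domain folds its contribution into the right-hand side and re-indexes
# the remaining coefficient onto the reduced 3-dim domain. Values are made up.
#
#   >>> new_cs = _generate_unfixed_lin_constraints(
#   ...     constraints=[(torch.tensor([0, 2]), torch.tensor([1.0, 2.0]), 1.0)],
#   ...     fixed_features={2: 0.25},
#   ...     dimension=4,
#   ...     eq=False,
#   ... )
#   >>> new_cs  # roughly [(tensor([0]), tensor([1.]), 0.5)]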
def _make_f_and_grad_nonlinear_inequality_constraints(
f_np_wrapper: Callable, nlc: Callable
) -> Tuple[Callable[[Tensor], Tensor], Callable[[Tensor], Tensor]]:
"""
Create callables for objective + grad for the nonlinear inequality constraints.
The Scipy interface requires specifying separate callables and we use caching to
avoid evaluating the same input twice. This caching only works if
the returned functions are evaluated on the same input in immediate
sequence (i.e., calling `f_obj(X_1)`, `f_grad(X_1)` will result in a
single forward pass, while `f_obj(X_1)`, `f_grad(X_2)`, `f_obj(X_1)`
will result in three forward passes).
"""
def f_obj_and_grad(x):
obj, grad = f_np_wrapper(x, f=nlc)
return obj, grad
cache = {"X": None, "obj": None, "grad": None}
def f_obj(X):
X_c = cache["X"]
if X_c is None or not np.array_equal(X_c, X):
cache["X"] = X.copy()
cache["obj"], cache["grad"] = f_obj_and_grad(X)
return cache["obj"]
def f_grad(X):
X_c = cache["X"]
if X_c is None or not np.array_equal(X_c, X):
cache["X"] = X.copy()
cache["obj"], cache["grad"] = f_obj_and_grad(X)
return cache["grad"]
return f_obj, f_grad
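# Illustrative caching sketch (not part of the module): `fake_np_wrapper` is a
# hypothetical stand-in for the real autograd-based wrapper; it just counts
# evaluations to show that `f_obj` and `f_grad` share a single forward pass
# when called back-to-back on the same input.
#
#   >>> calls = []
#   >>> def fake_np_wrapper(x, f):
#   ...     calls.append(1)
#   ...     return float(x.sum()), x * 0 + 1.0
#   >>> f_obj, f_grad = _make_f_and_grad_nonlinear_inequality_constraints(
#   ...     f_np_wrapper=fake_np_wrapper, nlc=lambda X: X.sum()
#   ... )
#   >>> x = np.ones(3)
#   >>> f_obj(x), f_grad(x)
#   (3.0, array([1., 1., 1.]))
#   >>> len(calls)  # only one forward pass thanks to caching
#   1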
def make_scipy_nonlinear_inequality_constraints(
nonlinear_inequality_constraints: List[Callable],
f_np_wrapper: Callable,
x0: Tensor,
) -> List[Dict]:
r"""Generate Scipy nonlinear inequality constraints from callables.
Args:
nonlinear_inequality_constraints: List of callables for the nonlinear
inequality constraints. Each callable represents a constraint of the
form >= 0 and takes a torch tensor of size (p x q x dim) and returns a
torch tensor of size (p x q).
f_np_wrapper: A wrapper function that given a constraint evaluates the value
and gradient (using autograd) of a numpy input and returns both the
objective and the gradient.
x0: The starting point for SLSQP. We return this starting point in (rare)
cases where SLSQP fails and thus require it to be feasible.
Returns:
A list of dictionaries containing callables for constraint function
values and Jacobians and a string indicating the associated constraint
type ("eq", "ineq"), as expected by `scipy.minimize`.
"""
if not isinstance(nonlinear_inequality_constraints, list):
raise ValueError(
"`nonlinear_inequality_constraints` must be a list of callables, "
f"got {type(nonlinear_inequality_constraints)}."
)
scipy_nonlinear_inequality_constraints = []
for nlc in nonlinear_inequality_constraints:
if _arrayify(nlc(x0)).item() < NLC_TOL:
raise ValueError(
"`batch_initial_conditions` must satisfy the non-linear inequality "
"constraints."
)
f_obj, f_grad = _make_f_and_grad_nonlinear_inequality_constraints(
f_np_wrapper=f_np_wrapper, nlc=nlc
)
scipy_nonlinear_inequality_constraints.append(
{
"type": "ineq",
"fun": f_obj,
"jac": f_grad,
}
)
return scipy_nonlinear_inequality_constraints
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Utilities for fitting and manipulating models."""
from __future__ import annotations
from re import Pattern
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Union,
)
from warnings import warn
import torch
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.gpytorch import GPyTorchModel
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.mlls.marginal_log_likelihood import MarginalLogLikelihood
from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, TensorDataset
class TorchAttr(NamedTuple):
shape: torch.Size
dtype: torch.dtype
device: torch.device
def _get_extra_mll_args(
mll: MarginalLogLikelihood,
) -> Union[List[Tensor], List[List[Tensor]]]:
r"""Obtain extra arguments for MarginalLogLikelihood objects.
Get extra arguments (beyond the model output and training targets) required
for the particular type of MarginalLogLikelihood for a forward pass.
Args:
mll: The MarginalLogLikelihood module.
Returns:
Extra arguments for the MarginalLogLikelihood.
Returns an empty list if the mll type is unknown.
"""
warn("`_get_extra_mll_args` is marked for deprecation.", DeprecationWarning)
if isinstance(mll, ExactMarginalLogLikelihood):
return list(mll.model.train_inputs)
elif isinstance(mll, SumMarginalLogLikelihood):
return [list(x) for x in mll.model.train_inputs]
return []
def get_data_loader(
model: GPyTorchModel, batch_size: int = 1024, **kwargs: Any
) -> DataLoader:
dataset = TensorDataset(*model.train_inputs, model.train_targets)
return DataLoader(
dataset=dataset, batch_size=min(batch_size, len(model.train_targets)), **kwargs
)
def get_parameters(
module: Module,
requires_grad: Optional[bool] = None,
name_filter: Optional[Callable[[str], bool]] = None,
) -> Dict[str, Tensor]:
r"""Helper method for obtaining a module's parameters and their respective ranges.
Args:
module: The target module from which parameters are to be extracted.
requires_grad: Optional Boolean used to filter parameters based on whether
or not their require_grad attribute matches the user provided value.
name_filter: Optional Boolean function used to filter parameters by name.
Returns:
A dictionary of parameters.
"""
parameters = {}
for name, param in module.named_parameters():
if requires_grad is not None and param.requires_grad != requires_grad:
continue
if name_filter and not name_filter(name):
continue
parameters[name] = param
return parameters
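# Illustrative usage sketch (not part of the module), using a plain
# `torch.nn.Linear` layer as a stand-in module:
#
#   >>> from torch.nn import Linear
#   >>> params = get_parameters(Linear(3, 1), requires_grad=True)
#   >>> sorted(params)
#   ['bias', 'weight']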
def get_parameters_and_bounds(
module: Module,
requires_grad: Optional[bool] = None,
name_filter: Optional[Callable[[str], bool]] = None,
default_bounds: Tuple[float, float] = (-float("inf"), float("inf")),
) -> Tuple[Dict[str, Tensor], Dict[str, Tuple[Optional[float], Optional[float]]]]:
r"""Helper method for obtaining a module's parameters and their respective ranges.
Args:
module: The target module from which parameters are to be extracted.
requires_grad: Optional Boolean used to filter parameters based on whether
or not their `requires_grad` attribute matches the user provided value.
name_filter: Optional Boolean function used to filter parameters by name.
default_bounds: Default lower and upper bounds for constrained parameters
with `None` typed bounds.
Returns:
A dictionary of parameters and a dictionary of parameter bounds.
"""
if hasattr(module, "named_parameters_and_constraints"):
bounds = {}
params = {}
for name, param, constraint in module.named_parameters_and_constraints():
if (requires_grad is None or (param.requires_grad == requires_grad)) and (
name_filter is None or name_filter(name)
):
params[name] = param
if constraint is None:
continue
bounds[name] = tuple(
default if bound is None else constraint.inverse_transform(bound)
for (bound, default) in zip(constraint, default_bounds)
)
return params, bounds
params = get_parameters(
module, requires_grad=requires_grad, name_filter=name_filter
)
return params, {}
def get_name_filter(
patterns: Iterator[Union[Pattern, str]]
) -> Callable[[Union[str, Tuple[str, Any, ...]]], bool]:
r"""Returns a binary function that filters strings (or iterables whose first
element is a string) according to a bank of excluded patterns. Typically used
in conjunction with generators such as `module.named_parameters()`.
Args:
patterns: A collection of regular expressions or strings that
define the set of names to be excluded.
Returns:
A binary function indicating whether or not an item should be filtered.
"""
names = set()
_patterns = set()
for pattern in patterns:
if isinstance(pattern, str):
names.add(pattern)
elif isinstance(pattern, Pattern):
_patterns.add(pattern)
else:
raise TypeError(
"Expected `patterns` to contain `str` or `re.Pattern` typed elements, "
f"but found {type(pattern)}."
)
def name_filter(item: Union[str, Tuple[str, Any, ...]]) -> bool:
name = item if isinstance(item, str) else next(iter(item))
if name in names:
return False
for pattern in _patterns:
if pattern.search(name):
return False
return True
return name_filter
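# Illustrative usage sketch (not part of the module); the parameter names below
# are hypothetical examples of GPyTorch-style parameter names:
#
#   >>> import re
#   >>> name_filter = get_name_filter(["raw_lengthscale", re.compile("noise")])
#   >>> name_filter("raw_lengthscale")
#   False
#   >>> name_filter(("likelihood.noise_covar.raw_noise", None))
#   False
#   >>> name_filter("mean_module.constant")
#   True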
def sample_all_priors(model: GPyTorchModel, max_retries: int = 100) -> None:
r"""Sample from hyperparameter priors (in-place).
Args:
model: A GPyTorchModel.
"""
for _, module, prior, closure, setting_closure in model.named_priors():
if setting_closure is None:
raise RuntimeError(
"Must provide inverse transform to be able to sample from prior."
)
for i in range(max_retries):
try:
setting_closure(module, prior.sample(closure(module).shape))
break
except NotImplementedError:
warn(
f"`rsample` not implemented for {type(prior)}. Skipping.",
BotorchWarning,
)
break
except RuntimeError as e:
if "out of bounds of its current constraints" in str(e):
if i == max_retries - 1:
raise RuntimeError(
"Failed to sample a feasible parameter value "
f"from the prior after {max_retries} attempts."
)
else:
raise e
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Utilities for interfacing Numpy and Torch."""
from __future__ import annotations
from itertools import tee
from typing import Callable, Dict, Iterator, Optional, Tuple, Union
import numpy as np
import torch
from botorch.utils.types import NoneType
from numpy import ndarray
from torch import Tensor
torch_to_numpy_dtype_dict = {
torch.bool: bool,
torch.uint8: np.uint8,
torch.int8: np.int8,
torch.int16: np.int16,
torch.int32: np.int32,
torch.int64: np.int64,
torch.float16: np.float16,
torch.float32: np.float32,
torch.float64: np.float64,
torch.complex64: np.complex64,
torch.complex128: np.complex128,
}
def as_ndarray(
values: Tensor, dtype: Optional[np.dtype] = None, inplace: bool = True
) -> ndarray:
r"""Helper for going from torch.Tensor to numpy.ndarray.
Args:
values: Tensor to be converted to ndarray.
dtype: Optional numpy.dtype for the converted tensor.
inplace: Boolean indicating whether memory should be shared if possible.
Returns:
An ndarray with the same data as `values`.
"""
with torch.no_grad():
out = values.cpu() # maybe transfer to cpu
# Determine whether or not to `clone`
if (
# cond 1: are we not in `inplace` mode?
not inplace
# cond 2: did we already copy when calling `cpu` above?
and out.device == values.device
# cond 3: will we copy when calling `astype` below?
and (dtype is None or out.dtype == torch_to_numpy_dtype_dict[dtype])
):
out = out.clone()
# Convert to ndarray and maybe cast to `dtype`
out = out.numpy()
return out.astype(dtype, copy=False)
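# Illustrative usage sketch (not part of the module):
#
#   >>> t = torch.arange(3, dtype=torch.float64)
#   >>> as_ndarray(t, dtype=np.float64)  # shares memory with `t` when possible
#   array([0., 1., 2.])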
def get_tensors_as_ndarray_1d(
tensors: Union[Iterator[Tensor], Dict[str, Tensor]],
out: Optional[ndarray] = None,
dtype: Optional[Union[np.dtype, str]] = None,
as_array: Callable[[Tensor], ndarray] = as_ndarray,
) -> ndarray:
# Create a pair of iterators, one for setup and one for data transfer
named_tensors_iter, named_tensors_iter2 = tee(
iter(tensors.items()) if isinstance(tensors, dict) else enumerate(tensors), 2
)
# Use `named_tensors_iter` to get size of `out` and `dtype` when None
try:
name, tnsr = next(named_tensors_iter)
except StopIteration:
raise RuntimeError(f"Argument `tensors` with type {type(tensors)} is empty.")
size = tnsr.numel() + sum(tnsr.numel() for _, tnsr in named_tensors_iter)
dtype = torch_to_numpy_dtype_dict[tnsr.dtype] if dtype is None else dtype
# Preallocate or validate `out`
if out is None: # use first tensor as a reference when `dtype` is None
out = np.empty([size], dtype=dtype)
elif out.ndim != 1:
raise ValueError(f"Expected a vector for `out`, but out.shape={out.shape}.")
elif out.size != size:
raise ValueError(
f"Size of `parameters` ({size}) does not match size of `out` ({out.size})."
)
# Use `named_tensors_iter2` to transfer data from `tensors` to `out`
index = 0
for name, tnsr in named_tensors_iter2:
try:
size = tnsr.numel()
out[index : index + size] = as_array(tnsr.view(-1))
index += size
except Exception as e:
raise RuntimeError(
"`get_tensors_as_ndarray_1d` failed while copying values from "
f"tensor {name}; rethrowing original exception."
) from e
return out
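# Illustrative usage sketch (not part of the module): tensors are flattened and
# concatenated in iteration order.
#
#   >>> get_tensors_as_ndarray_1d({"a": torch.zeros(2), "b": torch.ones(3)})
#   array([0., 0., 1., 1., 1.], dtype=float32)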
def set_tensors_from_ndarray_1d(
tensors: Union[Iterator[Tensor], Dict[str, Tensor]],
array: ndarray,
as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor,
) -> None:
r"""Sets the values of one more tensors based off of a vector of assignments."""
named_tensors_iter = (
iter(tensors.items()) if isinstance(tensors, dict) else enumerate(tensors)
)
with torch.no_grad():
index = 0
for name, tnsr in named_tensors_iter:
try:
size = tnsr.numel()
vals = array[index : index + size] if tnsr.ndim else array[index]
tnsr.copy_(as_tensor(vals).to(tnsr).view(tnsr.shape).to(tnsr))
index += size
except Exception as e:
raise RuntimeError(
"`set_tensors_from_ndarray_1d` failed while copying values to "
f"tensor {name}; rethrowing original exception."
) from e
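# Illustrative usage sketch (not part of the module): the inverse of
# `get_tensors_as_ndarray_1d`, copying a flat vector back into the tensors.
#
#   >>> tensors = {"a": torch.zeros(2), "b": torch.zeros(3)}
#   >>> set_tensors_from_ndarray_1d(tensors, np.arange(5, dtype=np.float32))
#   >>> tensors["b"]
#   tensor([2., 3., 4.])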
def get_bounds_as_ndarray(
parameters: Dict[str, Tensor],
bounds: Dict[
str, Tuple[Union[float, Tensor, NoneType], Union[float, Tensor, NoneType]]
],
) -> Optional[np.ndarray]:
r"""Helper method for converting bounds into an ndarray.
Args:
parameters: A dictionary of parameters.
bounds: A dictionary of (optional) lower and upper bounds.
Returns:
An ndarray of bounds.
"""
inf = float("inf")
full_size = sum(param.numel() for param in parameters.values())
out = np.full((full_size, 2), (-inf, inf))
index = 0
for name, param in parameters.items():
size = param.numel()
if name in bounds:
lower, upper = bounds[name]
lower = -inf if lower is None else lower
upper = inf if upper is None else upper
if isinstance(lower, Tensor):
lower = lower.cpu()
if isinstance(upper, Tensor):
upper = upper.cpu()
out[index : index + size, 0] = lower
out[index : index + size, 1] = upper
index = index + size
# If all bounds are +/- inf, return None.
if np.isinf(out).all():
out = None
return out
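# Illustrative usage sketch (not part of the module): parameters without an
# entry in `bounds` default to (-inf, inf).
#
#   >>> params = {"x": torch.zeros(2), "y": torch.zeros(1)}
#   >>> out = get_bounds_as_ndarray(params, {"x": (0.0, 1.0)})
#   >>> out.shape, out[2].tolist()
#   ((3, 2), [-inf, inf])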
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import time
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
from botorch.exceptions.errors import OptimizationTimeoutError
from scipy import optimize
def minimize_with_timeout(
fun: Callable[[np.ndarray, *Any], float],
x0: np.ndarray,
args: Tuple[Any, ...] = (),
method: Optional[str] = None,
jac: Optional[Union[str, Callable, bool]] = None,
hess: Optional[Union[str, Callable, optimize.HessianUpdateStrategy]] = None,
hessp: Optional[Callable] = None,
bounds: Optional[Union[Sequence[Tuple[float, float]], optimize.Bounds]] = None,
constraints=(), # Typing this properly is a s**t job
tol: Optional[float] = None,
callback: Optional[Callable] = None,
options: Optional[Dict[str, Any]] = None,
timeout_sec: Optional[float] = None,
) -> optimize.OptimizeResult:
r"""Wrapper around scipy.optimize.minimize to support timeout.
This method calls scipy.optimize.minimize with all arguments forwarded
verbatim. The only difference is that if provided a `timeout_sec` argument,
it will automatically stop the optimization after the timeout is reached.
Internally, this is achieved by automatically constructing a wrapper callback
method that is injected to the scipy.optimize.minimize call and that keeps
track of the runtime and the optimization variables at the current iteration.
"""
if timeout_sec:
start_time = time.monotonic()
callback_data = {"num_iterations": 0} # update from withing callback below
def timeout_callback(xk: np.ndarray) -> bool:
runtime = time.monotonic() - start_time
callback_data["num_iterations"] += 1
if runtime > timeout_sec:
raise OptimizationTimeoutError(current_x=xk, runtime=runtime)
return False
if callback is None:
wrapped_callback = timeout_callback
elif callable(method):
raise NotImplementedError(
"Custom callable not supported for `method` argument."
)
elif method == "trust-constr": # special signature
def wrapped_callback(
xk: np.ndarray, state: optimize.OptimizeResult
) -> bool:
# order here is important to make sure base callback gets executed
return callback(xk, state) or timeout_callback(xk=xk)
else:
def wrapped_callback(xk: np.ndarray) -> None:
timeout_callback(xk=xk)
callback(xk)
else:
wrapped_callback = callback
try:
return optimize.minimize(
fun=fun,
x0=x0,
args=args,
method=method,
jac=jac,
hess=hess,
hessp=hessp,
bounds=bounds,
constraints=constraints,
tol=tol,
callback=wrapped_callback,
options=options,
)
except OptimizationTimeoutError as e:
msg = f"Optimization timed out after {e.runtime} seconds."
current_fun, *_ = fun(e.current_x, *args)
return optimize.OptimizeResult(
fun=current_fun,
x=e.current_x,
nit=callback_data["num_iterations"],
success=False, # same as when maxiter is reached
status=1, # same as when L-BFGS-B reaches maxiter
message=msg,
)
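# Illustrative usage sketch (not part of the module): a toy quadratic that
# finishes well before the (generous) timeout.
#
#   >>> res = minimize_with_timeout(
#   ...     fun=lambda x: ((x - 2.0) ** 2).sum(),
#   ...     x0=np.zeros(2),
#   ...     method="L-BFGS-B",
#   ...     timeout_sec=10.0,
#   ... )
#   >>> np.round(res.x, 3)
#   array([2., 2.])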
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.optim.utils.acquisition_utils import (
columnwise_clamp,
fix_features,
get_X_baseline,
)
from botorch.optim.utils.common import (
_filter_kwargs,
_handle_numerical_errors,
_warning_handler_template,
)
from botorch.optim.utils.model_utils import (
_get_extra_mll_args,
get_data_loader,
get_name_filter,
get_parameters,
get_parameters_and_bounds,
sample_all_priors,
TorchAttr,
)
from botorch.optim.utils.numpy_utils import (
as_ndarray,
get_bounds_as_ndarray,
get_tensors_as_ndarray_1d,
set_tensors_from_ndarray_1d,
)
from botorch.optim.utils.timeout import minimize_with_timeout
__all__ = [
"_filter_kwargs",
"_get_extra_mll_args",
"_handle_numerical_errors",
"_warning_handler_template",
"as_ndarray",
"columnwise_clamp",
"fix_features",
"get_name_filter",
"get_bounds_as_ndarray",
"get_data_loader",
"get_parameters",
"get_parameters_and_bounds",
"get_tensors_as_ndarray_1d",
"get_X_baseline",
"minimize_with_timeout",
"sample_all_priors",
"set_tensors_from_ndarray_1d",
"TorchAttr",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Utilities for maximizing acquisition functions."""
from __future__ import annotations
from typing import Dict, Optional, Union
from warnings import warn
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.exceptions.errors import BotorchError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.gpytorch import ModelListGPyTorchModel
from torch import Tensor
def columnwise_clamp(
X: Tensor,
lower: Optional[Union[float, Tensor]] = None,
upper: Optional[Union[float, Tensor]] = None,
raise_on_violation: bool = False,
) -> Tensor:
r"""Clamp values of a Tensor in column-wise fashion (with support for t-batches).
This function is useful in conjunction with optimizers from the torch.optim
package, which don't natively handle constraints. If you apply this after
a gradient step you can be fancy and call it "projected gradient descent".
This function is also useful for post-processing candidates generated by the
scipy optimizer that satisfy bounds only up to numerical accuracy.
Args:
X: The `b x n x d` input tensor. If 2-dimensional, `b` is assumed to be 1.
lower: The column-wise lower bounds. If scalar, apply bound to all columns.
upper: The column-wise upper bounds. If scalar, apply bound to all columns.
raise_on_violation: If `True`, raise an exception when the elements in `X`
are out of the specified bounds (up to numerical accuracy). This is
useful for post-processing candidates generated by optimizers that
satisfy imposed bounds only up to numerical accuracy.
Returns:
The clamped tensor.
"""
if lower is None and upper is None:
return X
if lower is not None:
lower = torch.as_tensor(lower).expand_as(X).to(X)
if upper is not None:
upper = torch.as_tensor(upper).expand_as(X).to(X)
if lower is not None and (lower > upper).any():
raise ValueError("Lower bounds cannot exceed upper bounds.")
out = X.clamp(lower, upper)
if raise_on_violation and not X.allclose(out):
raise BotorchError("Original value(s) are out of bounds.")
return out
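# Illustrative usage sketch (not part of the module):
#
#   >>> X = torch.tensor([[-1.0, 0.5], [2.0, 0.25]])
#   >>> columnwise_clamp(X, lower=0.0, upper=1.0)
#   tensor([[0.0000, 0.5000],
#           [1.0000, 0.2500]])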
def fix_features(
X: Tensor, fixed_features: Optional[Dict[int, Optional[float]]] = None
) -> Tensor:
r"""Fix feature values in a Tensor.
The fixed features will have zero gradient in downstream calculations.
Args:
X: input Tensor with shape `... x p`, where `p` is the number of features
fixed_features: A dictionary with keys as column indices and values
equal to what the feature should be set to in `X`. If the value is
None, that column is just considered fixed. Keys should be in the
range `[0, p - 1]`.
Returns:
The tensor X with fixed features.
"""
if fixed_features is None:
return X
columns = list(X.unbind(dim=-1))
for index, value in fixed_features.items():
if value is None:
columns[index] = columns[index].detach()
else:
columns[index] = torch.full_like(columns[index], value)
return torch.stack(columns, dim=-1)
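# Illustrative usage sketch (not part of the module): column 0 is pinned to 0.5
# and column 2 is detached (treated as fixed for gradient purposes).
#
#   >>> X = torch.rand(2, 3)
#   >>> X_fixed = fix_features(X, {0: 0.5, 2: None})
#   >>> X_fixed[:, 0]
#   tensor([0.5000, 0.5000])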
def get_X_baseline(acq_function: AcquisitionFunction) -> Optional[Tensor]:
r"""Extract X_baseline from an acquisition function.
This tries to find the baseline set of points. First, this checks if the
acquisition function has an `X_baseline` attribute. If it does not,
then this method attempts to use the model's `train_inputs` as `X_baseline`.
Args:
acq_function: The acquisition function.
Returns:
An optional `n x d`-dim tensor of baseline points. This is None if no
baseline points are found.
"""
try:
X = acq_function.X_baseline
# if there are no baseline points, use training points
if X.shape[0] == 0:
raise BotorchError
except (BotorchError, AttributeError):
try:
# for entropy MOO methods
model = acq_function.mo_model
except AttributeError:
try:
# some acquisition functions do not have a model attribute
# e.g. FixedFeatureAcquisitionFunction
model = acq_function.model
except AttributeError:
warn("Failed to extract X_baseline.", BotorchWarning)
return
try:
# Make sure we get the original train inputs.
m = model.models[0] if isinstance(model, ModelListGPyTorchModel) else model
if m._has_transformed_inputs:
X = m._original_train_inputs
else:
X = m.train_inputs[0]
except (BotorchError, AttributeError):
warn("Failed to extract X_baseline.", BotorchWarning)
return
# just use one batch
while X.ndim > 2:
X = X[0]
return X
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""General-purpose optimization utilities."""
from __future__ import annotations
from inspect import signature
from logging import debug as logging_debug
from typing import Any, Callable, Optional, Tuple
from warnings import warn, warn_explicit, WarningMessage
import numpy as np
from linear_operator.utils.errors import NanError, NotPSDError
def _filter_kwargs(function: Callable, **kwargs: Any) -> Any:
r"""Filter out kwargs that are not applicable for a given function.
Return a copy of given kwargs dict with only the required kwargs."""
allowed_params = signature(function).parameters
removed = {k for k in kwargs.keys() if k not in allowed_params}
if len(removed) > 0:
fn_descriptor = (
f" for function {function.__name__}"
if hasattr(function, "__name__")
else ""
)
warn(
f"Keyword arguments {list(removed)} will be ignored because they are"
f" not allowed parameters{fn_descriptor}. Allowed "
f"parameters are {list(allowed_params.keys())}."
)
return {k: v for k, v in kwargs.items() if k not in removed}
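# Illustrative usage sketch (not part of the module); `step` is a hypothetical
# function used only to show the filtering (a warning is emitted for the
# dropped keyword).
#
#   >>> def step(lr, momentum=0.9): ...
#   >>> _filter_kwargs(step, lr=0.1, nesterov=True)
#   {'lr': 0.1}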
def _handle_numerical_errors(
error: RuntimeError, x: np.ndarray, dtype: Optional[np.dtype] = None
) -> Tuple[np.ndarray, np.ndarray]:
if isinstance(error, NotPSDError):
raise error
error_message = error.args[0] if len(error.args) > 0 else ""
if (
isinstance(error, NanError)
or "singular" in error_message # old pytorch message
or "input is not positive-definite" in error_message # since pytorch #63864
):
_dtype = x.dtype if dtype is None else dtype
return np.full((), "nan", dtype=_dtype), np.full_like(x, "nan", dtype=_dtype)
raise error # pragma: nocover
def _warning_handler_template(
w: WarningMessage,
debug: Optional[Callable[[WarningMessage], bool]] = None,
rethrow: Optional[Callable[[WarningMessage], bool]] = None,
) -> bool:
r"""Helper for making basic warning handlers. Typically used with functools.partial.
Args:
w: The WarningMessage to be resolved and filtered out or returned unresolved.
debug: Optional callable used to specify that a warning should be
resolved as a logging statement at the DEBUG level.
rethrow: Optional callable used to specify that a warning should be
resolved by rethrowing the warning.
Returns:
Boolean indicating whether or not the warning message was resolved.
"""
if debug and debug(w):
logging_debug(str(w.message))
return True
if rethrow and rethrow(w):
warn_explicit(str(w.message), w.category, w.filename, w.lineno)
return True
return False
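# Illustrative usage sketch (not part of the module): resolve any recorded
# warning as a DEBUG log entry.
#
#   >>> import warnings
#   >>> with warnings.catch_warnings(record=True) as ws:
#   ...     warnings.simplefilter("always")
#   ...     warnings.warn("numerical issue", RuntimeWarning)
#   >>> _warning_handler_template(ws[0], debug=lambda w: True)
#   True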
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.optim.closures.core import (
ForwardBackwardClosure,
NdarrayOptimizationClosure,
)
from botorch.optim.closures.model_closures import (
get_loss_closure,
get_loss_closure_with_grads,
)
__all__ = [
"ForwardBackwardClosure",
"get_loss_closure",
"get_loss_closure_with_grads",
"NdarrayOptimizationClosure",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Core methods for building closures in torch and interfacing with numpy."""
from __future__ import annotations
from functools import partial
from typing import Any, Callable, Dict, Optional, Sequence, Tuple
import torch
from botorch.optim.utils import (
_handle_numerical_errors,
get_tensors_as_ndarray_1d,
set_tensors_from_ndarray_1d,
)
from botorch.optim.utils.numpy_utils import as_ndarray
from botorch.utils.context_managers import zero_grad_ctx
from numpy import float64 as np_float64, full as np_full, ndarray, zeros as np_zeros
from torch import Tensor
class ForwardBackwardClosure:
r"""Wrapper for fused forward and backward closures."""
def __init__(
self,
forward: Callable[[], Tensor],
parameters: Dict[str, Tensor],
backward: Callable[[Tensor], None] = Tensor.backward,
reducer: Optional[Callable[[Tensor], Tensor]] = torch.sum,
callback: Optional[Callable[[Tensor, Sequence[Optional[Tensor]]], None]] = None,
context_manager: Callable = None, # pyre-ignore [9]
) -> None:
r"""Initializes a ForwardBackwardClosure instance.
Args:
forward: Callable that returns a tensor.
parameters: A dictionary of tensors whose `grad` fields are to be returned.
backward: Callable that takes the (reduced) output of `forward` and sets the
`grad` attributes of tensors in `parameters`.
reducer: Optional callable used to reduce the output of the forward pass.
callback: Optional callable that takes the reduced output of `forward` and
the gradients of `parameters` as positional arguments.
context_manager: A ContextManager used to wrap each forward-backward call.
When passed as `None`, `context_manager` defaults to a `zero_grad_ctx`
that zeroes the gradients of `parameters` upon entry.
"""
if context_manager is None:
context_manager = partial(zero_grad_ctx, parameters)
self.forward = forward
self.backward = backward
self.parameters = parameters
self.reducer = reducer
self.callback = callback
self.context_manager = context_manager
def __call__(self, **kwargs: Any) -> Tuple[Tensor, Tuple[Optional[Tensor], ...]]:
with self.context_manager():
values = self.forward(**kwargs)
value = values if self.reducer is None else self.reducer(values)
self.backward(value)
grads = tuple(param.grad for param in self.parameters.values())
if self.callback:
self.callback(value, grads)
return value, grads
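# Illustrative usage sketch (not part of the module): a toy quadratic whose
# gradient is returned alongside the (summed) forward value.
#
#   >>> w = torch.tensor([1.0, -2.0], requires_grad=True)
#   >>> closure = ForwardBackwardClosure(
#   ...     forward=lambda: (w ** 2).sum(), parameters={"w": w}
#   ... )
#   >>> value, (grad,) = closure()
#   >>> grad
#   tensor([ 2., -4.])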
class NdarrayOptimizationClosure:
r"""Adds stateful behavior and a numpy.ndarray-typed API to a closure with an
expected return type Tuple[Tensor, Union[Tensor, Sequence[Optional[Tensor]]]]."""
def __init__(
self,
closure: Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]],
parameters: Dict[str, Tensor],
as_array: Callable[[Tensor], ndarray] = None, # pyre-ignore [9]
as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor,
get_state: Callable[[], ndarray] = None, # pyre-ignore [9]
set_state: Callable[[ndarray], None] = None, # pyre-ignore [9]
fill_value: float = 0.0,
persistent: bool = True,
) -> None:
r"""Initializes a NdarrayOptimizationClosure instance.
Args:
closure: A ForwardBackwardClosure instance.
parameters: A dictionary of tensors representing the closure's state.
Expected to correspond with the first `len(parameters)` optional
gradient tensors returned by `closure`.
as_array: Callable used to convert tensors to ndarrays.
as_tensor: Callable used to convert ndarrays to tensors.
get_state: Callable that returns the closure's state as an ndarray. When
passed as `None`, defaults to calling `get_tensors_as_ndarray_1d`
on `closure.parameters` while passing `as_array` (if given by the user).
set_state: Callable that takes a 1-dimensional ndarray and sets the
closure's state. When passed as `None`, `set_state` defaults to
calling `set_tensors_from_ndarray_1d` with `closure.parameters` and
a given ndarray while passing `as_tensor`.
fill_value: Fill value for parameters whose gradients are None. In most
cases, `fill_value` should either be zero or NaN.
persistent: Boolean specifying whether an ndarray should be retained
as a persistent buffer for gradients.
"""
if get_state is None:
# Note: Numpy supports copying data between ndarrays with different dtypes.
# Hence, our default behavior need not coerce the ndarray representations
# of tensors in `parameters` to float64 when copying over data.
_as_array = as_ndarray if as_array is None else as_array
get_state = partial(
get_tensors_as_ndarray_1d,
tensors=parameters,
dtype=np_float64,
as_array=_as_array,
)
if as_array is None: # per the note, do this after resolving `get_state`
as_array = partial(as_ndarray, dtype=np_float64)
if set_state is None:
set_state = partial(
set_tensors_from_ndarray_1d, parameters, as_tensor=as_tensor
)
self.closure = closure
self.parameters = parameters
self.as_array = as_array
self.as_tensor = as_tensor
self._get_state = get_state
self._set_state = set_state
self.fill_value = fill_value
self.persistent = persistent
self._gradient_ndarray: Optional[ndarray] = None
def __call__(
self, state: Optional[ndarray] = None, **kwargs: Any
) -> Tuple[ndarray, ndarray]:
if state is not None:
self.state = state
try:
value_tensor, grad_tensors = self.closure(**kwargs)
value = self.as_array(value_tensor)
grads = self._get_gradient_ndarray(fill_value=self.fill_value)
index = 0
for param, grad in zip(self.parameters.values(), grad_tensors):
size = param.numel()
if grad is not None:
grads[index : index + size] = self.as_array(grad.view(-1))
index += size
except RuntimeError as e:
value, grads = _handle_numerical_errors(e, x=self.state, dtype=np_float64)
return value, grads
@property
def state(self) -> ndarray:
return self._get_state()
@state.setter
def state(self, state: ndarray) -> None:
self._set_state(state)
def _get_gradient_ndarray(self, fill_value: Optional[float] = None) -> ndarray:
if self.persistent and self._gradient_ndarray is not None:
if fill_value is not None:
self._gradient_ndarray.fill(fill_value)
return self._gradient_ndarray
size = sum(param.numel() for param in self.parameters.values())
array = (
np_zeros(size, dtype=np_float64)
if fill_value is None or fill_value == 0.0
else np_full(size, fill_value, dtype=np_float64)
)
if self.persistent:
self._gradient_ndarray = array
return array
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Utilities for building model-based closures."""
from __future__ import annotations
from itertools import chain, repeat
from typing import Any, Callable, Dict, Optional, Sequence, Tuple
from botorch.optim.closures.core import ForwardBackwardClosure
from botorch.utils.dispatcher import Dispatcher, type_bypassing_encoder
from botorch.utils.types import NoneType
from gpytorch.mlls import (
ExactMarginalLogLikelihood,
MarginalLogLikelihood,
SumMarginalLogLikelihood,
)
from torch import Tensor
from torch.utils.data import DataLoader
GetLossClosure = Dispatcher("get_loss_closure", encoder=type_bypassing_encoder)
GetLossClosureWithGrads = Dispatcher(
"get_loss_closure_with_grads", encoder=type_bypassing_encoder
)
def get_loss_closure(
mll: MarginalLogLikelihood,
data_loader: Optional[DataLoader] = None,
**kwargs: Any,
) -> Callable[[], Tensor]:
r"""Public API for GetLossClosure dispatcher.
This method, and the dispatcher that powers it, acts as a clearing house
for factory functions that define how `mll` is evaluated.
Users may specify custom evaluation routines by registering a factory function
with GetLossClosure. These factories should be registered using the type signature
`Type[MarginalLogLikelihood], Type[Likelihood], Type[Model], Type[DataLoader]`.
The final argument, Type[DataLoader], is optional. Evaluation routines that obtain
training data from, e.g., `mll.model` should register this argument as `type(None)`.
Args:
mll: A MarginalLogLikelihood instance whose negative defines the loss.
data_loader: An optional DataLoader instance for cases where training
data is passed in rather than obtained from `mll.model`.
Returns:
A closure that takes zero positional arguments and returns the negated
value of `mll`.
"""
return GetLossClosure(
mll, type(mll.likelihood), type(mll.model), data_loader, **kwargs
)
def get_loss_closure_with_grads(
mll: MarginalLogLikelihood,
parameters: Dict[str, Tensor],
data_loader: Optional[DataLoader] = None,
backward: Callable[[Tensor], None] = Tensor.backward,
reducer: Optional[Callable[[Tensor], Tensor]] = Tensor.sum,
context_manager: Optional[Callable] = None,
**kwargs: Any,
) -> Callable[[], Tuple[Tensor, Tuple[Tensor, ...]]]:
r"""Public API for GetLossClosureWithGrads dispatcher.
In most cases, this method simply adds a backward pass to a loss closure obtained by
calling `get_loss_closure`. For further details, see `get_loss_closure`.
Args:
mll: A MarginalLogLikelihood instance whose negative defines the loss.
parameters: A dictionary of tensors whose `grad` fields are to be returned.
reducer: Optional callable used to reduce the output of the forward pass.
data_loader: An optional DataLoader instance for cases where training
data is passed in rather than obtained from `mll.model`.
context_manager: An optional ContextManager used to wrap each forward-backward
pass. Defaults to a `zero_grad_ctx` that zeroes the gradients of
`parameters` upon entry. None may be passed as an alias for `nullcontext`.
Returns:
A closure that takes zero positional arguments and returns the reduced and
negated value of `mll` along with the gradients of `parameters`.
"""
return GetLossClosureWithGrads(
mll,
type(mll.likelihood),
type(mll.model),
data_loader,
parameters=parameters,
reducer=reducer,
backward=backward,
context_manager=context_manager,
**kwargs,
)
@GetLossClosureWithGrads.register(object, object, object, object)
def _get_loss_closure_with_grads_fallback(
mll: MarginalLogLikelihood,
_: object,
__: object,
data_loader: Optional[DataLoader],
parameters: Dict[str, Tensor],
reducer: Callable[[Tensor], Tensor] = Tensor.sum,
backward: Callable[[Tensor], None] = Tensor.backward,
context_manager: Callable = None, # pyre-ignore [9]
**kwargs: Any,
) -> ForwardBackwardClosure:
r"""Wraps a `loss_closure` with a ForwardBackwardClosure."""
loss_closure = get_loss_closure(mll, data_loader=data_loader, **kwargs)
return ForwardBackwardClosure(
forward=loss_closure,
backward=backward,
parameters=parameters,
reducer=reducer,
context_manager=context_manager,
)
@GetLossClosure.register(MarginalLogLikelihood, object, object, DataLoader)
def _get_loss_closure_fallback_external(
mll: MarginalLogLikelihood,
_: object,
__: object,
data_loader: DataLoader,
**ignore: Any,
) -> Callable[[], Tensor]:
r"""Fallback loss closure with externally provided data."""
batch_generator = chain.from_iterable(iter(data_loader) for _ in repeat(None))
def closure(**kwargs: Any) -> Tensor:
batch = next(batch_generator)
if not isinstance(batch, Sequence):
raise TypeError(
"Expected `data_loader` to generate a batch of tensors, "
f"but found {type(batch)}."
)
num_inputs = len(mll.model.train_inputs)
model_output = mll.model(*batch[:num_inputs])
log_likelihood = mll(model_output, *batch[num_inputs:], **kwargs)
return -log_likelihood
return closure
@GetLossClosure.register(MarginalLogLikelihood, object, object, NoneType)
def _get_loss_closure_fallback_internal(
mll: MarginalLogLikelihood, _: object, __: object, ___: NoneType, **ignore: Any
) -> Callable[[], Tensor]:
r"""Fallback loss closure with internally managed data."""
def closure(**kwargs: Any) -> Tensor:
model_output = mll.model(*mll.model.train_inputs)
log_likelihood = mll(model_output, mll.model.train_targets, **kwargs)
return -log_likelihood
return closure
@GetLossClosure.register(ExactMarginalLogLikelihood, object, object, NoneType)
def _get_loss_closure_exact_internal(
mll: ExactMarginalLogLikelihood, _: object, __: object, ___: NoneType, **ignore: Any
) -> Callable[[], Tensor]:
r"""ExactMarginalLogLikelihood loss closure with internally managed data."""
def closure(**kwargs: Any) -> Tensor:
model_output = mll.model(*mll.model.train_inputs)
log_likelihood = mll(
model_output, mll.model.train_targets, *mll.model.train_inputs, **kwargs
)
return -log_likelihood
return closure
@GetLossClosure.register(SumMarginalLogLikelihood, object, object, NoneType)
def _get_loss_closure_sum_internal(
mll: SumMarginalLogLikelihood, _: object, __: object, ___: NoneType, **ignore: Any
) -> Callable[[], Tensor]:
r"""SumMarginalLogLikelihood loss closure with internally managed data."""
def closure(**kwargs: Any) -> Tensor:
model_output = mll.model(*mll.model.train_inputs)
log_likelihood = mll(
model_output,
mll.model.train_targets,
*map(list, mll.model.train_inputs),
**kwargs,
)
return -log_likelihood
return closure
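# Illustrative usage sketch (not part of the module), assuming a standard
# BoTorch `SingleTaskGP`; shapes and data below are made up.
#
#   >>> import torch
#   >>> from botorch.models import SingleTaskGP
#   >>> train_X = torch.rand(8, 2, dtype=torch.float64)
#   >>> train_Y = train_X.sum(dim=-1, keepdim=True)
#   >>> model = SingleTaskGP(train_X, train_Y)
#   >>> mll = ExactMarginalLogLikelihood(model.likelihood, model)
#   >>> params = {n: p for n, p in mll.named_parameters() if p.requires_grad}
#   >>> closure = get_loss_closure_with_grads(mll, parameters=params)
#   >>> loss, grads = closure()  # negated (summed) MLL and grads for `params`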
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from inspect import getsource, getsourcefile
from typing import Any, Callable, Optional, Tuple, Type
from multipledispatch.dispatcher import (
Dispatcher as MDDispatcher,
MDNotImplementedError, # trivial subclass of NotImplementedError
str_signature,
)
def type_bypassing_encoder(arg: Any) -> Type:
# Allow type variables to be passed as pre-encoded arguments
return arg if isinstance(arg, type) else type(arg)
class Dispatcher(MDDispatcher):
r"""Clearing house for multiple dispatch functionality. This class extends
`<multipledispatch.Dispatcher>` by: (i) generalizing the argument encoding
convention during method lookup, (ii) implementing `__getitem__` as a dedicated
method lookup function.
"""
def __init__(
self,
name: str,
doc: Optional[str] = None,
encoder: Callable[Any, Type] = type,
) -> None:
"""
Args:
name: A string identifier for the `Dispatcher` instance.
doc: A docstring for the multiply dispatched method(s).
encoder: A callable that individually transforms the arguments passed
at runtime in order to construct the key used for method lookup as
`tuple(map(encoder, args))`. Defaults to `type`.
"""
super().__init__(name=name, doc=doc)
self._encoder = encoder
def __getitem__(
self,
args: Optional[Any] = None,
types: Optional[Tuple[Type]] = None,
) -> Callable:
r"""Method lookup.
Args:
args: A set of arguments that act as identifiers for a stored method.
types: A tuple of types that encodes `args`.
Returns:
A callable corresponding to the given `args` or `types`.
"""
if types is None:
if args is None:
raise RuntimeError("One of `args` or `types` must be provided.")
types = self.encode_args(args)
elif args is not None:
raise RuntimeError("Only one of `args` or `types` may be provided.")
try:
func = self._cache[types]
except KeyError:
func = self.dispatch(*types)
if not func:
msg = f"{self.name}: <{', '.join(cls.__name__ for cls in types)}"
raise NotImplementedError(f"Could not find signature for {msg}")
self._cache[types] = func
return func
def __call__(self, *args: Any, **kwargs: Any) -> Any:
r"""Multiply dispatches a call to a collection of methods.
Args:
args: A set of arguments that act as identifiers for a stored method.
kwargs: Optional keyword arguments passed to the retrieved method.
Returns:
The result of evaluating `func(*args, **kwargs)`, where `func` is
the function obtained via method lookup.
"""
types = self.encode_args(args)
func = self.__getitem__(types=types)
try:
return func(*args, **kwargs)
except MDNotImplementedError:
# Traverses registered methods in order, yields whenever a match is found
funcs = self.dispatch_iter(*types)
next(funcs) # burn first, same as self.__getitem__(types=types)
for func in funcs:
try:
return func(*args, **kwargs)
except MDNotImplementedError:
pass
raise NotImplementedError(
f"Matching functions for {self.name:s}: {str_signature(types):s} "
"found, but none completed successfully"
)
def dispatch(self, *types: Type) -> Callable:
r"""Method lookup strategy. Checks for an exact match before traversing
the set of registered methods according to the current ordering.
Args:
types: A tuple of types that gets compared with the signatures
of registered methods to determine compatibility.
Returns:
The first method encountered with a matching signature.
"""
if types in self.funcs:
return self.funcs[types]
try:
return next(self.dispatch_iter(*types))
except StopIteration:
return None
def encode_args(self, args: Any) -> Tuple[Type]:
r"""Converts arguments into a tuple of types used during method lookup."""
return tuple(map(self.encoder, args if isinstance(args, tuple) else (args,)))
def _help(self, *args: Any) -> str:
r"""Returns the retrieved method's docstring."""
return self.dispatch(*self.encode_args(args)).__doc__
def help(self, *args: Any, **kwargs: Any) -> None:
r"""Prints the retrieved method's docstring."""
print(self._help(*args))
def _source(self, *args: Any) -> str:
r"""Returns the retrieved method's source types as a string."""
func = self.dispatch(*self.encode_args(args))
if not func:
raise TypeError("No function found")
return f"File: {getsourcefile(func)}\n\n{getsource(func)}"
def source(self, *args, **kwargs) -> None:
r"""Prints the retrieved method's source types."""
print(self._source(*args))
@property
def encoder(self) -> Callable[Any, Type]:
return self._encoder
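# Illustrative usage sketch (not part of the module): a toy dispatcher that
# routes on the type of its single argument.
#
#   >>> describe = Dispatcher("describe")
#   >>> @describe.register(int)
#   ... def _(x):
#   ...     return "an int"
#   >>> @describe.register(str)
#   ... def _(x):
#   ...     return "a str"
#   >>> describe(3), describe("s")
#   ('an int', 'a str')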
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Some basic data transformation helpers.
"""
from __future__ import annotations
import warnings
from functools import wraps
from typing import Any, Callable, List, Optional, TYPE_CHECKING
import torch
from torch import Tensor
if TYPE_CHECKING:
from botorch.acquisition import AcquisitionFunction # pragma: no cover
from botorch.models.model import Model # pragma: no cover
def standardize(Y: Tensor) -> Tensor:
r"""Standardizes (zero mean, unit variance) a tensor by dim=-2.
If the tensor is single-dimensional, simply standardizes the tensor.
If for some batch index all elements are equal (or if there is only a single
data point), this function will return 0 for that batch index.
Args:
Y: A `batch_shape x n x m`-dim tensor.
Returns:
The standardized `Y`.
Example:
>>> Y = torch.rand(4, 3)
>>> Y_standardized = standardize(Y)
"""
stddim = -1 if Y.dim() < 2 else -2
Y_std = Y.std(dim=stddim, keepdim=True)
Y_std = Y_std.where(Y_std >= 1e-9, torch.full_like(Y_std, 1.0))
return (Y - Y.mean(dim=stddim, keepdim=True)) / Y_std
def normalize(X: Tensor, bounds: Tensor) -> Tensor:
r"""Min-max normalize X w.r.t. the provided bounds.
Args:
X: `... x d` tensor of data
bounds: `2 x d` tensor of lower and upper bounds for each of the X's d
columns.
Returns:
A `... x d`-dim tensor of normalized data, given by
`(X - bounds[0]) / (bounds[1] - bounds[0])`. If all elements of `X`
are contained within `bounds`, the normalized values will be
contained within `[0, 1]^d`.
Example:
>>> X = torch.rand(4, 3)
>>> bounds = torch.stack([torch.zeros(3), 0.5 * torch.ones(3)])
>>> X_normalized = normalize(X, bounds)
"""
return (X - bounds[0]) / (bounds[1] - bounds[0])
def unnormalize(X: Tensor, bounds: Tensor) -> Tensor:
r"""Un-normalizes X w.r.t. the provided bounds.
Args:
X: `... x d` tensor of data
bounds: `2 x d` tensor of lower and upper bounds for each of the X's d
columns.
Returns:
A `... x d`-dim tensor of unnormalized data, given by
`X * (bounds[1] - bounds[0]) + bounds[0]`. If all elements of `X`
are contained in `[0, 1]^d`, the un-normalized values will be
contained within `bounds`.
Example:
>>> X_normalized = torch.rand(4, 3)
>>> bounds = torch.stack([torch.zeros(3), 0.5 * torch.ones(3)])
>>> X = unnormalize(X_normalized, bounds)
"""
return X * (bounds[1] - bounds[0]) + bounds[0]
def normalize_indices(indices: Optional[List[int]], d: int) -> Optional[List[int]]:
r"""Normalize a list of indices to ensure that they are positive.
Args:
indices: A list of indices (may contain negative indices for indexing
"from the back").
d: The dimension of the tensor to index.
Returns:
A normalized list of indices such that each index is between `0` and
`d-1`, or None if indices is None.
"""
if indices is None:
return indices
normalized_indices = []
for i in indices:
if i < 0:
i = i + d
if i < 0 or i > d - 1:
raise ValueError(f"Index {i} out of bounds for tensor or length {d}.")
normalized_indices.append(i)
return normalized_indices
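# Illustrative usage sketch (not part of the module):
#
#   >>> normalize_indices([0, -1], d=3)
#   [0, 2]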
def _verify_output_shape(acqf: Any, X: Tensor, output: Tensor) -> bool:
r"""
Performs the output shape checks for `t_batch_mode_transform`. Output shape checks
help in catching the errors due to AcquisitionFunction arguments with erroneous
return shapes before these errors propagate further down the line.
This method checks that the `output` shape matches either the t-batch shape of X
or the `batch_shape` of `acqf.model`.
Args:
acqf: The AcquisitionFunction object being evaluated.
X: The `... x q x d`-dim input tensor with an explicit t-batch.
output: The return value of `acqf.method(X, ...)`.
Returns:
True if `output` has the correct shape, False otherwise.
"""
try:
X_batch_shape = X.shape[:-2]
if output.shape == X_batch_shape:
return True
if output.shape == torch.Size() and X_batch_shape == torch.Size([1]):
# X has a batch shape of [1] which gets squeezed.
return True
# Cases with model batch shape involved.
model_b_shape = acqf.model.batch_shape
if output.shape == model_b_shape:
# Simple inputs with batched model.
return True
model_b_dim = len(model_b_shape)
if output.shape == X_batch_shape[:-model_b_dim] + model_b_shape and all(
xs in [1, ms] for xs, ms in zip(X_batch_shape[-model_b_dim:], model_b_shape)
):
# X has additional batch dimensions beyond the model batch shape.
# For a batched model, some of the input dimensions might get broadcasted
# to the model batch shape. In that case the acquisition function output
# should replace the right-most batch dim of X with the model's batch shape.
return True
return False
except (AttributeError, NotImplementedError):
# acqf does not have model or acqf.model does not define `batch_shape`
warnings.warn(
"Output shape checks failed! Expected output shape to match t-batch shape"
f"of X, but got output with shape {output.shape} for X with shape"
f"{X.shape}. Make sure that this is the intended behavior!",
RuntimeWarning,
)
return True
def is_fully_bayesian(model: Model) -> bool:
r"""Check if at least one model is a SaasFullyBayesianSingleTaskGP
Args:
model: A BoTorch model (may be a `ModelList` or `ModelListGP`)
d: The dimension of the tensor to index.
Returns:
True if at least one model is a `SaasFullyBayesianSingleTaskGP`
"""
from botorch.models import ModelList
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.fully_bayesian_multitask import SaasFullyBayesianMultiTaskGP
full_bayesian_model_cls = (
SaasFullyBayesianSingleTaskGP,
SaasFullyBayesianMultiTaskGP,
)
if isinstance(model, full_bayesian_model_cls) or getattr(
model, "is_fully_bayesian", False
):
return True
elif isinstance(model, ModelList):
for m in model.models:
if is_fully_bayesian(m):
return True
return False
def t_batch_mode_transform(
expected_q: Optional[int] = None,
assert_output_shape: bool = True,
) -> Callable[
[Callable[[AcquisitionFunction, Any], Any]],
Callable[[AcquisitionFunction, Any], Any],
]:
r"""Factory for decorators enabling consistent t-batch behavior.
This method creates decorators for instance methods to transform an input tensor
`X` to t-batch mode (i.e. with at least 3 dimensions). This assumes the tensor
has a q-batch dimension. The decorator also checks the q-batch size if `expected_q`
is provided, and the output shape if `assert_output_shape` is `True`.
Args:
expected_q: The expected q-batch size of `X`. If specified, this will raise an
AssertionError if `X`'s q-batch size does not equal expected_q.
assert_output_shape: If `True`, this will raise an AssertionError if the
output shape does not match either the t-batch shape of `X`,
or the `acqf.model.batch_shape` for acquisition functions using
batched models.
Returns:
The decorated instance method.
Example:
>>> class ExampleClass:
>>> @t_batch_mode_transform(expected_q=1)
>>> def single_q_method(self, X):
>>> ...
>>>
>>> @t_batch_mode_transform()
>>> def arbitrary_q_method(self, X):
>>> ...
"""
def decorator(
method: Callable[[AcquisitionFunction, Any], Any],
) -> Callable[[AcquisitionFunction, Any], Any]:
@wraps(method)
def decorated(
acqf: AcquisitionFunction, X: Any, *args: Any, **kwargs: Any
) -> Any:
# Allow using acquisition functions for other inputs (e.g. lists of strings)
if not isinstance(X, Tensor):
return method(acqf, X, *args, **kwargs)
if X.dim() < 2:
raise ValueError(
f"{type(acqf).__name__} requires X to have at least 2 dimensions,"
f" but received X with only {X.dim()} dimensions."
)
elif expected_q is not None and X.shape[-2] != expected_q:
raise AssertionError(
f"Expected X to be `batch_shape x q={expected_q} x d`, but"
f" got X with shape {X.shape}."
)
# add t-batch dim
X = X if X.dim() > 2 else X.unsqueeze(0)
output = method(acqf, X, *args, **kwargs)
if hasattr(acqf, "model") and is_fully_bayesian(acqf.model):
output = output.mean(dim=-1)
if assert_output_shape and not _verify_output_shape(
acqf=acqf,
X=X,
output=output,
):
raise AssertionError(
"Expected the output shape to match either the t-batch shape of "
"X, or the `model.batch_shape` in the case of acquisition "
"functions using batch models; but got output with shape "
f"{output.shape} for X with shape {X.shape}."
)
return output
return decorated
return decorator
def concatenate_pending_points(
method: Callable[[Any, Tensor], Any]
) -> Callable[[Any, Tensor], Any]:
r"""Decorator concatenating X_pending into an acquisition function's argument.
This decorator works on the `forward` method of acquisition functions taking
a tensor `X` as the argument. If the acquisition function has an `X_pending`
attribute (that is not `None`), this is concatenated into the input `X`,
appropriately expanding the pending points to match the batch shape of `X`.
Example:
>>> class ExampleAcquisitionFunction:
>>> @concatenate_pending_points
>>> @t_batch_mode_transform()
>>> def forward(self, X):
>>> ...
"""
@wraps(method)
def decorated(cls: Any, X: Tensor, **kwargs: Any) -> Any:
if cls.X_pending is not None:
X = torch.cat([X, match_batch_shape(cls.X_pending, X)], dim=-2)
return method(cls, X, **kwargs)
return decorated
def match_batch_shape(X: Tensor, Y: Tensor) -> Tensor:
r"""Matches the batch dimension of a tensor to that of another tensor.
Args:
X: A `batch_shape_X x q x d` tensor, whose batch dimensions that
correspond to batch dimensions of `Y` are to be matched to those
(if compatible).
Y: A `batch_shape_Y x q' x d` tensor.
Returns:
A `batch_shape_Y x q x d` tensor containing the data of `X` expanded to
the batch dimensions of `Y` (if compatible). For instance, if `X` is
`b'' x b' x q x d` and `Y` is `b x q x d`, then the returned tensor is
`b'' x b x q x d`.
Example:
>>> X = torch.rand(2, 1, 5, 3)
>>> Y = torch.rand(2, 6, 4, 3)
>>> X_matched = match_batch_shape(X, Y)
>>> X_matched.shape
torch.Size([2, 6, 5, 3])
"""
return X.expand(X.shape[: -(Y.dim())] + Y.shape[:-2] + X.shape[-2:])
def convert_to_target_pre_hook(module, *args):
r"""Pre-hook for automatically calling `.to(X)` on module prior to `forward`"""
module.to(args[0][0])
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import BotorchError
from botorch.posteriors.base_samples import _reshape_base_samples_non_interleaved
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch.distributions.multitask_multivariate_normal import (
MultitaskMultivariateNormal,
)
from linear_operator.operators import BlockDiagLinearOperator, LinearOperator
from linear_operator.utils.cholesky import psd_safe_cholesky
from linear_operator.utils.errors import NanError
from torch import Tensor
def extract_batch_covar(mt_mvn: MultitaskMultivariateNormal) -> LinearOperator:
r"""Extract a batched independent covariance matrix from an MTMVN.
Args:
mt_mvn: A multi-task multivariate normal with a block diagonal
covariance matrix.
Returns:
A lazy covariance matrix consisting of a batch of the blocks of
the diagonal of the MultitaskMultivariateNormal.
"""
lazy_covar = mt_mvn.lazy_covariance_matrix
if not isinstance(lazy_covar, BlockDiagLinearOperator):
raise BotorchError(
f"Expected BlockDiagLinearOperator, but got {type(lazy_covar)}."
)
return lazy_covar.base_linear_op
def _reshape_base_samples(
base_samples: Tensor, sample_shape: torch.Size, posterior: GPyTorchPosterior
) -> Tensor:
r"""Manipulate shape of base_samples to match `MultivariateNormal.rsample`.
This ensures that base_samples are used in the same way as in
gpytorch.distributions.MultivariateNormal. For CBD, it is important to ensure
that the same base samples are used for the in-sample points here and in the
cached box decompositions.
Args:
base_samples: The base samples.
sample_shape: The sample shape.
posterior: The joint posterior over (X_baseline, X).
Returns:
Reshaped and expanded base samples.
"""
mvn = posterior.distribution
loc = mvn.loc
peshape = posterior._extended_shape()
base_samples = base_samples.view(
sample_shape + torch.Size([1] * (loc.ndim - 1)) + peshape[-2:]
).expand(sample_shape + loc.shape[:-1] + peshape[-2:])
if posterior._is_mt:
base_samples = _reshape_base_samples_non_interleaved(
mvn=posterior.distribution,
base_samples=base_samples,
sample_shape=sample_shape,
)
base_samples = base_samples.reshape(
-1, *loc.shape[:-1], mvn.lazy_covariance_matrix.shape[-1]
)
base_samples = base_samples.permute(*range(1, loc.dim() + 1), 0)
return base_samples.reshape(
*peshape[:-2],
peshape[-1],
peshape[-2],
*sample_shape,
)
def sample_cached_cholesky(
posterior: GPyTorchPosterior,
baseline_L: Tensor,
q: int,
base_samples: Tensor,
sample_shape: torch.Size,
max_tries: int = 6,
) -> Tensor:
r"""Get posterior samples at the `q` new points from the joint multi-output
posterior.
Args:
posterior: The joint posterior over (X_baseline, X).
baseline_L: The baseline lower triangular cholesky factor.
q: The number of new points in X.
base_samples: The base samples.
sample_shape: The sample shape.
max_tries: The number of tries for computing the Cholesky
decomposition with increasing jitter.
Returns:
A `sample_shape x batch_shape x q x m`-dim tensor of posterior
samples at the new points.
"""
# compute bottom left covariance block
mvn = posterior.distribution
lazy_covar = (
extract_batch_covar(mt_mvn=mvn)
if isinstance(mvn, MultitaskMultivariateNormal)
else mvn.lazy_covariance_matrix
)
# Get the `q` new rows of the batched covariance matrix
bottom_rows = lazy_covar[..., -q:, :].to_dense()
# The covariance in block form is:
# [K(X_baseline, X_baseline), K(X_baseline, X)]
# [K(X, X_baseline), K(X, X)]
# bl := K(X, X_baseline)
# br := K(X, X)
# Get bottom right block of new covariance
bl, br = bottom_rows.split([bottom_rows.shape[-1] - q, q], dim=-1)
# Solve Ax = b
# where A = K(X_baseline, X_baseline) and b = K(X, X_baseline)^T
# and bl_chol := x^T
# bl_chol is the new `(batch_shape) x q x n`-dim bottom left block
# of the cholesky decomposition
bl_chol = torch.linalg.solve_triangular(
baseline_L, bl.transpose(-2, -1), upper=False
).transpose(-2, -1)
# Compute the new bottom right block of the Cholesky
# decomposition via:
# Cholesky(K(X, X) - bl_chol @ bl_chol^T)
br_to_chol = br - bl_chol @ bl_chol.transpose(-2, -1)
# TODO: technically we should make sure that we add a
# consistent nugget to the cached covariance and the new block
br_chol = psd_safe_cholesky(br_to_chol, max_tries=max_tries)
# Create a `(batch_shape) x q x (n+q)`-dim tensor containing the
# `q` new bottom rows of the Cholesky decomposition
new_Lq = torch.cat([bl_chol, br_chol], dim=-1)
mean = posterior.distribution.mean
base_samples = _reshape_base_samples(
base_samples=base_samples,
sample_shape=sample_shape,
posterior=posterior,
)
if not isinstance(posterior.distribution, MultitaskMultivariateNormal):
# add output dim
mean = mean.unsqueeze(-1)
# add batch dim corresponding to output dim
new_Lq = new_Lq.unsqueeze(-3)
new_mean = mean[..., -q:, :]
res = (
new_Lq.matmul(base_samples)
.add(new_mean.transpose(-1, -2).unsqueeze(-1))
.permute(-1, *range(posterior.distribution.loc.dim() - 1), -2, -3)
.contiguous()
)
contains_nans = torch.isnan(res).any()
contains_infs = torch.isinf(res).any()
if contains_nans or contains_infs:
suffix_args = []
if contains_nans:
suffix_args.append("nans")
if contains_infs:
suffix_args.append("infs")
suffix = " and ".join(suffix_args)
raise NanError(f"Samples contain {suffix}.")
return res
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Helpers for handling objectives.
"""
from __future__ import annotations
import warnings
from typing import Callable, List, Optional, Union
import torch
from botorch.utils.safe_math import log_fatmoid, logexpit
from torch import Tensor
def get_objective_weights_transform(
weights: Optional[Tensor],
) -> Callable[[Tensor, Optional[Tensor]], Tensor]:
r"""Create a linear objective callable from a set of weights.
Create a callable mapping a Tensor of size `b x q x m` and an (optional)
Tensor of size `b x q x d` to a Tensor of size `b x q`, where `m` is the
number of model outputs, by scalarizing the outputs via the objective weights.
This callable supports broadcasting (e.g. for calling on a tensor of shape
`mc_samples x b x q x m`). For `m = 1`, the objective weight is used to
determine the optimization direction.
Args:
weights: a 1-dimensional Tensor containing a weight for each task.
If not provided, the identity mapping is used.
Returns:
Transform function using the objective weights.
Example:
>>> weights = torch.tensor([0.75, 0.25])
>>> transform = get_objective_weights_transform(weights)
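>>> # illustrative continuation: scalarize a random `b x q x m` batch of outcomes
>>> Y = torch.rand(8, 4, 2)
>>> objective_values = transform(Y)  # shape: 8 x 4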
"""
# if no weights provided, just extract the single output
if weights is None:
return lambda Y: Y.squeeze(-1)
def _objective(Y: Tensor, X: Optional[Tensor] = None):
r"""Evaluate objective.
Note: einsum multiplies Y by weights and sums over the `m`-dimension.
Einsum is ~2x faster than using `(Y * weights.view(1, 1, -1)).sum(dim=-1)`.
Args:
Y: A `... x b x q x m` tensor of function values.
Returns:
A `... x b x q`-dim tensor of objective values.
"""
return torch.einsum("...m, m", [Y, weights])
return _objective
def apply_constraints_nonnegative_soft(
obj: Tensor,
constraints: List[Callable[[Tensor], Tensor]],
samples: Tensor,
eta: Union[Tensor, float],
) -> Tensor:
r"""Applies constraints to a non-negative objective.
This function uses a sigmoid approximation to an indicator function for
each constraint.
Args:
obj: A `n_samples x b x q (x m')`-dim Tensor of objective values.
constraints: A list of callables, each mapping a Tensor of size `b x q x m`
to a Tensor of size `b x q`, where negative values imply feasibility.
This callable must support broadcasting. Only relevant for multi-
output models (`m` > 1).
samples: A `n_samples x b x q x m` Tensor of samples drawn from the posterior.
eta: The temperature parameter for the sigmoid function. Can be either a float
or a 1-dim tensor. In case of a float the same eta is used for every
constraint in constraints. In case of a tensor the length of the tensor
must match the number of provided constraints. The i-th constraint is
then estimated with the i-th eta value.
Returns:
A `n_samples x b x q (x m')`-dim tensor of feasibility-weighted objectives.
"""
w = compute_smoothed_feasibility_indicator(
constraints=constraints, samples=samples, eta=eta
)
if obj.dim() == samples.dim():
w = w.unsqueeze(-1) # Need to unsqueeze to accommodate the outcome dimension.
return obj.clamp_min(0).mul(w) # Enforce non-negativity of obj, apply constraints.
def compute_feasibility_indicator(
constraints: Optional[List[Callable[[Tensor], Tensor]]],
samples: Tensor,
) -> Tensor:
r"""Computes the feasibility of a list of constraints given posterior samples.
Args:
constraints: A list of callables, each mapping a `batch_shape x q x m`-dim Tensor
to a `batch_shape x q`-dim Tensor, where negative values imply feasibility.
samples: A `batch_shape x q x m`-dim Tensor of posterior samples.
Returns:
A `batch_shape x q`-dim tensor of Boolean feasibility values.
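Example:
>>> # illustrative sketch: feasibility under a single constraint requiring
>>> # the first outcome to be negative
>>> samples = torch.randn(16, 4, 2)
>>> feas = compute_feasibility_indicator([lambda Z: Z[..., 0]], samples)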
"""
ind = torch.ones(samples.shape[:-1], dtype=torch.bool, device=samples.device)
if constraints is not None:
for constraint in constraints:
ind = ind.logical_and(constraint(samples) < 0)
return ind
def compute_smoothed_feasibility_indicator(
constraints: List[Callable[[Tensor], Tensor]],
samples: Tensor,
eta: Union[Tensor, float],
log: bool = False,
fat: bool = False,
) -> Tensor:
r"""Computes the smoothed feasibility indicator of a list of constraints.
Given posterior samples, using a sigmoid to smoothly approximate the feasibility
indicator of each individual constraint to ensure differentiability and high
gradient signal. The `fat` and `log` options improve the numerical behavior of
the smooth approximation.
NOTE: *Negative* constraint values are associated with feasibility.
Args:
constraints: A list of callables, each mapping a Tensor of size `b x q x m`
to a Tensor of size `b x q`, where negative values imply feasibility.
This callable must support broadcasting. Only relevant for multi-
output models (`m` > 1).
samples: A `n_samples x b x q x m` Tensor of samples drawn from the posterior.
eta: The temperature parameter for the sigmoid function. Can be either a float
or a 1-dim tensor. In case of a float the same eta is used for every
constraint in constraints. In case of a tensor the length of the tensor
must match the number of provided constraints. The i-th constraint is
then estimated with the i-th eta value.
log: Toggles the computation of the log-feasibility indicator.
fat: Toggles the computation of the fat-tailed feasibility indicator.
Returns:
A `n_samples x b x q`-dim tensor of feasibility indicator values.
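Example:
>>> # illustrative sketch: smoothed (and log-) feasibility for a single
>>> # constraint on the second outcome, using a float temperature `eta`
>>> samples = torch.randn(32, 1, 4, 2)
>>> constraints = [lambda Z: Z[..., 1]]
>>> w = compute_smoothed_feasibility_indicator(constraints, samples, eta=1e-2)
>>> log_w = compute_smoothed_feasibility_indicator(
...     constraints, samples, eta=1e-2, log=True
... )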
"""
if type(eta) is not Tensor:
eta = torch.full((len(constraints),), eta)
if len(eta) != len(constraints):
raise ValueError(
"Number of provided constraints and number of provided etas do not match."
)
if not (eta > 0).all():
raise ValueError("eta must be positive.")
is_feasible = torch.zeros_like(samples[..., 0])
log_sigmoid = log_fatmoid if fat else logexpit
for constraint, e in zip(constraints, eta):
is_feasible = is_feasible + log_sigmoid(-constraint(samples) / e)
return is_feasible if log else is_feasible.exp()
# TODO: deprecate this function
def soft_eval_constraint(lhs: Tensor, eta: float = 1e-3) -> Tensor:
r"""Element-wise evaluation of a constraint in a 'soft' fashion
`value(x) = 1 / (1 + exp(x / eta))`
Args:
lhs: The left hand side of the constraint `lhs <= 0`.
eta: The temperature parameter of the sigmoid function. As eta
decreases, this approximates the Heaviside step function.
Returns:
Element-wise 'soft' feasibility indicator of the same shape as `lhs`.
For each element `x`, `value(x) -> 0` as `x` becomes positive, and
`value(x) -> 1` as x becomes negative.
"""
warnings.warn(
"`soft_eval_constraint` is deprecated. Please consider `torch.utils.sigmoid` "
+ "with its `fat` and `log` options to compute feasibility indicators.",
DeprecationWarning,
)
if eta <= 0:
raise ValueError("eta must be positive.")
return torch.sigmoid(-lhs / eta)
def apply_constraints(
obj: Tensor,
constraints: List[Callable[[Tensor], Tensor]],
samples: Tensor,
infeasible_cost: float,
eta: Union[Tensor, float] = 1e-3,
) -> Tensor:
r"""Apply constraints using an infeasible_cost `M` for negative objectives.
This allows feasibility-weighting an objective for the case where the
objective can be negative by using the following strategy:
(1) Add `M` to make obj non-negative;
(2) Apply constraints using the sigmoid approximation;
(3) Shift by `-M`.
Args:
obj: A `n_samples x b x q (x m')`-dim Tensor of objective values.
constraints: A list of callables, each mapping a Tensor of size `b x q x m`
to a Tensor of size `b x q`, where negative values imply feasibility.
This callable must support broadcasting. Only relevant for multi-
output models (`m` > 1).
samples: A `n_samples x b x q x m` Tensor of samples drawn from the posterior.
infeasible_cost: The infeasible value.
eta: The temperature parameter of the sigmoid function. Can be either a float
or a 1-dim tensor. In case of a float the same eta is used for every
constraint in constraints. In case of a tensor the length of the tensor
must match the number of provided constraints. The i-th constraint is
then estimated with the i-th eta value.
Returns:
A `n_samples x b x q (x m')`-dim tensor of feasibility-weighted objectives.
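Example:
>>> # illustrative sketch: feasibility-weight a (possibly negative) objective
>>> # given samples from a two-output model and one constraint on the second output
>>> samples = torch.randn(32, 1, 4, 2)
>>> obj = samples[..., 0]
>>> constraints = [lambda Z: Z[..., 1]]
>>> weighted = apply_constraints(obj, constraints, samples, infeasible_cost=10.0)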
"""
# obj has dimensions n_samples x b x q (x m')
obj = obj.add(infeasible_cost) # now it is nonnegative
obj = apply_constraints_nonnegative_soft(
obj=obj,
constraints=constraints,
samples=samples,
eta=eta,
)
return obj.add(-infeasible_cost)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Representations for different kinds of data."""
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, fields
from typing import Any
from torch import device as Device, dtype as Dtype, LongTensor, Size, Tensor
class BotorchContainer(ABC):
r"""Abstract base class for BoTorch's data containers.
A BotorchContainer represents a tensor, which should be the sole object
returned by its `__call__` method. Said tensor is expected to consist of
one or more "events" (e.g. data points or feature vectors), whose shape is
given by the required `event_shape` field.
Note: Once Python 3.10 becomes standard, this class should
be reworked to take advantage of dataclasses' `kw_only` flag.
:meta private:
"""
event_shape: Size
def __post_init__(self, validate_init: bool = True) -> None:
if validate_init:
self._validate()
@abstractmethod
def __call__(self) -> Tensor:
raise NotImplementedError
@abstractmethod
def __eq__(self, other: Any) -> bool:
raise NotImplementedError
@property
@abstractmethod
def shape(self) -> Size:
raise NotImplementedError
@property
@abstractmethod
def device(self) -> Device:
raise NotImplementedError
@property
@abstractmethod
def dtype(self) -> Dtype:
raise NotImplementedError
def _validate(self) -> None:
for field in fields(self):
if field.name == "event_shape":
return
raise AttributeError("Missing required field `event_shape`.")
@dataclass(eq=False)
class DenseContainer(BotorchContainer):
r"""Basic representation of data stored as a dense Tensor."""
values: Tensor
event_shape: Size
def __call__(self) -> Tensor:
"""Returns a dense tensor representation of the container's contents."""
return self.values
def __eq__(self, other: Any) -> bool:
return (
type(other) is type(self)
and self.shape == other.shape
and self.values.equal(other.values)
)
@property
def shape(self) -> Size:
return self.values.shape
@property
def device(self) -> Device:
return self.values.device
@property
def dtype(self) -> Dtype:
return self.values.dtype
def _validate(self) -> None:
super()._validate()
for a, b in zip(reversed(self.event_shape), reversed(self.values.shape)):
if a != b:
raise ValueError(
f"Shape of `values` {self.values.shape} incompatible with "
f"`event shape` {self.event_shape}."
)
@dataclass(eq=False)
class SliceContainer(BotorchContainer):
r"""Represent data points formed by concatenating (n-1)-dimensional slices
taken from the leading dimension of an n-dimensional source tensor."""
values: Tensor
indices: LongTensor
event_shape: Size
def __call__(self) -> Tensor:
flat = self.values.index_select(dim=0, index=self.indices.view(-1))
return flat.view(*self.indices.shape[:-1], -1, *self.values.shape[2:])
def __eq__(self, other: Any) -> bool:
return (
type(other) is type(self)
and self.values.equal(other.values)
and self.indices.equal(other.indices)
)
@property
def shape(self) -> Size:
return self.indices.shape[:-1] + self.event_shape
@property
def device(self) -> Device:
return self.values.device
@property
def dtype(self) -> Dtype:
return self.values.dtype
def _validate(self) -> None:
super()._validate()
values = self.values
indices = self.indices
assert indices.ndim > 1
assert (-1 < indices.min()) & (indices.max() < len(values))
event_shape = self.event_shape
_event_shape = (indices.shape[-1] * values.shape[1],) + values.shape[2:]
if event_shape != _event_shape:
raise ValueError(
f"Shapes of `values` {values.shape} and `indices` "
f"{indices.shape} incompatible with `event_shape` {event_shape}."
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from functools import lru_cache
from numbers import Number
from typing import Iterator, Optional, Tuple, Union
import torch
from torch import Tensor
@lru_cache(maxsize=None)
def get_constants(
values: Union[Number, Iterator[Number]],
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""Returns scalar-valued Tensors containing each of the given constants.
Used to expedite tensor operations involving scalar arithmetic. Note that
the returned Tensors should not be modified in-place."""
if isinstance(values, Number):
return torch.full((), values, dtype=dtype, device=device)
return tuple(torch.full((), val, dtype=dtype, device=device) for val in values)
def get_constants_like(
values: Union[Number, Iterator[Number]],
ref: Tensor,
) -> Union[Tensor, Iterator[Tensor]]:
return get_constants(values, device=ref.device, dtype=ref.dtype)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Discretization (rounding) functions for acquisition optimization.
References
.. [Daulton2022bopr]
S. Daulton, X. Wan, D. Eriksson, M. Balandat, M. A. Osborne, E. Bakshy.
Bayesian Optimization over Discrete and Mixed Spaces via Probabilistic
Reparameterization. Advances in Neural Information Processing Systems
35, 2022.
"""
from __future__ import annotations
import torch
from torch import Tensor
from torch.autograd import Function
from torch.nn.functional import one_hot
def approximate_round(X: Tensor, tau: float = 1e-3) -> Tensor:
r"""Diffentiable approximate rounding function.
This method is a piecewise approximation of a rounding function where
each piece is a hyperbolic tangent function.
Args:
X: The tensor to round to the nearest integer (element-wise).
tau: A temperature hyperparameter.
Returns:
The approximately rounded input tensor.
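Example:
>>> # illustrative: values are approximately rounded to the nearest integer
>>> X = torch.tensor([0.1, 0.9, 1.4])
>>> approximate_round(X)  # approximately tensor([0., 1., 1.])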
"""
offset = X.floor()
scaled_remainder = (X - offset - 0.5) / tau
rounding_component = (torch.tanh(scaled_remainder) + 1) / 2
return offset + rounding_component
class IdentitySTEFunction(Function):
"""Base class for functions using straight through gradient estimators.
This class approximates the gradient with the identity function.
"""
@staticmethod
def backward(ctx, grad_output: Tensor) -> Tensor:
r"""Use a straight-through estimator the gradient.
This uses the identity function.
Args:
grad_output: A tensor of gradients.
Returns:
The provided tensor.
"""
return grad_output
class RoundSTE(IdentitySTEFunction):
r"""Round the input tensor and use a straight-through gradient estimator.
[Daulton2022bopr]_ proposes using this in acquisition optimization.
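Example:
>>> # illustrative: gradients pass straight through the rounding operation
>>> X = torch.rand(3, requires_grad=True)
>>> X_rounded = RoundSTE.apply(X)
>>> X_rounded.sum().backward()  # X.grad is all ones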
"""
@staticmethod
def forward(ctx, X: Tensor) -> Tensor:
r"""Round the input tensor element-wise.
Args:
X: The tensor to be rounded.
Returns:
A tensor where each element is rounded to the nearest integer.
"""
return X.round()
class OneHotArgmaxSTE(IdentitySTEFunction):
r"""Discretize a continuous relaxation of a one-hot encoded categorical.
This returns a one-hot encoded categorical and uses a straight-through
gradient estimator via an identity function.
[Daulton2022bopr]_ proposes using this in acquisition optimization.
"""
@staticmethod
def forward(ctx, X: Tensor) -> Tensor:
r"""Discretize the input tensor.
This applies an argmax along the last dimension of the input tensor
and one-hot encodes the result.
Args:
X: The tensor to be rounded.
Returns:
The one-hot encoded argmax of `X` along its last dimension.
"""
return one_hot(X.argmax(dim=-1), num_classes=X.shape[-1]).to(X)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Representations for different kinds of datasets."""
from __future__ import annotations
import warnings
from itertools import count, repeat
from typing import Any, Dict, Hashable, Iterable, Optional, TypeVar, Union
import torch
from botorch.utils.containers import BotorchContainer, SliceContainer
from torch import long, ones, Tensor
T = TypeVar("T")
MaybeIterable = Union[T, Iterable[T]]
class SupervisedDataset:
r"""Base class for datasets consisting of labelled pairs `(X, Y)`
and an optional `Yvar` that stipulates observation variances so
that `Y[i] ~ N(f(X[i]), Yvar[i])`.
Example:
.. code-block:: python
X = torch.rand(16, 2)
Y = torch.rand(16, 1)
A = SupervisedDataset(X, Y)
B = SupervisedDataset(
DenseContainer(X, event_shape=X.shape[-1:]),
DenseContainer(Y, event_shape=Y.shape[-1:]),
)
assert A == B
"""
def __init__(
self,
X: Union[BotorchContainer, Tensor],
Y: Union[BotorchContainer, Tensor],
Yvar: Union[BotorchContainer, Tensor, None] = None,
validate_init: bool = True,
) -> None:
r"""Constructs a `SupervisedDataset`.
Args:
X: A `Tensor` or `BotorchContainer` representing the input features.
Y: A `Tensor` or `BotorchContainer` representing the outcomes.
Yvar: An optional `Tensor` or `BotorchContainer` representing
the observation noise.
validate_init: If `True`, validates the input shapes.
"""
self._X = X
self._Y = Y
self._Yvar = Yvar
if validate_init:
self._validate()
@property
def X(self) -> Tensor:
if isinstance(self._X, Tensor):
return self._X
return self._X()
@property
def Y(self) -> Tensor:
if isinstance(self._Y, Tensor):
return self._Y
return self._Y()
@property
def Yvar(self) -> Optional[Tensor]:
if self._Yvar is None or isinstance(self._Yvar, Tensor):
return self._Yvar
return self._Yvar()
def _validate(self) -> None:
shape_X = self.X.shape
if isinstance(self._X, BotorchContainer):
shape_X = shape_X[: len(shape_X) - len(self._X.event_shape)]
else:
shape_X = shape_X[:-1]
shape_Y = self.Y.shape
if isinstance(self._Y, BotorchContainer):
shape_Y = shape_Y[: len(shape_Y) - len(self._Y.event_shape)]
else:
shape_Y = shape_Y[:-1]
if shape_X != shape_Y:
raise ValueError("Batch dimensions of `X` and `Y` are incompatible.")
if self.Yvar is not None and self.Yvar.shape != self.Y.shape:
raise ValueError("Shapes of `Y` and `Yvar` are incompatible.")
@classmethod
def dict_from_iter(
cls,
X: MaybeIterable[Union[BotorchContainer, Tensor]],
Y: MaybeIterable[Union[BotorchContainer, Tensor]],
Yvar: Optional[MaybeIterable[Union[BotorchContainer, Tensor]]] = None,
*,
keys: Optional[Iterable[Hashable]] = None,
) -> Dict[Hashable, SupervisedDataset]:
r"""Returns a dictionary of `SupervisedDataset` from iterables."""
single_X = isinstance(X, (Tensor, BotorchContainer))
single_Y = isinstance(Y, (Tensor, BotorchContainer))
if single_X:
X = (X,) if single_Y else repeat(X)
if single_Y:
Y = (Y,) if single_X else repeat(Y)
Yvar = repeat(Yvar) if isinstance(Yvar, (Tensor, BotorchContainer)) else Yvar
# Pass in Yvar only if it is not None.
iterables = (X, Y) if Yvar is None else (X, Y, Yvar)
return {
elements[0]: cls(*elements[1:])
for elements in zip(keys or count(), *iterables)
}
def __eq__(self, other: Any) -> bool:
return (
type(other) is type(self)
and torch.equal(self.X, other.X)
and torch.equal(self.Y, other.Y)
and (
other.Yvar is None
if self.Yvar is None
else torch.equal(self.Yvar, other.Yvar)
)
)
class FixedNoiseDataset(SupervisedDataset):
r"""A SupervisedDataset with an additional field `Yvar` that stipulates
observation variances so that `Y[i] ~ N(f(X[i]), Yvar[i])`.
NOTE: This is deprecated. Use `SupervisedDataset` instead.
"""
def __init__(
self,
X: Union[BotorchContainer, Tensor],
Y: Union[BotorchContainer, Tensor],
Yvar: Union[BotorchContainer, Tensor],
validate_init: bool = True,
) -> None:
r"""Initialize a `FixedNoiseDataset` -- deprecated!"""
warnings.warn(
"`FixedNoiseDataset` is deprecated. Use `SupervisedDataset` instead.",
DeprecationWarning,
)
super().__init__(X=X, Y=Y, Yvar=Yvar, validate_init=validate_init)
class RankingDataset(SupervisedDataset):
r"""A SupervisedDataset whose labelled pairs `(x, y)` consist of m-ary combinations
`x ∈ Z^{m}` of elements from a ground set `Z = (z_1, ...)` and ranking vectors
`y ∈ {0, ..., m - 1}^{m}` with properties:
a) Ranks start at zero, i.e. min(y) = 0.
b) Sorted ranks are contiguous unless one or more ties are present.
c) `k` ranks are skipped after a `k`-way tie.
Example:
.. code-block:: python
X = SliceContainer(
values=torch.rand(16, 2),
indices=torch.stack([torch.randperm(16)[:3] for _ in range(8)]),
event_shape=torch.Size([3 * 2]),
)
Y = DenseContainer(
torch.stack([torch.randperm(3) for _ in range(8)]),
event_shape=torch.Size([3])
)
dataset = RankingDataset(X, Y)
"""
def __init__(
self,
X: SliceContainer,
Y: Union[BotorchContainer, Tensor],
validate_init: bool = True,
) -> None:
r"""Construct a `RankingDataset`.
Args:
X: A `SliceContainer` representing the input features being ranked.
Y: A `Tensor` or `BotorchContainer` representing the rankings.
validate_init: If `True`, validates the input shapes.
"""
super().__init__(X=X, Y=Y, Yvar=None, validate_init=validate_init)
def _validate(self) -> None:
super()._validate()
Y = self.Y
arity = self._X.indices.shape[-1]
if Y.min() < 0 or Y.max() >= arity:
raise ValueError("Invalid ranking(s): out-of-bounds ranks detected.")
# Ensure that rankings are well-defined
Y_sort = Y.sort(descending=False, dim=-1).values
y_incr = ones([], dtype=long)
y_prev = None
for i, y in enumerate(Y_sort.unbind(dim=-1)):
if i == 0:
if (y != 0).any():
raise ValueError("Invalid ranking(s): missing zero-th rank.")
y_prev = y
continue
y_diff = y - y_prev
y_prev = y
# Either a tie or next ranking when accounting for previous ties
if not ((y_diff == 0) | (y_diff == y_incr)).all():
raise ValueError("Invalid ranking(s): ranks not skipped after ties.")
# Same as: torch.where(y_diff == 0, y_incr + 1, 1)
y_incr = y_incr - y_diff + 1
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.utils.constraints import get_outcome_constraint_transforms
from botorch.utils.feasible_volume import estimate_feasible_volume
from botorch.utils.objective import apply_constraints, get_objective_weights_transform
from botorch.utils.rounding import approximate_round
from botorch.utils.sampling import (
batched_multinomial,
draw_sobol_normal_samples,
draw_sobol_samples,
manual_seed,
)
from botorch.utils.transforms import standardize, t_batch_mode_transform
__all__ = [
"apply_constraints",
"approximate_round",
"batched_multinomial",
"draw_sobol_normal_samples",
"draw_sobol_samples",
"estimate_feasible_volume",
"get_objective_weights_transform",
"get_outcome_constraint_transforms",
"manual_seed",
"standardize",
"t_batch_mode_transform",
]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
NoneType = type(None) # stop gap for the return of NoneType in 3.10
class _DefaultType(type):
r"""
Private class whose sole instance `DEFAULT` serves as a special indicator
representing that a default value should be assigned to an argument.
Typically used in cases where `None` is an allowed argument.
"""
DEFAULT = _DefaultType("DEFAULT", (), {})
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for optimization.
"""
from __future__ import annotations
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, Iterable, NamedTuple, Optional, Union
from torch import device as Device, dtype as Dtype, Tensor
from torch.nn import Module
class TensorCheckpoint(NamedTuple):
values: Tensor
device: Optional[Device] = None
dtype: Optional[Dtype] = None
@contextmanager
def delattr_ctx(
instance: object, *attrs: str, enforce_hasattr: bool = False
) -> Generator[None, None, None]:
r"""Contextmanager for temporarily deleting attributes."""
try:
cache = {}
for key in attrs:
if hasattr(instance, key):
cache[key] = getattr(instance, key)
delattr(instance, key)
elif enforce_hasattr:
raise ValueError(
f"Attribute {key} missing from {type(instance)} instance."
)
yield
finally:
for key, cached_val in cache.items():
setattr(instance, key, cached_val)
@contextmanager
def requires_grad_ctx(
module: Module, assignments: Dict[str, bool]
) -> Generator[None, None, None]:
r"""Contextmanager for temporarily setting the requires_grad field of a module's
parameters."""
try:
cache = {}
for name, mode in assignments.items():
parameter = module.get_parameter(name)
cache[name] = parameter.requires_grad
parameter.requires_grad_(mode)
yield
finally:
for name, mode in cache.items():
module.get_parameter(name).requires_grad_(mode)
@contextmanager
def parameter_rollback_ctx(
parameters: Dict[str, Tensor],
checkpoint: Optional[Dict[str, TensorCheckpoint]] = None,
**tkwargs: Any,
) -> Generator[Dict[str, TensorCheckpoint], None, None]:
r"""Contextmanager that exits by rolling back a module's state_dict.
Args:
module: Module instance.
name_filter: Optional Boolean function used to filter items by name.
checkpoint: Optional cache of values and tensor metadata specifying the rollback
state for the module (or some subset thereof).
**tkwargs: Keyword arguments passed to `torch.Tensor.to` when copying data from
each tensor in `module.state_dict()` to the internally created checkpoint.
Only adhered to when the `checkpoint` argument is None.
Yields:
A dictionary of TensorCheckpoints for the module's state_dict. Any in-places
changes to the checkpoint will be observed at rollback time. If the checkpoint
is cleared, no rollback will occur.
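Example:
>>> # illustrative sketch: temporarily zero out a module's parameters
>>> module = torch.nn.Linear(2, 1)
>>> params = dict(module.named_parameters())
>>> with parameter_rollback_ctx(params) as ckpt:
...     for p in params.values():
...         p.data.zero_()
>>> # the original parameter values are restored upon exiting the context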
"""
# Create copies of the original values
if checkpoint is None:
checkpoint = {
name: TensorCheckpoint(
values=param.detach().to(**tkwargs).clone(),
device=param.device,
dtype=param.dtype,
)
for name, param in parameters.items()
}
try: # yield the checkpoint dictionary to the user
yield checkpoint
finally: # restore original values of tracked parameters
if checkpoint:
for name, param in parameters.items():
if name in checkpoint:
values, device, dtype = checkpoint[name]
param.data.copy_(values.to(device=device, dtype=dtype))
@contextmanager
def module_rollback_ctx(
module: Module,
name_filter: Optional[Callable[[str], bool]] = None,
checkpoint: Optional[Dict[str, TensorCheckpoint]] = None,
**tkwargs: Any,
) -> Generator[Dict[str, TensorCheckpoint], None, None]:
r"""Contextmanager that exits by rolling back a module's state_dict.
Args:
module: Module instance.
name_filter: Optional Boolean function used to filter items by name.
checkpoint: Optional cache of values and tensor metadata specifying the rollback
state for the module (or some subset thereof).
**tkwargs: Keyword arguments passed to `torch.Tensor.to` when copying data from
each tensor in `module.state_dict()` to the internally created checkpoint.
Only adhered to when the `checkpoint` argument is None.
Yields:
A dictionary of TensorCheckpoints for the module's state_dict. Any in-place
changes to the checkpoint will be observed at rollback time. If the checkpoint
is cleared, no rollback will occur.
"""
# Create copies of the original values
if checkpoint is None:
checkpoint = {
name: TensorCheckpoint(
values=values.detach().to(**tkwargs).clone(),
device=values.device,
dtype=values.dtype,
)
for name, values in module.state_dict().items()
if name_filter is None or name_filter(name)
}
try: # yield the checkpoint dictionary to the user
yield checkpoint
finally: # restore original values of tracked parameters
if checkpoint:
state_dict = module.state_dict()
for key, (values, device, dtype) in checkpoint.items():
tnsr = state_dict.get(key)
if tnsr is None:
state_dict[key] = values.to(device=device, dtype=dtype)
else:
tnsr[...] = values.to(device=device, dtype=dtype)
module.load_state_dict(state_dict)
@contextmanager
def zero_grad_ctx(
parameters: Union[Dict[str, Tensor], Iterable[Tensor]],
zero_on_enter: bool = True,
zero_on_exit: bool = False,
) -> Generator[None, None, None]:
def zero_() -> None:
for param in (
parameters.values() if isinstance(parameters, dict) else parameters
):
if param.grad is not None:
param.grad.zero_()
if zero_on_enter:
zero_()
try:
yield
finally:
if zero_on_exit:
zero_()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for MC and qMC sampling.
References
.. [Trikalinos2014polytope]
T. A. Trikalinos and G. van Valkenhoef. Efficient sampling from uniform
density n-polytopes. Technical report, Brown University, 2014.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, Generator, Iterable, List, Optional, Tuple, TYPE_CHECKING, Union
import numpy as np
import scipy
import torch
from botorch.exceptions.errors import BotorchError
from botorch.sampling.qmc import NormalQMCEngine
from botorch.utils.transforms import unnormalize
from scipy.spatial import Delaunay, HalfspaceIntersection
from torch import LongTensor, Tensor
from torch.quasirandom import SobolEngine
if TYPE_CHECKING:
from botorch.sampling.pathwise.paths import SamplePath # pragma: no cover
@contextmanager
def manual_seed(seed: Optional[int] = None) -> Generator[None, None, None]:
r"""Contextmanager for manual setting the torch.random seed.
Args:
seed: The seed to set the random number generator to.
Returns:
Generator
Example:
>>> with manual_seed(1234):
>>> X = torch.rand(3)
"""
old_state = torch.random.get_rng_state()
try:
if seed is not None:
torch.random.manual_seed(seed)
yield
finally:
if seed is not None:
torch.random.set_rng_state(old_state)
def draw_sobol_samples(
bounds: Tensor,
n: int,
q: int,
batch_shape: Optional[Union[Iterable[int], torch.Size]] = None,
seed: Optional[int] = None,
) -> Tensor:
r"""Draw qMC samples from the box defined by bounds.
Args:
bounds: A `2 x d` dimensional tensor specifying box constraints on a
`d`-dimensional space, where bounds[0, :] and bounds[1, :] correspond
to lower and upper bounds, respectively.
n: The number of (q-batch) samples. As a best practice, use powers of 2.
q: The size of each q-batch.
batch_shape: The batch shape of the samples. If given, returns samples
of shape `n x batch_shape x q x d`, where each batch is an
`n x q x d`-dim tensor of qMC samples.
seed: The seed used for initializing Owen scrambling. If None (default),
use a random seed.
Returns:
A `n x batch_shape x q x d`-dim tensor of qMC samples from the box
defined by bounds.
Example:
>>> bounds = torch.stack([torch.zeros(3), torch.ones(3)])
>>> samples = draw_sobol_samples(bounds, 16, 2)
"""
batch_shape = batch_shape or torch.Size()
batch_size = int(torch.prod(torch.tensor(batch_shape)))
d = bounds.shape[-1]
lower = bounds[0]
rng = bounds[1] - bounds[0]
sobol_engine = SobolEngine(q * d, scramble=True, seed=seed)
samples_raw = sobol_engine.draw(batch_size * n, dtype=lower.dtype)
samples_raw = samples_raw.view(*batch_shape, n, q, d).to(device=lower.device)
if batch_shape != torch.Size():
samples_raw = samples_raw.permute(-3, *range(len(batch_shape)), -2, -1)
return lower + rng * samples_raw
def draw_sobol_normal_samples(
d: int,
n: int,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
seed: Optional[int] = None,
) -> Tensor:
r"""Draw qMC samples from a multi-variate standard normal N(0, I_d).
A primary use-case for this functionality is to compute a QMC average
of f(X) over X, where each element of X is drawn from N(0, 1).
Args:
d: The dimension of the normal distribution.
n: The number of samples to return. As a best practice, use powers of 2.
device: The torch device.
dtype: The torch dtype.
seed: The seed used for initializing Owen scrambling. If None (default),
use a random seed.
Returns:
A tensor of qMC standard normal samples with dimension `n x d` with device
and dtype specified by the input.
Example:
>>> samples = draw_sobol_normal_samples(2, 16)
"""
normal_qmc_engine = NormalQMCEngine(d=d, seed=seed, inv_transform=True)
samples = normal_qmc_engine.draw(n, dtype=torch.float if dtype is None else dtype)
return samples.to(device=device)
def sample_hypersphere(
d: int,
n: int = 1,
qmc: bool = False,
seed: Optional[int] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
) -> Tensor:
r"""Sample uniformly from a unit d-sphere.
Args:
d: The dimension of the hypersphere.
n: The number of samples to return.
qmc: If True, use QMC Sobol sampling (instead of i.i.d. uniform).
seed: If provided, use as a seed for the RNG.
device: The torch device.
dtype: The torch dtype.
Returns:
An `n x d` tensor of uniform samples from the d-hypersphere.
Example:
>>> sample_hypersphere(d=5, n=10)
"""
dtype = torch.float if dtype is None else dtype
if d == 1:
rnd = torch.randint(0, 2, (n, 1), device=device, dtype=dtype)
return 2 * rnd - 1
if qmc:
rnd = draw_sobol_normal_samples(d=d, n=n, device=device, dtype=dtype, seed=seed)
else:
with manual_seed(seed=seed):
rnd = torch.randn(n, d, dtype=dtype)
samples = rnd / torch.linalg.norm(rnd, dim=-1, keepdim=True)
if device is not None:
samples = samples.to(device)
return samples
def sample_simplex(
d: int,
n: int = 1,
qmc: bool = False,
seed: Optional[int] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
) -> Tensor:
r"""Sample uniformly from a d-simplex.
Args:
d: The dimension of the simplex.
n: The number of samples to return.
qmc: If True, use QMC Sobol sampling (instead of i.i.d. uniform).
seed: If provided, use as a seed for the RNG.
device: The torch device.
dtype: The torch dtype.
Returns:
An `n x d` tensor of uniform samples from the d-simplex.
Example:
>>> sample_simplex(d=3, n=10)
"""
dtype = torch.float if dtype is None else dtype
if d == 1:
return torch.ones(n, 1, device=device, dtype=dtype)
if qmc:
sobol_engine = SobolEngine(d - 1, scramble=True, seed=seed)
rnd = sobol_engine.draw(n, dtype=dtype)
else:
with manual_seed(seed=seed):
rnd = torch.rand(n, d - 1, dtype=dtype)
srnd, _ = torch.sort(rnd, dim=-1)
zeros = torch.zeros(n, 1, dtype=dtype)
ones = torch.ones(n, 1, dtype=dtype)
srnd = torch.cat([zeros, srnd, ones], dim=-1)
if device is not None:
srnd = srnd.to(device)
return srnd[..., 1:] - srnd[..., :-1]
def sample_polytope(
A: Tensor,
b: Tensor,
x0: Tensor,
n: int = 10000,
n0: int = 100,
seed: Optional[int] = None,
) -> Tensor:
r"""
Hit-and-run sampler for uniformly sampling points from a polytope,
described via inequality constraints A*x<=b.
Args:
A: A Tensor of inequality constraint coefficients,
so that all samples satisfy A @ x <= b.
b: A Tensor of inequality constraint right-hand sides,
so that all samples satisfy A @ x <= b.
x0: A `d`-dim Tensor representing a starting point of the chain
satisfying the constraints.
n: The number of resulting samples kept in the output.
n0: The number of burn-in samples. The chain will produce
n+n0 samples but the first n0 samples are not saved.
seed: The seed for the sampler. If omitted, use a random seed.
Returns:
A `n x d`-dim Tensor containing the resulting samples.
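Example:
>>> # illustrative sketch: sample from the 2-d unit box via A @ x <= b
>>> A = torch.cat([-torch.eye(2), torch.eye(2)])
>>> b = torch.tensor([[0.0], [0.0], [1.0], [1.0]])
>>> x0 = torch.full((2, 1), 0.5)
>>> X = sample_polytope(A, b, x0, n=64, n0=10)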
"""
n_tot = n + n0
seed = seed if seed is not None else torch.randint(0, 1000000, (1,)).item()
with manual_seed(seed=seed):
rands = torch.rand(n_tot, dtype=A.dtype, device=A.device)
# pre-sample samples from hypersphere
d = x0.size(0)
# uniform samples from unit ball in d dims
Rs = sample_hypersphere(
d=d, n=n_tot, dtype=A.dtype, device=A.device, seed=seed
).unsqueeze(-1)
# compute matprods in batch
ARs = (A @ Rs).squeeze(-1)
out = torch.empty(n, A.size(-1), dtype=A.dtype, device=A.device)
x = x0.clone()
for i, (ar, r, rnd) in enumerate(zip(ARs, Rs, rands)):
# given x, the next point in the chain is x+alpha*r
# it also satisfies A(x+alpha*r)<=b which implies A*alpha*r<=b-Ax
# so alpha<=(b-Ax)/ar for ar>0, and alpha>=(b-Ax)/ar for ar<0.
# b - A @ x is always >= 0, clamping for numerical tolerances
w = (b - A @ x).squeeze().clamp(min=0.0) / ar
pos = w >= 0
alpha_max = w[pos].min()
# important to include equality here in cases x is at the boundary
# of the polytope
neg = w <= 0
alpha_min = w[neg].max()
# alpha~Unif[alpha_min, alpha_max]
alpha = alpha_min + rnd * (alpha_max - alpha_min)
x = x + alpha * r
if i >= n0: # save samples after burn-in period
out[i - n0] = x.squeeze()
return out
def batched_multinomial(
weights: Tensor,
num_samples: int,
replacement: bool = False,
generator: Optional[torch.Generator] = None,
out: Optional[Tensor] = None,
) -> LongTensor:
r"""Sample from multinomial with an arbitrary number of batch dimensions.
Args:
weights: A `batch_shape x num_categories` tensor of weights. For each batch
index `i, j, ...`, this functions samples from a multinomial with `input`
`weights[i, j, ..., :]`. Note that the weights need not sum to one, but must
be non-negative, finite and have a non-zero sum.
num_samples: The number of samples to draw for each batch index. Must be smaller
than `num_categories` if `replacement=False`.
replacement: If True, samples are drawn with replacement.
generator: A pseudorandom number generator for sampling.
out: The output tensor (optional). If provided, must be of size
`batch_shape x num_samples`.
Returns:
A `batch_shape x num_samples` tensor of samples.
This is a thin wrapper around `torch.multinomial` that allows weight (`input`)
tensors with an arbitrary number of batch dimensions (`torch.multinomial` only
allows a single batch dimension). The calling signature is the same as for
`torch.multinomial`.
Example:
>>> weights = torch.rand(2, 3, 10)
>>> samples = batched_multinomial(weights, 4) # shape is 2 x 3 x 4
"""
batch_shape, n_categories = weights.shape[:-1], weights.size(-1)
flat_samples = torch.multinomial(
input=weights.view(-1, n_categories),
num_samples=num_samples,
replacement=replacement,
generator=generator,
out=None if out is None else out.view(-1, num_samples),
)
return flat_samples.view(*batch_shape, num_samples)
def _convert_bounds_to_inequality_constraints(bounds: Tensor) -> Tuple[Tensor, Tensor]:
r"""Convert bounds into inequality constraints of the form Ax <= b.
Args:
bounds: A `2 x d`-dim tensor of bounds
Returns:
A two-element tuple containing
- A: A `2d x d`-dim tensor of coefficients
- b: A `2d x 1`-dim tensor containing the right hand side
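Example:
>>> # illustrative: infinite bounds do not produce constraints
>>> bounds = torch.tensor([[0.0, float("-inf")], [1.0, 2.0]])
>>> A, b = _convert_bounds_to_inequality_constraints(bounds)
>>> A.shape, b.shape  # (torch.Size([3, 2]), torch.Size([3, 1]))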
"""
d = bounds.shape[-1]
eye = torch.eye(d, dtype=bounds.dtype, device=bounds.device)
lower, upper = bounds
lower_finite, upper_finite = bounds.isfinite()
A = torch.cat([-eye[lower_finite], eye[upper_finite]], dim=0)
b = torch.cat([-lower[lower_finite], upper[upper_finite]], dim=0).unsqueeze(-1)
return A, b
def find_interior_point(
A: np.ndarray,
b: np.ndarray,
A_eq: Optional[np.ndarray] = None,
b_eq: Optional[np.ndarray] = None,
) -> np.ndarray:
r"""Find an interior point of a polytope via linear programming.
Args:
A: A `n_ineq x d`-dim numpy array containing the coefficients of the
constraint inequalities.
b: A `n_ineq x 1`-dim numpy array containing the right hand sides of
the constraint inequalities.
A_eq: A `n_eq x d`-dim numpy array containing the coefficients of the
constraint equalities.
b_eq: A `n_eq x 1`-dim numpy array containing the right hand sides of
the constraint equalities.
Returns:
A `d`-dim numpy array containing an interior point of the polytope.
This function will raise a ValueError if there is no such point.
This method solves the following Linear Program:
min -s subject to A @ x <= b - 2 * s, s >= 0, A_eq @ x = b_eq
In case the polytope is unbounded, then it will also constrain the slack
variable `s` to `s<=1`.
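Example:
>>> # illustrative sketch: an interior point of the standard 2-d simplex,
>>> # described by x >= 0 and x_1 + x_2 <= 1
>>> A = np.array([[-1.0, 0.0], [0.0, -1.0], [1.0, 1.0]])
>>> b = np.array([[0.0], [0.0], [1.0]])
>>> x = find_interior_point(A, b)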
"""
# augment inequality constraints: A @ (x, s) <= b
d = A.shape[-1]
ncon = A.shape[-2] + 1
c = np.zeros(d + 1)
c[-1] = -1
b_ub = np.zeros(ncon)
b_ub[:-1] = b.reshape(-1)
A_ub = np.zeros((ncon, d + 1))
A_ub[:-1, :-1] = A
A_ub[:-1, -1] = 2.0
A_ub[-1, -1] = -1.0
result = scipy.optimize.linprog(
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=(None, None),
method="highs",
)
if result.status == 3:
# problem is unbounded - to find a bounded solution we constrain the
# slack variable `s` to `s <= 1.0`.
A_s = np.concatenate([np.zeros((1, d)), np.ones((1, 1))], axis=-1)
A_ub = np.concatenate([A_ub, A_s], axis=0)
b_ub = np.concatenate([b_ub, np.ones(1)], axis=-1)
result = scipy.optimize.linprog(
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=(None, None),
method="highs",
)
if result.status == 2:
raise ValueError(
"No feasible point found. Constraint polytope appears empty. "
+ "Check your constraints."
)
elif result.status > 0:
raise ValueError(
"Problem checking constraint specification. "
+ "linprog status: {}".format(result.message)
)
# the x in the result is really (x, s)
return result.x[:-1]
class PolytopeSampler(ABC):
r"""
Base class for samplers that sample points from a polytope.
:meta private:
"""
def __init__(
self,
inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None,
equality_constraints: Optional[Tuple[Tensor, Tensor]] = None,
bounds: Optional[Tensor] = None,
interior_point: Optional[Tensor] = None,
) -> None:
r"""
Args:
inequality_constraints: Tensors `(A, b)` describing inequality
constraints `A @ x <= b`, where `A` is a `n_ineq_con x d`-dim
Tensor and `b` is a `n_ineq_con x 1`-dim Tensor, with `n_ineq_con`
the number of inequalities and `d` the dimension of the sample space.
equality_constraints: Tensors `(C, d)` describing the equality constraints
`C @ x = d`, where `C` is a `n_eq_con x d`-dim Tensor and `d` is a
`n_eq_con x 1`-dim Tensor with `n_eq_con` the number of equalities.
bounds: A `2 x d`-dim tensor of box bounds, where `inf` (`-inf`) means
that the respective dimension is unbounded above (below).
interior_point: A `d x 1`-dim Tensor representing a point in the
(relative) interior of the polytope. If omitted, determined
automatically by solving a Linear Program.
"""
if inequality_constraints is None:
if bounds is None:
raise BotorchError(
"PolytopeSampler requires either inequality constraints or bounds."
)
A = torch.empty(
0, bounds.shape[-1], dtype=bounds.dtype, device=bounds.device
)
b = torch.empty(0, 1, dtype=bounds.dtype, device=bounds.device)
else:
A, b = inequality_constraints
if bounds is not None:
# add inequality constraints for bounds
# TODO: make sure there are no duplicate constraints
A2, b2 = _convert_bounds_to_inequality_constraints(bounds=bounds)
A = torch.cat([A, A2], dim=0)
b = torch.cat([b, b2], dim=0)
self.A = A
self.b = b
self.equality_constraints = equality_constraints
if equality_constraints is not None:
self.C, self.d = equality_constraints
U, S, Vh = torch.linalg.svd(self.C)
r = torch.nonzero(S).size(0) # rank of matrix C
self.nullC = Vh[r:, :].transpose(-1, -2) # orthonormal null space of C,
# satisfying C @ nullC = 0 and nullC.T @ nullC = I.
# Using the change of variables x = x0 + nullC @ y, we sample y subject to
# A @ nullC @ y <= b - A @ x0;
# the linear equality constraint is automatically satisfied as x0 satisfies it.
else:
self.C = None
self.d = None
self.nullC = torch.eye(
self.A.size(-1), dtype=self.A.dtype, device=self.A.device
)
self.new_A = self.A @ self.nullC # doesn't depend on the initial point
# initial point for the original, not transformed, problem
if interior_point is not None:
if self.feasible(interior_point):
self.x0 = interior_point
else:
raise ValueError("The given input point is not feasible.")
else:
self.x0 = self.find_interior_point()
def feasible(self, x: Tensor) -> bool:
r"""Check whether a point is contained in the polytope.
Args:
x: A `d x 1`-dim Tensor.
Returns:
True if `x` is contained inside the polytope (incl. its boundary),
False otherwise.
"""
ineq = (self.A @ x - self.b <= 0).all()
if self.equality_constraints is not None:
eq = (self.C @ x - self.d == 0).all()
return ineq & eq
return ineq
def find_interior_point(self) -> Tensor:
r"""Find an interior point of the polytope.
Returns:
A `d x 1`-dim Tensor representing a point contained in the polytope.
This function will raise a ValueError if there is no such point.
"""
if self.equality_constraints:
# equality constraints: A_eq * (x, s) = b_eq
A_eq = np.zeros((self.C.size(0), self.C.size(-1) + 1))
A_eq[:, :-1] = self.C.cpu().numpy()
b_eq = self.d.cpu().numpy()
else:
A_eq = None
b_eq = None
x0 = find_interior_point(
A=self.A.cpu().numpy(), b=self.b.cpu().numpy(), A_eq=A_eq, b_eq=b_eq
)
return torch.from_numpy(x0).to(self.A).unsqueeze(-1)
# -------- Abstract methods to be implemented by subclasses -------- #
@abstractmethod
def draw(self, n: int = 1, seed: Optional[int] = None) -> Tensor:
r"""Draw samples from the polytope.
Args:
n: The number of samples.
seed: The random seed.
Returns:
A `n x d` Tensor of samples from the polytope.
"""
pass # pragma: no cover
class HitAndRunPolytopeSampler(PolytopeSampler):
r"""A sampler for sampling from a polyope using a hit-and-run algorithm."""
def __init__(
self,
inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None,
equality_constraints: Optional[Tuple[Tensor, Tensor]] = None,
bounds: Optional[Tensor] = None,
interior_point: Optional[Tensor] = None,
n_burnin: int = 0,
) -> None:
r"""A sampler for sampling from a polyope using a hit-and-run algorithm.
Args:
inequality_constraints: Tensors `(A, b)` describing inequality
constraints `A @ x <= b`, where `A` is a `n_ineq_con x d`-dim
Tensor and `b` is a `n_ineq_con x 1`-dim Tensor, with `n_ineq_con`
the number of inequalities and `d` the dimension of the sample space.
equality_constraints: Tensors `(C, d)` describing the equality constraints
`C @ x = d`, where `C` is a `n_eq_con x d`-dim Tensor and `d` is a
`n_eq_con x 1`-dim Tensor with `n_eq_con` the number of equalities.
bounds: A `2 x d`-dim tensor of box bounds, where `inf` (`-inf`) means
that the respective dimension is unbounded from above (below).
interior_point: A `d x 1`-dim Tensor representing a point in the
(relative) interior of the polytope. If omitted, determined
automatically by solving a Linear Program.
n_burnin: The number of burn-in samples.
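Example:
>>> # illustrative sketch: sample from the unit box intersected with the
>>> # halfspace where the coordinates sum to at most 1.5
>>> bounds = torch.stack([torch.zeros(3), torch.ones(3)])
>>> A = torch.ones(1, 3)
>>> b = torch.tensor([[1.5]])
>>> sampler = HitAndRunPolytopeSampler(
...     inequality_constraints=(A, b), bounds=bounds, n_burnin=100
... )
>>> samples = sampler.draw(n=10, seed=0)  # 10 x 3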
"""
super().__init__(
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
bounds=bounds,
interior_point=interior_point,
)
self.n_burnin = n_burnin
def draw(self, n: int = 1, seed: Optional[int] = None) -> Tensor:
r"""Draw samples from the polytope.
Args:
n: The number of samples.
seed: The random seed.
Returns:
A `n x d` Tensor of samples from the polytope.
"""
transformed_samples = sample_polytope(
# run this on the cpu
A=self.new_A.cpu(),
b=(self.b - self.A @ self.x0).cpu(),
x0=torch.zeros((self.nullC.size(1), 1), dtype=self.A.dtype),
n=n,
n0=self.n_burnin,
seed=seed,
).to(self.b)
init_shift = self.x0.transpose(-1, -2)
samples = init_shift + transformed_samples @ self.nullC.transpose(-1, -2)
# keep the last element of the resulting chain as
# the beginning of the next chain
self.x0 = samples[-1].reshape(-1, 1)
# reset counter so there is no burn-in for subsequent samples
self.n_burnin = 0
return samples
class DelaunayPolytopeSampler(PolytopeSampler):
r"""A polytope sampler using Delaunay triangulation.
This sampler first enumerates the vertices of the constraint polytope and
then uses a Delaunay triangulation to tessellate its convex hull.
The sampling happens in two stages:
1. First, we sample from the set of hypertriangles generated by the
Delaunay triangulation (i.e. which hyper-triangle to draw the sample
from) with probabilities proportional to the triangle volumes.
2. Then, we sample uniformly from the chosen hypertriangle by sampling
uniformly from the unit simplex of the appropriate dimension, and
then computing the convex combination of the vertices of the
hypertriangle according to that draw from the simplex.
The best reference (not exactly the same, but functionally equivalent) is
[Trikalinos2014polytope]_. A simple R implementation is available at
https://github.com/gertvv/tesselample.
"""
def __init__(
self,
inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None,
equality_constraints: Optional[Tuple[Tensor, Tensor]] = None,
bounds: Optional[Tensor] = None,
interior_point: Optional[Tensor] = None,
) -> None:
r"""Initialize DelaunayPolytopeSampler.
Args:
inequality_constraints: Tensors `(A, b)` describing inequality
constraints `A @ x <= b`, where `A` is a `n_ineq_con x d`-dim
Tensor and `b` is a `n_ineq_con x 1`-dim Tensor, with `n_ineq_con`
the number of inequalities and `d` the dimension of the sample space.
equality_constraints: Tensors `(C, d)` describing the equality constraints
`C @ x = d`, where `C` is a `n_eq_con x d`-dim Tensor and `d` is a
`n_eq_con x 1`-dim Tensor with `n_eq_con` the number of equalities.
bounds: A `2 x d`-dim tensor of box bounds, where `inf` (`-inf`) means
that the respective dimension is unbounded from above (below).
interior_point: A `d x 1`-dim Tensor representing a point in the
(relative) interior of the polytope. If omitted, determined
automatically by solving a Linear Program.
Warning: The vertex enumeration performed in this algorithm can become
extremely costly if there are a large number of inequalities. Similarly,
the triangulation can get very expensive in high dimensions. Only use
this algorithm for moderate dimensions / moderately complex constraint sets.
An alternative is the `HitAndRunPolytopeSampler`.
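Example:
>>> # illustrative sketch: uniform samples from the 2-d unit box
>>> bounds = torch.stack([torch.zeros(2), torch.ones(2)])
>>> sampler = DelaunayPolytopeSampler(bounds=bounds)
>>> samples = sampler.draw(n=5, seed=0)  # 5 x 2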
"""
super().__init__(
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
bounds=bounds,
interior_point=interior_point,
)
# shift coordinate system to be anchored at x0
new_b = self.b - self.A @ self.x0
if self.new_A.shape[-1] < 2:
# if the polytope is in dim 1 (i.e. a line segment) Qhull won't work
tshlds = new_b / self.new_A
neg = self.new_A < 0
self.y_min = tshlds[neg].max()
self.y_max = tshlds[~neg].min()
self.dim = 1
else:
# Qhull expects inputs of the form A @ x + b <= 0, so we need to negate here
halfspaces = torch.cat([self.new_A, -new_b], dim=-1).cpu().numpy()
vertices = HalfspaceIntersection(
halfspaces=halfspaces, interior_point=np.zeros(self.new_A.shape[-1])
).intersections
self.dim = vertices.shape[-1]
try:
delaunay = Delaunay(vertices)
except ValueError as e:
if "Points cannot contain NaN" in str(e):
raise ValueError("Polytope is unbounded.")
raise e # pragma: no cover
polytopes = torch.from_numpy(
np.array([delaunay.points[s] for s in delaunay.simplices]),
).to(self.A)
volumes = torch.stack([torch.det(p[1:] - p[0]).abs() for p in polytopes])
self._polytopes = polytopes
self._p = volumes / volumes.sum()
def draw(self, n: int = 1, seed: Optional[int] = None) -> Tensor:
r"""Draw samples from the polytope.
Args:
n: The number of samples.
seed: The random seed.
Returns:
A `n x d` Tensor of samples from the polytope.
"""
if self.dim == 1:
with manual_seed(seed):
e = torch.rand(n, 1, device=self.new_A.device, dtype=self.new_A.dtype)
transformed_samples = self.y_min + (self.y_max - self.y_min) * e
else:
if seed is None:
generator = None
else:
generator = torch.Generator(device=self.A.device)
generator.manual_seed(seed)
index_rvs = torch.multinomial(
self._p,
num_samples=n,
replacement=True,
generator=generator,
)
simplex_rvs = sample_simplex(
d=self.dim + 1, n=n, seed=seed, device=self.A.device, dtype=self.A.dtype
)
transformed_samples = torch.stack(
[rv @ self._polytopes[idx] for rv, idx in zip(simplex_rvs, index_rvs)]
)
init_shift = self.x0.transpose(-1, -2)
samples = init_shift + transformed_samples @ self.nullC.transpose(-1, -2)
return samples
def normalize_linear_constraints(
bounds: Tensor, constraints: List[Tuple[Tensor, Tensor, float]]
) -> List[Tuple[Tensor, Tensor, float]]:
r"""Normalize linear constraints to the unit cube.
Args:
bounds: A `2 x d`-dim tensor containing the box bounds.
constraints: A list of tuples (indices, coefficients, rhs), with each
tuple encoding an (in)equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs` or
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
Returns:
The same constraints, re-expressed with respect to the unit cube.
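Example:
>>> # illustrative: re-express X[0] + X[1] >= 2 over the box [0, 2] x [1, 3]
>>> bounds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> constraints = [(torch.tensor([0, 1]), torch.tensor([1.0, 1.0]), 2.0)]
>>> normalize_linear_constraints(bounds, constraints)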
"""
new_constraints = []
for index, coefficient, rhs in constraints:
lower, upper = bounds[:, index]
s = upper - lower
new_constraints.append(
(index, s * coefficient, (rhs - torch.dot(coefficient, lower)).item())
)
return new_constraints
def get_polytope_samples(
n: int,
bounds: Tensor,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
seed: Optional[int] = None,
thinning: int = 32,
n_burnin: int = 10_000,
) -> Tensor:
r"""Sample from polytope defined by box bounds and (in)equality constraints.
This uses a hit-and-run Markov chain sampler.
TODO: make this method return the sampler object, to avoid doing burn-in
every time we draw samples.
Args:
n: The number of samples.
bounds: A `2 x d`-dim tensor containing the box bounds.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
seed: The random seed.
thinning: The amount of thinning.
n_burnin: The number of burn-in samples for the Markov chain sampler.
Returns:
A `n x d`-dim tensor of samples.
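Example:
>>> # illustrative sketch: 10 samples from the unit box subject to
>>> # X[0] + X[1] >= 0.5
>>> bounds = torch.stack([torch.zeros(3), torch.ones(3)])
>>> inequality_constraints = [(torch.tensor([0, 1]), torch.tensor([1.0, 1.0]), 0.5)]
>>> samples = get_polytope_samples(10, bounds, inequality_constraints)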
"""
# create tensors representing linear inequality constraints
# of the form Ax >= b.
if inequality_constraints:
# normalize_linear_constraints is called to solve this issue:
# https://github.com/pytorch/botorch/issues/1225
constraints = normalize_linear_constraints(bounds, inequality_constraints)
A, b = sparse_to_dense_constraints(
d=bounds.shape[-1],
constraints=constraints,
)
# Note the inequality constraints are of the form Ax >= b,
# but PolytopeSampler expects inequality constraints of the
# form Ax <= b, so we multiply by -1 below.
dense_inequality_constraints = -A, -b
else:
dense_inequality_constraints = None
if equality_constraints:
constraints = normalize_linear_constraints(bounds, equality_constraints)
dense_equality_constraints = sparse_to_dense_constraints(
d=bounds.shape[-1], constraints=constraints
)
else:
dense_equality_constraints = None
normalized_bounds = torch.zeros_like(bounds)
normalized_bounds[1, :] = 1.0
polytope_sampler = HitAndRunPolytopeSampler(
bounds=normalized_bounds,
inequality_constraints=dense_inequality_constraints,
equality_constraints=dense_equality_constraints,
n_burnin=n_burnin,
)
samples = polytope_sampler.draw(n=n * thinning, seed=seed)[::thinning]
return bounds[0] + samples * (bounds[1] - bounds[0])
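# A small usage sketch for `get_polytope_samples` (all values are illustrative):
# draw points from the box [0, 1] x [0, 2] subject to x[0] + x[1] >= 1.
def _get_polytope_samples_example() -> None:
    bounds = torch.tensor([[0.0, 0.0], [1.0, 2.0]])
    inequality_constraints = [(torch.tensor([0, 1]), torch.tensor([1.0, 1.0]), 1.0)]
    samples = get_polytope_samples(
        n=16,
        bounds=bounds,
        inequality_constraints=inequality_constraints,
        seed=0,
        thinning=4,
        n_burnin=100,
    )
    assert samples.shape == (16, 2)  # `n x d`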
def sparse_to_dense_constraints(
d: int,
constraints: List[Tuple[Tensor, Tensor, float]],
) -> Tuple[Tensor, Tensor]:
r"""Convert parameter constraints from a sparse format into a dense format.
This method converts sparse triples of the form (indices, coefficients, rhs)
to constraints of the form Ax >= b or Ax = b.
Args:
d: The input dimension.
        constraints: A list of tuples (indices, coefficients, rhs), with each
            tuple encoding an (in)equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs` or
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
Returns:
A two-element tuple containing:
- A: A `n_constraints x d`-dim tensor of coefficients.
- b: A `n_constraints x 1`-dim tensor of right hand sides.
"""
_t = constraints[0][1]
A = torch.zeros(len(constraints), d, dtype=_t.dtype, device=_t.device)
b = torch.zeros(len(constraints), 1, dtype=_t.dtype, device=_t.device)
for i, (indices, coefficients, rhs) in enumerate(constraints):
A[i, indices.long()] = coefficients
b[i] = rhs
return A, b
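# Illustrative sketch for `sparse_to_dense_constraints`: the sparse triple below
# encodes 2 * x[0] + 3 * x[2] >= 4 in a d = 3 space (made-up values) and
# densifies to one row of `A` and one entry of `b`.
def _sparse_to_dense_constraints_example() -> None:
    constraints = [(torch.tensor([0, 2]), torch.tensor([2.0, 3.0]), 4.0)]
    A, b = sparse_to_dense_constraints(d=3, constraints=constraints)
    assert torch.equal(A, torch.tensor([[2.0, 0.0, 3.0]]))
    assert torch.equal(b, torch.tensor([[4.0]]))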
def optimize_posterior_samples(
paths: SamplePath,
bounds: Tensor,
candidates: Optional[Tensor] = None,
raw_samples: Optional[int] = 1024,
num_restarts: int = 20,
maximize: bool = True,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Cheaply maximizes posterior samples by random querying followed by vanilla
gradient descent on the best num_restarts points.
Args:
        paths: Random Fourier Feature-based sample paths from the GP.
        bounds: The bounds on the search space.
        candidates: A priori good candidates (typically previous design points)
            which act as extra initial guesses for the optimization routine.
        raw_samples: The number of initial random points with which to query the
            sample paths.
        num_restarts: The number of points selected for gradient-based optimization.
        maximize: Boolean indicating whether to maximize or minimize.
Returns:
A two-element tuple containing:
- X_opt: A `num_optima x [batch_size] x d`-dim tensor of optimal inputs x*.
- f_opt: A `num_optima x [batch_size] x 1`-dim tensor of optimal outputs f*.
"""
if maximize:
def path_func(x):
return paths(x)
else:
def path_func(x):
return -paths(x)
candidate_set = unnormalize(
SobolEngine(dimension=bounds.shape[1], scramble=True).draw(raw_samples), bounds
)
    # Query all sample paths on all candidates; the last dimension of the output
    # has size raw_samples (batch dims, e.g. num_optima x num_models, come first).
candidate_queries = path_func(candidate_set)
argtop_k = torch.topk(candidate_queries, num_restarts, dim=-1).indices
X_top_k = candidate_set[argtop_k, :]
# to avoid circular import, the import occurs here
from botorch.generation.gen import gen_candidates_torch
X_top_k, f_top_k = gen_candidates_torch(
X_top_k, path_func, lower_bounds=bounds[0], upper_bounds=bounds[1], **kwargs
)
f_opt, arg_opt = f_top_k.max(dim=-1, keepdim=True)
    # For each sample (and possibly for every model in the batch of models), this
    # retrieves the argmax. We flatten, pick out the indices and then reshape to
    # the original batch shapes (so instead of picking out the argmax of a
    # (3, 7, num_restarts, D) tensor along the num_restarts dim, we pick it out
    # of a (21, num_restarts, D) tensor).
final_shape = candidate_queries.shape[:-1]
X_opt = X_top_k.reshape(final_shape.numel(), num_restarts, -1)[
torch.arange(final_shape.numel()), arg_opt.flatten()
].reshape(*final_shape, -1)
if not maximize:
f_opt = -f_opt
return X_opt, f_opt
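# A hedged end-to-end sketch for `optimize_posterior_samples`. It assumes that
# pathwise sample paths can be drawn via `botorch.sampling.pathwise.draw_matheron_paths`
# and that `SingleTaskGP` is available from `botorch.models`; adjust the imports if
# your installed version differs. The training data is synthetic.
def _optimize_posterior_samples_example() -> None:
    from botorch.models import SingleTaskGP
    from botorch.sampling.pathwise import draw_matheron_paths
    train_X = torch.rand(10, 2, dtype=torch.float64)
    train_Y = (6.28 * train_X).sin().sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    # Draw a few approximate posterior sample paths and maximize each of them.
    paths = draw_matheron_paths(model, sample_shape=torch.Size([4]))
    bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]], dtype=torch.float64)
    X_opt, f_opt = optimize_posterior_samples(
        paths, bounds=bounds, raw_samples=256, num_restarts=4
    )
    print(X_opt.shape, f_opt.shape)  # one maximizer (and maximum) per path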
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Helpers for handling input or outcome constraints.
"""
from __future__ import annotations
from functools import partial
from typing import Callable, List, Optional, Tuple
import torch
from torch import Tensor
def get_outcome_constraint_transforms(
outcome_constraints: Optional[Tuple[Tensor, Tensor]]
) -> Optional[List[Callable[[Tensor], Tensor]]]:
r"""Create outcome constraint callables from outcome constraint tensors.
Args:
        outcome_constraints: A tuple of `(A, b)`. For `k` outcome constraints
            and `m` outputs at `f(x)`, `A` is `k x m` and `b` is `k x 1` such
            that `A f(x) <= b`.
Returns:
A list of callables, each mapping a Tensor of size `b x q x m` to a
tensor of size `b x q`, where `m` is the number of outputs of the model.
Negative values imply feasibility. The callables support broadcasting
(e.g. for calling on a tensor of shape `mc_samples x b x q x m`).
Example:
>>> # constrain `f(x)[0] <= 0`
>>> A = torch.tensor([[1., 0.]])
>>> b = torch.tensor([[0.]])
>>> outcome_constraints = get_outcome_constraint_transforms((A, b))
"""
if outcome_constraints is None:
return None
A, b = outcome_constraints
def _oc(a: Tensor, rhs: Tensor, Y: Tensor) -> Tensor:
r"""Evaluate constraints.
        Note: einsum multiplies `Y` by `a` and sums over the `m`-dimension. Einsum
            is ~2x faster than using `(Y * a.view(1, 1, -1)).sum(dim=-1)`.
Args:
a: `m`-dim tensor of weights for the outcomes
rhs: Singleton tensor containing the outcome constraint value
Y: `... x b x q x m` tensor of function values
Returns:
A `... x b x q`-dim tensor where negative values imply feasibility
"""
lhs = torch.einsum("...m, m", [Y, a])
return lhs - rhs
return [partial(_oc, a, rhs) for a, rhs in zip(A, b)]
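# Brief follow-on sketch for `get_outcome_constraint_transforms`: evaluating the
# returned callables on a batch of made-up model outputs. Negative values mean
# the corresponding constraint is satisfied.
def _outcome_constraint_transforms_example() -> None:
    # Constrain f(x)[0] <= 0 and f(x)[1] <= 1 for a 2-output model.
    A = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    b = torch.tensor([[0.0], [1.0]])
    constraint_callables = get_outcome_constraint_transforms((A, b))
    Y = torch.rand(3, 4, 2)  # a `b x q x m` batch of outcomes
    values = [c(Y) for c in constraint_callables]
    assert all(v.shape == (3, 4) for v in values)  # each callable returns `b x q`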
def get_monotonicity_constraints(
d: int,
descending: bool = False,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
) -> Tuple[Tensor, Tensor]:
"""Returns a system of linear inequalities `(A, b)` that generically encodes order
constraints on the elements of a `d`-dimsensional space, i.e. `A @ x < b` implies
`x[i] < x[i + 1]` for a `d`-dimensional vector `x`.
Idea: Could encode `A` as sparse matrix, if it is supported well.
Args:
d: Dimensionality of the constraint space, i.e. number of monotonic parameters.
        descending: If True, forces the elements of the vector to be monotonically
            decreasing; otherwise, monotonically increasing.
dtype: The dtype of the returned Tensors.
device: The device of the returned Tensors.
Returns:
A tuple of Tensors `(A, b)` representing the monotonicity constraint as a system
of linear inequalities `A @ x < b`. `A` is `(d - 1) x d`-dimensional and `b` is
`(d - 1) x 1`-dimensional.
"""
A = torch.zeros(d - 1, d, dtype=dtype, device=device)
idx = torch.arange(d - 1)
A[idx, idx] = 1
A[idx, idx + 1] = -1
b = torch.zeros(d - 1, 1, dtype=dtype, device=device)
if descending:
A = -A
return A, b
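# Small usage sketch for `get_monotonicity_constraints` with d = 3 (illustrative):
# the rows of `A` encode x[0] - x[1] < 0 and x[1] - x[2] < 0, so an increasing
# vector satisfies A @ x < b with b = 0 while a decreasing one does not.
def _monotonicity_constraints_example() -> None:
    A, b = get_monotonicity_constraints(d=3)
    x_increasing = torch.tensor([[0.1], [0.5], [0.9]])
    x_decreasing = x_increasing.flip(0)
    assert ((A @ x_increasing) < b).all()
    assert not ((A @ x_decreasing) < b).all()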
|