#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Core abstractions and generic optimizers.""" from __future__ import annotations import re from dataclasses import dataclass, replace from enum import auto, Enum from itertools import count from sys import maxsize from time import monotonic from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from botorch.optim.closures import NdarrayOptimizationClosure from botorch.optim.utils.numpy_utils import get_bounds_as_ndarray from botorch.optim.utils.timeout import minimize_with_timeout from numpy import asarray, float64 as np_float64, ndarray from torch import Tensor from torch.optim.adam import Adam from torch.optim.optimizer import Optimizer try: from torch.optim.lr_scheduler import LRScheduler except ImportError: # pragma: no cover from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # pragma: no cover _LBFGSB_MAXITER_MAXFUN_REGEX = re.compile( # regex for maxiter and maxfun messages "TOTAL NO. of (ITERATIONS REACHED LIMIT|f AND g EVALUATIONS EXCEEDS LIMIT)" ) class OptimizationStatus(int, Enum): RUNNING = auto() # incomplete SUCCESS = auto() # optimizer converged FAILURE = auto() # terminated abnormally STOPPED = auto() # stopped due to user provided criterion @dataclass class OptimizationResult: step: int fval: Union[float, int] status: OptimizationStatus runtime: Optional[float] = None message: Optional[str] = None def scipy_minimize( closure: Union[ Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]], NdarrayOptimizationClosure, ], parameters: Dict[str, Tensor], bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None, callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None, x0: Optional[ndarray] = None, method: str = "L-BFGS-B", options: Optional[Dict[str, Any]] = None, timeout_sec: Optional[float] = None, ) -> OptimizationResult: r"""Generic scipy.optimize.minimize-based optimization routine. Args: closure: Callable that returns a tensor and an iterable of gradient tensors or NdarrayOptimizationClosure instance. parameters: A dictionary of tensors to be optimized. bounds: A dictionary mapping parameter names to lower and upper bounds. callback: A callable taking `parameters` and an OptimizationResult as arguments. x0: An optional initialization vector passed to scipy.optimize.minimize. method: Solver type, passed along to scipy.minimize. options: Dictionary of solver options, passed along to scipy.minimize. timeout_sec: Timeout in seconds to wait before aborting the optimization loop if not converged (will return the best found solution thus far). Returns: An OptimizationResult summarizing the final state of the run. 
""" start_time = monotonic() wrapped_closure = ( closure if isinstance(closure, NdarrayOptimizationClosure) else NdarrayOptimizationClosure(closure, parameters) ) if bounds is None: bounds_np = None else: bounds_np = get_bounds_as_ndarray(parameters, bounds) if callback is None: wrapped_callback = None else: call_counter = count(1) # callbacks are typically made at the end of each iter def wrapped_callback(x: ndarray): result = OptimizationResult( step=next(call_counter), fval=float(wrapped_closure(x)[0]), status=OptimizationStatus.RUNNING, runtime=monotonic() - start_time, ) return callback(parameters, result) # pyre-ignore [29] raw = minimize_with_timeout( wrapped_closure, wrapped_closure.state if x0 is None else x0.astype(np_float64, copy=False), jac=True, bounds=bounds_np, method=method, options=options, callback=wrapped_callback, timeout_sec=timeout_sec, ) # Post-processing and outcome handling wrapped_closure.state = asarray(raw.x) # set parameter state to optimal values msg = raw.message if isinstance(raw.message, str) else raw.message.decode("ascii") if raw.success: status = OptimizationStatus.SUCCESS else: status = ( # Check whether we stopped due to reaching maxfun or maxiter OptimizationStatus.STOPPED if _LBFGSB_MAXITER_MAXFUN_REGEX.search(msg) or "Optimization timed out after" in msg else OptimizationStatus.FAILURE ) return OptimizationResult( fval=raw.fun, step=raw.nit, status=status, message=msg, runtime=monotonic() - start_time, ) def torch_minimize( closure: Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]], parameters: Dict[str, Tensor], bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None, callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None, optimizer: Union[Optimizer, Callable[[List[Tensor]], Optimizer]] = Adam, scheduler: Optional[Union[LRScheduler, Callable[[Optimizer], LRScheduler]]] = None, step_limit: Optional[int] = None, timeout_sec: Optional[float] = None, stopping_criterion: Optional[Callable[[Tensor], bool]] = None, ) -> OptimizationResult: r"""Generic torch.optim-based optimization routine. Args: closure: Callable that returns a tensor and an iterable of gradient tensors. Responsible for setting relevant parameters' `grad` attributes. parameters: A dictionary of tensors to be optimized. bounds: An optional dictionary of bounds for elements of `parameters`. callback: A callable taking `parameters` and an OptimizationResult as arguments. optimizer: A `torch.optim.Optimizer` instance or a factory that takes a list of parameters and returns an `Optimizer` instance. scheduler: A `torch.optim.lr_scheduler._LRScheduler` instance or a factory that takes a `Optimizer` instance and returns a `_LRSchedule` instance. step_limit: Integer specifying a maximum number of optimization steps. One of `step_limit`, `stopping_criterion`, or `timeout_sec` must be passed. timeout_sec: Timeout in seconds before terminating the optimization loop. One of `step_limit`, `stopping_criterion`, or `timeout_sec` must be passed. stopping_criterion: A StoppingCriterion for the optimization loop. Returns: An OptimizationResult summarizing the final state of the run. 
""" result: OptimizationResult start_time = monotonic() if step_limit is None: if stopping_criterion is None and timeout_sec is None: raise RuntimeError("No termination conditions were given.") step_limit = maxsize if not isinstance(optimizer, Optimizer): optimizer = optimizer(list(parameters.values())) if not (scheduler is None or isinstance(scheduler, LRScheduler)): scheduler = scheduler(optimizer) _bounds = ( {} if bounds is None else {name: limits for name, limits in bounds.items() if name in parameters} ) for step in range(1, step_limit + 1): fval, _ = closure() runtime = monotonic() - start_time result = OptimizationResult( step=step, fval=fval.detach().cpu().item(), status=OptimizationStatus.RUNNING, runtime=runtime, ) # TODO: Update stopping_criterion API to return a message. if stopping_criterion and stopping_criterion(fval): result.status = OptimizationStatus.STOPPED result.message = "`torch_minimize` stopped due to `stopping_criterion`." if timeout_sec is not None and runtime >= timeout_sec: result.status = OptimizationStatus.STOPPED result.message = ( f"`torch_minimize` stopped due to timeout after {runtime} seconds." ) if callback: callback(parameters, result) if result.status != OptimizationStatus.RUNNING: break optimizer.step() for name, (lower, upper) in _bounds.items(): parameters[name].data = parameters[name].clamp(min=lower, max=upper) if scheduler: scheduler.step() if result.status != OptimizationStatus.RUNNING: return replace(result, runtime=monotonic() - start_time) # Account for final parameter update when stopping due to step_limit return OptimizationResult( step=step, fval=closure()[0].detach().cpu().item(), status=OptimizationStatus.STOPPED, runtime=monotonic() - start_time, message=f"`torch_minimize` stopped after reaching step_limit={step_limit}.", )
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" A converter that simplifies using numpy-based optimizers with generic torch `nn.Module` classes. This enables using a `scipy.optim.minimize` optimizer for optimizing module parameters. """ from __future__ import annotations from collections import OrderedDict from math import inf from numbers import Number from typing import Dict, List, Optional, Set, Tuple from warnings import warn import numpy as np import torch from botorch.optim.utils import ( _get_extra_mll_args, _handle_numerical_errors, get_name_filter, get_parameters_and_bounds, TorchAttr, ) from gpytorch.mlls import MarginalLogLikelihood from torch.nn import Module def module_to_array( module: Module, bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None, exclude: Optional[Set[str]] = None, ) -> Tuple[np.ndarray, Dict[str, TorchAttr], Optional[np.ndarray]]: r"""Extract named parameters from a module into a numpy array. Only extracts parameters with requires_grad, since it is meant for optimizing. Args: module: A module with parameters. May specify parameter constraints in a `named_parameters_and_constraints` method. bounds: A dictionary mapping parameter names t lower and upper bounds. of lower and upper bounds. Bounds specified here take precedence over bounds on the same parameters specified in the constraints registered with the module. exclude: A list of parameter names that are to be excluded from extraction. Returns: 3-element tuple containing - The parameter values as a numpy array. - An ordered dictionary with the name and tensor attributes of each parameter. - A `2 x n_params` numpy array with lower and upper bounds if at least one constraint is finite, and None otherwise. Example: >>> mll = ExactMarginalLogLikelihood(model.likelihood, model) >>> parameter_array, property_dict, bounds_out = module_to_array(mll) """ warn( "`module_to_array` is marked for deprecation, consider using " "`get_parameters_and_bounds`, `get_parameters_as_ndarray_1d`, or " "`get_bounds_as_ndarray` instead.", DeprecationWarning, ) param_dict, bounds_dict = get_parameters_and_bounds( module=module, name_filter=None if exclude is None else get_name_filter(exclude), requires_grad=True, ) if bounds is not None: bounds_dict.update(bounds) # Record tensor metadata and read parameter values to the tape param_tape: List[Number] = [] property_dict = OrderedDict() with torch.no_grad(): for name, param in param_dict.items(): property_dict[name] = TorchAttr(param.shape, param.dtype, param.device) param_tape.extend(param.view(-1).cpu().double().tolist()) # Extract lower and upper bounds start = 0 bounds_np = None params_np = np.asarray(param_tape) for name, param in param_dict.items(): numel = param.numel() if name in bounds_dict: for row, bound in enumerate(bounds_dict[name]): if bound is None: continue if torch.is_tensor(bound): if (bound == (2 * row - 1) * inf).all(): continue bound = bound.detach().cpu() elif bound == (2 * row - 1) * inf: continue if bounds_np is None: bounds_np = np.full((2, len(params_np)), ((-inf,), (inf,))) bounds_np[row, start : start + numel] = bound start += numel return params_np, property_dict, bounds_np def set_params_with_array( module: Module, x: np.ndarray, property_dict: Dict[str, TorchAttr] ) -> Module: r"""Set module parameters with values from numpy array. 
Args: module: Module with parameters to be set x: Numpy array with parameter values property_dict: Dictionary of parameter names and torch attributes as returned by module_to_array. Returns: Module: module with parameters updated in-place. Example: >>> mll = ExactMarginalLogLikelihood(model.likelihood, model) >>> parameter_array, property_dict, bounds_out = module_to_array(mll) >>> parameter_array += 0.1 # perturb parameters (for example only) >>> mll = set_params_with_array(mll, parameter_array, property_dict) """ warn( "`_set_params_with_array` is marked for deprecation, consider using " "`set_parameters_from_ndarray_1d` instead.", DeprecationWarning, ) param_dict = OrderedDict(module.named_parameters()) start_idx = 0 for p_name, attrs in property_dict.items(): # Construct the new tensor if len(attrs.shape) == 0: # deal with scalar tensors end_idx = start_idx + 1 new_data = torch.tensor( x[start_idx], dtype=attrs.dtype, device=attrs.device ) else: end_idx = start_idx + np.prod(attrs.shape) new_data = torch.tensor( x[start_idx:end_idx], dtype=attrs.dtype, device=attrs.device ).view(*attrs.shape) start_idx = end_idx # Update corresponding parameter in-place. Disable autograd to update. param_dict[p_name].requires_grad_(False) param_dict[p_name].copy_(new_data) param_dict[p_name].requires_grad_(True) return module def _scipy_objective_and_grad( x: np.ndarray, mll: MarginalLogLikelihood, property_dict: Dict[str, TorchAttr] ) -> Tuple[float, np.ndarray]: r"""Get objective and gradient in format that scipy expects. Args: x: The (flattened) input parameters. mll: The MarginalLogLikelihood module to evaluate. property_dict: The property dictionary required to "unflatten" the input parameter vector, as generated by `module_to_array`. Returns: 2-element tuple containing - The objective value. - The gradient of the objective. """ warn("`_scipy_objective_and_grad` is marked for deprecation.", DeprecationWarning) mll = set_params_with_array(mll, x, property_dict) train_inputs, train_targets = mll.model.train_inputs, mll.model.train_targets mll.zero_grad() try: # catch linear algebra errors in gpytorch output = mll.model(*train_inputs) args = [output, train_targets] + _get_extra_mll_args(mll) loss = -mll(*args).sum() except RuntimeError as e: return _handle_numerical_errors(error=e, x=x) loss.backward() i = 0 param_dict = OrderedDict(mll.named_parameters()) grad = np.zeros(sum([tattr.shape.numel() for tattr in property_dict.values()])) for p_name in property_dict: t = param_dict[p_name] size = t.numel() if t.requires_grad and t.grad is not None: grad[i : i + size] = t.grad.detach().view(-1).cpu().double().clone().numpy() i += size mll.zero_grad() return loss.item(), grad
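
# Illustrative usage sketch (not part of the library source). It shows how the
# deprecated helpers above were meant to be combined with
# `scipy.optimize.minimize`; the toy `SingleTaskGP` model and the helper name
# `_demo_scipy_fit` are assumptions made purely for demonstration.
def _demo_scipy_fit() -> None:
    from botorch.models import SingleTaskGP
    from gpytorch.mlls import ExactMarginalLogLikelihood
    from scipy.optimize import minimize

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    mll.train()

    # Flatten the parameters, hand objective and gradient to scipy, and write
    # the optimized values back into the module.
    x0, property_dict, _ = module_to_array(mll)
    res = minimize(
        _scipy_objective_and_grad,
        x0,
        args=(mll, property_dict),
        jac=True,  # `_scipy_objective_and_grad` returns (objective, gradient)
        method="L-BFGS-B",
    )
    set_params_with_array(mll, res.x, property_dict)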
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

from abc import ABC, abstractmethod

import torch
from torch import Tensor


class StoppingCriterion(ABC):
    r"""Base class for evaluating optimization convergence.

    Stopping criteria are implemented as objects rather than functions, so that
    they can keep track of past function values between optimization steps.

    :meta private:
    """

    @abstractmethod
    def evaluate(self, fvals: Tensor) -> bool:
        r"""Evaluate the stopping criterion.

        Args:
            fvals: tensor containing function values for the current iteration. If
                `fvals` contains more than one element, then the stopping criterion
                is evaluated element-wise and True is returned if the stopping
                criterion is true for all elements.

        Returns:
            Stopping indicator (if True, stop the optimization).
        """
        pass  # pragma: no cover

    def __call__(self, fvals: Tensor) -> bool:
        return self.evaluate(fvals)


class ExpMAStoppingCriterion(StoppingCriterion):
    r"""Exponential moving average stopping criterion.

    Computes an exponentially weighted moving average over window length `n_window`
    and checks whether the relative decrease in this moving average between steps
    is less than a provided tolerance level. That is, in iteration `i`, it computes

        v[i,j] := fvals[i - n_window + j] * w[j]

    for all `j = 0, ..., n_window`, where `w[j] = exp(-eta * (1 - j / n_window))`.
    Letting `ma[i] := sum_j(v[i,j])`, the criterion evaluates to `True` whenever

        (ma[i-1] - ma[i]) / abs(ma[i-1]) < rel_tol (if minimize=True)
        (ma[i] - ma[i-1]) / abs(ma[i-1]) < rel_tol (if minimize=False)
    """

    def __init__(
        self,
        maxiter: int = 10000,
        minimize: bool = True,
        n_window: int = 10,
        eta: float = 1.0,
        rel_tol: float = 1e-5,
    ) -> None:
        r"""Exponential moving average stopping criterion.

        Args:
            maxiter: Maximum number of iterations.
            minimize: If True, assume minimization.
            n_window: The size of the exponential moving average window.
            eta: The exponential decay factor in the weights.
            rel_tol: Relative tolerance for termination.
        """
        self.maxiter = maxiter
        self.minimize = minimize
        self.n_window = n_window
        self.rel_tol = rel_tol
        self.iter = 0
        weights = torch.exp(torch.linspace(-eta, 0, self.n_window))
        self.weights = weights / weights.sum()
        self._prev_fvals = None

    def evaluate(self, fvals: Tensor) -> bool:
        r"""Evaluate the stopping criterion.

        Args:
            fvals: tensor containing function values for the current iteration. If
                `fvals` contains more than one element, then the stopping criterion
                is evaluated element-wise and True is returned if the stopping
                criterion is true for all elements.

        TODO: add support for utilizing gradient information

        Returns:
            Stopping indicator (if True, stop the optimization).
        """
        self.iter += 1
        if self.iter == self.maxiter:
            return True

        if self._prev_fvals is None:
            self._prev_fvals = fvals.unsqueeze(0)
        else:
            self._prev_fvals = torch.cat(
                [self._prev_fvals[-self.n_window :], fvals.unsqueeze(0)]
            )

        if self._prev_fvals.size(0) < self.n_window + 1:
            return False

        weights = self.weights
        weights = weights.to(fvals)
        if self._prev_fvals.ndim > 1:
            weights = weights.unsqueeze(-1)

        # TODO: Update the exp moving average efficiently
        prev_ma = (self._prev_fvals[:-1] * weights).sum(dim=0)
        ma = (self._prev_fvals[1:] * weights).sum(dim=0)
        # TODO: Handle approx. zero losses (normalize by min/max loss range)
        rel_delta = (prev_ma - ma) / prev_ma.abs()

        if not self.minimize:
            rel_delta = -rel_delta
        if torch.max(rel_delta) < self.rel_tol:
            return True

        return False
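
# Illustrative usage sketch (not part of the library source). It shows
# `ExpMAStoppingCriterion` terminating a hand-rolled optimization loop; the
# quadratic loss and the helper name `_demo_expma_stopping` are assumptions
# made purely for demonstration.
def _demo_expma_stopping() -> None:
    x = torch.tensor([5.0], requires_grad=True)
    optimizer = torch.optim.SGD([x], lr=0.1)
    stopping_criterion = ExpMAStoppingCriterion(rel_tol=1e-6)
    for _ in range(1000):
        optimizer.zero_grad()
        loss = (x**2).sum()
        loss.backward()
        optimizer.step()
        # `__call__` forwards to `evaluate`; it returns True once the
        # exponential moving average of the loss stops decreasing (or
        # `maxiter` is reached).
        if stopping_criterion(loss.detach()):
            break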
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" References .. [Regis] R. G. Regis, C. A. Shoemaker. Combining radial basis function surrogates and dynamic coordinate search in high-dimensional expensive black-box optimization, Engineering Optimization, 2013. """ from __future__ import annotations import warnings from math import ceil from typing import Callable, Dict, List, Optional, Tuple, Union import torch from botorch import settings from botorch.acquisition import analytic, monte_carlo, multi_objective from botorch.acquisition.acquisition import AcquisitionFunction from botorch.acquisition.knowledge_gradient import ( _get_value_function, qKnowledgeGradient, ) from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError from botorch.exceptions.warnings import ( BadInitialCandidatesWarning, BotorchWarning, SamplingWarning, ) from botorch.models.model import Model from botorch.optim.utils import fix_features, get_X_baseline from botorch.utils.multi_objective.pareto import is_non_dominated from botorch.utils.sampling import ( batched_multinomial, draw_sobol_samples, get_polytope_samples, manual_seed, ) from botorch.utils.transforms import normalize, standardize, unnormalize from torch import Tensor from torch.distributions import Normal from torch.quasirandom import SobolEngine TGenInitialConditions = Callable[ [ # reasoning behind this annotation: contravariance qKnowledgeGradient, Tensor, int, int, int, Optional[Dict[int, float]], Optional[Dict[str, Union[bool, float, int]]], Optional[List[Tuple[Tensor, Tensor, float]]], Optional[List[Tuple[Tensor, Tensor, float]]], ], Optional[Tensor], ] def transform_constraints( constraints: Union[List[Tuple[Tensor, Tensor, float]], None], q: int, d: int ) -> List[Tuple[Tensor, Tensor, float]]: r"""Transform constraints to sample from a d*q-dimensional space instead of a d-dimensional state. This function assumes that constraints are the same for each input batch, and broadcasts the constraints accordingly to the input batch shape. Args: constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an (in-)equality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) (>)= rhs`. If `indices` is a 2-d Tensor, this supports specifying constraints across the points in the `q`-batch (inter-point constraints). If `None`, this function is a nullop and simply returns `None`. q: Size of the `q`-batch. d: Dimensionality of the problem. Returns: List[Tuple[Tensor, Tensor, float]]: List of transformed constraints. """ if constraints is None: return None transformed = [] for constraint in constraints: if len(constraint[0].shape) == 1: transformed += transform_intra_point_constraint(constraint, d, q) else: transformed.append(transform_inter_point_constraint(constraint, d)) return transformed def transform_intra_point_constraint( constraint: Tuple[Tensor, Tensor, float], d: int, q: int ) -> List[Tuple[Tensor, Tensor, float]]: r"""Transforms an intra-point/pointwise constraint from d-dimensional space to a d*q-dimesional space. Args: constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an (in-)equality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) (>)= rhs`. Here `indices` must be one-dimensional, and the constraint is applied to all points within the `q`-batch. d: Dimensionality of the problem. 
Raises: ValueError: If indices in the constraints are larger than the dimensionality d of the problem. Returns: List[Tuple[Tensor, Tensor, float]]: List of transformed constraints. """ indices, coefficients, rhs = constraint if indices.max() >= d: raise ValueError( f"Constraint indices cannot exceed the problem dimension {d=}." ) return [ ( torch.tensor( [i * d + j for j in indices], dtype=torch.int64, device=indices.device ), coefficients, rhs, ) for i in range(q) ] def transform_inter_point_constraint( constraint: Tuple[Tensor, Tensor, float], d: int ) -> Tuple[Tensor, Tensor, float]: r"""Transforms an inter-point constraint from d-dimensional space to a d*q dimesional space. Args: constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an (in-)equality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) (>)= rhs`. `indices` must be a 2-d Tensor, where in each row `indices[i] = (k_i, l_i)` the first index `k_i` corresponds to the `k_i`-th element of the `q`-batch and the second index `l_i` corresponds to the `l_i`-th feature of that element. Raises: ValueError: If indices in the constraints are larger than the dimensionality d of the problem. Returns: List[Tuple[Tensor, Tensor, float]]: Transformed constraint. """ indices, coefficients, rhs = constraint if indices[:, 1].max() >= d: raise ValueError( f"Constraint indices cannot exceed the problem dimension {d=}." ) return ( torch.tensor( [r[0] * d + r[1] for r in indices], dtype=torch.int64, device=indices.device ), coefficients, rhs, ) def sample_q_batches_from_polytope( n: int, q: int, bounds: Tensor, n_burnin: int, thinning: int, seed: int, inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, ) -> Tensor: r"""Samples `n` q-baches from a polytope of dimension `d`. Args: n: Number of q-batches to sample. q: Number of samples per q-batch bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`. n_burnin: The number of burn-in samples for the Markov chain sampler. thinning: The amount of thinning (number of steps to take between returning samples). seed: The random seed. inequality_constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`. equality_constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) = rhs`. Returns: A `n x q x d`-dim tensor of samples. 
""" # check if inter-point constraints are present inter_point = any( len(indices.shape) > 1 for constraints in (inequality_constraints or [], equality_constraints or []) for indices, _, _ in constraints ) if inter_point: samples = get_polytope_samples( n=n, bounds=torch.hstack([bounds for _ in range(q)]), inequality_constraints=transform_constraints( constraints=inequality_constraints, q=q, d=bounds.shape[1] ), equality_constraints=transform_constraints( constraints=equality_constraints, q=q, d=bounds.shape[1] ), seed=seed, n_burnin=n_burnin, thinning=thinning * q, ) else: samples = get_polytope_samples( n=n * q, bounds=bounds, inequality_constraints=inequality_constraints, equality_constraints=equality_constraints, seed=seed, n_burnin=n_burnin, thinning=thinning, ) return samples.view(n, q, -1).cpu() def gen_batch_initial_conditions( acq_function: AcquisitionFunction, bounds: Tensor, q: int, num_restarts: int, raw_samples: int, fixed_features: Optional[Dict[int, float]] = None, options: Optional[Dict[str, Union[bool, float, int]]] = None, inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, generator: Optional[Callable[[int, int, int], Tensor]] = None, ) -> Tensor: r"""Generate a batch of initial conditions for random-restart optimziation. TODO: Support t-batches of initial conditions. Args: acq_function: The acquisition function to be optimized. bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`. q: The number of candidates to consider. num_restarts: The number of starting points for multistart acquisition function optimization. raw_samples: The number of raw samples to consider in the initialization heuristic. Note: if `sample_around_best` is True (the default is False), then `2 * raw_samples` samples are used. fixed_features: A map `{feature_index: value}` for features that should be fixed to a particular value during generation. options: Options for initial condition generation. For valid options see `initialize_q_batch` and `initialize_q_batch_nonneg`. If `options` contains a `nonnegative=True` entry, then `acq_function` is assumed to be non-negative (useful when using custom acquisition functions). In addition, an "init_batch_limit" option can be passed to specify the batch limit for the initialization. This is useful for avoiding memory limits when computing the batch posterior over raw samples. inequality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`. equality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) = rhs`. generator: Callable for generating samples that are then further processed. It receives `n`, `q` and `seed` as arguments and returns a tensor of shape `n x q x d`. Returns: A `num_restarts x q x d` tensor of initial conditions. Example: >>> qEI = qExpectedImprovement(model, best_f=0.2) >>> bounds = torch.tensor([[0.], [1.]]) >>> Xinit = gen_batch_initial_conditions( >>> qEI, bounds, q=3, num_restarts=25, raw_samples=500 >>> ) """ if bounds.isinf().any(): raise NotImplementedError( "Currently only finite values in `bounds` are supported " "for generating initial conditions for optimization." 
) options = options or {} sample_around_best = options.get("sample_around_best", False) if sample_around_best and equality_constraints: raise UnsupportedError( "Option 'sample_around_best' is not supported when equality" "constraints are present." ) if sample_around_best and generator: raise UnsupportedError( "Option 'sample_around_best' is not supported when custom " "generator is be used." ) seed: Optional[int] = options.get("seed") batch_limit: Optional[int] = options.get( "init_batch_limit", options.get("batch_limit") ) factor, max_factor = 1, 5 init_kwargs = {} device = bounds.device bounds_cpu = bounds.cpu() if "eta" in options: init_kwargs["eta"] = options.get("eta") if options.get("nonnegative") or is_nonnegative(acq_function): init_func = initialize_q_batch_nonneg if "alpha" in options: init_kwargs["alpha"] = options.get("alpha") else: init_func = initialize_q_batch q = 1 if q is None else q # the dimension the samples are drawn from effective_dim = bounds.shape[-1] * q if effective_dim > SobolEngine.MAXDIM and settings.debug.on(): warnings.warn( f"Sample dimension q*d={effective_dim} exceeding Sobol max dimension " f"({SobolEngine.MAXDIM}). Using iid samples instead.", SamplingWarning, ) while factor < max_factor: with warnings.catch_warnings(record=True) as ws: n = raw_samples * factor if generator is not None: X_rnd = generator(n, q, seed) elif inequality_constraints is None and equality_constraints is None: if effective_dim <= SobolEngine.MAXDIM: X_rnd = draw_sobol_samples(bounds=bounds_cpu, n=n, q=q, seed=seed) else: with manual_seed(seed): # load on cpu X_rnd_nlzd = torch.rand( n, q, bounds_cpu.shape[-1], dtype=bounds.dtype ) X_rnd = bounds_cpu[0] + (bounds_cpu[1] - bounds_cpu[0]) * X_rnd_nlzd else: X_rnd = sample_q_batches_from_polytope( n=n, q=q, bounds=bounds, n_burnin=options.get("n_burnin", 10000), thinning=options.get("thinning", 32), seed=seed, equality_constraints=equality_constraints, inequality_constraints=inequality_constraints, ) # sample points around best if sample_around_best: X_best_rnd = sample_points_around_best( acq_function=acq_function, n_discrete_points=n * q, sigma=options.get("sample_around_best_sigma", 1e-3), bounds=bounds, subset_sigma=options.get("sample_around_best_subset_sigma", 1e-1), prob_perturb=options.get("sample_around_best_prob_perturb"), ) if X_best_rnd is not None: X_rnd = torch.cat( [ X_rnd, X_best_rnd.view(n, q, bounds.shape[-1]).cpu(), ], dim=0, ) X_rnd = fix_features(X_rnd, fixed_features=fixed_features) with torch.no_grad(): if batch_limit is None: batch_limit = X_rnd.shape[0] Y_rnd_list = [] start_idx = 0 while start_idx < X_rnd.shape[0]: end_idx = min(start_idx + batch_limit, X_rnd.shape[0]) Y_rnd_curr = acq_function( X_rnd[start_idx:end_idx].to(device=device) ).cpu() Y_rnd_list.append(Y_rnd_curr) start_idx += batch_limit Y_rnd = torch.cat(Y_rnd_list) batch_initial_conditions = init_func( X=X_rnd, Y=Y_rnd, n=num_restarts, **init_kwargs ).to(device=device) if not any(issubclass(w.category, BadInitialCandidatesWarning) for w in ws): return batch_initial_conditions if factor < max_factor: factor += 1 if seed is not None: seed += 1 # make sure to sample different X_rnd warnings.warn( "Unable to find non-zero acquisition function values - initial conditions " "are being selected randomly.", BadInitialCandidatesWarning, ) return batch_initial_conditions def gen_one_shot_kg_initial_conditions( acq_function: qKnowledgeGradient, bounds: Tensor, q: int, num_restarts: int, raw_samples: int, fixed_features: Optional[Dict[int, float]] = None, 
options: Optional[Dict[str, Union[bool, float, int]]] = None, inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, ) -> Optional[Tensor]: r"""Generate a batch of smart initializations for qKnowledgeGradient. This function generates initial conditions for optimizing one-shot KG using the maximizer of the posterior objective. Intutively, the maximizer of the fantasized posterior will often be close to a maximizer of the current posterior. This function uses that fact to generate the initital conditions for the fantasy points. Specifically, a fraction of `1 - frac_random` (see options) is generated by sampling from the set of maximizers of the posterior objective (obtained via random restart optimization) according to a softmax transformation of their respective values. This means that this initialization strategy internally solves an acquisition function maximization problem. The remaining `frac_random` fantasy points as well as all `q` candidate points are chosen according to the standard initialization strategy in `gen_batch_initial_conditions`. Args: acq_function: The qKnowledgeGradient instance to be optimized. bounds: A `2 x d` tensor of lower and upper bounds for each column of task features. q: The number of candidates to consider. num_restarts: The number of starting points for multistart acquisition function optimization. raw_samples: The number of raw samples to consider in the initialization heuristic. fixed_features: A map `{feature_index: value}` for features that should be fixed to a particular value during generation. options: Options for initial condition generation. These contain all settings for the standard heuristic initialization from `gen_batch_initial_conditions`. In addition, they contain `frac_random` (the fraction of fully random fantasy points), `num_inner_restarts` and `raw_inner_samples` (the number of random restarts and raw samples for solving the posterior objective maximization problem, respectively) and `eta` (temperature parameter for sampling heuristic from posterior objective maximizers). inequality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`. equality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) = rhs`. Returns: A `num_restarts x q' x d` tensor that can be used as initial conditions for `optimize_acqf()`. Here `q' = q + num_fantasies` is the total number of points (candidate points plus fantasy points). Example: >>> qKG = qKnowledgeGradient(model, num_fantasies=64) >>> bounds = torch.tensor([[0., 0.], [1., 1.]]) >>> Xinit = gen_one_shot_kg_initial_conditions( >>> qKG, bounds, q=3, num_restarts=10, raw_samples=512, >>> options={"frac_random": 0.25}, >>> ) """ options = options or {} frac_random: float = options.get("frac_random", 0.1) if not 0 < frac_random < 1: raise ValueError( f"frac_random must take on values in (0,1). 
Value: {frac_random}" ) q_aug = acq_function.get_augmented_q_batch_size(q=q) # TODO: Avoid unnecessary computation by not generating all candidates ics = gen_batch_initial_conditions( acq_function=acq_function, bounds=bounds, q=q_aug, num_restarts=num_restarts, raw_samples=raw_samples, fixed_features=fixed_features, options=options, inequality_constraints=inequality_constraints, equality_constraints=equality_constraints, ) # compute maximizer of the value function value_function = _get_value_function( model=acq_function.model, objective=acq_function.objective, posterior_transform=acq_function.posterior_transform, sampler=acq_function.inner_sampler, project=getattr(acq_function, "project", None), ) from botorch.optim.optimize import optimize_acqf fantasy_cands, fantasy_vals = optimize_acqf( acq_function=value_function, bounds=bounds, q=1, num_restarts=options.get("num_inner_restarts", 20), raw_samples=options.get("raw_inner_samples", 1024), fixed_features=fixed_features, return_best_only=False, inequality_constraints=inequality_constraints, equality_constraints=equality_constraints, ) # sampling from the optimizers n_value = int((1 - frac_random) * (q_aug - q)) # number of non-random ICs eta = options.get("eta", 2.0) weights = torch.exp(eta * standardize(fantasy_vals)) idx = torch.multinomial(weights, num_restarts * n_value, replacement=True) # set the respective initial conditions to the sampled optimizers ics[..., -n_value:, :] = fantasy_cands[idx, 0].view(num_restarts, n_value, -1) return ics def gen_value_function_initial_conditions( acq_function: AcquisitionFunction, bounds: Tensor, num_restarts: int, raw_samples: int, current_model: Model, fixed_features: Optional[Dict[int, float]] = None, options: Optional[Dict[str, Union[bool, float, int]]] = None, ) -> Tensor: r"""Generate a batch of smart initializations for optimizing the value function of qKnowledgeGradient. This function generates initial conditions for optimizing the inner problem of KG, i.e. its value function, using the maximizer of the posterior objective. Intutively, the maximizer of the fantasized posterior will often be close to a maximizer of the current posterior. This function uses that fact to generate the initital conditions for the fantasy points. Specifically, a fraction of `1 - frac_random` (see options) of raw samples is generated by sampling from the set of maximizers of the posterior objective (obtained via random restart optimization) according to a softmax transformation of their respective values. This means that this initialization strategy internally solves an acquisition function maximization problem. The remaining raw samples are generated using `draw_sobol_samples`. All raw samples are then evaluated, and the initial conditions are selected according to the standard initialization strategy in 'initialize_q_batch' individually for each inner problem. Args: acq_function: The value function instance to be optimized. bounds: A `2 x d` tensor of lower and upper bounds for each column of task features. num_restarts: The number of starting points for multistart acquisition function optimization. raw_samples: The number of raw samples to consider in the initialization heuristic. current_model: The model of the KG acquisition function that was used to generate the fantasy model of the value function. fixed_features: A map `{feature_index: value}` for features that should be fixed to a particular value during generation. options: Options for initial condition generation. 
These contain all settings for the standard heuristic initialization from `gen_batch_initial_conditions`. In addition, they contain `frac_random` (the fraction of fully random fantasy points), `num_inner_restarts` and `raw_inner_samples` (the number of random restarts and raw samples for solving the posterior objective maximization problem, respectively) and `eta` (temperature parameter for sampling heuristic from posterior objective maximizers). Returns: A `num_restarts x batch_shape x q x d` tensor that can be used as initial conditions for `optimize_acqf()`. Here `batch_shape` is the batch shape of value function model. Example: >>> fant_X = torch.rand(5, 1, 2) >>> fantasy_model = model.fantasize(fant_X, SobolQMCNormalSampler(16)) >>> value_function = PosteriorMean(fantasy_model) >>> bounds = torch.tensor([[0., 0.], [1., 1.]]) >>> Xinit = gen_value_function_initial_conditions( >>> value_function, bounds, num_restarts=10, raw_samples=512, >>> options={"frac_random": 0.25}, >>> ) """ options = options or {} seed: Optional[int] = options.get("seed") frac_random: float = options.get("frac_random", 0.6) if not 0 < frac_random < 1: raise ValueError( f"frac_random must take on values in (0,1). Value: {frac_random}" ) # compute maximizer of the current value function value_function = _get_value_function( model=current_model, objective=getattr(acq_function, "objective", None), posterior_transform=acq_function.posterior_transform, sampler=getattr(acq_function, "sampler", None), project=getattr(acq_function, "project", None), ) from botorch.optim.optimize import optimize_acqf fantasy_cands, fantasy_vals = optimize_acqf( acq_function=value_function, bounds=bounds, q=1, num_restarts=options.get("num_inner_restarts", 20), raw_samples=options.get("raw_inner_samples", 1024), fixed_features=fixed_features, return_best_only=False, options={ k: v for k, v in options.items() if k not in ("frac_random", "num_inner_restarts", "raw_inner_samples", "eta") }, ) batch_shape = acq_function.model.batch_shape # sampling from the optimizers n_value = int((1 - frac_random) * raw_samples) # number of non-random ICs if n_value > 0: eta = options.get("eta", 2.0) weights = torch.exp(eta * standardize(fantasy_vals)) idx = batched_multinomial( weights=weights.expand(*batch_shape, -1), num_samples=n_value, replacement=True, ).permute(-1, *range(len(batch_shape))) resampled = fantasy_cands[idx] else: resampled = torch.empty( 0, *batch_shape, 1, bounds.shape[-1], dtype=fantasy_cands.dtype, device=fantasy_cands.device, ) # add qMC samples randomized = draw_sobol_samples( bounds=bounds, n=raw_samples - n_value, q=1, batch_shape=batch_shape, seed=seed ).to(resampled) # full set of raw samples X_rnd = torch.cat([resampled, randomized], dim=0) X_rnd = fix_features(X_rnd, fixed_features=fixed_features) # evaluate the raw samples with torch.no_grad(): Y_rnd = acq_function(X_rnd) # select the restart points using the heuristic return initialize_q_batch( X=X_rnd, Y=Y_rnd, n=num_restarts, eta=options.get("eta", 2.0) ) def initialize_q_batch(X: Tensor, Y: Tensor, n: int, eta: float = 1.0) -> Tensor: r"""Heuristic for selecting initial conditions for candidate generation. This heuristic selects points from `X` (without replacement) with probability proportional to `exp(eta * Z)`, where `Z = (Y - mean(Y)) / std(Y)` and `eta` is a temperature parameter. When using an acquisiton function that is non-negative and possibly zero over large areas of the feature space (e.g. qEI), you should use `initialize_q_batch_nonneg` instead. 
Args: X: A `b x batch_shape x q x d` tensor of `b` - `batch_shape` samples of `q`-batches from a d`-dim feature space. Typically, these are generated using qMC sampling. Y: A tensor of `b x batch_shape` outcomes associated with the samples. Typically, this is the value of the batch acquisition function to be maximized. n: The number of initial condition to be generated. Must be less than `b`. eta: Temperature parameter for weighting samples. Returns: A `n x batch_shape x q x d` tensor of `n` - `batch_shape` `q`-batch initial conditions, where each batch of `n x q x d` samples is selected independently. Example: >>> # To get `n=10` starting points of q-batch size `q=3` >>> # for model with `d=6`: >>> qUCB = qUpperConfidenceBound(model, beta=0.1) >>> Xrnd = torch.rand(500, 3, 6) >>> Xinit = initialize_q_batch(Xrnd, qUCB(Xrnd), 10) """ n_samples = X.shape[0] batch_shape = X.shape[1:-2] or torch.Size() if n > n_samples: raise RuntimeError( f"n ({n}) cannot be larger than the number of " f"provided samples ({n_samples})" ) elif n == n_samples: return X Ystd = Y.std(dim=0) if torch.any(Ystd == 0): warnings.warn( "All acquisition values for raw samples points are the same for " "at least one batch. Choosing initial conditions at random.", BadInitialCandidatesWarning, ) return X[torch.randperm(n=n_samples, device=X.device)][:n] max_val, max_idx = torch.max(Y, dim=0) Z = (Y - Y.mean(dim=0)) / Ystd etaZ = eta * Z weights = torch.exp(etaZ) while torch.isinf(weights).any(): etaZ *= 0.5 weights = torch.exp(etaZ) if batch_shape == torch.Size(): idcs = torch.multinomial(weights, n) else: idcs = batched_multinomial( weights=weights.permute(*range(1, len(batch_shape) + 1), 0), num_samples=n ).permute(-1, *range(len(batch_shape))) # make sure we get the maximum if max_idx not in idcs: idcs[-1] = max_idx if batch_shape == torch.Size(): return X[idcs] else: return X.gather( dim=0, index=idcs.view(*idcs.shape, 1, 1).expand(n, *X.shape[1:]) ) def initialize_q_batch_nonneg( X: Tensor, Y: Tensor, n: int, eta: float = 1.0, alpha: float = 1e-4 ) -> Tensor: r"""Heuristic for selecting initial conditions for non-neg. acquisition functions. This function is similar to `initialize_q_batch`, but designed specifically for acquisition functions that are non-negative and possibly zero over large areas of the feature space (e.g. qEI). All samples for which `Y < alpha * max(Y)` will be ignored (assuming that `Y` contains at least one positive value). Args: X: A `b x q x d` tensor of `b` samples of `q`-batches from a `d`-dim. feature space. Typically, these are generated using qMC. Y: A tensor of `b` outcomes associated with the samples. Typically, this is the value of the batch acquisition function to be maximized. n: The number of initial condition to be generated. Must be less than `b`. eta: Temperature parameter for weighting samples. alpha: The threshold (as a fraction of the maximum observed value) under which to ignore samples. All input samples for which `Y < alpha * max(Y)` will be ignored. Returns: A `n x q x d` tensor of `n` `q`-batch initial conditions. 
Example: >>> # To get `n=10` starting points of q-batch size `q=3` >>> # for model with `d=6`: >>> qEI = qExpectedImprovement(model, best_f=0.2) >>> Xrnd = torch.rand(500, 3, 6) >>> Xinit = initialize_q_batch(Xrnd, qEI(Xrnd), 10) """ n_samples = X.shape[0] if n > n_samples: raise RuntimeError("n cannot be larger than the number of provided samples") elif n == n_samples: return X max_val, max_idx = torch.max(Y, dim=0) if torch.any(max_val <= 0): warnings.warn( "All acquisition values for raw sampled points are nonpositive, so " "initial conditions are being selected randomly.", BadInitialCandidatesWarning, ) return X[torch.randperm(n=n_samples, device=X.device)][:n] # make sure there are at least `n` points with positive acquisition values pos = Y > 0 num_pos = pos.sum().item() if num_pos < n: # select all positive points and then fill remaining quota with randomly # selected points remaining_indices = (~pos).nonzero(as_tuple=False).view(-1) rand_indices = torch.randperm(remaining_indices.shape[0], device=Y.device) sampled_remaining_indices = remaining_indices[rand_indices[: n - num_pos]] pos[sampled_remaining_indices] = 1 return X[pos] # select points within alpha of max_val, iteratively decreasing alpha by a # factor of 10 as necessary alpha_pos = Y >= alpha * max_val while alpha_pos.sum() < n: alpha = 0.1 * alpha alpha_pos = Y >= alpha * max_val alpha_pos_idcs = torch.arange(len(Y), device=Y.device)[alpha_pos] weights = torch.exp(eta * (Y[alpha_pos] / max_val - 1)) idcs = alpha_pos_idcs[torch.multinomial(weights, n)] if max_idx not in idcs: idcs[-1] = max_idx return X[idcs] def sample_points_around_best( acq_function: AcquisitionFunction, n_discrete_points: int, sigma: float, bounds: Tensor, best_pct: float = 5.0, subset_sigma: float = 1e-1, prob_perturb: Optional[float] = None, ) -> Optional[Tensor]: r"""Find best points and sample nearby points. Args: acq_function: The acquisition function. n_discrete_points: The number of points to sample. sigma: The standard deviation of the additive gaussian noise for perturbing the best points. bounds: A `2 x d`-dim tensor containing the bounds. best_pct: The percentage of best points to perturb. subset_sigma: The standard deviation of the additive gaussian noise for perturbing a subset of dimensions of the best points. prob_perturb: The probability of perturbing each dimension. Returns: An optional `n_discrete_points x d`-dim tensor containing the sampled points. This is None if no baseline points are found. 
""" X = get_X_baseline(acq_function=acq_function) if X is None: return with torch.no_grad(): try: posterior = acq_function.model.posterior(X) except AttributeError: warnings.warn( "Failed to sample around previous best points.", BotorchWarning, ) return mean = posterior.mean while mean.ndim > 2: # take average over batch dims mean = mean.mean(dim=0) try: f_pred = acq_function.objective(mean) # Some acquisition functions do not have an objective # and for some acquisition functions the objective is None except (AttributeError, TypeError): f_pred = mean if hasattr(acq_function, "maximize"): # make sure that the optimiztaion direction is set properly if not acq_function.maximize: f_pred = -f_pred try: # handle constraints for EHVI-based acquisition functions constraints = acq_function.constraints if constraints is not None: neg_violation = -torch.stack( [c(mean).clamp_min(0.0) for c in constraints], dim=-1 ).sum(dim=-1) feas = neg_violation == 0 if feas.any(): f_pred[~feas] = float("-inf") else: # set objective equal to negative violation f_pred = neg_violation except AttributeError: pass if f_pred.ndim == mean.ndim and f_pred.shape[-1] > 1: # multi-objective # find pareto set is_pareto = is_non_dominated(f_pred) best_X = X[is_pareto] else: if f_pred.shape[-1] == 1: f_pred = f_pred.squeeze(-1) n_best = max(1, round(X.shape[0] * best_pct / 100)) # the view() is to ensure that best_idcs is not a scalar tensor best_idcs = torch.topk(f_pred, n_best).indices.view(-1) best_X = X[best_idcs] use_perturbed_sampling = best_X.shape[-1] >= 20 or prob_perturb is not None n_trunc_normal_points = ( n_discrete_points // 2 if use_perturbed_sampling else n_discrete_points ) perturbed_X = sample_truncated_normal_perturbations( X=best_X, n_discrete_points=n_trunc_normal_points, sigma=sigma, bounds=bounds, ) if use_perturbed_sampling: perturbed_subset_dims_X = sample_perturbed_subset_dims( X=best_X, bounds=bounds, # ensure that we return n_discrete_points n_discrete_points=n_discrete_points - n_trunc_normal_points, sigma=sigma, prob_perturb=prob_perturb, ) perturbed_X = torch.cat([perturbed_X, perturbed_subset_dims_X], dim=0) # shuffle points perm = torch.randperm(perturbed_X.shape[0], device=X.device) perturbed_X = perturbed_X[perm] return perturbed_X def sample_truncated_normal_perturbations( X: Tensor, n_discrete_points: int, sigma: float, bounds: Tensor, qmc: bool = True, ) -> Tensor: r"""Sample points around `X`. Sample perturbed points around `X` such that the added perturbations are sampled from N(0, sigma^2 I) and truncated to be within [0,1]^d. Args: X: A `n x d`-dim tensor starting points. n_discrete_points: The number of points to sample. sigma: The standard deviation of the additive gaussian noise for perturbing the points. bounds: A `2 x d`-dim tensor containing the bounds. qmc: A boolean indicating whether to use qmc. Returns: A `n_discrete_points x d`-dim tensor containing the sampled points. """ X = normalize(X, bounds=bounds) d = X.shape[1] # sample points from N(X_center, sigma^2 I), truncated to be within # [0, 1]^d. 
if X.shape[0] > 1: rand_indices = torch.randint(X.shape[0], (n_discrete_points,), device=X.device) X = X[rand_indices] if qmc: std_bounds = torch.zeros(2, d, dtype=X.dtype, device=X.device) std_bounds[1] = 1 u = draw_sobol_samples(bounds=std_bounds, n=n_discrete_points, q=1).squeeze(1) else: u = torch.rand((n_discrete_points, d), dtype=X.dtype, device=X.device) # compute bounds to sample from a = -X b = 1 - X # compute z-score of bounds alpha = a / sigma beta = b / sigma normal = Normal(0, 1) cdf_alpha = normal.cdf(alpha) # use inverse transform perturbation = normal.icdf(cdf_alpha + u * (normal.cdf(beta) - cdf_alpha)) * sigma # add perturbation and clip points that are still outside perturbed_X = (X + perturbation).clamp(0.0, 1.0) return unnormalize(perturbed_X, bounds=bounds) def sample_perturbed_subset_dims( X: Tensor, bounds: Tensor, n_discrete_points: int, sigma: float = 1e-1, qmc: bool = True, prob_perturb: Optional[float] = None, ) -> Tensor: r"""Sample around `X` by perturbing a subset of the dimensions. By default, dimensions are perturbed with probability equal to `min(20 / d, 1)`. As shown in [Regis]_, perturbing a small number of dimensions can be beneificial. The perturbations are sampled from N(0, sigma^2 I) and truncated to be within [0,1]^d. Args: X: A `n x d`-dim tensor starting points. `X` must be normalized to be within `[0, 1]^d`. bounds: The bounds to sample perturbed values from n_discrete_points: The number of points to sample. sigma: The standard deviation of the additive gaussian noise for perturbing the points. qmc: A boolean indicating whether to use qmc. prob_perturb: The probability of perturbing each dimension. If omitted, defaults to `min(20 / d, 1)`. Returns: A `n_discrete_points x d`-dim tensor containing the sampled points. """ if bounds.ndim != 2: raise BotorchTensorDimensionError("bounds must be a `2 x d`-dim tensor.") elif X.ndim != 2: raise BotorchTensorDimensionError("X must be a `n x d`-dim tensor.") d = bounds.shape[-1] if prob_perturb is None: # Only perturb a subset of the features prob_perturb = min(20.0 / d, 1.0) if X.shape[0] == 1: X_cand = X.repeat(n_discrete_points, 1) else: rand_indices = torch.randint(X.shape[0], (n_discrete_points,), device=X.device) X_cand = X[rand_indices] pert = sample_truncated_normal_perturbations( X=X_cand, n_discrete_points=n_discrete_points, sigma=sigma, bounds=bounds, qmc=qmc, ) # find cases where we are not perturbing any dimensions mask = ( torch.rand( n_discrete_points, d, dtype=bounds.dtype, device=bounds.device, ) <= prob_perturb ) ind = (~mask).all(dim=-1).nonzero() # perturb `n_perturb` of the dimensions n_perturb = ceil(d * prob_perturb) perturb_mask = torch.zeros(d, dtype=mask.dtype, device=mask.device) perturb_mask[:n_perturb].fill_(1) # TODO: use batched `torch.randperm` when available: # https://github.com/pytorch/pytorch/issues/42502 for idx in ind: mask[idx] = perturb_mask[torch.randperm(d, device=bounds.device)] # Create candidate points X_cand[mask] = pert[mask] return X_cand def is_nonnegative(acq_function: AcquisitionFunction) -> bool: r"""Determine whether a given acquisition function is non-negative. Args: acq_function: The `AcquisitionFunction` instance. Returns: True if `acq_function` is non-negative, False if not, or if the behavior is unknown (for custom acquisition functions). 
Example: >>> qEI = qExpectedImprovement(model, best_f=0.1) >>> is_nonnegative(qEI) # returns True """ return isinstance( acq_function, ( analytic.ExpectedImprovement, analytic.ConstrainedExpectedImprovement, analytic.ProbabilityOfImprovement, analytic.NoisyExpectedImprovement, monte_carlo.qExpectedImprovement, monte_carlo.qNoisyExpectedImprovement, monte_carlo.qProbabilityOfImprovement, multi_objective.analytic.ExpectedHypervolumeImprovement, multi_objective.monte_carlo.qExpectedHypervolumeImprovement, multi_objective.monte_carlo.qNoisyExpectedHypervolumeImprovement, ), )
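
# Illustrative sketch (not part of the library source). It shows how
# `transform_constraints` lifts a pointwise constraint `x[0] + x[2] >= 1` on a
# `d=3` problem into the flattened `d*q`-dimensional space used for polytope
# sampling; the numbers and the helper name `_demo_transform_constraints` are
# assumptions made purely for demonstration.
def _demo_transform_constraints() -> None:
    constraint = (torch.tensor([0, 2]), torch.tensor([1.0, 1.0]), 1.0)
    lifted = transform_constraints(constraints=[constraint], q=2, d=3)
    # The intra-point constraint is replicated once per point in the q-batch,
    # with its indices offset by `i * d` for the i-th point:
    assert torch.equal(lifted[0][0], torch.tensor([0, 2]))
    assert torch.equal(lifted[1][0], torch.tensor([3, 5]))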
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Callable, Dict, Optional, Tuple, Union

import torch
from botorch.acquisition import AcquisitionFunction
from botorch.optim.homotopy import Homotopy
from botorch.optim.optimize import optimize_acqf
from torch import Tensor


def prune_candidates(
    candidates: Tensor,
    acq_values: Tensor,
    prune_tolerance: float,
) -> Tensor:
    r"""Prune candidates based on their distance to other candidates.

    Args:
        candidates: An `n x d` tensor of candidates.
        acq_values: An `n` tensor of candidate values.
        prune_tolerance: The minimum distance to prune candidates.

    Returns:
        An `m x d` tensor of pruned candidates.
    """
    if candidates.ndim != 2:
        raise ValueError("`candidates` must be of size `n x d`.")
    if acq_values.ndim != 1 or len(acq_values) != candidates.shape[0]:
        raise ValueError("`acq_values` must be of size `n`.")
    if prune_tolerance < 0:
        raise ValueError("`prune_tolerance` must be >= 0.")
    sorted_inds = acq_values.argsort(descending=True)
    candidates = candidates[sorted_inds]

    candidates_new = candidates[:1, :]
    for i in range(1, candidates.shape[0]):
        if (
            torch.cdist(candidates[i : i + 1, :], candidates_new).min()
            > prune_tolerance
        ):
            candidates_new = torch.cat(
                [candidates_new, candidates[i : i + 1, :]], dim=-2
            )
    return candidates_new


def optimize_acqf_homotopy(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    q: int,
    homotopy: Homotopy,
    num_restarts: int,
    raw_samples: Optional[int] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
    final_options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
    batch_initial_conditions: Optional[Tensor] = None,
    post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
    prune_tolerance: float = 1e-4,
) -> Tuple[Tensor, Tensor]:
    r"""Generate a set of candidates via multi-start optimization.

    Args:
        acq_function: An AcquisitionFunction.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
        q: The number of candidates.
        homotopy: Homotopy object that will make the necessary modifications to the
            problem when calling `step()`.
        num_restarts: The number of starting points for multistart acquisition
            function optimization.
        raw_samples: The number of samples for initialization. This is required
            if `batch_initial_conditions` is not specified.
        fixed_features: A map `{feature_index: value}` for features that
            should be fixed to a particular value during generation.
        options: Options for candidate generation.
        final_options: Options for candidate generation in the last homotopy step.
        batch_initial_conditions: A tensor to specify the initial conditions. Set
            this if you do not want to use the default initialization strategy.
        post_processing_func: Post processing function (such as rounding or clamping)
            that is applied before choosing the final candidate.
    """
    candidate_list, acq_value_list = [], []
    if q > 1:
        base_X_pending = acq_function.X_pending

    for _ in range(q):
        candidates = batch_initial_conditions
        homotopy.restart()

        while not homotopy.should_stop:
            candidates, acq_values = optimize_acqf(
                q=1,
                acq_function=acq_function,
                bounds=bounds,
                num_restarts=num_restarts,
                batch_initial_conditions=candidates,
                raw_samples=raw_samples,
                fixed_features=fixed_features,
                return_best_only=False,
                options=options,
            )
            homotopy.step()

            # Prune candidates
            candidates = prune_candidates(
                candidates=candidates.squeeze(1),
                acq_values=acq_values,
                prune_tolerance=prune_tolerance,
            ).unsqueeze(1)

        # Optimize one more time with the final options
        candidates, acq_values = optimize_acqf(
            q=1,
            acq_function=acq_function,
            bounds=bounds,
            num_restarts=num_restarts,
            batch_initial_conditions=candidates,
            return_best_only=False,
            options=final_options,
        )

        # Post-process the candidates and grab the best candidate
        if post_processing_func is not None:
            candidates = post_processing_func(candidates)
            acq_values = acq_function(candidates)

        best = torch.argmax(acq_values.view(-1), dim=0)
        candidate, acq_value = candidates[best], acq_values[best]

        # Keep the new candidate and update the pending points
        candidate_list.append(candidate)
        acq_value_list.append(acq_value)
        selected_candidates = torch.cat(candidate_list, dim=-2)
        if q > 1:
            acq_function.set_X_pending(
                torch.cat([base_X_pending, selected_candidates], dim=-2)
                if base_X_pending is not None
                else selected_candidates
            )

    if q > 1:
        # Reset acq_function to previous X_pending state
        acq_function.set_X_pending(base_X_pending)
    homotopy.reset()  # Reset the homotopy parameters

    return selected_candidates, torch.stack(acq_value_list)
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Utility functions for constrained optimization. """ from __future__ import annotations from functools import partial from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError from scipy.optimize import Bounds from torch import Tensor ScipyConstraintDict = Dict[ str, Union[str, Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]] ] NLC_TOL = -1e-6 def make_scipy_bounds( X: Tensor, lower_bounds: Optional[Union[float, Tensor]] = None, upper_bounds: Optional[Union[float, Tensor]] = None, ) -> Optional[Bounds]: r"""Creates a scipy Bounds object for optimziation Args: X: `... x d` tensor lower_bounds: Lower bounds on each column (last dimension) of `X`. If this is a single float, then all columns have the same bound. upper_bounds: Lower bounds on each column (last dimension) of `X`. If this is a single float, then all columns have the same bound. Returns: A scipy `Bounds` object if either lower_bounds or upper_bounds is not None, and None otherwise. Example: >>> X = torch.rand(5, 2) >>> scipy_bounds = make_scipy_bounds(X, 0.1, 0.8) """ if lower_bounds is None and upper_bounds is None: return None def _expand(bounds: Union[float, Tensor], X: Tensor, lower: bool) -> Tensor: if bounds is None: ebounds = torch.full_like(X, float("-inf" if lower else "inf")) else: if not torch.is_tensor(bounds): bounds = torch.tensor(bounds) ebounds = bounds.expand_as(X) return _arrayify(ebounds).flatten() lb = _expand(bounds=lower_bounds, X=X, lower=True) ub = _expand(bounds=upper_bounds, X=X, lower=False) return Bounds(lb=lb, ub=ub, keep_feasible=True) def make_scipy_linear_constraints( shapeX: torch.Size, inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, ) -> List[ScipyConstraintDict]: r"""Generate scipy constraints from torch representation. Args: shapeX: The shape of the torch.Tensor to optimize over (i.e. `(b) x q x d`) inequality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`, where `indices` is a single-dimensional index tensor (long dtype) containing indices into the last dimension of `X`, `coefficients` is a single-dimensional tensor of coefficients of the same length, and rhs is a scalar. equality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) == rhs` (with `indices` and `coefficients` of the same form as in `inequality_constraints`). Returns: A list of dictionaries containing callables for constraint function values and Jacobians and a string indicating the associated constraint type ("eq", "ineq"), as expected by `scipy.minimize`. This function assumes that constraints are the same for each input batch, and broadcasts the constraints accordingly to the input batch shape. This function does support constraints across elements of a q-batch if the indices are a 2-d Tensor. 
Example: The following will enforce that `x[1] + 0.5 x[3] >= -0.1` for each `x` in both elements of the q-batch, and each of the 3 t-batches: >>> constraints = make_scipy_linear_constraints( >>> torch.Size([3, 2, 4]), >>> [(torch.tensor([1, 3]), torch.tensor([1.0, 0.5]), -0.1)], >>> ) The following will enforce that `x[0, 1] + 0.5 x[1, 3] >= -0.1` where x[0, :] is the first element of the q-batch and x[1, :] is the second element of the q-batch, for each of the 3 t-batches: >>> constraints = make_scipy_linear_constraints( >>> torch.size([3, 2, 4]) >>> [(torch.tensor([[0, 1], [1, 3]), torch.tensor([1.0, 0.5]), -0.1)], >>> ) """ constraints = [] if inequality_constraints is not None: for indcs, coeffs, rhs in inequality_constraints: constraints += _make_linear_constraints( indices=indcs, coefficients=coeffs, rhs=rhs, shapeX=shapeX, eq=False ) if equality_constraints is not None: for indcs, coeffs, rhs in equality_constraints: constraints += _make_linear_constraints( indices=indcs, coefficients=coeffs, rhs=rhs, shapeX=shapeX, eq=True ) return constraints def eval_lin_constraint( x: np.ndarray, flat_idxr: List[int], coeffs: np.ndarray, rhs: float ) -> np.float64: r"""Evaluate a single linear constraint. Args: x: The input array. flat_idxr: The indices in `x` to consider. coeffs: The coefficients corresponding to the indices. rhs: The right-hand-side of the constraint. Returns: The evaluted constraint: `\sum_i (coeffs[i] * x[i]) - rhs` """ return np.sum(x[flat_idxr] * coeffs, -1) - rhs def lin_constraint_jac( x: np.ndarray, flat_idxr: List[int], coeffs: np.ndarray, n: int ) -> np.ndarray: r"""Return the Jacobian associated with a linear constraint. Args: x: The input array. flat_idxr: The indices for the elements of x that appear in the constraint. coeffs: The coefficients corresponding to the indices. n: number of elements Returns: The Jacobian. """ # TODO: Use sparse representation (not sure if scipy optim supports that) jac = np.zeros(n) jac[flat_idxr] = coeffs return jac def _arrayify(X: Tensor) -> np.ndarray: r"""Convert a torch.Tensor (any dtype or device) to a numpy (double) array. Args: X: The input tensor. Returns: A numpy array of double dtype with the same shape and data as `X`. """ return X.cpu().detach().contiguous().double().clone().numpy() def _validate_linear_constraints_shape_input(shapeX: torch.Size) -> torch.Size: """ Validate `shapeX` input to `_make_linear_constraints`. Check that it has either 2 or 3 dimensions, and add a scalar batch dimension if it is only 2d. """ if len(shapeX) not in (2, 3): raise UnsupportedError( f"`shapeX` must be `(b) x q x d` (at least two-dimensional). It is " f"{shapeX}." ) if len(shapeX) == 2: shapeX = torch.Size([1, *shapeX]) return shapeX def _validate_linear_constraints_indices_input(indices: Tensor, q: int, d: int) -> None: if indices.dim() > 2: raise UnsupportedError( "Linear constraints supported only on individual candidates and " "across q-batches, not across general batch shapes." 
) elif indices.dim() == 2: if indices[:, 0].max() > q - 1: raise RuntimeError(f"Index out of bounds for {q}-batch") if indices[:, 1].max() > d - 1: raise RuntimeError(f"Index out of bounds for {d}-dim parameter tensor") elif indices.dim() == 1: if indices.max() > d - 1: raise RuntimeError(f"Index out of bounds for {d}-dim parameter tensor") else: raise ValueError("`indices` must be at least one-dimensional") def _make_linear_constraints( indices: Tensor, coefficients: Tensor, rhs: float, shapeX: torch.Size, eq: bool = False, ) -> List[ScipyConstraintDict]: r"""Create linear constraints to be used by `scipy.minimize`. Encodes constraints of the form `\sum_i (coefficients[i] * X[..., indices[i]]) ? rhs` where `?` can be designated either as `>=` by setting `eq=False`, or as `=` by setting `eq=True`. If indices is one-dimensional, the constraints are broadcasted across all elements of the q-batch. If indices is two-dimensional, then constraints are applied across elements of a q-batch. In either case, constraints are created for all t-batches. Args: indices: A tensor of shape `c` or `c x 2`, where c is the number of terms in the constraint. If single-dimensional, contains the indices of the dimensions of the feature space that occur in the linear constraint. If two-dimensional, contains pairs of indices of the q-batch (0) and the feature space (1) that occur in the linear constraint. coefficients: A single-dimensional tensor of coefficients with the same number of elements as `indices`. rhs: The right hand side of the constraint. shapeX: The shape of the torch tensor to construct the constraints for (i.e. `(b) x q x d`). Must have two or three dimensions. eq: If True, return an equality constraint, o/w return an inequality constraint (indicated by "eq" / "ineq" value of the `type` key). Returns: A list of constraint dictionaries with the following keys - "type": Indicates the type of the constraint ("eq" if `eq=True`, "ineq" o/w) - "fun": A callable evaluating the constraint value on `x`, a flattened version of the input tensor `X`, returning a scalar. - "jac": A callable evaluating the constraint's Jacobian on `x`, a flattened version of the input tensor `X`, returning a numpy array. >>> shapeX = torch.Size([3, 5, 4]) >>> constraints = _make_linear_constraints( ... indices=torch.tensor([1., 2.]), ... coefficients=torch.tensor([-0.5, 1.3]), ... rhs=0.49, ... shapeX=shapeX, ... eq=True ... ) >>> len(constraints) 15 >>> constraints[0].keys() dict_keys(['type', 'fun', 'jac']) >>> x = np.arange(60).reshape(shapeX) >>> constraints[0]["fun"](x) 1.61 # 1 * -0.5 + 2 * 1.3 - 0.49 >>> constraints[0]["jac"](x) [0., -0.5, 1.3, 0., 0., ...] 
>>> constraints[1]["fun"](x) # 4.81 """ shapeX = _validate_linear_constraints_shape_input(shapeX) b, q, d = shapeX _validate_linear_constraints_indices_input(indices, q, d) n = shapeX.numel() constraints: List[ScipyConstraintDict] = [] coeffs = _arrayify(coefficients) ctype = "eq" if eq else "ineq" offsets = [q * d, d] if indices.dim() == 2: # indices has two dimensions (potential constraints across q-batch elements) # rule is [i, j, k] is at # i * offsets[0] + j * offsets[1] + k for i in range(b): list_ind = (idx.tolist() for idx in indices) idxr = [i * offsets[0] + idx[0] * offsets[1] + idx[1] for idx in list_ind] fun = partial( eval_lin_constraint, flat_idxr=idxr, coeffs=coeffs, rhs=float(rhs) ) jac = partial(lin_constraint_jac, flat_idxr=idxr, coeffs=coeffs, n=n) constraints.append({"type": ctype, "fun": fun, "jac": jac}) elif indices.dim() == 1: # indices is one-dim - broadcast constraints across q-batches and t-batches for i in range(b): for j in range(q): idxr = (i * offsets[0] + j * offsets[1] + indices).tolist() fun = partial( eval_lin_constraint, flat_idxr=idxr, coeffs=coeffs, rhs=float(rhs) ) jac = partial(lin_constraint_jac, flat_idxr=idxr, coeffs=coeffs, n=n) constraints.append({"type": ctype, "fun": fun, "jac": jac}) return constraints def _generate_unfixed_nonlin_constraints( constraints: Optional[List[Callable[[Tensor], Tensor]]], fixed_features: Dict[int, float], dimension: int, ) -> Optional[List[Callable[[Tensor], Tensor]]]: """Given a dictionary of fixed features, returns a list of callables for nonlinear inequality constraints expecting only a tensor with the non-fixed features as input. """ if not constraints: return constraints selector = [] idx_X, idx_f = 0, dimension - len(fixed_features) for i in range(dimension): if i in fixed_features.keys(): selector.append(idx_f) idx_f += 1 else: selector.append(idx_X) idx_X += 1 values = torch.tensor(list(fixed_features.values()), dtype=torch.double) def _wrap_nonlin_constraint( constraint: Callable[[Tensor], Tensor] ) -> Callable[[Tensor], Tensor]: def new_nonlin_constraint(X: Tensor) -> Tensor: ivalues = values.to(X).expand(*X.shape[:-1], len(fixed_features)) X_perm = torch.cat([X, ivalues], dim=-1) return constraint(X_perm[..., selector]) return new_nonlin_constraint return [ _wrap_nonlin_constraint(constraint=constraint) for constraint in constraints ] def _generate_unfixed_lin_constraints( constraints: Optional[List[Tuple[Tensor, Tensor, float]]], fixed_features: Dict[int, float], dimension: int, eq: bool, ) -> Optional[List[Tuple[Tensor, Tensor, float]]]: # If constraints is None or an empty list, then return itself if not constraints: return constraints # replace_index generates the new indices for the unfixed dimensions # after eliminating the fixed dimensions. # Example: dimension = 5, ff.keys() = [1, 3], replace_index = {0: 0, 2: 1, 4: 2} unfixed_keys = sorted(set(range(dimension)) - set(fixed_features)) unfixed_keys = torch.tensor(unfixed_keys).to(constraints[0][0]) replace_index = torch.arange(dimension - len(fixed_features)).to(constraints[0][0]) new_constraints = [] # parse constraints one-by-one for constraint_id, (indices, coefficients, rhs) in enumerate(constraints): new_rhs = rhs new_indices = [] new_coefficients = [] # the following unsqueeze is done to facilitate a simpler for-loop. 
indices_2dim = indices if indices.ndim == 2 else indices.unsqueeze(-1) for coefficient, index in zip(coefficients, indices_2dim): ffval_or_None = fixed_features.get(index[-1].item()) # if ffval_or_None is None, then the index is not fixed if ffval_or_None is None: new_indices.append(index) new_coefficients.append(coefficient) # otherwise, we "remove" the constraints corresponding to that index else: new_rhs = new_rhs - coefficient.item() * ffval_or_None # all indices were fixed, so the constraint is gone. if len(new_indices) == 0: if (eq and new_rhs != 0) or (not eq and new_rhs > 0): prefix = "Eq" if eq else "Ineq" raise CandidateGenerationError( f"{prefix}uality constraint {constraint_id} not met " "with fixed_features." ) else: # However, one key transformation has to be noted. # new_indices is with respect to the older (fuller) domain, and so it will # have to be converted using replace_index. new_indices = torch.stack(new_indices, dim=0) # generate new index location after the removal of fixed_features indices new_indices_dim_d = new_indices[:, -1].unsqueeze(-1) new_indices_dim_d = replace_index[ torch.nonzero(new_indices_dim_d == unfixed_keys, as_tuple=True)[1] ] new_indices[:, -1] = new_indices_dim_d # squeeze(-1) is a no-op if dim -1 is not singleton new_indices.squeeze_(-1) # convert new_coefficients to Tensor new_coefficients = torch.stack(new_coefficients) new_constraints.append((new_indices, new_coefficients, new_rhs)) return new_constraints def _make_f_and_grad_nonlinear_inequality_constraints( f_np_wrapper: Callable, nlc: Callable ) -> Tuple[Callable[[Tensor], Tensor], Callable[[Tensor], Tensor]]: """ Create callables for objective + grad for the nonlinear inequality constraints. The Scipy interface requires specifying separate callables and we use caching to avoid evaluating the same input twice. This caching onlh works if the returned functions are evaluated on the same input in immediate sequence (i.e., calling `f_obj(X_1)`, `f_grad(X_1)` will result in a single forward pass, while `f_obj(X_1)`, `f_grad(X_2)`, `f_obj(X_1)` will result in three forward passes). """ def f_obj_and_grad(x): obj, grad = f_np_wrapper(x, f=nlc) return obj, grad cache = {"X": None, "obj": None, "grad": None} def f_obj(X): X_c = cache["X"] if X_c is None or not np.array_equal(X_c, X): cache["X"] = X.copy() cache["obj"], cache["grad"] = f_obj_and_grad(X) return cache["obj"] def f_grad(X): X_c = cache["X"] if X_c is None or not np.array_equal(X_c, X): cache["X"] = X.copy() cache["obj"], cache["grad"] = f_obj_and_grad(X) return cache["grad"] return f_obj, f_grad def make_scipy_nonlinear_inequality_constraints( nonlinear_inequality_constraints: List[Callable], f_np_wrapper: Callable, x0: Tensor, ) -> List[Dict]: r"""Generate Scipy nonlinear inequality constraints from callables. Args: nonlinear_inequality_constraints: List of callables for the nonlinear inequality constraints. Each callable represents a constraint of the form >= 0 and takes a torch tensor of size (p x q x dim) and returns a torch tensor of size (p x q). f_np_wrapper: A wrapper function that given a constraint evaluates the value and gradient (using autograd) of a numpy input and returns both the objective and the gradient. x0: The starting point for SLSQP. We return this starting point in (rare) cases where SLSQP fails and thus require it to be feasible. 
Returns: A list of dictionaries containing callables for constraint function values and Jacobians and a string indicating the associated constraint type ("eq", "ineq"), as expected by `scipy.minimize`. """ if not isinstance(nonlinear_inequality_constraints, list): raise ValueError( "`nonlinear_inequality_constraints` must be a list of callables, " f"got {type(nonlinear_inequality_constraints)}." ) scipy_nonlinear_inequality_constraints = [] for nlc in nonlinear_inequality_constraints: if _arrayify(nlc(x0)).item() < NLC_TOL: raise ValueError( "`batch_initial_conditions` must satisfy the non-linear inequality " "constraints." ) f_obj, f_grad = _make_f_and_grad_nonlinear_inequality_constraints( f_np_wrapper=f_np_wrapper, nlc=nlc ) scipy_nonlinear_inequality_constraints.append( { "type": "ineq", "fun": f_obj, "jac": f_grad, } ) return scipy_nonlinear_inequality_constraints
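# --- Editor's usage sketch (not part of the library source) -----------------
# Shows how the bound/constraint helpers above translate a torch problem into
# the flat representation expected by scipy.optimize.minimize. Assumes the
# definitions above are in scope (in BoTorch they live in
# botorch.optim.parameter_constraints); all tensors are toy values.
import torch

X = torch.rand(3, 2, 4)  # b=3 t-batches, q=2 candidates, d=4 features

# Box bounds for every element of X, in the flattened layout used by scipy.
scipy_bounds = make_scipy_bounds(X, lower_bounds=0.0, upper_bounds=1.0)

# Enforce x[1] + 0.5 * x[3] >= -0.1 for each candidate in each t-batch.
constraints = make_scipy_linear_constraints(
    shapeX=X.shape,
    inequality_constraints=[
        (torch.tensor([1, 3]), torch.tensor([1.0, 0.5]), -0.1)
    ],
)
x_flat = _arrayify(X).flatten()
print(len(scipy_bounds.lb), len(constraints))  # 24 flat bounds, 6 constraint dicts
print(constraints[0]["fun"](x_flat))           # value of the first constraint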
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Utilities for fitting and manipulating models.""" from __future__ import annotations from re import Pattern from typing import ( Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple, Union, ) from warnings import warn import torch from botorch.exceptions.warnings import BotorchWarning from botorch.models.gpytorch import GPyTorchModel from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood from gpytorch.mlls.marginal_log_likelihood import MarginalLogLikelihood from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood from torch import Tensor from torch.nn import Module from torch.utils.data import DataLoader, TensorDataset class TorchAttr(NamedTuple): shape: torch.Size dtype: torch.dtype device: torch.device def _get_extra_mll_args( mll: MarginalLogLikelihood, ) -> Union[List[Tensor], List[List[Tensor]]]: r"""Obtain extra arguments for MarginalLogLikelihood objects. Get extra arguments (beyond the model output and training targets) required for the particular type of MarginalLogLikelihood for a forward pass. Args: mll: The MarginalLogLikelihood module. Returns: Extra arguments for the MarginalLogLikelihood. Returns an empty list if the mll type is unknown. """ warn("`_get_extra_mll_args` is marked for deprecation.", DeprecationWarning) if isinstance(mll, ExactMarginalLogLikelihood): return list(mll.model.train_inputs) elif isinstance(mll, SumMarginalLogLikelihood): return [list(x) for x in mll.model.train_inputs] return [] def get_data_loader( model: GPyTorchModel, batch_size: int = 1024, **kwargs: Any ) -> DataLoader: dataset = TensorDataset(*model.train_inputs, model.train_targets) return DataLoader( dataset=dataset, batch_size=min(batch_size, len(model.train_targets)), **kwargs ) def get_parameters( module: Module, requires_grad: Optional[bool] = None, name_filter: Optional[Callable[[str], bool]] = None, ) -> Dict[str, Tensor]: r"""Helper method for obtaining a module's parameters and their respective ranges. Args: module: The target module from which parameters are to be extracted. requires_grad: Optional Boolean used to filter parameters based on whether or not their require_grad attribute matches the user provided value. name_filter: Optional Boolean function used to filter parameters by name. Returns: A dictionary of parameters. """ parameters = {} for name, param in module.named_parameters(): if requires_grad is not None and param.requires_grad != requires_grad: continue if name_filter and not name_filter(name): continue parameters[name] = param return parameters def get_parameters_and_bounds( module: Module, requires_grad: Optional[bool] = None, name_filter: Optional[Callable[[str], bool]] = None, default_bounds: Tuple[float, float] = (-float("inf"), float("inf")), ) -> Tuple[Dict[str, Tensor], Dict[str, Tuple[Optional[float], Optional[float]]]]: r"""Helper method for obtaining a module's parameters and their respective ranges. Args: module: The target module from which parameters are to be extracted. name_filter: Optional Boolean function used to filter parameters by name. requires_grad: Optional Boolean used to filter parameters based on whether or not their require_grad attribute matches the user provided value. default_bounds: Default lower and upper bounds for constrained parameters with `None` typed bounds. 
Returns: A dictionary of parameters and a dictionary of parameter bounds. """ if hasattr(module, "named_parameters_and_constraints"): bounds = {} params = {} for name, param, constraint in module.named_parameters_and_constraints(): if (requires_grad is None or (param.requires_grad == requires_grad)) and ( name_filter is None or name_filter(name) ): params[name] = param if constraint is None: continue bounds[name] = tuple( default if bound is None else constraint.inverse_transform(bound) for (bound, default) in zip(constraint, default_bounds) ) return params, bounds params = get_parameters( module, requires_grad=requires_grad, name_filter=name_filter ) return params, {} def get_name_filter( patterns: Iterator[Union[Pattern, str]] ) -> Callable[[Union[str, Tuple[str, Any, ...]]], bool]: r"""Returns a binary function that filters strings (or iterables whose first element is a string) according to a bank of excluded patterns. Typically, used in conjunction with generators such as `module.named_parameters()`. Args: patterns: A collection of regular expressions or strings that define the set of names to be excluded. Returns: A binary function indicating whether or not an item should be filtered. """ names = set() _patterns = set() for pattern in patterns: if isinstance(pattern, str): names.add(pattern) elif isinstance(pattern, Pattern): _patterns.add(pattern) else: raise TypeError( "Expected `patterns` to contain `str` or `re.Pattern` typed elements, " f"but found {type(pattern)}." ) def name_filter(item: Union[str, Tuple[str, Any, ...]]) -> bool: name = item if isinstance(item, str) else next(iter(item)) if name in names: return False for pattern in _patterns: if pattern.search(name): return False return True return name_filter def sample_all_priors(model: GPyTorchModel, max_retries: int = 100) -> None: r"""Sample from hyperparameter priors (in-place). Args: model: A GPyTorchModel. """ for _, module, prior, closure, setting_closure in model.named_priors(): if setting_closure is None: raise RuntimeError( "Must provide inverse transform to be able to sample from prior." ) for i in range(max_retries): try: setting_closure(module, prior.sample(closure(module).shape)) break except NotImplementedError: warn( f"`rsample` not implemented for {type(prior)}. Skipping.", BotorchWarning, ) break except RuntimeError as e: if "out of bounds of its current constraints" in str(e): if i == max_retries - 1: raise RuntimeError( "Failed to sample a feasible parameter value " f"from the prior after {max_retries} attempts." ) else: raise e
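# --- Editor's usage sketch (not part of the library source) -----------------
# Rough illustration of the model utilities above on a stock BoTorch model.
# Assumes the helpers are in scope (in BoTorch: botorch.optim.utils.model_utils)
# and that the excluded parameter name matches the model at hand; both are
# assumptions made for demonstration only.
import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(8, 2, dtype=torch.double)
train_Y = torch.rand(8, 1, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)

# Gather trainable parameters and their bounds, excluding the raw noise term
# by name (the exact name is model-dependent).
params, bounds = get_parameters_and_bounds(
    model,
    requires_grad=True,
    name_filter=get_name_filter(["likelihood.noise_covar.raw_noise"]),
)
print(sorted(params), sorted(bounds))

# Re-initialize hyperparameters in-place by sampling from their priors.
sample_all_priors(model)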
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Utilities for interfacing Numpy and Torch.""" from __future__ import annotations from itertools import tee from typing import Callable, Dict, Iterator, Optional, Tuple, Union import numpy as np import torch from botorch.utils.types import NoneType from numpy import ndarray from torch import Tensor torch_to_numpy_dtype_dict = { torch.bool: bool, torch.uint8: np.uint8, torch.int8: np.int8, torch.int16: np.int16, torch.int32: np.int32, torch.int64: np.int64, torch.float16: np.float16, torch.float32: np.float32, torch.float64: np.float64, torch.complex64: np.complex64, torch.complex128: np.complex128, } def as_ndarray( values: Tensor, dtype: Optional[np.dtype] = None, inplace: bool = True ) -> ndarray: r"""Helper for going from torch.Tensor to numpy.ndarray. Args: values: Tensor to be converted to ndarray. dtype: Optional numpy.dtype for the converted tensor. inplace: Boolean indicating whether memory should be shared if possible. Returns: An ndarray with the same data as `values`. """ with torch.no_grad(): out = values.cpu() # maybe transfer to cpu # Determine whether or not to `clone` if ( # cond 1: are we not in `inplace` mode? not inplace # cond 2: did we already copy when calling `cpu` above? and out.device == values.device # cond 3: will we copy when calling `astype` below? and (dtype is None or out.dtype == torch_to_numpy_dtype_dict[dtype]) ): out = out.clone() # Convert to ndarray and maybe cast to `dtype` out = out.numpy() return out.astype(dtype, copy=False) def get_tensors_as_ndarray_1d( tensors: Union[Iterator[Tensor], Dict[str, Tensor]], out: Optional[ndarray] = None, dtype: Optional[Union[np.dtype, str]] = None, as_array: Callable[[Tensor], ndarray] = as_ndarray, ) -> ndarray: # Create a pair of iterators, one for setup and one for data transfer named_tensors_iter, named_tensors_iter2 = tee( iter(tensors.items()) if isinstance(tensors, dict) else enumerate(tensors), 2 ) # Use `named_tensors_iter` to get size of `out` and `dtype` when None try: name, tnsr = next(named_tensors_iter) except StopIteration: raise RuntimeError(f"Argument `tensors` with type {type(tensors)} is empty.") size = tnsr.numel() + sum(tnsr.numel() for _, tnsr in named_tensors_iter) dtype = torch_to_numpy_dtype_dict[tnsr.dtype] if dtype is None else dtype # Preallocate or validate `out` if out is None: # use first tensor as a reference when `dtype` is None out = np.empty([size], dtype=dtype) elif out.ndim != 1: raise ValueError(f"Expected a vector for `out`, but out.shape={out.shape}.") elif out.size != size: raise ValueError( f"Size of `parameters` ({size}) does not match size of `out` ({out.size})." ) # Use `named_tensors_iter2` to transfer data from `tensors` to `out` index = 0 for name, tnsr in named_tensors_iter2: try: size = tnsr.numel() out[index : index + size] = as_array(tnsr.view(-1)) index += size except Exception as e: raise RuntimeError( "`get_tensors_as_ndarray_1d` failed while copying values from " f"tensor {name}; rethrowing original exception." 
) from e return out def set_tensors_from_ndarray_1d( tensors: Union[Iterator[Tensor], Dict[str, Tensor]], array: ndarray, as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor, ) -> None: r"""Sets the values of one more tensors based off of a vector of assignments.""" named_tensors_iter = ( iter(tensors.items()) if isinstance(tensors, dict) else enumerate(tensors) ) with torch.no_grad(): index = 0 for name, tnsr in named_tensors_iter: try: size = tnsr.numel() vals = array[index : index + size] if tnsr.ndim else array[index] tnsr.copy_(as_tensor(vals).to(tnsr).view(tnsr.shape).to(tnsr)) index += size except Exception as e: raise RuntimeError( "`set_tensors_from_ndarray_1d` failed while copying values to " f"tensor {name}; rethrowing original exception." ) from e def get_bounds_as_ndarray( parameters: Dict[str, Tensor], bounds: Dict[ str, Tuple[Union[float, Tensor, NoneType], Union[float, Tensor, NoneType]] ], ) -> Optional[np.ndarray]: r"""Helper method for converting bounds into an ndarray. Args: parameters: A dictionary of parameters. bounds: A dictionary of (optional) lower and upper bounds. Returns: An ndarray of bounds. """ inf = float("inf") full_size = sum(param.numel() for param in parameters.values()) out = np.full((full_size, 2), (-inf, inf)) index = 0 for name, param in parameters.items(): size = param.numel() if name in bounds: lower, upper = bounds[name] lower = -inf if lower is None else lower upper = inf if upper is None else upper if isinstance(lower, Tensor): lower = lower.cpu() if isinstance(upper, Tensor): upper = upper.cpu() out[index : index + size, 0] = lower out[index : index + size, 1] = upper index = index + size # If all bounds are +/- inf, return None. if np.isinf(out).all(): out = None return out
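# --- Editor's usage sketch (not part of the library source) -----------------
# Round-trips a dictionary of tensors through the flat-ndarray helpers above,
# assuming they are in scope (in BoTorch: botorch.optim.utils.numpy_utils).
import numpy as np
import torch

params = {
    "a": torch.zeros(2, 2, dtype=torch.float64),
    "b": torch.zeros(3, dtype=torch.float64),
}

# Flatten all parameters into a single float64 vector (4 + 3 = 7 entries).
x = get_tensors_as_ndarray_1d(params)
print(x.shape)  # (7,)

# Write new values back into the tensors from a flat vector of assignments.
set_tensors_from_ndarray_1d(params, np.arange(7, dtype=np.float64))
print(params["b"])  # tensor([4., 5., 6.], dtype=torch.float64)

# Per-parameter bounds in the same flat layout; unspecified entries are +/- inf.
print(get_bounds_as_ndarray(params, {"a": (0.0, 1.0)}).shape)  # (7, 2)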
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations import time from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union import numpy as np from botorch.exceptions.errors import OptimizationTimeoutError from scipy import optimize def minimize_with_timeout( fun: Callable[[np.ndarray, *Any], float], x0: np.ndarray, args: Tuple[Any, ...] = (), method: Optional[str] = None, jac: Optional[Union[str, Callable, bool]] = None, hess: Optional[Union[str, Callable, optimize.HessianUpdateStrategy]] = None, hessp: Optional[Callable] = None, bounds: Optional[Union[Sequence[Tuple[float, float]], optimize.Bounds]] = None, constraints=(), # Typing this properly is a s**t job tol: Optional[float] = None, callback: Optional[Callable] = None, options: Optional[Dict[str, Any]] = None, timeout_sec: Optional[float] = None, ) -> optimize.OptimizeResult: r"""Wrapper around scipy.optimize.minimize to support timeout. This method calls scipy.optimize.minimize with all arguments forwarded verbatim. The only difference is that if provided a `timeout_sec` argument, it will automatically stop the optimziation after the timeout is reached. Internally, this is achieved by automatically constructing a wrapper callback method that is injected to the scipy.optimize.minimize call and that keeps track of the runtime and the optimization variables at the current iteration. """ if timeout_sec: start_time = time.monotonic() callback_data = {"num_iterations": 0} # update from withing callback below def timeout_callback(xk: np.ndarray) -> bool: runtime = time.monotonic() - start_time callback_data["num_iterations"] += 1 if runtime > timeout_sec: raise OptimizationTimeoutError(current_x=xk, runtime=runtime) return False if callback is None: wrapped_callback = timeout_callback elif callable(method): raise NotImplementedError( "Custom callable not supported for `method` argument." ) elif method == "trust-constr": # special signature def wrapped_callback( xk: np.ndarray, state: optimize.OptimizeResult ) -> bool: # order here is important to make sure base callback gets executed return callback(xk, state) or timeout_callback(xk=xk) else: def wrapped_callback(xk: np.ndarray) -> None: timeout_callback(xk=xk) callback(xk) else: wrapped_callback = callback try: return optimize.minimize( fun=fun, x0=x0, args=args, method=method, jac=jac, hess=hess, hessp=hessp, bounds=bounds, constraints=constraints, tol=tol, callback=wrapped_callback, options=options, ) except OptimizationTimeoutError as e: msg = f"Optimization timed out after {e.runtime} seconds." current_fun, *_ = fun(e.current_x, *args) return optimize.OptimizeResult( fun=current_fun, x=e.current_x, nit=callback_data["num_iterations"], success=False, # same as when maxiter is reached status=1, # same as when L-BFGS-B reaches maxiter message=msg, )
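# --- Editor's usage sketch (not part of the library source) -----------------
# Minimizes a simple quadratic under a wall-clock budget using the wrapper
# above, assuming it is in scope (in BoTorch:
# botorch.optim.utils.timeout.minimize_with_timeout). Values are toy choices.
import numpy as np

def fun(x: np.ndarray):
    # Return (value, gradient) so that `jac=True` can be used.
    return float(np.sum((x - 1.0) ** 2)), 2.0 * (x - 1.0)

res = minimize_with_timeout(
    fun=fun,
    x0=np.zeros(4),
    method="L-BFGS-B",
    jac=True,
    timeout_sec=5.0,  # abort gracefully if the solver runs longer than this
)
print(res.success, res.x)  # converges to all-ones well before the timeout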
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.optim.utils.acquisition_utils import ( columnwise_clamp, fix_features, get_X_baseline, ) from botorch.optim.utils.common import ( _filter_kwargs, _handle_numerical_errors, _warning_handler_template, ) from botorch.optim.utils.model_utils import ( _get_extra_mll_args, get_data_loader, get_name_filter, get_parameters, get_parameters_and_bounds, sample_all_priors, TorchAttr, ) from botorch.optim.utils.numpy_utils import ( as_ndarray, get_bounds_as_ndarray, get_tensors_as_ndarray_1d, set_tensors_from_ndarray_1d, ) from botorch.optim.utils.timeout import minimize_with_timeout __all__ = [ "_filter_kwargs", "_get_extra_mll_args", "_handle_numerical_errors", "_warning_handler_template", "as_ndarray", "columnwise_clamp", "fix_features", "get_name_filter", "get_bounds_as_ndarray", "get_data_loader", "get_parameters", "get_parameters_and_bounds", "get_tensors_as_ndarray_1d", "get_X_baseline", "minimize_with_timeout", "sample_all_priors", "set_tensors_from_ndarray_1d", "TorchAttr", ]
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Utilities for maximizing acquisition functions.""" from __future__ import annotations from typing import Dict, Optional, Union from warnings import warn import torch from botorch.acquisition.acquisition import AcquisitionFunction from botorch.exceptions.errors import BotorchError from botorch.exceptions.warnings import BotorchWarning from botorch.models.gpytorch import ModelListGPyTorchModel from torch import Tensor def columnwise_clamp( X: Tensor, lower: Optional[Union[float, Tensor]] = None, upper: Optional[Union[float, Tensor]] = None, raise_on_violation: bool = False, ) -> Tensor: r"""Clamp values of a Tensor in column-wise fashion (with support for t-batches). This function is useful in conjunction with optimizers from the torch.optim package, which don't natively handle constraints. If you apply this after a gradient step you can be fancy and call it "projected gradient descent". This funtion is also useful for post-processing candidates generated by the scipy optimizer that satisfy bounds only up to numerical accuracy. Args: X: The `b x n x d` input tensor. If 2-dimensional, `b` is assumed to be 1. lower: The column-wise lower bounds. If scalar, apply bound to all columns. upper: The column-wise upper bounds. If scalar, apply bound to all columns. raise_on_violation: If `True`, raise an exception when the elments in `X` are out of the specified bounds (up to numerical accuracy). This is useful for post-processing candidates generated by optimizers that satisfy imposed bounds only up to numerical accuracy. Returns: The clamped tensor. """ if lower is None and upper is None: return X if lower is not None: lower = torch.as_tensor(lower).expand_as(X).to(X) if upper is not None: upper = torch.as_tensor(upper).expand_as(X).to(X) if lower is not None and (lower > upper).any(): raise ValueError("Lower bounds cannot exceed upper bounds.") out = X.clamp(lower, upper) if raise_on_violation and not X.allclose(out): raise BotorchError("Original value(s) are out of bounds.") return out def fix_features( X: Tensor, fixed_features: Optional[Dict[int, Optional[float]]] = None ) -> Tensor: r"""Fix feature values in a Tensor. The fixed features will have zero gradient in downstream calculations. Args: X: input Tensor with shape `... x p`, where `p` is the number of features fixed_features: A dictionary with keys as column indices and values equal to what the feature should be set to in `X`. If the value is None, that column is just considered fixed. Keys should be in the range `[0, p - 1]`. Returns: The tensor X with fixed features. """ if fixed_features is None: return X columns = list(X.unbind(dim=-1)) for index, value in fixed_features.items(): if value is None: columns[index] = columns[index].detach() else: columns[index] = torch.full_like(columns[index], value) return torch.stack(columns, dim=-1) def get_X_baseline(acq_function: AcquisitionFunction) -> Optional[Tensor]: r"""Extract X_baseline from an acquisition function. This tries to find the baseline set of points. First, this checks if the acquisition function has an `X_baseline` attribute. If it does not, then this method attempts to use the model's `train_inputs` as `X_baseline`. Args: acq_function: The acquisition function. Returns An optional `n x d`-dim tensor of baseline points. This is None if no baseline points are found. 
""" try: X = acq_function.X_baseline # if there are no baseline points, use training points if X.shape[0] == 0: raise BotorchError except (BotorchError, AttributeError): try: # for entropy MOO methods model = acq_function.mo_model except AttributeError: try: # some acquisition functions do not have a model attribute # e.g. FixedFeatureAcquisitionFunction model = acq_function.model except AttributeError: warn("Failed to extract X_baseline.", BotorchWarning) return try: # Make sure we get the original train inputs. m = model.models[0] if isinstance(model, ModelListGPyTorchModel) else model if m._has_transformed_inputs: X = m._original_train_inputs else: X = m.train_inputs[0] except (BotorchError, AttributeError): warn("Failed to extract X_baseline.", BotorchWarning) return # just use one batch while X.ndim > 2: X = X[0] return X
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""General-purpose optimization utilities.""" from __future__ import annotations from inspect import signature from logging import debug as logging_debug from typing import Any, Callable, Optional, Tuple from warnings import warn, warn_explicit, WarningMessage import numpy as np from linear_operator.utils.errors import NanError, NotPSDError def _filter_kwargs(function: Callable, **kwargs: Any) -> Any: r"""Filter out kwargs that are not applicable for a given function. Return a copy of given kwargs dict with only the required kwargs.""" allowed_params = signature(function).parameters removed = {k for k in kwargs.keys() if k not in allowed_params} if len(removed) > 0: fn_descriptor = ( f" for function {function.__name__}" if hasattr(function, "__name__") else "" ) warn( f"Keyword arguments {list(removed)} will be ignored because they are" f" not allowed parameters{fn_descriptor}. Allowed " f"parameters are {list(allowed_params.keys())}." ) return {k: v for k, v in kwargs.items() if k not in removed} def _handle_numerical_errors( error: RuntimeError, x: np.ndarray, dtype: Optional[np.dtype] = None ) -> Tuple[np.ndarray, np.ndarray]: if isinstance(error, NotPSDError): raise error error_message = error.args[0] if len(error.args) > 0 else "" if ( isinstance(error, NanError) or "singular" in error_message # old pytorch message or "input is not positive-definite" in error_message # since pytorch #63864 ): _dtype = x.dtype if dtype is None else dtype return np.full((), "nan", dtype=_dtype), np.full_like(x, "nan", dtype=_dtype) raise error # pragma: nocover def _warning_handler_template( w: WarningMessage, debug: Optional[Callable[[WarningMessage], bool]] = None, rethrow: Optional[Callable[[WarningMessage], bool]] = None, ) -> bool: r"""Helper for making basic warning handlers. Typically used with functools.partial. Args: w: The WarningMessage to be resolved and filtered out or returned unresolved. debug: Optional callable used to specify that a warning should be resolved as a logging statement at the DEBUG level. rethrow: Optional callable used to specify that a warning should be resolved by rethrowing the warning. Returns: Boolean indicating whether or not the warning message was resolved. """ if debug and debug(w): logging_debug(str(w.message)) return True if rethrow and rethrow(w): warn_explicit(str(w.message), w.category, w.filename, w.lineno) return True return False
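# --- Editor's usage sketch (not part of the library source) -----------------
# Demonstrates `_filter_kwargs` and `_warning_handler_template` from the file
# above, assuming they are in scope (in BoTorch: botorch.optim.utils.common).
from functools import partial
from warnings import catch_warnings, simplefilter, warn

def step(lr: float, momentum: float = 0.9) -> float:
    return lr * momentum

# Only keyword arguments accepted by `step` survive; `foo` is dropped with a
# warning explaining why.
kwargs = _filter_kwargs(step, lr=0.1, momentum=0.5, foo="ignored")
print(step(**kwargs))  # 0.05

# A handler that resolves RuntimeWarnings by logging them at DEBUG level.
handler = partial(
    _warning_handler_template,
    debug=lambda w: issubclass(w.category, RuntimeWarning),
)
with catch_warnings(record=True) as ws:
    simplefilter("always")
    warn("numerical issue", RuntimeWarning)
print([handler(w) for w in ws])  # [True] -> resolved, not re-raised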
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.optim.closures.core import ( ForwardBackwardClosure, NdarrayOptimizationClosure, ) from botorch.optim.closures.model_closures import ( get_loss_closure, get_loss_closure_with_grads, ) __all__ = [ "ForwardBackwardClosure", "get_loss_closure", "get_loss_closure_with_grads", "NdarrayOptimizationClosure", ]
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Core methods for building closures in torch and interfacing with numpy.""" from __future__ import annotations from functools import partial from typing import Any, Callable, Dict, Optional, Sequence, Tuple import torch from botorch.optim.utils import ( _handle_numerical_errors, get_tensors_as_ndarray_1d, set_tensors_from_ndarray_1d, ) from botorch.optim.utils.numpy_utils import as_ndarray from botorch.utils.context_managers import zero_grad_ctx from numpy import float64 as np_float64, full as np_full, ndarray, zeros as np_zeros from torch import Tensor class ForwardBackwardClosure: r"""Wrapper for fused forward and backward closures.""" def __init__( self, forward: Callable[[], Tensor], parameters: Dict[str, Tensor], backward: Callable[[Tensor], None] = Tensor.backward, reducer: Optional[Callable[[Tensor], Tensor]] = torch.sum, callback: Optional[Callable[[Tensor, Sequence[Optional[Tensor]]], None]] = None, context_manager: Callable = None, # pyre-ignore [9] ) -> None: r"""Initializes a ForwardBackwardClosure instance. Args: closure: Callable that returns a tensor. parameters: A dictionary of tensors whose `grad` fields are to be returned. backward: Callable that takes the (reduced) output of `forward` and sets the `grad` attributes of tensors in `parameters`. reducer: Optional callable used to reduce the output of the forward pass. callback: Optional callable that takes the reduced output of `forward` and the gradients of `parameters` as positional arguments. context_manager: A ContextManager used to wrap each forward-backward call. When passed as `None`, `context_manager` defaults to a `zero_grad_ctx` that zeroes the gradients of `parameters` upon entry. """ if context_manager is None: context_manager = partial(zero_grad_ctx, parameters) self.forward = forward self.backward = backward self.parameters = parameters self.reducer = reducer self.callback = callback self.context_manager = context_manager def __call__(self, **kwargs: Any) -> Tuple[Tensor, Tuple[Optional[Tensor], ...]]: with self.context_manager(): values = self.forward(**kwargs) value = values if self.reducer is None else self.reducer(values) self.backward(value) grads = tuple(param.grad for param in self.parameters.values()) if self.callback: self.callback(value, grads) return value, grads class NdarrayOptimizationClosure: r"""Adds stateful behavior and a numpy.ndarray-typed API to a closure with an expected return type Tuple[Tensor, Union[Tensor, Sequence[Optional[Tensor]]]].""" def __init__( self, closure: Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]], parameters: Dict[str, Tensor], as_array: Callable[[Tensor], ndarray] = None, # pyre-ignore [9] as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor, get_state: Callable[[], ndarray] = None, # pyre-ignore [9] set_state: Callable[[ndarray], None] = None, # pyre-ignore [9] fill_value: float = 0.0, persistent: bool = True, ) -> None: r"""Initializes a NdarrayOptimizationClosure instance. Args: closure: A ForwardBackwardClosure instance. parameters: A dictionary of tensors representing the closure's state. Expected to correspond with the first `len(parameters)` optional gradient tensors returned by `closure`. as_array: Callable used to convert tensors to ndarrays. as_tensor: Callable used to convert ndarrays to tensors. 
get_state: Callable that returns the closure's state as an ndarray. When passed as `None`, defaults to calling `get_tensors_as_ndarray_1d` on `closure.parameters` while passing `as_array` (if given by the user). set_state: Callable that takes a 1-dimensional ndarray and sets the closure's state. When passed as `None`, `set_state` defaults to calling `set_tensors_from_ndarray_1d` with `closure.parameters` and a given ndarray while passing `as_tensor`. fill_value: Fill value for parameters whose gradients are None. In most cases, `fill_value` should either be zero or NaN. persistent: Boolean specifying whether an ndarray should be retained as a persistent buffer for gradients. """ if get_state is None: # Note: Numpy supports copying data between ndarrays with different dtypes. # Hence, our default behavior need not coerce the ndarray representations # of tensors in `parameters` to float64 when copying over data. _as_array = as_ndarray if as_array is None else as_array get_state = partial( get_tensors_as_ndarray_1d, tensors=parameters, dtype=np_float64, as_array=_as_array, ) if as_array is None: # per the note, do this after resolving `get_state` as_array = partial(as_ndarray, dtype=np_float64) if set_state is None: set_state = partial( set_tensors_from_ndarray_1d, parameters, as_tensor=as_tensor ) self.closure = closure self.parameters = parameters self.as_array = as_ndarray self.as_tensor = as_tensor self._get_state = get_state self._set_state = set_state self.fill_value = fill_value self.persistent = persistent self._gradient_ndarray: Optional[ndarray] = None def __call__( self, state: Optional[ndarray] = None, **kwargs: Any ) -> Tuple[ndarray, ndarray]: if state is not None: self.state = state try: value_tensor, grad_tensors = self.closure(**kwargs) value = self.as_array(value_tensor) grads = self._get_gradient_ndarray(fill_value=self.fill_value) index = 0 for param, grad in zip(self.parameters.values(), grad_tensors): size = param.numel() if grad is not None: grads[index : index + size] = self.as_array(grad.view(-1)) index += size except RuntimeError as e: value, grads = _handle_numerical_errors(e, x=self.state, dtype=np_float64) return value, grads @property def state(self) -> ndarray: return self._get_state() @state.setter def state(self, state: ndarray) -> None: self._set_state(state) def _get_gradient_ndarray(self, fill_value: Optional[float] = None) -> ndarray: if self.persistent and self._gradient_ndarray is not None: if fill_value is not None: self._gradient_ndarray.fill(fill_value) return self._gradient_ndarray size = sum(param.numel() for param in self.parameters.values()) array = ( np_zeros(size, dtype=np_float64) if fill_value is None or fill_value == 0.0 else np_full(size, fill_value, dtype=np_float64) ) if self.persistent: self._gradient_ndarray = array return array
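# --- Editor's usage sketch (not part of the library source) -----------------
# Wires a tiny least-squares objective through the two closures above,
# assuming they are in scope (in BoTorch: botorch.optim.closures.core).
# The parameter, target, and shapes below are made up for demonstration.
import torch

w = torch.zeros(3, dtype=torch.float64, requires_grad=True)
target = torch.tensor([1.0, -2.0, 0.5], dtype=torch.float64)
parameters = {"w": w}

def forward() -> torch.Tensor:
    return (w - target).pow(2).sum()

# Torch-side closure: returns the loss and the gradients of `parameters`.
closure = ForwardBackwardClosure(forward=forward, parameters=parameters)
loss, grads = closure()
print(loss.item(), grads[0])

# Numpy-side view of the same closure, as consumed by scipy.optimize.minimize:
# calling it with a flat ndarray sets the parameter state, then returns the
# loss and gradient as float64 ndarrays.
np_closure = NdarrayOptimizationClosure(closure, parameters)
value, grad = np_closure(np_closure.state)
print(value, grad)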
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Utilities for building model-based closures.""" from __future__ import annotations from itertools import chain, repeat from typing import Any, Callable, Dict, Optional, Sequence, Tuple from botorch.optim.closures.core import ForwardBackwardClosure from botorch.utils.dispatcher import Dispatcher, type_bypassing_encoder from botorch.utils.types import NoneType from gpytorch.mlls import ( ExactMarginalLogLikelihood, MarginalLogLikelihood, SumMarginalLogLikelihood, ) from torch import Tensor from torch.utils.data import DataLoader GetLossClosure = Dispatcher("get_loss_closure", encoder=type_bypassing_encoder) GetLossClosureWithGrads = Dispatcher( "get_loss_closure_with_grads", encoder=type_bypassing_encoder ) def get_loss_closure( mll: MarginalLogLikelihood, data_loader: Optional[DataLoader] = None, **kwargs: Any, ) -> Callable[[], Tensor]: r"""Public API for GetLossClosure dispatcher. This method, and the dispatcher that powers it, acts as a clearing house for factory functions that define how `mll` is evaluated. Users may specify custom evaluation routines by registering a factory function with GetLossClosure. These factories should be registered using the type signature `Type[MarginalLogLikeLihood], Type[Likelihood], Type[Model], Type[DataLoader]`. The final argument, Type[DataLoader], is optional. Evaluation routines that obtain training data from, e.g., `mll.model` should register this argument as `type(None)`. Args: mll: A MarginalLogLikelihood instance whose negative defines the loss. data_loader: An optional DataLoader instance for cases where training data is passed in rather than obtained from `mll.model`. Returns: A closure that takes zero positional arguments and returns the negated value of `mll`. """ return GetLossClosure( mll, type(mll.likelihood), type(mll.model), data_loader, **kwargs ) def get_loss_closure_with_grads( mll: MarginalLogLikelihood, parameters: Dict[str, Tensor], data_loader: Optional[DataLoader] = None, backward: Callable[[Tensor], None] = Tensor.backward, reducer: Optional[Callable[[Tensor], Tensor]] = Tensor.sum, context_manager: Optional[Callable] = None, **kwargs: Any, ) -> Callable[[], Tuple[Tensor, Tuple[Tensor, ...]]]: r"""Public API for GetLossClosureWithGrads dispatcher. In most cases, this method simply adds a backward pass to a loss closure obtained by calling `get_loss_closure`. For further details, see `get_loss_closure`. Args: mll: A MarginalLogLikelihood instance whose negative defines the loss. parameters: A dictionary of tensors whose `grad` fields are to be returned. reducer: Optional callable used to reduce the output of the forward pass. data_loader: An optional DataLoader instance for cases where training data is passed in rather than obtained from `mll.model`. context_manager: An optional ContextManager used to wrap each forward-backward pass. Defaults to a `zero_grad_ctx` that zeroes the gradients of `parameters` upon entry. None may be passed as an alias for `nullcontext`. Returns: A closure that takes zero positional arguments and returns the reduced and negated value of `mll` along with the gradients of `parameters`. 
""" return GetLossClosureWithGrads( mll, type(mll.likelihood), type(mll.model), data_loader, parameters=parameters, reducer=reducer, backward=backward, context_manager=context_manager, **kwargs, ) @GetLossClosureWithGrads.register(object, object, object, object) def _get_loss_closure_with_grads_fallback( mll: MarginalLogLikelihood, _: object, __: object, data_loader: Optional[DataLoader], parameters: Dict[str, Tensor], reducer: Callable[[Tensor], Tensor] = Tensor.sum, backward: Callable[[Tensor], None] = Tensor.backward, context_manager: Callable = None, # pyre-ignore [9] **kwargs: Any, ) -> ForwardBackwardClosure: r"""Wraps a `loss_closure` with a ForwardBackwardClosure.""" loss_closure = get_loss_closure(mll, data_loader=data_loader, **kwargs) return ForwardBackwardClosure( forward=loss_closure, backward=backward, parameters=parameters, reducer=reducer, context_manager=context_manager, ) @GetLossClosure.register(MarginalLogLikelihood, object, object, DataLoader) def _get_loss_closure_fallback_external( mll: MarginalLogLikelihood, _: object, __: object, data_loader: DataLoader, **ignore: Any, ) -> Callable[[], Tensor]: r"""Fallback loss closure with externally provided data.""" batch_generator = chain.from_iterable(iter(data_loader) for _ in repeat(None)) def closure(**kwargs: Any) -> Tensor: batch = next(batch_generator) if not isinstance(batch, Sequence): raise TypeError( "Expected `data_loader` to generate a batch of tensors, " f"but found {type(batch)}." ) num_inputs = len(mll.model.train_inputs) model_output = mll.model(*batch[:num_inputs]) log_likelihood = mll(model_output, *batch[num_inputs:], **kwargs) return -log_likelihood return closure @GetLossClosure.register(MarginalLogLikelihood, object, object, NoneType) def _get_loss_closure_fallback_internal( mll: MarginalLogLikelihood, _: object, __: object, ___: NoneType, **ignore: Any ) -> Callable[[], Tensor]: r"""Fallback loss closure with internally managed data.""" def closure(**kwargs: Any) -> Tensor: model_output = mll.model(*mll.model.train_inputs) log_likelihood = mll(model_output, mll.model.train_targets, **kwargs) return -log_likelihood return closure @GetLossClosure.register(ExactMarginalLogLikelihood, object, object, NoneType) def _get_loss_closure_exact_internal( mll: ExactMarginalLogLikelihood, _: object, __: object, ___: NoneType, **ignore: Any ) -> Callable[[], Tensor]: r"""ExactMarginalLogLikelihood loss closure with internally managed data.""" def closure(**kwargs: Any) -> Tensor: model_output = mll.model(*mll.model.train_inputs) log_likelihood = mll( model_output, mll.model.train_targets, *mll.model.train_inputs, **kwargs ) return -log_likelihood return closure @GetLossClosure.register(SumMarginalLogLikelihood, object, object, NoneType) def _get_loss_closure_sum_internal( mll: SumMarginalLogLikelihood, _: object, __: object, ___: NoneType, **ignore: Any ) -> Callable[[], Tensor]: r"""SumMarginalLogLikelihood loss closure with internally managed data.""" def closure(**kwargs: Any) -> Tensor: model_output = mll.model(*mll.model.train_inputs) log_likelihood = mll( model_output, mll.model.train_targets, *map(list, mll.model.train_inputs), **kwargs, ) return -log_likelihood return closure
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from inspect import getsource, getsourcefile from typing import Any, Callable, Optional, Tuple, Type from multipledispatch.dispatcher import ( Dispatcher as MDDispatcher, MDNotImplementedError, # trivial subclass of NotImplementedError str_signature, ) def type_bypassing_encoder(arg: Any) -> Type: # Allow type variables to be passed as pre-encoded arguments return arg if isinstance(arg, type) else type(arg) class Dispatcher(MDDispatcher): r"""Clearing house for multiple dispatch functionality. This class extends `<multipledispatch.Dispatcher>` by: (i) generalizing the argument encoding convention during method lookup, (ii) implementing `__getitem__` as a dedicated method lookup function. """ def __init__( self, name: str, doc: Optional[str] = None, encoder: Callable[Any, Type] = type, ) -> None: """ Args: name: A string identifier for the `Dispatcher` instance. doc: A docstring for the multiply dispatched method(s). encoder: A callable that individually transforms the arguments passed at runtime in order to construct the key used for method lookup as `tuple(map(encoder, args))`. Defaults to `type`. """ super().__init__(name=name, doc=doc) self._encoder = encoder def __getitem__( self, args: Optional[Any] = None, types: Optional[Tuple[Type]] = None, ) -> Callable: r"""Method lookup. Args: args: A set of arguments that act as identifiers for a stored method. types: A tuple of types that encodes `args`. Returns: A callable corresponding to the given `args` or `types`. """ if types is None: if args is None: raise RuntimeError("One of `args` or `types` must be provided.") types = self.encode_args(args) elif args is not None: raise RuntimeError("Only one of `args` or `types` may be provided.") try: func = self._cache[types] except KeyError: func = self.dispatch(*types) if not func: msg = f"{self.name}: <{', '.join(cls.__name__ for cls in types)}" raise NotImplementedError(f"Could not find signature for {msg}") self._cache[types] = func return func def __call__(self, *args: Any, **kwargs: Any) -> Any: r"""Multiply dispatches a call to a collection of methods. Args: args: A set of arguments that act as identifiers for a stored method. kwargs: Optional keyword arguments passed to the retrieved method. Returns: The result of evaluating `func(*args, **kwargs)`, where `func` is the function obtained via method lookup. """ types = self.encode_args(args) func = self.__getitem__(types=types) try: return func(*args, **kwargs) except MDNotImplementedError: # Traverses registered methods in order, yields whenever a match is found funcs = self.dispatch_iter(*types) next(funcs) # burn first, same as self.__getitem__(types=types) for func in funcs: try: return func(*args, **kwargs) except MDNotImplementedError: pass raise NotImplementedError( f"Matching functions for {self.name:s}: {str_signature(types):s} " "found, but none completed successfully" ) def dispatch(self, *types: Type) -> Callable: r"""Method lookup strategy. Checks for an exact match before traversing the set of registered methods according to the current ordering. Args: types: A tuple of types that gets compared with the signatures of registered methods to determine compatibility. Returns: The first method encountered with a matching signature. 
""" if types in self.funcs: return self.funcs[types] try: return next(self.dispatch_iter(*types)) except StopIteration: return None def encode_args(self, args: Any) -> Tuple[Type]: r"""Converts arguments into a tuple of types used during method lookup.""" return tuple(map(self.encoder, args if isinstance(args, tuple) else (args,))) def _help(self, *args: Any) -> str: r"""Returns the retrieved method's docstring.""" return self.dispatch(*self.encode_args(args)).__doc__ def help(self, *args: Any, **kwargs: Any) -> None: r"""Prints the retrieved method's docstring.""" print(self._help(*args)) def _source(self, *args: Any) -> str: r"""Returns the retrieved method's source types as a string.""" func = self.dispatch(*self.encode_args(args)) if not func: raise TypeError("No function found") return f"File: {getsourcefile(func)}\n\n{getsource(func)}" def source(self, *args, **kwargs) -> None: r"""Prints the retrieved method's source types.""" print(self._source(*args)) @property def encoder(self) -> Callable[Any, Type]: return self._encoder
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Some basic data transformation helpers. """ from __future__ import annotations import warnings from functools import wraps from typing import Any, Callable, List, Optional, TYPE_CHECKING import torch from torch import Tensor if TYPE_CHECKING: from botorch.acquisition import AcquisitionFunction # pragma: no cover from botorch.model import Model # pragma: no cover def standardize(Y: Tensor) -> Tensor: r"""Standardizes (zero mean, unit variance) a tensor by dim=-2. If the tensor is single-dimensional, simply standardizes the tensor. If for some batch index all elements are equal (or if there is only a single data point), this function will return 0 for that batch index. Args: Y: A `batch_shape x n x m`-dim tensor. Returns: The standardized `Y`. Example: >>> Y = torch.rand(4, 3) >>> Y_standardized = standardize(Y) """ stddim = -1 if Y.dim() < 2 else -2 Y_std = Y.std(dim=stddim, keepdim=True) Y_std = Y_std.where(Y_std >= 1e-9, torch.full_like(Y_std, 1.0)) return (Y - Y.mean(dim=stddim, keepdim=True)) / Y_std def normalize(X: Tensor, bounds: Tensor) -> Tensor: r"""Min-max normalize X w.r.t. the provided bounds. Args: X: `... x d` tensor of data bounds: `2 x d` tensor of lower and upper bounds for each of the X's d columns. Returns: A `... x d`-dim tensor of normalized data, given by `(X - bounds[0]) / (bounds[1] - bounds[0])`. If all elements of `X` are contained within `bounds`, the normalized values will be contained within `[0, 1]^d`. Example: >>> X = torch.rand(4, 3) >>> bounds = torch.stack([torch.zeros(3), 0.5 * torch.ones(3)]) >>> X_normalized = normalize(X, bounds) """ return (X - bounds[0]) / (bounds[1] - bounds[0]) def unnormalize(X: Tensor, bounds: Tensor) -> Tensor: r"""Un-normalizes X w.r.t. the provided bounds. Args: X: `... x d` tensor of data bounds: `2 x d` tensor of lower and upper bounds for each of the X's d columns. Returns: A `... x d`-dim tensor of unnormalized data, given by `X * (bounds[1] - bounds[0]) + bounds[0]`. If all elements of `X` are contained in `[0, 1]^d`, the un-normalized values will be contained within `bounds`. Example: >>> X_normalized = torch.rand(4, 3) >>> bounds = torch.stack([torch.zeros(3), 0.5 * torch.ones(3)]) >>> X = unnormalize(X_normalized, bounds) """ return X * (bounds[1] - bounds[0]) + bounds[0] def normalize_indices(indices: Optional[List[int]], d: int) -> Optional[List[int]]: r"""Normalize a list of indices to ensure that they are positive. Args: indices: A list of indices (may contain negative indices for indexing "from the back"). d: The dimension of the tensor to index. Returns: A normalized list of indices such that each index is between `0` and `d-1`, or None if indices is None. """ if indices is None: return indices normalized_indices = [] for i in indices: if i < 0: i = i + d if i < 0 or i > d - 1: raise ValueError(f"Index {i} out of bounds for tensor or length {d}.") normalized_indices.append(i) return normalized_indices def _verify_output_shape(acqf: Any, X: Tensor, output: Tensor) -> bool: r""" Performs the output shape checks for `t_batch_mode_transform`. Output shape checks help in catching the errors due to AcquisitionFunction arguments with erroneous return shapes before these errors propagate further down the line. 
This method checks that the `output` shape matches either the t-batch shape of X or the `batch_shape` of `acqf.model`. Args: acqf: The AcquisitionFunction object being evaluated. X: The `... x q x d`-dim input tensor with an explicit t-batch. output: The return value of `acqf.method(X, ...)`. Returns: True if `output` has the correct shape, False otherwise. """ try: X_batch_shape = X.shape[:-2] if output.shape == X_batch_shape: return True if output.shape == torch.Size() and X_batch_shape == torch.Size([1]): # X has a batch shape of [1] which gets squeezed. return True # Cases with model batch shape involved. model_b_shape = acqf.model.batch_shape if output.shape == model_b_shape: # Simple inputs with batched model. return True model_b_dim = len(model_b_shape) if output.shape == X_batch_shape[:-model_b_dim] + model_b_shape and all( xs in [1, ms] for xs, ms in zip(X_batch_shape[-model_b_dim:], model_b_shape) ): # X has additional batch dimensions beyond the model batch shape. # For a batched model, some of the input dimensions might get broadcasted # to the model batch shape. In that case the acquisition function output # should replace the right-most batch dim of X with the model's batch shape. return True return False except (AttributeError, NotImplementedError): # acqf does not have model or acqf.model does not define `batch_shape` warnings.warn( "Output shape checks failed! Expected output shape to match t-batch shape" f"of X, but got output with shape {output.shape} for X with shape" f"{X.shape}. Make sure that this is the intended behavior!", RuntimeWarning, ) return True def is_fully_bayesian(model: Model) -> bool: r"""Check if at least one model is a SaasFullyBayesianSingleTaskGP Args: model: A BoTorch model (may be a `ModelList` or `ModelListGP`) d: The dimension of the tensor to index. Returns: True if at least one model is a `SaasFullyBayesianSingleTaskGP` """ from botorch.models import ModelList from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP from botorch.models.fully_bayesian_multitask import SaasFullyBayesianMultiTaskGP full_bayesian_model_cls = ( SaasFullyBayesianSingleTaskGP, SaasFullyBayesianMultiTaskGP, ) if isinstance(model, full_bayesian_model_cls) or getattr( model, "is_fully_bayesian", False ): return True elif isinstance(model, ModelList): for m in model.models: if is_fully_bayesian(m): return True return False def t_batch_mode_transform( expected_q: Optional[int] = None, assert_output_shape: bool = True, ) -> Callable[ [Callable[[AcquisitionFunction, Any], Any]], Callable[[AcquisitionFunction, Any], Any], ]: r"""Factory for decorators enabling consistent t-batch behavior. This method creates decorators for instance methods to transform an input tensor `X` to t-batch mode (i.e. with at least 3 dimensions). This assumes the tensor has a q-batch dimension. The decorator also checks the q-batch size if `expected_q` is provided, and the output shape if `assert_output_shape` is `True`. Args: expected_q: The expected q-batch size of `X`. If specified, this will raise an AssertionError if `X`'s q-batch size does not equal expected_q. assert_output_shape: If `True`, this will raise an AssertionError if the output shape does not match either the t-batch shape of `X`, or the `acqf.model.batch_shape` for acquisition functions using batched models. Returns: The decorated instance method. Example: >>> class ExampleClass: >>> @t_batch_mode_transform(expected_q=1) >>> def single_q_method(self, X): >>> ... 
>>> >>> @t_batch_mode_transform() >>> def arbitrary_q_method(self, X): >>> ... """ def decorator( method: Callable[[AcquisitionFunction, Any], Any], ) -> Callable[[AcquisitionFunction, Any], Any]: @wraps(method) def decorated( acqf: AcquisitionFunction, X: Any, *args: Any, **kwargs: Any ) -> Any: # Allow using acquisition functions for other inputs (e.g. lists of strings) if not isinstance(X, Tensor): return method(acqf, X, *args, **kwargs) if X.dim() < 2: raise ValueError( f"{type(acqf).__name__} requires X to have at least 2 dimensions," f" but received X with only {X.dim()} dimensions." ) elif expected_q is not None and X.shape[-2] != expected_q: raise AssertionError( f"Expected X to be `batch_shape x q={expected_q} x d`, but" f" got X with shape {X.shape}." ) # add t-batch dim X = X if X.dim() > 2 else X.unsqueeze(0) output = method(acqf, X, *args, **kwargs) if hasattr(acqf, "model") and is_fully_bayesian(acqf.model): output = output.mean(dim=-1) if assert_output_shape and not _verify_output_shape( acqf=acqf, X=X, output=output, ): raise AssertionError( "Expected the output shape to match either the t-batch shape of " "X, or the `model.batch_shape` in the case of acquisition " "functions using batch models; but got output with shape " f"{output.shape} for X with shape {X.shape}." ) return output return decorated return decorator def concatenate_pending_points( method: Callable[[Any, Tensor], Any] ) -> Callable[[Any, Tensor], Any]: r"""Decorator concatenating X_pending into an acquisition function's argument. This decorator works on the `forward` method of acquisition functions taking a tensor `X` as the argument. If the acquisition function has an `X_pending` attribute (that is not `None`), this is concatenated into the input `X`, appropriately expanding the pending points to match the batch shape of `X`. Example: >>> class ExampleAcquisitionFunction: >>> @concatenate_pending_points >>> @t_batch_mode_transform() >>> def forward(self, X): >>> ... """ @wraps(method) def decorated(cls: Any, X: Tensor, **kwargs: Any) -> Any: if cls.X_pending is not None: X = torch.cat([X, match_batch_shape(cls.X_pending, X)], dim=-2) return method(cls, X, **kwargs) return decorated def match_batch_shape(X: Tensor, Y: Tensor) -> Tensor: r"""Matches the batch dimension of a tensor to that of another tensor. Args: X: A `batch_shape_X x q x d` tensor, whose batch dimensions that correspond to batch dimensions of `Y` are to be matched to those (if compatible). Y: A `batch_shape_Y x q' x d` tensor. Returns: A `batch_shape_Y x q x d` tensor containing the data of `X` expanded to the batch dimensions of `Y` (if compatible). For instance, if `X` is `b'' x b' x q x d` and `Y` is `b x q x d`, then the returned tensor is `b'' x b x q x d`. Example: >>> X = torch.rand(2, 1, 5, 3) >>> Y = torch.rand(2, 6, 4, 3) >>> X_matched = match_batch_shape(X, Y) >>> X_matched.shape torch.Size([2, 6, 5, 3]) """ return X.expand(X.shape[: -(Y.dim())] + Y.shape[:-2] + X.shape[-2:]) def convert_to_target_pre_hook(module, *args): r"""Pre-hook for automatically calling `.to(X)` on module prior to `forward`""" module.to(args[0][0])
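
# --------------------------------------------------------------------------- #
# Minimal usage sketch for the transform helpers above (illustrative, not part
# of the library): round-trip data through `normalize`/`unnormalize`, then
# expand a tensor's batch dimensions with `match_batch_shape`.
import torch

from botorch.utils.transforms import (
    match_batch_shape,
    normalize,
    standardize,
    unnormalize,
)

X = torch.rand(4, 3)
bounds = torch.stack([torch.zeros(3), 2.0 * torch.ones(3)])
X_unit = normalize(X, bounds)           # maps into [0, 1]^3
X_back = unnormalize(X_unit, bounds)    # recovers the original data
assert torch.allclose(X, X_back)

Y_std = standardize(torch.rand(4, 2))   # zero mean / unit variance along dim=-2

X_b = torch.rand(1, 5, 3)               # batch_shape_X x q x d
Y_b = torch.rand(2, 6, 3)               # batch_shape_Y x q' x d
assert match_batch_shape(X_b, Y_b).shape == torch.Size([2, 5, 3])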
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations import torch from botorch.exceptions.errors import BotorchError from botorch.posteriors.base_samples import _reshape_base_samples_non_interleaved from botorch.posteriors.gpytorch import GPyTorchPosterior from gpytorch.distributions.multitask_multivariate_normal import ( MultitaskMultivariateNormal, ) from linear_operator.operators import BlockDiagLinearOperator, LinearOperator from linear_operator.utils.cholesky import psd_safe_cholesky from linear_operator.utils.errors import NanError from torch import Tensor def extract_batch_covar(mt_mvn: MultitaskMultivariateNormal) -> LinearOperator: r"""Extract a batched independent covariance matrix from an MTMVN. Args: mt_mvn: A multi-task multivariate normal with a block diagonal covariance matrix. Returns: A lazy covariance matrix consisting of a batch of the blocks of the diagonal of the MultitaskMultivariateNormal. """ lazy_covar = mt_mvn.lazy_covariance_matrix if not isinstance(lazy_covar, BlockDiagLinearOperator): raise BotorchError( f"Expected BlockDiagLinearOperator, but got {type(lazy_covar)}." ) return lazy_covar.base_linear_op def _reshape_base_samples( base_samples: Tensor, sample_shape: torch.Size, posterior: GPyTorchPosterior ) -> Tensor: r"""Manipulate shape of base_samples to match `MultivariateNormal.rsample`. This ensure that base_samples are used in the same way as in gpytorch.distributions.MultivariateNormal. For CBD, it is important to ensure that the same base samples are used for the in-sample points here and in the cached box decompositions. Args: base_samples: The base samples. sample_shape: The sample shape. posterior: The joint posterior is over (X_baseline, X). Returns: Reshaped and expanded base samples. """ mvn = posterior.distribution loc = mvn.loc peshape = posterior._extended_shape() base_samples = base_samples.view( sample_shape + torch.Size([1] * (loc.ndim - 1)) + peshape[-2:] ).expand(sample_shape + loc.shape[:-1] + peshape[-2:]) if posterior._is_mt: base_samples = _reshape_base_samples_non_interleaved( mvn=posterior.distribution, base_samples=base_samples, sample_shape=sample_shape, ) base_samples = base_samples.reshape( -1, *loc.shape[:-1], mvn.lazy_covariance_matrix.shape[-1] ) base_samples = base_samples.permute(*range(1, loc.dim() + 1), 0) return base_samples.reshape( *peshape[:-2], peshape[-1], peshape[-2], *sample_shape, ) def sample_cached_cholesky( posterior: GPyTorchPosterior, baseline_L: Tensor, q: int, base_samples: Tensor, sample_shape: torch.Size, max_tries: int = 6, ) -> Tensor: r"""Get posterior samples at the `q` new points from the joint multi-output posterior. Args: posterior: The joint posterior is over (X_baseline, X). baseline_L: The baseline lower triangular cholesky factor. q: The number of new points in X. base_samples: The base samples. sample_shape: The sample shape. max_tries: The number of tries for computing the Cholesky decomposition with increasing jitter. Returns: A `sample_shape x batch_shape x q x m`-dim tensor of posterior samples at the new points. 
""" # compute bottom left covariance block mvn = posterior.distribution lazy_covar = ( extract_batch_covar(mt_mvn=mvn) if isinstance(mvn, MultitaskMultivariateNormal) else mvn.lazy_covariance_matrix ) # Get the `q` new rows of the batched covariance matrix bottom_rows = lazy_covar[..., -q:, :].to_dense() # The covariance in block form is: # [K(X_baseline, X_baseline), K(X_baseline, X)] # [K(X, X_baseline), K(X, X)] # bl := K(X, X_baseline) # br := K(X, X) # Get bottom right block of new covariance bl, br = bottom_rows.split([bottom_rows.shape[-1] - q, q], dim=-1) # Solve Ax = b # where A = K(X_baseline, X_baseline) and b = K(X, X_baseline)^T # and bl_chol := x^T # bl_chol is the new `(batch_shape) x q x n`-dim bottom left block # of the cholesky decomposition bl_chol = torch.linalg.solve_triangular( baseline_L, bl.transpose(-2, -1), upper=False ).transpose(-2, -1) # Compute the new bottom right block of the Cholesky # decomposition via: # Cholesky(K(X, X) - bl_chol @ bl_chol^T) br_to_chol = br - bl_chol @ bl_chol.transpose(-2, -1) # TODO: technically we should make sure that we add a # consistent nugget to the cached covariance and the new block br_chol = psd_safe_cholesky(br_to_chol, max_tries=max_tries) # Create a `(batch_shape) x q x (n+q)`-dim tensor containing the # `q` new bottom rows of the Cholesky decomposition new_Lq = torch.cat([bl_chol, br_chol], dim=-1) mean = posterior.distribution.mean base_samples = _reshape_base_samples( base_samples=base_samples, sample_shape=sample_shape, posterior=posterior, ) if not isinstance(posterior.distribution, MultitaskMultivariateNormal): # add output dim mean = mean.unsqueeze(-1) # add batch dim corresponding to output dim new_Lq = new_Lq.unsqueeze(-3) new_mean = mean[..., -q:, :] res = ( new_Lq.matmul(base_samples) .add(new_mean.transpose(-1, -2).unsqueeze(-1)) .permute(-1, *range(posterior.distribution.loc.dim() - 1), -2, -3) .contiguous() ) contains_nans = torch.isnan(res).any() contains_infs = torch.isinf(res).any() if contains_nans or contains_infs: suffix_args = [] if contains_nans: suffix_args.append("nans") if contains_infs: suffix_args.append("infs") suffix = " and ".join(suffix_args) raise NanError(f"Samples contain {suffix}.") return res
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Helpers for handling objectives. """ from __future__ import annotations import warnings from typing import Callable, List, Optional, Union import torch from botorch.utils.safe_math import log_fatmoid, logexpit from torch import Tensor def get_objective_weights_transform( weights: Optional[Tensor], ) -> Callable[[Tensor, Optional[Tensor]], Tensor]: r"""Create a linear objective callable from a set of weights. Create a callable mapping a Tensor of size `b x q x m` and an (optional) Tensor of size `b x q x d` to a Tensor of size `b x q`, where `m` is the number of outputs of the model using scalarization via the objective weights. This callable supports broadcasting (e.g. for calling on a tensor of shape `mc_samples x b x q x m`). For `m = 1`, the objective weight is used to determine the optimization direction. Args: weights: a 1-dimensional Tensor containing a weight for each task. If not provided, the identity mapping is used. Returns: Transform function using the objective weights. Example: >>> weights = torch.tensor([0.75, 0.25]) >>> transform = get_objective_weights_transform(weights) """ # if no weights provided, just extract the single output if weights is None: return lambda Y: Y.squeeze(-1) def _objective(Y: Tensor, X: Optional[Tensor] = None): r"""Evaluate objective. Note: einsum multiples Y by weights and sums over the `m`-dimension. Einsum is ~2x faster than using `(Y * weights.view(1, 1, -1)).sum(dim-1)`. Args: Y: A `... x b x q x m` tensor of function values. Returns: A `... x b x q`-dim tensor of objective values. """ return torch.einsum("...m, m", [Y, weights]) return _objective def apply_constraints_nonnegative_soft( obj: Tensor, constraints: List[Callable[[Tensor], Tensor]], samples: Tensor, eta: Union[Tensor, float], ) -> Tensor: r"""Applies constraints to a non-negative objective. This function uses a sigmoid approximation to an indicator function for each constraint. Args: obj: A `n_samples x b x q (x m')`-dim Tensor of objective values. constraints: A list of callables, each mapping a Tensor of size `b x q x m` to a Tensor of size `b x q`, where negative values imply feasibility. This callable must support broadcasting. Only relevant for multi- output models (`m` > 1). samples: A `n_samples x b x q x m` Tensor of samples drawn from the posterior. eta: The temperature parameter for the sigmoid function. Can be either a float or a 1-dim tensor. In case of a float the same eta is used for every constraint in constraints. In case of a tensor the length of the tensor must match the number of provided constraints. The i-th constraint is then estimated with the i-th eta value. Returns: A `n_samples x b x q (x m')`-dim tensor of feasibility-weighted objectives. """ w = compute_smoothed_feasibility_indicator( constraints=constraints, samples=samples, eta=eta ) if obj.dim() == samples.dim(): w = w.unsqueeze(-1) # Need to unsqueeze to accommodate the outcome dimension. return obj.clamp_min(0).mul(w) # Enforce non-negativity of obj, apply constraints. def compute_feasibility_indicator( constraints: Optional[List[Callable[[Tensor], Tensor]]], samples: Tensor, ) -> Tensor: r"""Computes the feasibility of a list of constraints given posterior samples. 
Args: constraints: A list of callables, each mapping a batch_shape x q x m`-dim Tensor to a `batch_shape x q`-dim Tensor, where negative values imply feasibility. samples: A batch_shape x q x m`-dim Tensor of posterior samples. Returns: A `batch_shape x q`-dim tensor of Boolean feasibility values. """ ind = torch.ones(samples.shape[:-1], dtype=torch.bool, device=samples.device) if constraints is not None: for constraint in constraints: ind = ind.logical_and(constraint(samples) < 0) return ind def compute_smoothed_feasibility_indicator( constraints: List[Callable[[Tensor], Tensor]], samples: Tensor, eta: Union[Tensor, float], log: bool = False, fat: bool = False, ) -> Tensor: r"""Computes the smoothed feasibility indicator of a list of constraints. Given posterior samples, using a sigmoid to smoothly approximate the feasibility indicator of each individual constraint to ensure differentiability and high gradient signal. The `fat` and `log` options improve the numerical behavior of the smooth approximation. NOTE: *Negative* constraint values are associated with feasibility. Args: constraints: A list of callables, each mapping a Tensor of size `b x q x m` to a Tensor of size `b x q`, where negative values imply feasibility. This callable must support broadcasting. Only relevant for multi- output models (`m` > 1). samples: A `n_samples x b x q x m` Tensor of samples drawn from the posterior. eta: The temperature parameter for the sigmoid function. Can be either a float or a 1-dim tensor. In case of a float the same eta is used for every constraint in constraints. In case of a tensor the length of the tensor must match the number of provided constraints. The i-th constraint is then estimated with the i-th eta value. log: Toggles the computation of the log-feasibility indicator. fat: Toggles the computation of the fat-tailed feasibility indicator. Returns: A `n_samples x b x q`-dim tensor of feasibility indicator values. """ if type(eta) is not Tensor: eta = torch.full((len(constraints),), eta) if len(eta) != len(constraints): raise ValueError( "Number of provided constraints and number of provided etas do not match." ) if not (eta > 0).all(): raise ValueError("eta must be positive.") is_feasible = torch.zeros_like(samples[..., 0]) log_sigmoid = log_fatmoid if fat else logexpit for constraint, e in zip(constraints, eta): is_feasible = is_feasible + log_sigmoid(-constraint(samples) / e) return is_feasible if log else is_feasible.exp() # TODO: deprecate this function def soft_eval_constraint(lhs: Tensor, eta: float = 1e-3) -> Tensor: r"""Element-wise evaluation of a constraint in a 'soft' fashion `value(x) = 1 / (1 + exp(x / eta))` Args: lhs: The left hand side of the constraint `lhs <= 0`. eta: The temperature parameter of the softmax function. As eta decreases, this approximates the Heaviside step function. Returns: Element-wise 'soft' feasibility indicator of the same shape as `lhs`. For each element `x`, `value(x) -> 0` as `x` becomes positive, and `value(x) -> 1` as x becomes negative. """ warnings.warn( "`soft_eval_constraint` is deprecated. 
Please consider `torch.utils.sigmoid` " + "with its `fat` and `log` options to compute feasibility indicators.", DeprecationWarning, ) if eta <= 0: raise ValueError("eta must be positive.") return torch.sigmoid(-lhs / eta) def apply_constraints( obj: Tensor, constraints: List[Callable[[Tensor], Tensor]], samples: Tensor, infeasible_cost: float, eta: Union[Tensor, float] = 1e-3, ) -> Tensor: r"""Apply constraints using an infeasible_cost `M` for negative objectives. This allows feasibility-weighting an objective for the case where the objective can be negative by using the following strategy: (1) Add `M` to make obj non-negative; (2) Apply constraints using the sigmoid approximation; (3) Shift by `-M`. Args: obj: A `n_samples x b x q (x m')`-dim Tensor of objective values. constraints: A list of callables, each mapping a Tensor of size `b x q x m` to a Tensor of size `b x q`, where negative values imply feasibility. This callable must support broadcasting. Only relevant for multi- output models (`m` > 1). samples: A `n_samples x b x q x m` Tensor of samples drawn from the posterior. infeasible_cost: The infeasible value. eta: The temperature parameter of the sigmoid function. Can be either a float or a 1-dim tensor. In case of a float the same eta is used for every constraint in constraints. In case of a tensor the length of the tensor must match the number of provided constraints. The i-th constraint is then estimated with the i-th eta value. Returns: A `n_samples x b x q (x m')`-dim tensor of feasibility-weighted objectives. """ # obj has dimensions n_samples x b x q (x m') obj = obj.add(infeasible_cost) # now it is nonnegative obj = apply_constraints_nonnegative_soft( obj=obj, constraints=constraints, samples=samples, eta=eta, ) return obj.add(-infeasible_cost)
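
# --------------------------------------------------------------------------- #
# Minimal usage sketch (illustrative, not part of the library) for the
# constraint helpers above: feasibility-weight posterior samples with a single
# soft constraint on the second model output.
import torch

from botorch.utils.objective import (
    apply_constraints,
    compute_smoothed_feasibility_indicator,
)

samples = torch.randn(8, 4, 1, 2)                  # n_samples x b x q x m
obj = samples[..., 0]                              # first output is the objective
constraints = [lambda Y: Y[..., 1]]                # second output feasible if <= 0

weights = compute_smoothed_feasibility_indicator(
    constraints=constraints, samples=samples, eta=1e-3
)                                                  # n_samples x b x q, values in (0, 1)

weighted_obj = apply_constraints(
    obj=obj,
    constraints=constraints,
    samples=samples,
    infeasible_cost=10.0,                          # shift handles negative objectives
    eta=1e-3,
)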
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Representations for different kinds of data.""" from __future__ import annotations from abc import ABC, abstractmethod from dataclasses import dataclass, fields from typing import Any from torch import device as Device, dtype as Dtype, LongTensor, Size, Tensor class BotorchContainer(ABC): r"""Abstract base class for BoTorch's data containers. A BotorchContainer represents a tensor, which should be the sole object returned by its `__call__` method. Said tensor is expected to consist of one or more "events" (e.g. data points or feature vectors), whose shape is given by the required `event_shape` field. Notice: Once version 3.10 becomes standard, this class should be reworked to take advantage of dataclasses' `kw_only` flag. :meta private: """ event_shape: Size def __post_init__(self, validate_init: bool = True) -> None: if validate_init: self._validate() @abstractmethod def __call__(self) -> Tensor: raise NotImplementedError @abstractmethod def __eq__(self, other: Any) -> bool: raise NotImplementedError @property @abstractmethod def shape(self) -> Size: raise NotImplementedError @property @abstractmethod def device(self) -> Device: raise NotImplementedError @property @abstractmethod def dtype(self) -> Dtype: raise NotImplementedError def _validate(self) -> None: for field in fields(self): if field.name == "event_shape": return raise AttributeError("Missing required field `event_shape`.") @dataclass(eq=False) class DenseContainer(BotorchContainer): r"""Basic representation of data stored as a dense Tensor.""" values: Tensor event_shape: Size def __call__(self) -> Tensor: """Returns a dense tensor representation of the container's contents.""" return self.values def __eq__(self, other: Any) -> bool: return ( type(other) is type(self) and self.shape == other.shape and self.values.equal(other.values) ) @property def shape(self) -> Size: return self.values.shape @property def device(self) -> Device: return self.values.device @property def dtype(self) -> Dtype: return self.values.dtype def _validate(self) -> None: super()._validate() for a, b in zip(reversed(self.event_shape), reversed(self.values.shape)): if a != b: raise ValueError( f"Shape of `values` {self.values.shape} incompatible with " f"`event shape` {self.event_shape}." 
) @dataclass(eq=False) class SliceContainer(BotorchContainer): r"""Represent data points formed by concatenating (n-1)-dimensional slices taken from the leading dimension of an n-dimensional source tensor.""" values: Tensor indices: LongTensor event_shape: Size def __call__(self) -> Tensor: flat = self.values.index_select(dim=0, index=self.indices.view(-1)) return flat.view(*self.indices.shape[:-1], -1, *self.values.shape[2:]) def __eq__(self, other: Any) -> bool: return ( type(other) is type(self) and self.values.equal(other.values) and self.indices.equal(other.indices) ) @property def shape(self) -> Size: return self.indices.shape[:-1] + self.event_shape @property def device(self) -> Device: return self.values.device @property def dtype(self) -> Dtype: return self.values.dtype def _validate(self) -> None: super()._validate() values = self.values indices = self.indices assert indices.ndim > 1 assert (-1 < indices.min()) & (indices.max() < len(values)) event_shape = self.event_shape _event_shape = (indices.shape[-1] * values.shape[1],) + values.shape[2:] if event_shape != _event_shape: raise ValueError( f"Shapes of `values` {values.shape} and `indices` " f"{indices.shape} incompatible with `event_shape` {event_shape}." )
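
# --------------------------------------------------------------------------- #
# Minimal usage sketch (illustrative, not part of the library) for the
# containers above: a dense container over feature vectors and a slice
# container that concatenates pairs of rows drawn from a shared ground set.
import torch

from botorch.utils.containers import DenseContainer, SliceContainer

values = torch.rand(16, 2)
dense = DenseContainer(values=values, event_shape=torch.Size([2]))
assert torch.equal(dense(), values)

indices = torch.stack([torch.randperm(16)[:2] for _ in range(8)])  # 8 x 2, long dtype
pairs = SliceContainer(
    values=values,
    indices=indices,
    event_shape=torch.Size([2 * values.shape[-1]]),  # each event: 2 concatenated rows
)
assert pairs().shape == torch.Size([8, 4])           # indices.shape[:-1] + event_shape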
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from functools import lru_cache from numbers import Number from typing import Iterator, Optional, Tuple, Union import torch from torch import Tensor @lru_cache(maxsize=None) def get_constants( values: Union[Number, Iterator[Number]], device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Union[Tensor, Tuple[Tensor, ...]]: r"""Returns scalar-valued Tensors containing each of the given constants. Used to expedite tensor operations involving scalar arithmetic. Note that the returned Tensors should not be modified in-place.""" if isinstance(values, Number): return torch.full((), values, dtype=dtype, device=device) return tuple(torch.full((), val, dtype=dtype, device=device) for val in values) def get_constants_like( values: Union[Number, Iterator[Number]], ref: Tensor, ) -> Union[Tensor, Iterator[Tensor]]: return get_constants(values, device=ref.device, dtype=ref.dtype)
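
# --------------------------------------------------------------------------- #
# Minimal usage sketch (illustrative, not part of the library): fetch cached
# scalar tensors matching a reference tensor's device and dtype.
import torch

from botorch.utils.constants import get_constants_like

ref = torch.zeros(3, dtype=torch.float64)
half, two = get_constants_like((0.5, 2.0), ref)   # cached across repeated calls
assert half.dtype == ref.dtype and half.item() == 0.5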
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Discretization (rounding) functions for acquisition optimization. References .. [Daulton2022bopr] S. Daulton, X. Wan, D. Eriksson, M. Balandat, M. A. Osborne, E. Bakshy. Bayesian Optimization over Discrete and Mixed Spaces via Probabilistic Reparameterization. Advances in Neural Information Processing Systems 35, 2022. """ from __future__ import annotations import torch from torch import Tensor from torch.autograd import Function from torch.nn.functional import one_hot def approximate_round(X: Tensor, tau: float = 1e-3) -> Tensor: r"""Diffentiable approximate rounding function. This method is a piecewise approximation of a rounding function where each piece is a hyperbolic tangent function. Args: X: The tensor to round to the nearest integer (element-wise). tau: A temperature hyperparameter. Returns: The approximately rounded input tensor. """ offset = X.floor() scaled_remainder = (X - offset - 0.5) / tau rounding_component = (torch.tanh(scaled_remainder) + 1) / 2 return offset + rounding_component class IdentitySTEFunction(Function): """Base class for functions using straight through gradient estimators. This class approximates the gradient with the identity function. """ @staticmethod def backward(ctx, grad_output: Tensor) -> Tensor: r"""Use a straight-through estimator the gradient. This uses the identity function. Args: grad_output: A tensor of gradients. Returns: The provided tensor. """ return grad_output class RoundSTE(IdentitySTEFunction): r"""Round the input tensor and use a straight-through gradient estimator. [Daulton2022bopr]_ proposes using this in acquisition optimization. """ @staticmethod def forward(ctx, X: Tensor) -> Tensor: r"""Round the input tensor element-wise. Args: X: The tensor to be rounded. Returns: A tensor where each element is rounded to the nearest integer. """ return X.round() class OneHotArgmaxSTE(IdentitySTEFunction): r"""Discretize a continuous relaxation of a one-hot encoded categorical. This returns a one-hot encoded categorical and use a straight-through gradient estimator via an identity function. [Daulton2022bopr]_ proposes using this in acquisition optimization. """ @staticmethod def forward(ctx, X: Tensor) -> Tensor: r"""Discretize the input tensor. This applies a argmax along the last dimensions of the input tensor and one-hot encodes the result. Args: X: The tensor to be rounded. Returns: A tensor where each element is rounded to the nearest integer. """ return one_hot(X.argmax(dim=-1), num_classes=X.shape[-1]).to(X)
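
# --------------------------------------------------------------------------- #
# Minimal usage sketch (illustrative, not part of the library): the STE round
# is exact in the forward pass but passes gradients straight through, while
# `approximate_round` is smooth (and hence differentiable) everywhere.
import torch

from botorch.utils.rounding import approximate_round, OneHotArgmaxSTE, RoundSTE

X = torch.tensor([0.2, 1.7, 2.5], requires_grad=True)
X_round = RoundSTE.apply(X)            # tensor([0., 2., 2.]) (torch rounds .5 to even)
X_round.sum().backward()
assert torch.equal(X.grad, torch.ones_like(X))   # identity (straight-through) gradient

X_soft = approximate_round(X.detach(), tau=1e-3)  # smooth approximation of rounding

logits = torch.rand(4, 3)
one_hot = OneHotArgmaxSTE.apply(logits)           # one-hot encoding of argmax(dim=-1)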
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Representations for different kinds of datasets.""" from __future__ import annotations import warnings from itertools import count, repeat from typing import Any, Dict, Hashable, Iterable, Optional, TypeVar, Union import torch from botorch.utils.containers import BotorchContainer, SliceContainer from torch import long, ones, Tensor T = TypeVar("T") MaybeIterable = Union[T, Iterable[T]] class SupervisedDataset: r"""Base class for datasets consisting of labelled pairs `(X, Y)` and an optional `Yvar` that stipulates observations variances so that `Y[i] ~ N(f(X[i]), Yvar[i])`. Example: .. code-block:: python X = torch.rand(16, 2) Y = torch.rand(16, 1) A = SupervisedDataset(X, Y) B = SupervisedDataset( DenseContainer(X, event_shape=X.shape[-1:]), DenseContainer(Y, event_shape=Y.shape[-1:]), ) assert A == B """ def __init__( self, X: Union[BotorchContainer, Tensor], Y: Union[BotorchContainer, Tensor], Yvar: Union[BotorchContainer, Tensor, None] = None, validate_init: bool = True, ) -> None: r"""Constructs a `SupervisedDataset`. Args: X: A `Tensor` or `BotorchContainer` representing the input features. Y: A `Tensor` or `BotorchContainer` representing the outcomes. Yvar: An optional `Tensor` or `BotorchContainer` representing the observation noise. validate_init: If `True`, validates the input shapes. """ self._X = X self._Y = Y self._Yvar = Yvar if validate_init: self._validate() @property def X(self) -> Tensor: if isinstance(self._X, Tensor): return self._X return self._X() @property def Y(self) -> Tensor: if isinstance(self._Y, Tensor): return self._Y return self._Y() @property def Yvar(self) -> Optional[Tensor]: if self._Yvar is None or isinstance(self._Yvar, Tensor): return self._Yvar return self._Yvar() def _validate(self) -> None: shape_X = self.X.shape if isinstance(self._X, BotorchContainer): shape_X = shape_X[: len(shape_X) - len(self._X.event_shape)] else: shape_X = shape_X[:-1] shape_Y = self.Y.shape if isinstance(self._Y, BotorchContainer): shape_Y = shape_Y[: len(shape_Y) - len(self._Y.event_shape)] else: shape_Y = shape_Y[:-1] if shape_X != shape_Y: raise ValueError("Batch dimensions of `X` and `Y` are incompatible.") if self.Yvar is not None and self.Yvar.shape != self.Y.shape: raise ValueError("Shapes of `Y` and `Yvar` are incompatible.") @classmethod def dict_from_iter( cls, X: MaybeIterable[Union[BotorchContainer, Tensor]], Y: MaybeIterable[Union[BotorchContainer, Tensor]], Yvar: Optional[MaybeIterable[Union[BotorchContainer, Tensor]]] = None, *, keys: Optional[Iterable[Hashable]] = None, ) -> Dict[Hashable, SupervisedDataset]: r"""Returns a dictionary of `SupervisedDataset` from iterables.""" single_X = isinstance(X, (Tensor, BotorchContainer)) single_Y = isinstance(Y, (Tensor, BotorchContainer)) if single_X: X = (X,) if single_Y else repeat(X) if single_Y: Y = (Y,) if single_X else repeat(Y) Yvar = repeat(Yvar) if isinstance(Yvar, (Tensor, BotorchContainer)) else Yvar # Pass in Yvar only if it is not None. 
iterables = (X, Y) if Yvar is None else (X, Y, Yvar) return { elements[0]: cls(*elements[1:]) for elements in zip(keys or count(), *iterables) } def __eq__(self, other: Any) -> bool: return ( type(other) is type(self) and torch.equal(self.X, other.X) and torch.equal(self.Y, other.Y) and ( other.Yvar is None if self.Yvar is None else torch.equal(self.Yvar, other.Yvar) ) ) class FixedNoiseDataset(SupervisedDataset): r"""A SupervisedDataset with an additional field `Yvar` that stipulates observations variances so that `Y[i] ~ N(f(X[i]), Yvar[i])`. NOTE: This is deprecated. Use `SupervisedDataset` instead. """ def __init__( self, X: Union[BotorchContainer, Tensor], Y: Union[BotorchContainer, Tensor], Yvar: Union[BotorchContainer, Tensor], validate_init: bool = True, ) -> None: r"""Initialize a `FixedNoiseDataset` -- deprecated!""" warnings.warn( "`FixedNoiseDataset` is deprecated. Use `SupervisedDataset` instead.", DeprecationWarning, ) super().__init__(X=X, Y=Y, Yvar=Yvar, validate_init=validate_init) class RankingDataset(SupervisedDataset): r"""A SupervisedDataset whose labelled pairs `(x, y)` consist of m-ary combinations `x ∈ Z^{m}` of elements from a ground set `Z = (z_1, ...)` and ranking vectors `y {0, ..., m - 1}^{m}` with properties: a) Ranks start at zero, i.e. min(y) = 0. b) Sorted ranks are contiguous unless one or more ties are present. c) `k` ranks are skipped after a `k`-way tie. Example: .. code-block:: python X = SliceContainer( values=torch.rand(16, 2), indices=torch.stack([torch.randperm(16)[:3] for _ in range(8)]), event_shape=torch.Size([3 * 2]), ) Y = DenseContainer( torch.stack([torch.randperm(3) for _ in range(8)]), event_shape=torch.Size([3]) ) dataset = RankingDataset(X, Y) """ def __init__( self, X: SliceContainer, Y: Union[BotorchContainer, Tensor], validate_init: bool = True, ) -> None: r"""Construct a `RankingDataset`. Args: X: A `SliceContainer` representing the input features being ranked. Y: A `Tensor` or `BotorchContainer` representing the rankings. validate_init: If `True`, validates the input shapes. """ super().__init__(X=X, Y=Y, Yvar=None, validate_init=validate_init) def _validate(self) -> None: super()._validate() Y = self.Y arity = self._X.indices.shape[-1] if Y.min() < 0 or Y.max() >= arity: raise ValueError("Invalid ranking(s): out-of-bounds ranks detected.") # Ensure that rankings are well-defined Y_sort = Y.sort(descending=False, dim=-1).values y_incr = ones([], dtype=long) y_prev = None for i, y in enumerate(Y_sort.unbind(dim=-1)): if i == 0: if (y != 0).any(): raise ValueError("Invalid ranking(s): missing zero-th rank.") y_prev = y continue y_diff = y - y_prev y_prev = y # Either a tie or next ranking when accounting for previous ties if not ((y_diff == 0) | (y_diff == y_incr)).all(): raise ValueError("Invalid ranking(s): ranks not skipped after ties.") # Same as: torch.where(y_diff == 0, y_incr + 1, 1) y_incr = y_incr - y_diff + 1
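
# --------------------------------------------------------------------------- #
# Minimal usage sketch (illustrative, not part of the library): build a
# dictionary of per-outcome datasets from iterables with `dict_from_iter`,
# using the constructor signature defined above.
import torch

from botorch.utils.datasets import SupervisedDataset

X = torch.rand(16, 2)
Ys = [torch.rand(16, 1), torch.rand(16, 1)]        # two outcomes sharing the same X
datasets = SupervisedDataset.dict_from_iter(X, Ys, keys=["objective", "constraint"])
assert set(datasets) == {"objective", "constraint"}
assert torch.equal(datasets["objective"].X, X)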
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.utils.constraints import get_outcome_constraint_transforms from botorch.utils.feasible_volume import estimate_feasible_volume from botorch.utils.objective import apply_constraints, get_objective_weights_transform from botorch.utils.rounding import approximate_round from botorch.utils.sampling import ( batched_multinomial, draw_sobol_normal_samples, draw_sobol_samples, manual_seed, ) from botorch.utils.transforms import standardize, t_batch_mode_transform __all__ = [ "apply_constraints", "approximate_round", "batched_multinomial", "draw_sobol_normal_samples", "draw_sobol_samples", "estimate_feasible_volume", "get_objective_weights_transform", "get_outcome_constraint_transforms", "manual_seed", "standardize", "t_batch_mode_transform", ]
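
# --------------------------------------------------------------------------- #
# Minimal usage sketch (illustrative, not part of the library): the helpers
# re-exported above can be imported directly from `botorch.utils`.
import torch

from botorch.utils import draw_sobol_samples, standardize

bounds = torch.stack([torch.zeros(3), torch.ones(3)])
samples = draw_sobol_samples(bounds=bounds, n=16, q=2, seed=0)  # 16 x 2 x 3 qMC samples
Y_std = standardize(torch.rand(16, 1))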
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

NoneType = type(None)  # stopgap until the return of `NoneType` in Python 3.10


class _DefaultType(type):
    r"""
    Private class whose sole instance `DEFAULT` serves as a special indicator
    representing that a default value should be assigned to an argument.
    Typically used in cases where `None` is an allowed argument.
    """


DEFAULT = _DefaultType("DEFAULT", (), {})
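
# --------------------------------------------------------------------------- #
# Minimal usage sketch (illustrative, not part of the library): use `DEFAULT`
# as a sentinel for arguments where `None` is itself a meaningful value. The
# function below is hypothetical; the import path is assumed from this module.
from botorch.utils.types import DEFAULT


def make_example_options(options=DEFAULT):
    # `None` disables options entirely; `DEFAULT` requests the built-in ones.
    if options is DEFAULT:
        options = {"maxiter": 100}
    return options


assert make_example_options() == {"maxiter": 100}
assert make_example_options(None) is None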
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Utilities for optimization. """ from __future__ import annotations from contextlib import contextmanager from typing import Any, Callable, Dict, Generator, Iterable, NamedTuple, Optional, Union from torch import device as Device, dtype as Dtype, Tensor from torch.nn import Module class TensorCheckpoint(NamedTuple): values: Tensor device: Optional[Device] = None dtype: Optional[Dtype] = None @contextmanager def delattr_ctx( instance: object, *attrs: str, enforce_hasattr: bool = False ) -> Generator[None, None, None]: r"""Contextmanager for temporarily deleting attributes.""" try: cache = {} for key in attrs: if hasattr(instance, key): cache[key] = getattr(instance, key) delattr(instance, key) elif enforce_hasattr: raise ValueError( f"Attribute {key} missing from {type(instance)} instance." ) yield finally: for key, cached_val in cache.items(): setattr(instance, key, cached_val) @contextmanager def requires_grad_ctx( module: Module, assignments: Dict[str, bool] ) -> Generator[None, None, None]: r"""Contextmanager for temporarily setting the requires_grad field of a module's parameters.""" try: cache = {} for name, mode in assignments.items(): parameter = module.get_parameter(name) cache[name] = parameter.requires_grad parameter.requires_grad_(mode) yield finally: for name, mode in cache.items(): module.get_parameter(name).requires_grad_(mode) @contextmanager def parameter_rollback_ctx( parameters: Dict[str, Tensor], checkpoint: Optional[Dict[str, TensorCheckpoint]] = None, **tkwargs: Any, ) -> Generator[Dict[str, TensorCheckpoint], None, None]: r"""Contextmanager that exits by rolling back a module's state_dict. Args: module: Module instance. name_filter: Optional Boolean function used to filter items by name. checkpoint: Optional cache of values and tensor metadata specifying the rollback state for the module (or some subset thereof). **tkwargs: Keyword arguments passed to `torch.Tensor.to` when copying data from each tensor in `module.state_dict()` to the internally created checkpoint. Only adhered to when the `checkpoint` argument is None. Yields: A dictionary of TensorCheckpoints for the module's state_dict. Any in-places changes to the checkpoint will be observed at rollback time. If the checkpoint is cleared, no rollback will occur. """ # Create copies of the orginal values if checkpoint is None: checkpoint = { name: TensorCheckpoint( values=param.detach().to(**tkwargs).clone(), device=param.device, dtype=param.dtype, ) for name, param in parameters.items() } try: # yield the checkpoint dictionary to the user yield checkpoint finally: # restore original values of tracked parameters if checkpoint: for name, param in parameters.items(): if name in checkpoint: values, device, dtype = checkpoint[name] param.data.copy_(values.to(device=device, dtype=dtype)) @contextmanager def module_rollback_ctx( module: Module, name_filter: Optional[Callable[[str], bool]] = None, checkpoint: Optional[Dict[str, TensorCheckpoint]] = None, **tkwargs: Any, ) -> Generator[Dict[str, TensorCheckpoint], None, None]: r"""Contextmanager that exits by rolling back a module's state_dict. Args: module: Module instance. name_filter: Optional Boolean function used to filter items by name. checkpoint: Optional cache of values and tensor metadata specifying the rollback state for the module (or some subset thereof). 
**tkwargs: Keyword arguments passed to `torch.Tensor.to` when copying data from each tensor in `module.state_dict()` to the internally created checkpoint. Only adhered to when the `checkpoint` argument is None. Yields: A dictionary of TensorCheckpoints for the module's state_dict. Any in-places changes to the checkpoint will be observed at rollback time. If the checkpoint is cleared, no rollback will occur. """ # Create copies of the orginal values if checkpoint is None: checkpoint = { name: TensorCheckpoint( values=values.detach().to(**tkwargs).clone(), device=values.device, dtype=values.dtype, ) for name, values in module.state_dict().items() if name_filter is None or name_filter(name) } try: # yield the checkpoint dictionary to the user yield checkpoint finally: # restore original values of tracked parameters if checkpoint: state_dict = module.state_dict() for key, (values, device, dtype) in checkpoint.items(): tnsr = state_dict.get(key) if tnsr is None: state_dict[key] = values.to(device=device, dtype=dtype) else: tnsr[...] = values.to(device=device, dtype=dtype) module.load_state_dict(state_dict) @contextmanager def zero_grad_ctx( parameters: Union[Dict[str, Tensor], Iterable[Tensor]], zero_on_enter: bool = True, zero_on_exit: bool = False, ) -> Generator[None, None, None]: def zero_() -> None: for param in ( parameters.values() if isinstance(parameters, dict) else parameters ): if param.grad is not None: param.grad.zero_() if zero_on_enter: zero_() try: yield finally: if zero_on_exit: zero_()
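
# --------------------------------------------------------------------------- #
# Minimal usage sketch (illustrative, not part of the library): temporarily
# perturb a module's parameters and roll them back on exit. The import path
# `botorch.optim.utils` is an assumption based on the module contents above.
import torch

from botorch.optim.utils import module_rollback_ctx, zero_grad_ctx

module = torch.nn.Linear(2, 1)
original = module.weight.detach().clone()

with module_rollback_ctx(module):
    with torch.no_grad():
        module.weight.add_(1.0)                   # temporary change inside the context
assert torch.equal(module.weight, original)      # state_dict restored at exit

with zero_grad_ctx(dict(module.named_parameters()), zero_on_enter=True):
    pass                                          # existing gradients are zeroed on entry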
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Utilities for MC and qMC sampling. References .. [Trikalinos2014polytope] T. A. Trikalinos and G. van Valkenhoef. Efficient sampling from uniform density n-polytopes. Technical report, Brown University, 2014. """ from __future__ import annotations from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, Generator, Iterable, List, Optional, Tuple, TYPE_CHECKING import numpy as np import scipy import torch from botorch.exceptions.errors import BotorchError from botorch.sampling.qmc import NormalQMCEngine from botorch.utils.transforms import unnormalize from scipy.spatial import Delaunay, HalfspaceIntersection from torch import LongTensor, Tensor from torch.quasirandom import SobolEngine if TYPE_CHECKING: from botorch.sampling.pathwise.paths import SamplePath # pragma: no cover @contextmanager def manual_seed(seed: Optional[int] = None) -> Generator[None, None, None]: r"""Contextmanager for manual setting the torch.random seed. Args: seed: The seed to set the random number generator to. Returns: Generator Example: >>> with manual_seed(1234): >>> X = torch.rand(3) """ old_state = torch.random.get_rng_state() try: if seed is not None: torch.random.manual_seed(seed) yield finally: if seed is not None: torch.random.set_rng_state(old_state) def draw_sobol_samples( bounds: Tensor, n: int, q: int, batch_shape: Optional[Iterable[int], torch.Size] = None, seed: Optional[int] = None, ) -> Tensor: r"""Draw qMC samples from the box defined by bounds. Args: bounds: A `2 x d` dimensional tensor specifying box constraints on a `d`-dimensional space, where bounds[0, :] and bounds[1, :] correspond to lower and upper bounds, respectively. n: The number of (q-batch) samples. As a best practice, use powers of 2. q: The size of each q-batch. batch_shape: The batch shape of the samples. If given, returns samples of shape `n x batch_shape x q x d`, where each batch is an `n x q x d`-dim tensor of qMC samples. seed: The seed used for initializing Owen scrambling. If None (default), use a random seed. Returns: A `n x batch_shape x q x d`-dim tensor of qMC samples from the box defined by bounds. Example: >>> bounds = torch.stack([torch.zeros(3), torch.ones(3)]) >>> samples = draw_sobol_samples(bounds, 16, 2) """ batch_shape = batch_shape or torch.Size() batch_size = int(torch.prod(torch.tensor(batch_shape))) d = bounds.shape[-1] lower = bounds[0] rng = bounds[1] - bounds[0] sobol_engine = SobolEngine(q * d, scramble=True, seed=seed) samples_raw = sobol_engine.draw(batch_size * n, dtype=lower.dtype) samples_raw = samples_raw.view(*batch_shape, n, q, d).to(device=lower.device) if batch_shape != torch.Size(): samples_raw = samples_raw.permute(-3, *range(len(batch_shape)), -2, -1) return lower + rng * samples_raw def draw_sobol_normal_samples( d: int, n: int, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, seed: Optional[int] = None, ) -> Tensor: r"""Draw qMC samples from a multi-variate standard normal N(0, I_d). A primary use-case for this functionality is to compute an QMC average of f(X) over X where each element of X is drawn N(0, 1). Args: d: The dimension of the normal distribution. n: The number of samples to return. As a best practice, use powers of 2. device: The torch device. dtype: The torch dtype. 
seed: The seed used for initializing Owen scrambling. If None (default), use a random seed. Returns: A tensor of qMC standard normal samples with dimension `n x d` with device and dtype specified by the input. Example: >>> samples = draw_sobol_normal_samples(2, 16) """ normal_qmc_engine = NormalQMCEngine(d=d, seed=seed, inv_transform=True) samples = normal_qmc_engine.draw(n, dtype=torch.float if dtype is None else dtype) return samples.to(device=device) def sample_hypersphere( d: int, n: int = 1, qmc: bool = False, seed: Optional[int] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Tensor: r"""Sample uniformly from a unit d-sphere. Args: d: The dimension of the hypersphere. n: The number of samples to return. qmc: If True, use QMC Sobol sampling (instead of i.i.d. uniform). seed: If provided, use as a seed for the RNG. device: The torch device. dtype: The torch dtype. Returns: An `n x d` tensor of uniform samples from from the d-hypersphere. Example: >>> sample_hypersphere(d=5, n=10) """ dtype = torch.float if dtype is None else dtype if d == 1: rnd = torch.randint(0, 2, (n, 1), device=device, dtype=dtype) return 2 * rnd - 1 if qmc: rnd = draw_sobol_normal_samples(d=d, n=n, device=device, dtype=dtype, seed=seed) else: with manual_seed(seed=seed): rnd = torch.randn(n, d, dtype=dtype) samples = rnd / torch.linalg.norm(rnd, dim=-1, keepdim=True) if device is not None: samples = samples.to(device) return samples def sample_simplex( d: int, n: int = 1, qmc: bool = False, seed: Optional[int] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Tensor: r"""Sample uniformly from a d-simplex. Args: d: The dimension of the simplex. n: The number of samples to return. qmc: If True, use QMC Sobol sampling (instead of i.i.d. uniform). seed: If provided, use as a seed for the RNG. device: The torch device. dtype: The torch dtype. Returns: An `n x d` tensor of uniform samples from from the d-simplex. Example: >>> sample_simplex(d=3, n=10) """ dtype = torch.float if dtype is None else dtype if d == 1: return torch.ones(n, 1, device=device, dtype=dtype) if qmc: sobol_engine = SobolEngine(d - 1, scramble=True, seed=seed) rnd = sobol_engine.draw(n, dtype=dtype) else: with manual_seed(seed=seed): rnd = torch.rand(n, d - 1, dtype=dtype) srnd, _ = torch.sort(rnd, dim=-1) zeros = torch.zeros(n, 1, dtype=dtype) ones = torch.ones(n, 1, dtype=dtype) srnd = torch.cat([zeros, srnd, ones], dim=-1) if device is not None: srnd = srnd.to(device) return srnd[..., 1:] - srnd[..., :-1] def sample_polytope( A: Tensor, b: Tensor, x0: Tensor, n: int = 10000, n0: int = 100, seed: Optional[int] = None, ) -> Tensor: r""" Hit and run sampler from uniform sampling points from a polytope, described via inequality constraints A*x<=b. Args: A: A Tensor describing inequality constraints so that all samples satisfy Ax<=b. b: A Tensor describing the inequality constraints so that all samples satisfy Ax<=b. x0: A `d`-dim Tensor representing a starting point of the chain satisfying the constraints. n: The number of resulting samples kept in the output. n0: The number of burn-in samples. The chain will produce n+n0 samples but the first n0 samples are not saved. seed: The seed for the sampler. If omitted, use a random seed. Returns: (n, d) dim Tensor containing the resulting samples. 
""" n_tot = n + n0 seed = seed if seed is not None else torch.randint(0, 1000000, (1,)).item() with manual_seed(seed=seed): rands = torch.rand(n_tot, dtype=A.dtype, device=A.device) # pre-sample samples from hypersphere d = x0.size(0) # uniform samples from unit ball in d dims Rs = sample_hypersphere( d=d, n=n_tot, dtype=A.dtype, device=A.device, seed=seed ).unsqueeze(-1) # compute matprods in batch ARs = (A @ Rs).squeeze(-1) out = torch.empty(n, A.size(-1), dtype=A.dtype, device=A.device) x = x0.clone() for i, (ar, r, rnd) in enumerate(zip(ARs, Rs, rands)): # given x, the next point in the chain is x+alpha*r # it also satisfies A(x+alpha*r)<=b which implies A*alpha*r<=b-Ax # so alpha<=(b-Ax)/ar for ar>0, and alpha>=(b-Ax)/ar for ar<0. # b - A @ x is always >= 0, clamping for numerical tolerances w = (b - A @ x).squeeze().clamp(min=0.0) / ar pos = w >= 0 alpha_max = w[pos].min() # important to include equality here in cases x is at the boundary # of the polytope neg = w <= 0 alpha_min = w[neg].max() # alpha~Unif[alpha_min, alpha_max] alpha = alpha_min + rnd * (alpha_max - alpha_min) x = x + alpha * r if i >= n0: # save samples after burn-in period out[i - n0] = x.squeeze() return out def batched_multinomial( weights: Tensor, num_samples: int, replacement: bool = False, generator: Optional[torch.Generator] = None, out: Optional[Tensor] = None, ) -> LongTensor: r"""Sample from multinomial with an arbitrary number of batch dimensions. Args: weights: A `batch_shape x num_categories` tensor of weights. For each batch index `i, j, ...`, this functions samples from a multinomial with `input` `weights[i, j, ..., :]`. Note that the weights need not sum to one, but must be non-negative, finite and have a non-zero sum. num_samples: The number of samples to draw for each batch index. Must be smaller than `num_categories` if `replacement=False`. replacement: If True, samples are drawn with replacement. generator: A a pseudorandom number generator for sampling. out: The output tensor (optional). If provided, must be of size `batch_shape x num_samples`. Returns: A `batch_shape x num_samples` tensor of samples. This is a thin wrapper around `torch.multinomial` that allows weight (`input`) tensors with an arbitrary number of batch dimensions (`torch.multinomial` only allows a single batch dimension). The calling signature is the same as for `torch.multinomial`. Example: >>> weights = torch.rand(2, 3, 10) >>> samples = batched_multinomial(weights, 4) # shape is 2 x 3 x 4 """ batch_shape, n_categories = weights.shape[:-1], weights.size(-1) flat_samples = torch.multinomial( input=weights.view(-1, n_categories), num_samples=num_samples, replacement=replacement, generator=generator, out=None if out is None else out.view(-1, num_samples), ) return flat_samples.view(*batch_shape, num_samples) def _convert_bounds_to_inequality_constraints(bounds: Tensor) -> Tuple[Tensor, Tensor]: r"""Convert bounds into inequality constraints of the form Ax <= b. 
Args: bounds: A `2 x d`-dim tensor of bounds Returns: A two-element tuple containing - A: A `2d x d`-dim tensor of coefficients - b: A `2d x 1`-dim tensor containing the right hand side """ d = bounds.shape[-1] eye = torch.eye(d, dtype=bounds.dtype, device=bounds.device) lower, upper = bounds lower_finite, upper_finite = bounds.isfinite() A = torch.cat([-eye[lower_finite], eye[upper_finite]], dim=0) b = torch.cat([-lower[lower_finite], upper[upper_finite]], dim=0).unsqueeze(-1) return A, b def find_interior_point( A: np.ndarray, b: np.ndarray, A_eq: Optional[np.ndarray] = None, b_eq: Optional[np.ndarray] = None, ) -> np.ndarray: r"""Find an interior point of a polytope via linear programming. Args: A: A `n_ineq x d`-dim numpy array containing the coefficients of the constraint inequalities. b: A `n_ineq x 1`-dim numpy array containing the right hand sides of the constraint inequalities. A_eq: A `n_eq x d`-dim numpy array containing the coefficients of the constraint equalities. b_eq: A `n_eq x 1`-dim numpy array containing the right hand sides of the constraint equalities. Returns: A `d`-dim numpy array containing an interior point of the polytope. This function will raise a ValueError if there is no such point. This method solves the following Linear Program: min -s subject to A @ x <= b - 2 * s, s >= 0, A_eq @ x = b_eq In case the polytope is unbounded, then it will also constrain the slack variable `s` to `s<=1`. """ # augment inequality constraints: A @ (x, s) <= b d = A.shape[-1] ncon = A.shape[-2] + 1 c = np.zeros(d + 1) c[-1] = -1 b_ub = np.zeros(ncon) b_ub[:-1] = b.reshape(-1) A_ub = np.zeros((ncon, d + 1)) A_ub[:-1, :-1] = A A_ub[:-1, -1] = 2.0 A_ub[-1, -1] = -1.0 result = scipy.optimize.linprog( c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=(None, None), method="highs", ) if result.status == 3: # problem is unbounded - to find a bounded solution we constrain the # slack variable `s` to `s <= 1.0`. A_s = np.concatenate([np.zeros((1, d)), np.ones((1, 1))], axis=-1) A_ub = np.concatenate([A_ub, A_s], axis=0) b_ub = np.concatenate([b_ub, np.ones(1)], axis=-1) result = scipy.optimize.linprog( c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=(None, None), method="highs", ) if result.status == 2: raise ValueError( "No feasible point found. Constraint polytope appears empty. " + "Check your constraints." ) elif result.status > 0: raise ValueError( "Problem checking constraint specification. " + "linprog status: {}".format(result.message) ) # the x in the result is really (x, s) return result.x[:-1] class PolytopeSampler(ABC): r""" Base class for samplers that sample points from a polytope. :meta private: """ def __init__( self, inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None, equality_constraints: Optional[Tuple[Tensor, Tensor]] = None, bounds: Optional[Tensor] = None, interior_point: Optional[Tensor] = None, ) -> None: r""" Args: inequality_constraints: Tensors `(A, b)` describing inequality constraints `A @ x <= b`, where `A` is a `n_ineq_con x d`-dim Tensor and `b` is a `n_ineq_con x 1`-dim Tensor, with `n_ineq_con` the number of inequalities and `d` the dimension of the sample space. equality_constraints: Tensors `(C, d)` describing the equality constraints `C @ x = d`, where `C` is a `n_eq_con x d`-dim Tensor and `d` is a `n_eq_con x 1`-dim Tensor with `n_eq_con` the number of equalities. bounds: A `2 x d`-dim tensor of box bounds, where `inf` (`-inf`) means that the respective dimension is unbounded above (below). 
interior_point: A `d x 1`-dim Tensor presenting a point in the (relative) interior of the polytope. If omitted, determined automatically by solving a Linear Program. """ if inequality_constraints is None: if bounds is None: raise BotorchError( "PolytopeSampler requires either inequality constraints or bounds." ) A = torch.empty( 0, bounds.shape[-1], dtype=bounds.dtype, device=bounds.device ) b = torch.empty(0, 1, dtype=bounds.dtype, device=bounds.device) else: A, b = inequality_constraints if bounds is not None: # add inequality constraints for bounds # TODO: make sure there are not deduplicate constraints A2, b2 = _convert_bounds_to_inequality_constraints(bounds=bounds) A = torch.cat([A, A2], dim=0) b = torch.cat([b, b2], dim=0) self.A = A self.b = b self.equality_constraints = equality_constraints if equality_constraints is not None: self.C, self.d = equality_constraints U, S, Vh = torch.linalg.svd(self.C) r = torch.nonzero(S).size(0) # rank of matrix C self.nullC = Vh[r:, :].transpose(-1, -2) # orthonormal null space of C, # satisfying # C @ nullC = 0 and nullC.T @ nullC = I # using the change of variables x=x0+nullC*y, # sample y satisfies A*nullC*y<=b-A*x0. # the linear constraint is automatically satisfied as x0 satisfies it. else: self.C = None self.d = None self.nullC = torch.eye( self.A.size(-1), dtype=self.A.dtype, device=self.A.device ) self.new_A = self.A @ self.nullC # doesn't depend on the initial point # initial point for the original, not transformed, problem if interior_point is not None: if self.feasible(interior_point): self.x0 = interior_point else: raise ValueError("The given input point is not feasible.") else: self.x0 = self.find_interior_point() def feasible(self, x: Tensor) -> bool: r"""Check whether a point is contained in the polytope. Args: x: A `d x 1`-dim Tensor. Returns: True if `x` is contained inside the polytope (incl. its boundary), False otherwise. """ ineq = (self.A @ x - self.b <= 0).all() if self.equality_constraints is not None: eq = (self.C @ x - self.d == 0).all() return ineq & eq return ineq def find_interior_point(self) -> Tensor: r"""Find an interior point of the polytope. Returns: A `d x 1`-dim Tensor representing a point contained in the polytope. This function will raise a ValueError if there is no such point. """ if self.equality_constraints: # equality constraints: A_eq * (x, s) = b_eq A_eq = np.zeros((self.C.size(0), self.C.size(-1) + 1)) A_eq[:, :-1] = self.C.cpu().numpy() b_eq = self.d.cpu().numpy() else: A_eq = None b_eq = None x0 = find_interior_point( A=self.A.cpu().numpy(), b=self.b.cpu().numpy(), A_eq=A_eq, b_eq=b_eq ) return torch.from_numpy(x0).to(self.A).unsqueeze(-1) # -------- Abstract methods to be implemented by subclasses -------- # @abstractmethod def draw(self, n: int = 1, seed: Optional[int] = None) -> Tensor: r"""Draw samples from the polytope. Args: n: The number of samples. seed: The random seed. Returns: A `n x d` Tensor of samples from the polytope. """ pass # pragma: no cover class HitAndRunPolytopeSampler(PolytopeSampler): r"""A sampler for sampling from a polyope using a hit-and-run algorithm.""" def __init__( self, inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None, equality_constraints: Optional[Tuple[Tensor, Tensor]] = None, bounds: Optional[Tensor] = None, interior_point: Optional[Tensor] = None, n_burnin: int = 0, ) -> None: r"""A sampler for sampling from a polyope using a hit-and-run algorithm. 
Args: inequality_constraints: Tensors `(A, b)` describing inequality constraints `A @ x <= b`, where `A` is a `n_ineq_con x d`-dim Tensor and `b` is a `n_ineq_con x 1`-dim Tensor, with `n_ineq_con` the number of inequalities and `d` the dimension of the sample space. equality_constraints: Tensors `(C, d)` describing the equality constraints `C @ x = d`, where `C` is a `n_eq_con x d`-dim Tensor and `d` is a `n_eq_con x 1`-dim Tensor with `n_eq_con` the number of equalities. bounds: A `2 x d`-dim tensor of box bounds, where `inf` (`-inf`) means that the respective dimension is unbounded from above (below). interior_point: A `d x 1`-dim Tensor representing a point in the (relative) interior of the polytope. If omitted, determined automatically by solving a Linear Program. n_burnin: The number of burn in samples. """ super().__init__( inequality_constraints=inequality_constraints, equality_constraints=equality_constraints, bounds=bounds, interior_point=interior_point, ) self.n_burnin = n_burnin def draw(self, n: int = 1, seed: Optional[int] = None) -> Tensor: r"""Draw samples from the polytope. Args: n: The number of samples. seed: The random seed. Returns: A `n x d` Tensor of samples from the polytope. """ transformed_samples = sample_polytope( # run this on the cpu A=self.new_A.cpu(), b=(self.b - self.A @ self.x0).cpu(), x0=torch.zeros((self.nullC.size(1), 1), dtype=self.A.dtype), n=n, n0=self.n_burnin, seed=seed, ).to(self.b) init_shift = self.x0.transpose(-1, -2) samples = init_shift + transformed_samples @ self.nullC.transpose(-1, -2) # keep the last element of the resulting chain as # the beginning of the next chain self.x0 = samples[-1].reshape(-1, 1) # reset counter so there is no burn-in for subsequent samples self.n_burnin = 0 return samples class DelaunayPolytopeSampler(PolytopeSampler): r"""A polytope sampler using Delaunay triangulation. This sampler first enumerates the vertices of the constraint polytope and then uses a Delaunay triangulation to tesselate its convex hull. The sampling happens in two stages: 1. First, we sample from the set of hypertriangles generated by the Delaunay triangulation (i.e. which hyper-triangle to draw the sample from) with probabilities proportional to the triangle volumes. 2. Then, we sample uniformly from the chosen hypertriangle by sampling uniformly from the unit simplex of the appropriate dimension, and then computing the convex combination of the vertices of the hypertriangle according to that draw from the simplex. The best reference (not exactly the same, but functionally equivalent) is [Trikalinos2014polytope]_. A simple R implementation is available at https://github.com/gertvv/tesselample. """ def __init__( self, inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None, equality_constraints: Optional[Tuple[Tensor, Tensor]] = None, bounds: Optional[Tensor] = None, interior_point: Optional[Tensor] = None, ) -> None: r"""Initialize DelaunayPolytopeSampler. Args: inequality_constraints: Tensors `(A, b)` describing inequality constraints `A @ x <= b`, where `A` is a `n_ineq_con x d`-dim Tensor and `b` is a `n_ineq_con x 1`-dim Tensor, with `n_ineq_con` the number of inequalities and `d` the dimension of the sample space. equality_constraints: Tensors `(C, d)` describing the equality constraints `C @ x = d`, where `C` is a `n_eq_con x d`-dim Tensor and `d` is a `n_eq_con x 1`-dim Tensor with `n_eq_con` the number of equalities. 
bounds: A `2 x d`-dim tensor of box bounds, where `inf` (`-inf`) means that the respective dimension is unbounded from above (below). interior_point: A `d x 1`-dim Tensor representing a point in the (relative) interior of the polytope. If omitted, determined automatically by solving a Linear Program. Warning: The vertex enumeration performed in this algorithm can become extremely costly if there are a large number of inequalities. Similarly, the triangulation can get very expensive in high dimensions. Only use this algorithm for moderate dimensions / moderately complex constraint sets. An alternative is the `HitAndRunPolytopeSampler`. """ super().__init__( inequality_constraints=inequality_constraints, equality_constraints=equality_constraints, bounds=bounds, interior_point=interior_point, ) # shift coordinate system to be anchored at x0 new_b = self.b - self.A @ self.x0 if self.new_A.shape[-1] < 2: # if the polytope is in dim 1 (i.e. a line segment) Qhull won't work tshlds = new_b / self.new_A neg = self.new_A < 0 self.y_min = tshlds[neg].max() self.y_max = tshlds[~neg].min() self.dim = 1 else: # Qhull expects inputs of the form A @ x + b <= 0, so we need to negate here halfspaces = torch.cat([self.new_A, -new_b], dim=-1).cpu().numpy() vertices = HalfspaceIntersection( halfspaces=halfspaces, interior_point=np.zeros(self.new_A.shape[-1]) ).intersections self.dim = vertices.shape[-1] try: delaunay = Delaunay(vertices) except ValueError as e: if "Points cannot contain NaN" in str(e): raise ValueError("Polytope is unbounded.") raise e # pragma: no cover polytopes = torch.from_numpy( np.array([delaunay.points[s] for s in delaunay.simplices]), ).to(self.A) volumes = torch.stack([torch.det(p[1:] - p[0]).abs() for p in polytopes]) self._polytopes = polytopes self._p = volumes / volumes.sum() def draw(self, n: int = 1, seed: Optional[int] = None) -> Tensor: r"""Draw samples from the polytope. Args: n: The number of samples. seed: The random seed. Returns: A `n x d` Tensor of samples from the polytope. """ if self.dim == 1: with manual_seed(seed): e = torch.rand(n, 1, device=self.new_A.device, dtype=self.new_A.dtype) transformed_samples = self.y_min + (self.y_max - self.y_min) * e else: if seed is None: generator = None else: generator = torch.Generator(device=self.A.device) generator.manual_seed(seed) index_rvs = torch.multinomial( self._p, num_samples=n, replacement=True, generator=generator, ) simplex_rvs = sample_simplex( d=self.dim + 1, n=n, seed=seed, device=self.A.device, dtype=self.A.dtype ) transformed_samples = torch.stack( [rv @ self._polytopes[idx] for rv, idx in zip(simplex_rvs, index_rvs)] ) init_shift = self.x0.transpose(-1, -2) samples = init_shift + transformed_samples @ self.nullC.transpose(-1, -2) return samples def normalize_linear_constraints( bounds: Tensor, constraints: List[Tuple[Tensor, Tensor, float]] ) -> List[Tuple[Tensor, Tensor, float]]: r"""Normalize linear constraints to the unit cube. Args: bounds (Tensor): A `2 x d`-dim tensor containing the box bounds. constraints (List[Tuple[Tensor, Tensor, float]]): A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs` or `\sum_i (X[indices[i]] * coefficients[i]) = rhs`. 
""" new_constraints = [] for index, coefficient, rhs in constraints: lower, upper = bounds[:, index] s = upper - lower new_constraints.append( (index, s * coefficient, (rhs - torch.dot(coefficient, lower)).item()) ) return new_constraints def get_polytope_samples( n: int, bounds: Tensor, inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, seed: Optional[int] = None, thinning: int = 32, n_burnin: int = 10_000, ) -> Tensor: r"""Sample from polytope defined by box bounds and (in)equality constraints. This uses a hit-and-run Markov chain sampler. TODO: make this method return the sampler object, to avoid doing burn-in every time we draw samples. Args: n: The number of samples. bounds: A `2 x d`-dim tensor containing the box bounds. inequality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`. equality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) = rhs`. seed: The random seed. thinning: The amount of thinning. n_burnin: The number of burn-in samples for the Markov chain sampler. Returns: A `n x d`-dim tensor of samples. """ # create tensors representing linear inequality constraints # of the form Ax >= b. if inequality_constraints: # normalize_linear_constraints is called to solve this issue: # https://github.com/pytorch/botorch/issues/1225 constraints = normalize_linear_constraints(bounds, inequality_constraints) A, b = sparse_to_dense_constraints( d=bounds.shape[-1], constraints=constraints, ) # Note the inequality constraints are of the form Ax >= b, # but PolytopeSampler expects inequality constraints of the # form Ax <= b, so we multiply by -1 below. dense_inequality_constraints = -A, -b else: dense_inequality_constraints = None if equality_constraints: constraints = normalize_linear_constraints(bounds, equality_constraints) dense_equality_constraints = sparse_to_dense_constraints( d=bounds.shape[-1], constraints=constraints ) else: dense_equality_constraints = None normalized_bounds = torch.zeros_like(bounds) normalized_bounds[1, :] = 1.0 polytope_sampler = HitAndRunPolytopeSampler( bounds=normalized_bounds, inequality_constraints=dense_inequality_constraints, equality_constraints=dense_equality_constraints, n_burnin=n_burnin, ) samples = polytope_sampler.draw(n=n * thinning, seed=seed)[::thinning] return bounds[0] + samples * (bounds[1] - bounds[0]) def sparse_to_dense_constraints( d: int, constraints: List[Tuple[Tensor, Tensor, float]], ) -> Tuple[Tensor, Tensor]: r"""Convert parameter constraints from a sparse format into a dense format. This method converts sparse triples of the form (indices, coefficients, rhs) to constraints of the form Ax >= b or Ax = b. Args: d: The input dimension. inequality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an (in)equality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs` or `\sum_i (X[indices[i]] * coefficients[i]) = rhs`. Returns: A two-element tuple containing: - A: A `n_constraints x d`-dim tensor of coefficients. - b: A `n_constraints x 1`-dim tensor of right hand sides. 
""" _t = constraints[0][1] A = torch.zeros(len(constraints), d, dtype=_t.dtype, device=_t.device) b = torch.zeros(len(constraints), 1, dtype=_t.dtype, device=_t.device) for i, (indices, coefficients, rhs) in enumerate(constraints): A[i, indices.long()] = coefficients b[i] = rhs return A, b def optimize_posterior_samples( paths: SamplePath, bounds: Tensor, candidates: Optional[Tensor] = None, raw_samples: Optional[int] = 1024, num_restarts: int = 20, maximize: bool = True, **kwargs: Any, ) -> Tuple[Tensor, Tensor]: r"""Cheaply maximizes posterior samples by random querying followed by vanilla gradient descent on the best num_restarts points. Args: paths: Random Fourier Feature-based sample paths from the GP bounds: The bounds on the search space. candidates: A priori good candidates (typically previous design points) which acts as extra initial guesses for the optimization routine. raw_samples: The number of samples with which to query the samples initially. num_restarts: The number of points selected for gradient-based optimization. maximize: Boolean indicating whether to maimize or minimize Returns: A two-element tuple containing: - X_opt: A `num_optima x [batch_size] x d`-dim tensor of optimal inputs x*. - f_opt: A `num_optima x [batch_size] x 1`-dim tensor of optimal outputs f*. """ if maximize: def path_func(x): return paths(x) else: def path_func(x): return -paths(x) candidate_set = unnormalize( SobolEngine(dimension=bounds.shape[1], scramble=True).draw(raw_samples), bounds ) # queries all samples on all candidates - output shape # raw_samples * num_optima * num_models candidate_queries = path_func(candidate_set) argtop_k = torch.topk(candidate_queries, num_restarts, dim=-1).indices X_top_k = candidate_set[argtop_k, :] # to avoid circular import, the import occurs here from botorch.generation.gen import gen_candidates_torch X_top_k, f_top_k = gen_candidates_torch( X_top_k, path_func, lower_bounds=bounds[0], upper_bounds=bounds[1], **kwargs ) f_opt, arg_opt = f_top_k.max(dim=-1, keepdim=True) # For each sample (and possibly for every model in the batch of models), this # retrieves the argmax. We flatten, pick out the indices and then reshape to # the original batch shapes (so instead of pickig out the argmax of a # (3, 7, num_restarts, D)) along the num_restarts dim, we pick it out of a # (21 , num_restarts, D) final_shape = candidate_queries.shape[:-1] X_opt = X_top_k.reshape(final_shape.numel(), num_restarts, -1)[ torch.arange(final_shape.numel()), arg_opt.flatten() ].reshape(*final_shape, -1) if not maximize: f_opt = -f_opt return X_opt, f_opt
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Helpers for handling input or outcome constraints. """ from __future__ import annotations from functools import partial from typing import Callable, List, Optional, Tuple import torch from torch import Tensor def get_outcome_constraint_transforms( outcome_constraints: Optional[Tuple[Tensor, Tensor]] ) -> Optional[List[Callable[[Tensor], Tensor]]]: r"""Create outcome constraint callables from outcome constraint tensors. Args: outcome_constraints: A tuple of `(A, b)`. For `k` outcome constraints and `m` outputs at `f(x)``, `A` is `k x m` and `b` is `k x 1` such that `A f(x) <= b`. Returns: A list of callables, each mapping a Tensor of size `b x q x m` to a tensor of size `b x q`, where `m` is the number of outputs of the model. Negative values imply feasibility. The callables support broadcasting (e.g. for calling on a tensor of shape `mc_samples x b x q x m`). Example: >>> # constrain `f(x)[0] <= 0` >>> A = torch.tensor([[1., 0.]]) >>> b = torch.tensor([[0.]]) >>> outcome_constraints = get_outcome_constraint_transforms((A, b)) """ if outcome_constraints is None: return None A, b = outcome_constraints def _oc(a: Tensor, rhs: Tensor, Y: Tensor) -> Tensor: r"""Evaluate constraints. Note: einsum multiples Y by a and sums over the `m`-dimension. Einsum is ~2x faster than using `(Y * a.view(1, 1, -1)).sum(dim-1)`. Args: a: `m`-dim tensor of weights for the outcomes rhs: Singleton tensor containing the outcome constraint value Y: `... x b x q x m` tensor of function values Returns: A `... x b x q`-dim tensor where negative values imply feasibility """ lhs = torch.einsum("...m, m", [Y, a]) return lhs - rhs return [partial(_oc, a, rhs) for a, rhs in zip(A, b)] def get_monotonicity_constraints( d: int, descending: bool = False, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, ) -> Tuple[Tensor, Tensor]: """Returns a system of linear inequalities `(A, b)` that generically encodes order constraints on the elements of a `d`-dimsensional space, i.e. `A @ x < b` implies `x[i] < x[i + 1]` for a `d`-dimensional vector `x`. Idea: Could encode `A` as sparse matrix, if it is supported well. Args: d: Dimensionality of the constraint space, i.e. number of monotonic parameters. descending: If True, forces the elements of a vector to be monotonically de- creasing and be monotonically increasing otherwise. dtype: The dtype of the returned Tensors. device: The device of the returned Tensors. Returns: A tuple of Tensors `(A, b)` representing the monotonicity constraint as a system of linear inequalities `A @ x < b`. `A` is `(d - 1) x d`-dimensional and `b` is `(d - 1) x 1`-dimensional. """ A = torch.zeros(d - 1, d, dtype=dtype, device=device) idx = torch.arange(d - 1) A[idx, idx] = 1 A[idx, idx + 1] = -1 b = torch.zeros(d - 1, 1, dtype=dtype, device=device) if descending: A = -A return A, b
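# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library API): the system
# `(A, b)` returned by `get_monotonicity_constraints` satisfies `A @ x < b`
# exactly when the entries of `x` are strictly increasing. The test vectors
# below are arbitrary assumptions used only to show the expected shapes.
if __name__ == "__main__":
    A, b = get_monotonicity_constraints(d=4, dtype=torch.double)
    print(A.shape, b.shape)  # torch.Size([3, 4]) torch.Size([3, 1])
    x_increasing = torch.tensor([[0.1], [0.2], [0.5], [0.9]], dtype=torch.double)
    x_decreasing = x_increasing.flip(0)
    print((A @ x_increasing < b).all().item())  # True
    print((A @ x_decreasing < b).all().item())  # False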
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Special implementations of mathematical functions that solve numerical issues of naive implementations. .. [Maechler2012accurate] M. Mächler. Accurately Computing log (1 - exp (-| a|)) Assessed by the Rmpfr package. Technical report, 2012. """ from __future__ import annotations import math from typing import Callable, Tuple, Union import torch from botorch.exceptions import UnsupportedError from botorch.utils.constants import get_constants_like from torch import finfo, Tensor from torch.nn.functional import softplus _log2 = math.log(2) _inv_sqrt_3 = math.sqrt(1 / 3) TAU = 1.0 # default temperature parameter for smooth approximations to non-linearities ALPHA = 2.0 # default alpha parameter for the asymptotic power decay of _pareto # Unary ops def exp(x: Tensor, **kwargs) -> Tensor: info = finfo(x.dtype) maxexp = get_constants_like(math.log(info.max) - 1e-4, x) return torch.exp(x.clip(max=maxexp), **kwargs) def log(x: Tensor, **kwargs) -> Tensor: info = finfo(x.dtype) return torch.log(x.clip(min=info.tiny), **kwargs) # Binary ops def add(a: Tensor, b: Tensor, **kwargs) -> Tensor: _0 = get_constants_like(0, a) case = a.isinf() & b.isinf() & (a != b) return torch.where(case, _0, a + b) def sub(a: Tensor, b: Tensor) -> Tensor: _0 = get_constants_like(0, a) case = (a.isinf() & b.isinf()) & (a == b) return torch.where(case, _0, a - b) def div(a: Tensor, b: Tensor) -> Tensor: _0, _1 = get_constants_like(values=(0, 1), ref=a) case = ((a == _0) & (b == _0)) | (a.isinf() & a.isinf()) return torch.where(case, torch.where(a != b, -_1, _1), a / torch.where(case, _1, b)) def mul(a: Tensor, b: Tensor) -> Tensor: _0 = get_constants_like(values=0, ref=a) case = (a.isinf() & (b == _0)) | (b.isinf() & (a == _0)) return torch.where(case, _0, a * torch.where(case, _0, b)) def log1mexp(x: Tensor) -> Tensor: """Numerically accurate evaluation of log(1 - exp(x)) for x < 0. See [Maechler2012accurate]_ for details. """ log2 = get_constants_like(values=_log2, ref=x) is_small = -log2 < x # x < 0 return torch.where( is_small, (-x.expm1()).log(), (-x.exp()).log1p(), ) def log1pexp(x: Tensor) -> Tensor: """Numerically accurate evaluation of log(1 + exp(x)). See [Maechler2012accurate]_ for details. """ mask = x <= 18 return torch.where( mask, (lambda z: z.exp().log1p())(x.masked_fill(~mask, 0)), (lambda z: z + (-z).exp())(x.masked_fill(mask, 0)), ) def logexpit(X: Tensor) -> Tensor: """Computes the logarithm of the expit (a.k.a. sigmoid) function.""" return -log1pexp(-X) def logplusexp(a: Tensor, b: Tensor) -> Tensor: """Computes log(exp(a) + exp(b)) similar to logsumexp.""" ab = torch.stack(torch.broadcast_tensors(a, b), dim=-1) return logsumexp(ab, dim=-1) def logdiffexp(log_a: Tensor, log_b: Tensor) -> Tensor: """Computes log(b - a) accurately given log(a) and log(b). Assumes, log_b > log_a, i.e. b > a > 0. Args: log_a (Tensor): The logarithm of a, assumed to be less than log_b. log_b (Tensor): The logarithm of b, assumed to be larger than log_a. Returns: A Tensor of values corresponding to log(b - a). 
""" log_a, log_b = torch.broadcast_tensors(log_a, log_b) is_inf = log_b == -torch.inf # implies log_a == -torch.inf by assumption return log_b + log1mexp(log_a - log_b.masked_fill(is_inf, 0.0)) def logsumexp( x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False ) -> Tensor: """Version of logsumexp that has a well-behaved backward pass when x contains infinities. In particular, the gradient of the standard torch version becomes NaN 1) for any element that is positive infinity, and 2) for any slice that only contains negative infinities. This version returns a gradient of 1 for any positive infinities in case 1, and for all elements of the slice in case 2, in agreement with the asymptotic behavior of the function. Args: x: The Tensor to which to apply `logsumexp`. dim: An integer or a tuple of integers, representing the dimensions to reduce. keepdim: Whether to keep the reduced dimensions. Defaults to False. Returns: A Tensor representing the log of the summed exponentials of `x`. """ return _inf_max_helper(torch.logsumexp, x=x, dim=dim, keepdim=keepdim) def _inf_max_helper( max_fun: Callable[[Tensor], Tensor], x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool, ) -> Tensor: """Helper function that generalizes the treatment of infinities for approximations to the maximum operator, i.e., `max(X, dim, keepdim)`. At the point of writing of this function, it is used to define `logsumexp` and `fatmax`. Args: max_fun: The function that is used to smoothly penalize the difference of an element to the true maximum. x: The Tensor on which to compute the smooth approximation to the maximum. dim: The dimension(s) to reduce over. keepdim: Whether to keep the reduced dimension. Defaults to False. Returns: The Tensor representing the smooth approximation to the maximum over the specified dimensions. """ M = x.amax(dim=dim, keepdim=True) is_inf_max = torch.logical_and(*torch.broadcast_tensors(M.isinf(), x == M)) has_inf_max = _any(is_inf_max, dim=dim, keepdim=True) y_inf = x.masked_fill(~is_inf_max, 0.0) M_no_inf = M.masked_fill(M.isinf(), 0.0) y_no_inf = x.masked_fill(has_inf_max, 0.0) - M_no_inf res = torch.where( has_inf_max, y_inf.sum(dim=dim, keepdim=True), M_no_inf + max_fun(y_no_inf, dim=dim, keepdim=True), ) return res if keepdim else res.squeeze(dim) def _any(x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False) -> Tensor: """Extension of torch.any, which supports reducing over tuples of dimensions. Args: x: The Tensor to reduce over. dim: An integer or a tuple of integers, representing the dimensions to reduce. keepdim: Whether to keep the reduced dimensions. Defaults to False. Returns: The Tensor corresponding to `any` over the specified dimensions. """ if isinstance(dim, Tuple): for d in dim: x = x.any(dim=d, keepdim=True) else: x = x.any(dim, keepdim=True) return x if keepdim else x.squeeze(dim) def logmeanexp( X: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False ) -> Tensor: """Computes `log(mean(exp(X), dim=dim, keepdim=keepdim))`. Args: X: Values of which to compute the logmeanexp. dim: The dimension(s) over which to compute the mean. keepdim: If True, keeps the reduced dimensions. Returns: A Tensor of values corresponding to `log(mean(exp(X), dim=dim))`. 
""" n = X.shape[dim] if isinstance(dim, int) else math.prod(X.shape[i] for i in dim) return logsumexp(X, dim=dim, keepdim=keepdim) - math.log(n) def log_softplus(x: Tensor, tau: Union[float, Tensor] = TAU) -> Tensor: """Computes the logarithm of the softplus function with high numerical accuracy. Args: x: Input tensor, should have single or double precision floats. tau: Decreasing tau increases the tightness of the approximation to ReLU. Non-negative and defaults to 1.0. Returns: Tensor corresponding to `log(softplus(x))`. """ check_dtype_float32_or_float64(x) tau = torch.as_tensor(tau, dtype=x.dtype, device=x.device) # cutoff chosen to achieve accuracy to machine epsilon upper = 16 if x.dtype == torch.float32 else 32 lower = -15 if x.dtype == torch.float32 else -35 mask = x / tau > lower return torch.where( mask, softplus(x.masked_fill(~mask, lower), beta=(1 / tau), threshold=upper).log(), x / tau + tau.log(), ) def smooth_amax( X: Tensor, dim: Union[int, Tuple[int, ...]] = -1, keepdim: bool = False, tau: Union[float, Tensor] = 1.0, ) -> Tensor: """Computes a smooth approximation to `max(X, dim=dim)`, i.e the maximum value of `X` over dimension `dim`, using the logarithm of the `l_(1/tau)` norm of `exp(X)`. Note that when `X = log(U)` is the *logarithm* of an acquisition utility `U`, `logsumexp(log(U) / tau) * tau = log(sum(U^(1/tau))^tau) = log(norm(U, ord=(1/tau))` Args: X: A Tensor from which to compute the smoothed amax. dim: The dimensions to reduce over. keepdim: If True, keeps the reduced dimensions. tau: Temperature parameter controlling the smooth approximation to max operator, becomes tighter as tau goes to 0. Needs to be positive. Returns: A Tensor of smooth approximations to `max(X, dim=dim)`. """ # consider normalizing by log_n = math.log(X.shape[dim]) to reduce error return logsumexp(X / tau, dim=dim, keepdim=keepdim) * tau # ~ X.amax(dim=dim) def check_dtype_float32_or_float64(X: Tensor) -> None: if X.dtype != torch.float32 and X.dtype != torch.float64: raise UnsupportedError( f"Only dtypes float32 and float64 are supported, but received {X.dtype}." ) def log_fatplus(x: Tensor, tau: Union[float, Tensor] = TAU) -> Tensor: """Computes the logarithm of the fat-tailed softplus. NOTE: Separated out in case the complexity of the `log` implementation increases in the future. """ return fatplus(x, tau=tau).log() def fatplus(x: Tensor, tau: Union[float, Tensor] = TAU) -> Tensor: """Computes a fat-tailed approximation to `ReLU(x) = max(x, 0)` by linearly combining a regular softplus function and the density function of a Cauchy distribution. The coefficient `alpha` of the Cauchy density is chosen to guarantee monotonicity and convexity. Args: x: A Tensor on whose values to compute the smoothed function. tau: Temperature parameter controlling the smoothness of the approximation. Returns: A Tensor of values of the fat-tailed softplus. """ def _fatplus(x: Tensor) -> Tensor: alpha = 1e-1 # guarantees monotonicity and convexity (TODO: ref + Lemma 4) return softplus(x) + alpha * cauchy(x) return tau * _fatplus(x / tau) def fatmax( x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False, tau: Union[float, Tensor] = TAU, alpha: float = ALPHA, ) -> Tensor: """Computes a smooth approximation to amax(X, dim=dim) with a fat tail. Args: X: A Tensor from which to compute the smoothed amax. dim: The dimensions to reduce over. keepdim: If True, keeps the reduced dimensions. tau: Temperature parameter controlling the smooth approximation to max operator, becomes tighter as tau goes to 0. 
Needs to be positive. alpha: The exponent of the asymptotic power decay of the approximation. The default value is 2. Higher alpha parameters make the function behave more similarly to the standard logsumexp approximation to the max, so it is recommended to keep this value low or moderate, e.g. < 10. Returns: A Tensor of smooth approximations to `max(X, dim=dim)` with a fat tail. """ def max_fun( x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False ) -> Tensor: return tau * _pareto(-x / tau, alpha=alpha).sum(dim=dim, keepdim=keepdim).log() return _inf_max_helper(max_fun=max_fun, x=x, dim=dim, keepdim=keepdim) def fatmaximum( a: Tensor, b: Tensor, tau: Union[float, Tensor] = TAU, alpha: float = ALPHA ) -> Tensor: """Computes a smooth approximation to torch.maximum(a, b) with a fat tail. Args: a: The first Tensor from which to compute the smoothed component-wise maximum. b: The second Tensor from which to compute the smoothed component-wise maximum. tau: Temperature parameter controlling the smoothness of the approximation. A smaller tau corresponds to a tighter approximation that leads to a sharper objective landscape that might be more difficult to optimize. Returns: A smooth approximation of torch.maximum(a, b). """ return fatmax( torch.stack(torch.broadcast_tensors(a, b), dim=-1), dim=-1, keepdim=False, tau=tau, ) def fatminimum( a: Tensor, b: Tensor, tau: Union[float, Tensor] = TAU, alpha: float = ALPHA ) -> Tensor: """Computes a smooth approximation to torch.minimum(a, b) with a fat tail. Args: a: The first Tensor from which to compute the smoothed component-wise minimum. b: The second Tensor from which to compute the smoothed component-wise minimum. tau: Temperature parameter controlling the smoothness of the approximation. A smaller tau corresponds to a tighter approximation that leads to a sharper objective landscape that might be more difficult to optimize. Returns: A smooth approximation of torch.minimum(a, b). """ return -fatmaximum(-a, -b, tau=tau, alpha=alpha) def log_fatmoid(X: Tensor, tau: Union[float, Tensor] = 1.0) -> Tensor: """Computes the logarithm of the fatmoid. Separated out in case the implementation of the logarithm becomes more complex in the future to ensure numerical stability. """ return fatmoid(X, tau=tau).log() def fatmoid(X: Tensor, tau: Union[float, Tensor] = 1.0) -> Tensor: """Computes a twice continuously differentiable approximation to the Heaviside step function with a fat tail, i.e. `O(1 / x^2)` as `x` goes to -inf. Args: X: A Tensor from which to compute the smoothed step function. tau: Temperature parameter controlling the smoothness of the approximation. Returns: A tensor of fat-tailed approximations to the Heaviside step function. """ X = X / tau m = _inv_sqrt_3 # this defines the inflection point return torch.where( X < 0, 2 / 3 * cauchy(X - m), 1 - 2 / 3 * cauchy(X + m), ) def cauchy(x: Tensor) -> Tensor: """Computes a Lorentzian, i.e. an un-normalized Cauchy density function.""" return 1 / (1 + x.square()) def _pareto(x: Tensor, alpha: float, check: bool = True) -> Tensor: """Computes a rational polynomial that is 1) monotonically decreasing for `x > 0`, 2) is equal to 1 at `x = 0`, 3) has a first and second derivative of 1 at `x = 0`, and 4) has an asymptotic decay of `O(1 / x^alpha)`. These properties make it possible to use the function to define a smooth and fat-tailed approximation to the maximum, which enables better gradient propagation, see `fatmax` for details. Args: x: The input tensor. 
alpha: The exponent of the asymptotic decay. check: Whether to check if the input tensor only contains non-negative values. Returns: The tensor corresponding to the rational polynomial with the stated properties. """ if check and (x < 0).any(): raise ValueError("Argument `x` must be non-negative.") alpha = alpha / 2 # so that alpha stands for the power decay # choosing beta_0, beta_1 so that first and second derivatives at x = 0 are 1. beta_1 = 2 * alpha beta_0 = alpha * beta_1 return (beta_0 / (beta_0 + beta_1 * x + x.square())).pow(alpha) def sigmoid(X: Tensor, log: bool = False, fat: bool = False) -> Tensor: """A sigmoid function with an optional fat tail and evaluation in log space for better numerical behavior. Notably, the fat-tailed sigmoid can be used to remedy numerical underflow problems in the value and gradient of the canonical sigmoid. Args: X: The Tensor on which to evaluate the sigmoid. log: Toggles the evaluation of the log sigmoid. fat: Toggles the evaluation of the fat-tailed sigmoid. Returns: A Tensor of (log-)sigmoid values. """ Y = log_fatmoid(X) if fat else logexpit(X) return Y if log else Y.exp()
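# ---------------------------------------------------------------------------
# Illustrative numerical sketch (not part of the library API): for `x` very
# close to zero from below, the naive `log(1 - exp(x))` loses all precision
# because `exp(x)` rounds to 1, whereas `log1mexp` stays accurate. The input
# value is an arbitrary assumption chosen to make the failure mode visible.
if __name__ == "__main__":
    x = torch.tensor(-1e-20, dtype=torch.double)
    naive = torch.log(1 - torch.exp(x))  # exp(x) rounds to 1.0 -> log(0) = -inf
    stable = log1mexp(x)                 # ~ log(1e-20) ~ -46.05
    print(naive.item(), stable.item())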
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations import math import warnings from abc import abstractproperty from collections import OrderedDict from typing import Any, List, Optional, Tuple, Union from unittest import TestCase import torch from botorch import settings from botorch.acquisition.objective import PosteriorTransform from botorch.exceptions.warnings import BotorchTensorDimensionWarning, InputDataWarning from botorch.models.model import FantasizeMixin, Model from botorch.posteriors.gpytorch import GPyTorchPosterior from botorch.posteriors.posterior import Posterior from botorch.sampling.base import MCSampler from botorch.sampling.get_sampler import GetSampler from botorch.sampling.stochastic_samplers import StochasticSampler from botorch.test_functions.base import BaseTestProblem from botorch.utils.transforms import unnormalize from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal from linear_operator.operators import AddedDiagLinearOperator, DiagLinearOperator from torch import Tensor EMPTY_SIZE = torch.Size() class BotorchTestCase(TestCase): r"""Basic test case for Botorch. This 1. sets the default device to be `torch.device("cpu")` 2. ensures that no warnings are suppressed by default. """ device = torch.device("cpu") def setUp(self, suppress_input_warnings: bool = True) -> None: warnings.resetwarnings() settings.debug._set_state(False) warnings.simplefilter("always", append=True) if suppress_input_warnings: warnings.filterwarnings( "ignore", message="The model inputs are of type", category=UserWarning, ) warnings.filterwarnings( "ignore", message="Non-strict enforcement of botorch tensor conventions.", category=BotorchTensorDimensionWarning, ) warnings.filterwarnings( "ignore", message="Input data is not standardized.", category=InputDataWarning, ) warnings.filterwarnings( "ignore", message="Input data is not contained to the unit cube.", category=InputDataWarning, ) def assertAllClose( self, input: torch.Tensor, other: Union[torch.Tensor, float], rtol: float = 1e-05, atol: float = 1e-08, equal_nan: bool = False, ) -> None: r""" Calls torch.testing.assert_close, using the signature and default behavior of torch.allclose. Example output: AssertionError: Scalars are not close! Absolute difference: 1.0000034868717194 (up to 0.0001 allowed) Relative difference: 0.8348668001940709 (up to 1e-05 allowed) """ # Why not just use the signature and behavior of `torch.testing.assert_close`? # Because we used `torch.allclose` for testing in the past, and the two don't # behave exactly the same. In particular, `assert_close` requires both `atol` # and `rtol` to be set if either one is. 
torch.testing.assert_close( input, other, rtol=rtol, atol=atol, equal_nan=equal_nan, ) class BaseTestProblemTestCaseMixIn: def test_forward(self): for dtype in (torch.float, torch.double): for batch_shape in (torch.Size(), torch.Size([2]), torch.Size([2, 3])): for f in self.functions: f.to(device=self.device, dtype=dtype) X = torch.rand(*batch_shape, f.dim, device=self.device, dtype=dtype) X = f.bounds[0] + X * (f.bounds[1] - f.bounds[0]) res = f(X) f(X, noise=False) self.assertEqual(res.dtype, dtype) self.assertEqual(res.device.type, self.device.type) tail_shape = torch.Size( [f.num_objectives] if f.num_objectives > 1 else [] ) self.assertEqual(res.shape, batch_shape + tail_shape) @abstractproperty def functions(self) -> List[BaseTestProblem]: # The functions that should be tested. Typically defined as a class # attribute on the test case subclassing this class. pass # pragma: no cover class SyntheticTestFunctionTestCaseMixin: def test_optimal_value(self): for dtype in (torch.float, torch.double): for f in self.functions: f.to(device=self.device, dtype=dtype) try: optval = f.optimal_value optval_exp = -f._optimal_value if f.negate else f._optimal_value self.assertEqual(optval, optval_exp) except NotImplementedError: pass def test_optimizer(self): for dtype in (torch.float, torch.double): for f in self.functions: f.to(device=self.device, dtype=dtype) try: Xopt = f.optimizers.clone().requires_grad_(True) except NotImplementedError: continue res = f(Xopt, noise=False) # if we have optimizers, we have the optimal value res_exp = torch.full_like(res, f.optimal_value) self.assertAllClose(res, res_exp, atol=1e-3, rtol=1e-3) if f._check_grad_at_opt: grad = torch.autograd.grad([*res], Xopt)[0] self.assertLess(grad.abs().max().item(), 1e-3) class MultiObjectiveTestProblemTestCaseMixin: def test_attributes(self): for f in self.functions: self.assertTrue(hasattr(f, "dim")) self.assertTrue(hasattr(f, "num_objectives")) self.assertEqual(f.bounds.shape, torch.Size([2, f.dim])) def test_max_hv(self): for dtype in (torch.float, torch.double): for f in self.functions: f.to(device=self.device, dtype=dtype) if not hasattr(f, "_max_hv"): with self.assertRaises(NotImplementedError): f.max_hv else: self.assertEqual(f.max_hv, f._max_hv) def test_ref_point(self): for dtype in (torch.float, torch.double): for f in self.functions: f.to(dtype=dtype, device=self.device) self.assertTrue( torch.allclose( f.ref_point, torch.tensor(f._ref_point, dtype=dtype, device=self.device), ) ) class ConstrainedTestProblemTestCaseMixin: def test_num_constraints(self): for f in self.functions: self.assertTrue(hasattr(f, "num_constraints")) def test_evaluate_slack_true(self): for dtype in (torch.float, torch.double): for f in self.functions: f.to(device=self.device, dtype=dtype) X = unnormalize( torch.rand(1, f.dim, device=self.device, dtype=dtype), bounds=f.bounds, ) slack = f.evaluate_slack_true(X) self.assertEqual(slack.shape, torch.Size([1, f.num_constraints])) class MockPosterior(Posterior): r"""Mock object that implements dummy methods and feeds through specified outputs""" def __init__( self, mean=None, variance=None, samples=None, base_shape=None, batch_range=None ) -> None: r""" Args: mean: The mean of the posterior. variance: The variance of the posterior. samples: Samples to return from `rsample`, unless `base_samples` is provided. base_shape: If given, this is returned as `base_sample_shape`, and also used as the base of the `_extended_shape`. batch_range: If given, this is returned as `batch_range`. Defaults to (0, -2). 
""" self._mean = mean self._variance = variance self._samples = samples self._base_shape = base_shape self._batch_range = batch_range or (0, -2) @property def device(self) -> torch.device: for t in (self._mean, self._variance, self._samples): if torch.is_tensor(t): return t.device return torch.device("cpu") @property def dtype(self) -> torch.dtype: for t in (self._mean, self._variance, self._samples): if torch.is_tensor(t): return t.dtype return torch.float32 @property def batch_shape(self) -> torch.Size: for t in (self._mean, self._variance, self._samples): if torch.is_tensor(t): return t.shape[:-2] raise NotImplementedError # pragma: no cover def _extended_shape( self, sample_shape: torch.Size = torch.Size() # noqa: B008 ) -> torch.Size: return sample_shape + self.base_sample_shape @property def base_sample_shape(self) -> torch.Size: if self._base_shape is not None: return self._base_shape if self._samples is not None: return self._samples.shape if self._mean is not None: return self._mean.shape if self._variance is not None: return self._variance.shape return torch.Size() @property def batch_range(self) -> Tuple[int, int]: return self._batch_range @property def mean(self): return self._mean @property def variance(self): return self._variance def rsample( self, sample_shape: Optional[torch.Size] = None, base_samples: Optional[Tensor] = None, ) -> Tensor: """Mock sample by repeating self._samples. If base_samples is provided, do a shape check but return the same mock samples.""" if sample_shape is None: sample_shape = torch.Size() if sample_shape is not None and base_samples is not None: # check the base_samples shape is consistent with the sample_shape if base_samples.shape[: len(sample_shape)] != sample_shape: raise RuntimeError("sample_shape disagrees with base_samples.") return self._samples.expand(sample_shape + self._samples.shape) def rsample_from_base_samples( self, sample_shape: torch.Size, base_samples: Tensor, ) -> Tensor: return self.rsample(sample_shape, base_samples) @GetSampler.register(MockPosterior) def _get_sampler_mock( posterior: MockPosterior, sample_shape: torch.Size, **kwargs: Any ) -> MCSampler: r"""Get the dummy `StochasticSampler` for `MockPosterior`.""" return StochasticSampler(sample_shape=sample_shape, **kwargs) class MockModel(Model, FantasizeMixin): r"""Mock object that implements dummy methods and feeds through specified outputs""" def __init__(self, posterior: MockPosterior) -> None: # noqa: D107 super(Model, self).__init__() self._posterior = posterior def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, posterior_transform: Optional[PosteriorTransform] = None, observation_noise: bool = False, ) -> MockPosterior: if posterior_transform is not None: return posterior_transform(self._posterior) else: return self._posterior @property def num_outputs(self) -> int: extended_shape = self._posterior._extended_shape() return extended_shape[-1] if len(extended_shape) > 0 else 0 @property def batch_shape(self) -> torch.Size: extended_shape = self._posterior._extended_shape() return extended_shape[:-2] def state_dict(self) -> None: pass def load_state_dict( self, state_dict: Optional[OrderedDict] = None, strict: bool = False ) -> None: pass class MockAcquisitionFunction: r"""Mock acquisition function object that implements dummy methods.""" def __init__(self): # noqa: D107 self.model = None self.X_pending = None def __call__(self, X): return X[..., 0].max(dim=-1).values def set_X_pending(self, X_pending: Optional[Tensor] = None): self.X_pending 
= X_pending def _get_random_data( batch_shape: torch.Size, m: int, d: int = 1, n: int = 10, **tkwargs ) -> Tuple[Tensor, Tensor]: r"""Generate random data for testing purposes. Args: batch_shape: The batch shape of the data. m: The number of outputs. d: The dimension of the input. n: The number of data points. tkwargs: `device` and `dtype` tensor constructor kwargs. Returns: A tuple `(train_X, train_Y)` with randomly generated training data. """ rep_shape = batch_shape + torch.Size([1, 1]) train_x = torch.stack( [torch.linspace(0, 0.95, n, **tkwargs) for _ in range(d)], dim=-1 ) train_x = train_x + 0.05 * torch.rand_like(train_x).repeat(rep_shape) train_y = torch.sin(train_x[..., :1] * (2 * math.pi)) train_y = train_y + 0.2 * torch.randn(n, m, **tkwargs).repeat(rep_shape) return train_x, train_y def _get_test_posterior( batch_shape: torch.Size, q: int = 1, m: int = 1, interleaved: bool = True, lazy: bool = False, independent: bool = False, **tkwargs, ) -> GPyTorchPosterior: r"""Generate a Posterior for testing purposes. Args: batch_shape: The batch shape of the data. q: The number of candidates m: The number of outputs. interleaved: A boolean indicating the format of the MultitaskMultivariateNormal lazy: A boolean indicating if the posterior should be lazy independent: A boolean indicating whether the outputs are independent tkwargs: `device` and `dtype` tensor constructor kwargs. """ if independent: mvns = [] for _ in range(m): mean = torch.rand(*batch_shape, q, **tkwargs) a = torch.rand(*batch_shape, q, q, **tkwargs) covar = a @ a.transpose(-1, -2) flat_diag = torch.rand(*batch_shape, q, **tkwargs) covar = covar + torch.diag_embed(flat_diag) mvns.append(MultivariateNormal(mean, covar)) mtmvn = MultitaskMultivariateNormal.from_independent_mvns(mvns) else: mean = torch.rand(*batch_shape, q, m, **tkwargs) a = torch.rand(*batch_shape, q * m, q * m, **tkwargs) covar = a @ a.transpose(-1, -2) flat_diag = torch.rand(*batch_shape, q * m, **tkwargs) if lazy: covar = AddedDiagLinearOperator(covar, DiagLinearOperator(flat_diag)) else: covar = covar + torch.diag_embed(flat_diag) mtmvn = MultitaskMultivariateNormal(mean, covar, interleaved=interleaved) return GPyTorchPosterior(mtmvn) def _get_max_violation_of_bounds(samples: torch.Tensor, bounds: torch.Tensor) -> float: """ The maximum value by which samples lie outside bounds. A negative value indicates that all samples lie within bounds. Args: samples: An `n x q x d` - dimension tensor, as might be returned from `sample_q_batches_from_polytope`. bounds: A `2 x d` tensor of lower and upper bounds for each column. """ n, q, d = samples.shape samples = samples.reshape((n * q, d)) lower = samples.min(0).values upper = samples.max(0).values lower_dist = (bounds[0, :] - lower).max().item() upper_dist = (upper - bounds[1, :]).max().item() return max(lower_dist, upper_dist) def _get_max_violation_of_constraints( samples: torch.Tensor, constraints: Optional[List[Tuple[Tensor, Tensor, float]]], equality: bool, ) -> float: r""" Amount by which equality constraints are not obeyed. Args: samples: An `n x q x d` - dimension tensor, as might be returned from `sample_q_batches_from_polytope`. constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) = rhs`, or `>=` if `equality` is False. equality: Whether these are equality constraints (not inequality). 
""" n, q, d = samples.shape max_error = 0 if constraints is not None: for (ind, coef, rhs) in constraints: if ind.ndim == 1: constr = samples[:, :, ind] @ coef else: constr = samples[:, ind[:, 0], ind[:, 1]] @ coef if equality: error = (constr - rhs).abs().max() else: error = (rhs - constr).max() max_error = max(max_error, error) return max_error
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # NOTE: To be removed once (if) https://github.com/pytorch/pytorch/pull/37385 lands from __future__ import annotations import collections from collections import OrderedDict import torch from torch.nn import Module class BufferDict(Module): r"""Holds buffers in a dictionary. BufferDict can be indexed like a regular Python dictionary, but buffers it contains are properly registered, and will be visible by all Module methods. :class:`~torch.nn.BufferDict` is an **ordered** dictionary that respects * the order of insertion, and * in :meth:`~torch.nn.BufferDict.update`, the order of the merged ``OrderedDict`` or another :class:`~torch.nn.BufferDict` (the argument to :meth:`~torch.nn.BufferDict.update`). Note that :meth:`~torch.nn.BufferDict.update` with other unordered mapping types (e.g., Python's plain ``dict``) does not preserve the order of the merged mapping. Args: buffers (iterable, optional): a mapping (dictionary) of (string : :class:`~torch.Tensor`) or an iterable of key-value pairs of type (string, :class:`~torch.Tensor`) Example:: class MyModule(nn.Module): def __init__(self): super(MyModule, self).__init__() self.buffers = nn.BufferDict({ 'left': torch.randn(5, 10), 'right': torch.randn(5, 10) }) def forward(self, x, choice): x = self.buffers[choice].mm(x) return x """ def __init__(self, buffers=None): r""" Args: buffers: A mapping (dictionary) from string to :class:`~torch.Tensor`, or an iterable of key-value pairs of type (string, :class:`~torch.Tensor`). """ super(BufferDict, self).__init__() if buffers is not None: self.update(buffers) def __getitem__(self, key): return self._buffers[key] def __setitem__(self, key, buffer): self.register_buffer(key, buffer) def __delitem__(self, key): del self._buffers[key] def __len__(self): return len(self._buffers) def __iter__(self): return iter(self._buffers.keys()) def __contains__(self, key): return key in self._buffers def clear(self): """Remove all items from the BufferDict.""" self._buffers.clear() def pop(self, key): r"""Remove key from the BufferDict and return its buffer. Args: key (string): key to pop from the BufferDict """ v = self[key] del self[key] return v def keys(self): r"""Return an iterable of the BufferDict keys.""" return self._buffers.keys() def items(self): r"""Return an iterable of the BufferDict key/value pairs.""" return self._buffers.items() def values(self): r"""Return an iterable of the BufferDict values.""" return self._buffers.values() def update(self, buffers): r"""Update the :class:`~torch.nn.BufferDict` with the key-value pairs from a mapping or an iterable, overwriting existing keys. .. note:: If :attr:`buffers` is an ``OrderedDict``, a :class:`~torch.nn.BufferDict`, or an iterable of key-value pairs, the order of new elements in it is preserved. 
Args: buffers (iterable): a mapping (dictionary) from string to :class:`~torch.Tensor`, or an iterable of key-value pairs of type (string, :class:`~torch.Tensor`) """ if not isinstance(buffers, collections.abc.Iterable): raise TypeError( "BuffersDict.update should be called with an " "iterable of key/value pairs, but got " + type(buffers).__name__ ) if isinstance(buffers, collections.abc.Mapping): if isinstance(buffers, (OrderedDict, BufferDict)): for key, buffer in buffers.items(): self[key] = buffer else: for key, buffer in sorted(buffers.items()): self[key] = buffer else: for j, p in enumerate(buffers): if not isinstance(p, collections.abc.Iterable): raise TypeError( "BufferDict update sequence element " "#" + str(j) + " should be Iterable; is" + type(p).__name__ ) if not len(p) == 2: raise ValueError( "BufferDict update sequence element " "#" + str(j) + " has length " + str(len(p)) + "; 2 is required" ) self[p[0]] = p[1] def extra_repr(self): child_lines = [] for k, p in self._buffers.items(): size_str = "x".join(str(size) for size in p.size()) device_str = "" if not p.is_cuda else " (GPU {})".format(p.get_device()) parastr = "Buffer containing: [{} of size {}{}]".format( torch.typename(p), size_str, device_str ) child_lines.append(" (" + k + "): " + parastr) tmpstr = "\n".join(child_lines) return tmpstr def __call__(self, input): raise RuntimeError("BufferDict should not be called.")
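# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library API): buffers held in a
# BufferDict are registered on the module, so they appear in `state_dict()`
# and move with `.to(...)` like any other registered buffer. The keys and
# shapes below are arbitrary assumptions.
if __name__ == "__main__":
    bd = BufferDict({"left": torch.randn(5, 10), "right": torch.randn(5, 10)})
    print(list(bd.keys()))          # ['left', 'right']
    print(bd["left"].shape)         # torch.Size([5, 10])
    print(sorted(bd.state_dict()))  # ['left', 'right']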
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from copy import deepcopy from math import pi from typing import List, Optional import torch from botorch.models.converter import batched_to_model_list from botorch.models.deterministic import GenericDeterministicModel from botorch.models.model import Model, ModelList from botorch.models.model_list_gp_regression import ModelListGP from botorch.models.multitask import MultiTaskGP from botorch.utils.sampling import manual_seed from botorch.utils.transforms import is_fully_bayesian from gpytorch.kernels import Kernel, MaternKernel, RBFKernel, ScaleKernel from linear_operator.utils.cholesky import psd_safe_cholesky from torch import Tensor from torch.distributions import MultivariateNormal from torch.nn import Module class GPDraw(Module): r"""Convenience wrapper for sampling a function from a GP prior. This wrapper implicitly defines the GP sample as a self-updating function by keeping track of the evaluated points and respective base samples used during the evaluation. This does not yet support multi-output models. """ def __init__(self, model: Model, seed: Optional[int] = None) -> None: r"""Construct a GP function sampler. Args: model: The Model defining the GP prior. """ super().__init__() self._model = deepcopy(model) self._num_outputs = self._model.num_outputs seed = torch.tensor( seed if seed is not None else torch.randint(0, 1000000, (1,)).item() ) self.register_buffer("_seed", seed) @property def Xs(self) -> Tensor: """A `(batch_shape) x n_eval x d`-dim tensor of locations at which the GP was evaluated (or `None` if the sample has never been evaluated). """ try: return self._Xs except AttributeError: return None @property def Ys(self) -> Tensor: """A `(batch_shape) x n_eval x d`-dim tensor of associated function values (or `None` if the sample has never been evaluated). """ try: return self._Ys except AttributeError: return None def forward(self, X: Tensor) -> Tensor: r"""Evaluate the GP sample function at a set of points X. Args: X: A `batch_shape x n x d`-dim tensor of points Returns: The value of the GP sample at the `n` points. """ if self.Xs is None: X_eval = X # first time, no previous evaluation points else: X_eval = torch.cat([self.Xs, X], dim=-2) posterior = self._model.posterior(X=X_eval) base_sample_shape = posterior.base_sample_shape if self._num_outputs == 1: # Needed to comply with base sample shape assumptions made here. base_sample_shape = base_sample_shape + (1,) # re-use old samples bs_shape = base_sample_shape[:-2] + X.shape[-2:-1] + base_sample_shape[-1:] with manual_seed(seed=int(self._seed)): new_base_samples = torch.randn(bs_shape, device=X.device, dtype=X.dtype) seed = self._seed + 1 if self.Xs is None: base_samples = new_base_samples else: base_samples = torch.cat([self._base_samples, new_base_samples], dim=-2) # TODO: Deduplicate repeated evaluations / deal with numerical degeneracies # that could lead to non-deterministic evaluations. We could use SVD- or # eigendecomposition-based sampling, but we probably don't want to use this # by default for performance reasonse. 
Ys = posterior.rsample_from_base_samples( torch.Size(), base_samples=base_samples.squeeze(-1) if self._num_outputs == 1 else base_samples, ) self.register_buffer("_Xs", X_eval) self.register_buffer("_Ys", Ys) self.register_buffer("_seed", seed) self.register_buffer("_base_samples", base_samples) return self.Ys[..., -(X.size(-2)) :, :] class RandomFourierFeatures(Module): """A class that represents Random Fourier Features.""" def __init__( self, kernel: Kernel, input_dim: int, num_rff_features: int, sample_shape: Optional[torch.Size] = None, ) -> None: r"""Initialize RandomFourierFeatures. Args: kernel: The GP kernel. input_dim: The input dimension to the GP kernel. num_rff_features: The number of Fourier features. sample_shape: The shape of a single sample. For a single-element `torch.Size` object, this is simply the number of RFF draws. """ if not isinstance(kernel, ScaleKernel): base_kernel = kernel outputscale = torch.tensor( 1.0, dtype=base_kernel.lengthscale.dtype, device=base_kernel.lengthscale.device, ) else: base_kernel = kernel.base_kernel outputscale = kernel.outputscale.detach().clone() if not isinstance(base_kernel, (MaternKernel, RBFKernel)): raise NotImplementedError("Only Matern and RBF kernels are supported.") super().__init__() self.kernel_batch_shape = base_kernel.batch_shape self.register_buffer("outputscale", outputscale) self.register_buffer("lengthscale", base_kernel.lengthscale.detach().clone()) self.sample_shape = torch.Size() if sample_shape is None else sample_shape self.register_buffer( "weights", self._get_weights( base_kernel=base_kernel, input_dim=input_dim, num_rff_features=num_rff_features, sample_shape=self.sample_shape, ), ) # initialize uniformly in [0, 2 * pi] self.register_buffer( "bias", 2 * pi * torch.rand( *self.sample_shape, *self.kernel_batch_shape, num_rff_features, dtype=base_kernel.lengthscale.dtype, device=base_kernel.lengthscale.device, ), ) def _get_weights( self, base_kernel: Kernel, input_dim: int, num_rff_features: int, sample_shape: Optional[torch.Size] = None, ) -> Tensor: r"""Sample weights for RFF. Args: kernel: The GP base kernel. input_dim: The input dimension to the GP kernel. num_rff_features: The number of Fourier features. sample_shape: The sample shape of weights. Returns: A tensor of weights with shape `(*sample_shape, *kernel_batch_shape, input_dim, num_rff_features)`. """ sample_shape = torch.Size() if sample_shape is None else sample_shape weights = torch.randn( *sample_shape, *self.kernel_batch_shape, input_dim, num_rff_features, dtype=base_kernel.lengthscale.dtype, device=base_kernel.lengthscale.device, ) if isinstance(base_kernel, MaternKernel): gamma_dist = torch.distributions.Gamma(base_kernel.nu, base_kernel.nu) gamma_samples = gamma_dist.sample(torch.Size([1, num_rff_features])).to( weights ) weights = torch.rsqrt(gamma_samples) * weights return weights def forward(self, X: Tensor) -> Tensor: """Get Fourier basis features for the provided inputs. Note that the right-most subset of the batch shape of `X` should be `(sample_shape) x (kernel_batch_shape)` if using either the `sample_shape` argument or a batched kernel. In other words, `X` should be of shape `(added_batch_shape) x (sample_shape) x (kernel_batch_shape) x n x input_dim`, where parantheses denote that the given batch shape can be empty. `X` can always be a tensor of shape `n x input_dim`, in which case broadcasting will take care of the batch shape. This will raise a `ValueError` if the batch shapes are not compatible. 
Args: X: Input tensor of shape `(batch_shape) x n x input_dim`. Returns: A Tensor of shape `(batch_shape) x n x rff`. If `X` does not have a `batch_shape`, the output `batch_shape` will be `(sample_shape) x (kernel_batch_shape)`. """ try: self._check_forward_X_shape_compatibility(X) except ValueError as e: # A workaround to support batched SAAS models. # TODO: Support batch evaluation of multi-sample RFFs as well. # Multi-sample RFFs have input batch as the 0-th dimension, # which is different than other posteriors which would have # the sample shape as the 0-th dimension. if len(self.kernel_batch_shape) == 1: X = X.unsqueeze(-3) self._check_forward_X_shape_compatibility(X) else: raise e # X is of shape (additional_batch_shape) x (sample_shape) # x (kernel_batch_shape) x n x d. # Weights is of shape (sample_shape) x (kernel_batch_shape) x d x num_rff. X_scaled = torch.div(X, self.lengthscale) batchmatmul = X_scaled @ self.weights bias = self.bias # Bias is of shape (sample_shape) x (kernel_batch_shape) x num_rff. # Batchmatmul is of shape (additional_batch_shape) x (sample_shape) # x (kernel_batch_shape) x n x num_rff. outputs = torch.cos(batchmatmul + bias.unsqueeze(-2)) # Make sure we divide at the correct (i.e., kernel's) batch dimension. if len(self.kernel_batch_shape) > 0: outputscale = self.outputscale.view(*self.kernel_batch_shape, 1, 1) else: outputscale = self.outputscale return torch.sqrt(2.0 * outputscale / self.weights.shape[-1]) * outputs def _check_forward_X_shape_compatibility(self, X: Tensor) -> None: r"""Check that the `batch_shape` of X, if any, is compatible with the `sample_shape` & `kernel_batch_shape`. """ full_batch_shape_X = X.shape[:-2] len_full_batch_shape_X = len(full_batch_shape_X) if len_full_batch_shape_X == 0: # Non-batched X. return expected_batch_shape = self.sample_shape + self.kernel_batch_shape # Check if they're broadcastable. for b_idx in range(min(len(expected_batch_shape), len_full_batch_shape_X)): neg_idx = -b_idx - 1 if ( full_batch_shape_X[neg_idx] != expected_batch_shape[neg_idx] and full_batch_shape_X[neg_idx] != 1 ): raise ValueError( "the batch shape of X is expected to follow the pattern: " f"`... x {tuple(expected_batch_shape)}`" ) def get_deterministic_model_multi_samples( weights: List[Tensor], bases: List[RandomFourierFeatures], ) -> GenericDeterministicModel: """ Get a batched deterministic model that batch evaluates `n_samples` function samples. This supports multi-output models as well. Args: weights: A list of weights with `num_outputs` elements. Each weight is of shape `(batch_shape_input) x n_samples x num_rff_features`, where `(batch_shape_input)` is the batch shape of the inputs used to obtain the posterior weights. bases: A list of `RandomFourierFeatures` with `num_outputs` elements. Each basis has a sample shape of `n_samples`. n_samples: The number of function samples. Returns: A batched `GenericDeterministicModel`s that batch evaluates `n_samples` function samples. 
""" eval_callables = [ get_eval_gp_sample_callable(w=w, basis=basis) for w, basis in zip(weights, bases) ] def evaluate_gps_X(X): return torch.cat([_f(X) for _f in eval_callables], dim=-1) return GenericDeterministicModel( f=evaluate_gps_X, num_outputs=len(weights), ) def get_eval_gp_sample_callable(w: Tensor, basis: RandomFourierFeatures) -> Tensor: def _f(X): return basis(X) @ w.unsqueeze(-1) return _f def get_deterministic_model( weights: List[Tensor], bases: List[RandomFourierFeatures] ) -> GenericDeterministicModel: """Get a deterministic model using the provided weights and bases for each output. Args: weights: A list of weights with `m` elements. bases: A list of `RandomFourierFeatures` with `m` elements. Returns: A deterministic model. """ callables = [ get_eval_gp_sample_callable(w=w, basis=basis) for w, basis in zip(weights, bases) ] def evaluate_gp_sample(X): return torch.cat([c(X) for c in callables], dim=-1) return GenericDeterministicModel(f=evaluate_gp_sample, num_outputs=len(weights)) def get_deterministic_model_list( weights: List[Tensor], bases: List[RandomFourierFeatures], ) -> ModelList: """Get a deterministic model list using the provided weights and bases for each output. Args: weights: A list of weights with `m` elements. bases: A list of `RandomFourierFeatures` with `m` elements. Returns: A deterministic model. """ samples = [] for w, basis in zip(weights, bases): sample = GenericDeterministicModel( f=get_eval_gp_sample_callable(w=w, basis=basis), num_outputs=1, ) samples.append(sample) return ModelList(*samples) def get_weights_posterior(X: Tensor, y: Tensor, sigma_sq: Tensor) -> MultivariateNormal: r"""Sample bayesian linear regression weights. Args: X: A tensor of inputs with shape `(*batch_shape, n num_rff_features)`. y: A tensor of outcomes with shape `(*batch_shape, n)`. sigma_sq: The likelihood noise variance. This should be a tensor with shape `kernel_batch_shape, 1, 1` if using a batched kernel. Otherwise, it should be a scalar tensor. Returns: The posterior distribution over the weights. """ with torch.no_grad(): X_trans = X.transpose(-2, -1) A = X_trans @ X + sigma_sq * torch.eye( X.shape[-1], dtype=X.dtype, device=X.device ) # mean is given by: m = S @ x.T @ y, where S = A_inv # compute inverse of A using solves # covariance is A_inv * sigma L_A = psd_safe_cholesky(A) # solve L_A @ u = I Iw = torch.eye(L_A.shape[-1], dtype=X.dtype, device=X.device) u = torch.linalg.solve_triangular(L_A, Iw, upper=False) # solve L_A^T @ S = u A_inv = torch.linalg.solve_triangular(L_A.transpose(-2, -1), u, upper=True) m = (A_inv @ X_trans @ y.unsqueeze(-1)).squeeze(-1) L = psd_safe_cholesky(A_inv * sigma_sq) return MultivariateNormal(loc=m, scale_tril=L) def get_gp_samples( model: Model, num_outputs: int, n_samples: int, num_rff_features: int = 512 ) -> GenericDeterministicModel: r"""Sample functions from GP posterior using RFFs. The returned `GenericDeterministicModel` effectively wraps `num_outputs` models, each of which has a batch shape of `n_samples`. Refer `get_deterministic_model_multi_samples` for more details. NOTE: If using input / outcome transforms, the gp samples must be accessed via the `gp_sample.posterior(X)` call. Otherwise, `gp_sample(X)` will produce bogus values that do not agree with the underlying `model`. It is also highly recommended to use outcome transforms to standardize the input data, since the gp samples do not work well when training outcomes are not zero-mean. Args: model: The model. num_outputs: The number of outputs. 
n_samples: The number of functions to be sampled IID. num_rff_features: The number of random Fourier features. Returns: A `GenericDeterministicModel` that evaluates `n_samples` sampled functions. If `n_samples > 1`, this will be a batched model. """ # Get transforms from the model. intf = getattr(model, "input_transform", None) octf = getattr(model, "outcome_transform", None) # Remove the outcome transform - leads to buggy draws. if octf is not None: del model.outcome_transform if intf is not None: del model.input_transform if num_outputs > 1: if not isinstance(model, ModelListGP): models = batched_to_model_list(model).models else: models = model.models else: models = [model] if isinstance(models[0], MultiTaskGP): raise NotImplementedError weights = [] bases = [] octfs = [] intfs = [] for m in range(num_outputs): train_X = models[m].train_inputs[0] train_targets = models[m].train_targets _model = models[m] _intf = getattr(_model, "input_transform", None) _octf = getattr(_model, "outcome_transform", None) # Remove the outcome transform - leads to buggy draws. if _octf is not None: del _model.outcome_transform octfs.append(_octf) intfs.append(_intf) # Get random Fourier features. # sample_shape controls the number of iid functions. basis = RandomFourierFeatures( kernel=_model.covar_module, input_dim=train_X.shape[-1], num_rff_features=num_rff_features, sample_shape=torch.Size([n_samples] if n_samples > 1 else []), ) bases.append(basis) phi_X = basis(train_X) # Sample weights from bayesian linear model. # weights.sample().shape == (n_samples, batch_shape_input, num_rff_features) sigma_sq = _model.likelihood.noise.mean(dim=-1, keepdim=True) if len(basis.kernel_batch_shape) > 0: sigma_sq = sigma_sq.unsqueeze(-2) mvn = get_weights_posterior( X=phi_X, y=train_targets, sigma_sq=sigma_sq, ) weights.append(mvn.sample()) # TODO: Ideally support RFFs for multi-outputs instead of having to # generate a basis for each output serially. if any(_octf is not None for _octf in octfs) or any( _intf is not None for _intf in intfs ): base_gp_samples = get_deterministic_model_list( weights=weights, bases=bases, ) for m in range(len(weights)): _octf = octfs[m] _intf = intfs[m] if _octf is not None: base_gp_samples.models[m].outcome_transform = _octf models[m].outcome_transform = _octf if _intf is not None: base_gp_samples.models[m].input_transform = _intf base_gp_samples.is_fully_bayesian = is_fully_bayesian(model=model) return base_gp_samples elif n_samples > 1: base_gp_samples = get_deterministic_model_multi_samples( weights=weights, bases=bases, ) else: base_gp_samples = get_deterministic_model( weights=weights, bases=bases, ) # Load the transforms on the models. if intf is not None: base_gp_samples.input_transform = intf model.input_transform = intf if octf is not None: base_gp_samples.outcome_transform = octf model.outcome_transform = octf base_gp_samples.is_fully_bayesian = is_fully_bayesian(model=model) return base_gp_samples
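# ---------------------------------------------------------------------------
# Illustrative usage sketch for `get_gp_samples` (not part of the module API;
# `SingleTaskGP` / `Standardize` and all shapes below are assumptions made for
# the sake of the example):
#
#   >>> train_X = torch.rand(20, 3, dtype=torch.double)
#   >>> train_Y = torch.randn(20, 1, dtype=torch.double)
#   >>> gp = SingleTaskGP(train_X, train_Y, outcome_transform=Standardize(m=1))
#   >>> # (typically one would fit the GP hyperparameters before sampling)
#   >>> gp_samples = get_gp_samples(
#   ...     model=gp, num_outputs=1, n_samples=16, num_rff_features=512
#   ... )
#   >>> # NOTE: with transforms attached, evaluate via `.posterior(X)`:
#   >>> vals = gp_samples.posterior(torch.rand(5, 3, dtype=torch.double)).mean
# ---------------------------------------------------------------------------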
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from typing import Callable, List, Optional, Tuple import botorch.models.model as model import torch from botorch.logging import _get_logger from botorch.utils.sampling import manual_seed from torch import Tensor logger = _get_logger(name="Feasibility") def get_feasible_samples( samples: Tensor, inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, ) -> Tuple[Tensor, float]: r""" Checks which of the samples satisfy all of the inequality constraints. Args: samples: A `sample size x d` size tensor of feature samples, where d is a feature dimension. inequality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`. Returns: 2-element tuple containing - Samples satisfying the linear constraints. - Estimated proportion of samples satisfying the linear constraints. """ if inequality_constraints is None: return samples, 1.0 nsamples = samples.size(0) feasible = torch.ones(nsamples, device=samples.device, dtype=torch.bool) for (indices, coefficients, rhs) in inequality_constraints: lhs = samples.index_select(1, indices) @ coefficients.to(dtype=samples.dtype) feasible &= lhs >= rhs feasible_samples = samples[feasible] p_linear = feasible_samples.size(0) / nsamples return feasible_samples, p_linear def get_outcome_feasibility_probability( model: model.Model, X: Tensor, outcome_constraints: List[Callable[[Tensor], Tensor]], threshold: float = 0.1, nsample_outcome: int = 1000, seed: Optional[int] = None, ) -> float: r""" Monte Carlo estimate of the feasible volume with respect to the outcome constraints. Args: model: The model used for sampling the posterior. X: A tensor of dimension `batch-shape x 1 x d`, where d is feature dimension. outcome_constraints: A list of callables, each mapping a Tensor of dimension `sample_shape x batch-shape x q x m` to a Tensor of dimension `sample_shape x batch-shape x q`, where negative values imply feasibility. threshold: A lower limit for the probability of posterior samples feasibility. nsample_outcome: The number of samples from the model posterior. seed: The seed for the posterior sampler. If omitted, use a random seed. Returns: Estimated proportion of features for which posterior samples satisfy given outcome constraints with probability above or equal to the given threshold. 
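    Example:
        A hedged sketch; `gp_model` is assumed to be a fitted single-output
        BoTorch model and the values below are illustrative:

        >>> X = torch.rand(128, 1, 3)  # 128 candidate points, d=3
        >>> constraints = [lambda Y: Y[..., 0] - 0.5]  # feasible iff output <= 0.5
        >>> p = get_outcome_feasibility_probability(
        ...     model=gp_model, X=X, outcome_constraints=constraints, threshold=0.2
        ... )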
""" if outcome_constraints is None: return 1.0 from botorch.sampling.get_sampler import get_sampler seed = seed if seed is not None else torch.randint(0, 1000000, (1,)).item() posterior = model.posterior(X) # posterior consists of batch_shape marginals sampler = get_sampler( posterior=posterior, sample_shape=torch.Size([nsample_outcome]), seed=seed ) # size of samples: (num outcome samples, batch_shape, 1, outcome dim) samples = sampler(posterior) feasible = torch.ones(samples.shape[:-1], dtype=torch.bool, device=samples.device) # a sample passes if each constraint applied to the sample # produces a non-negative tensor for oc in outcome_constraints: # broadcasted evaluation of the outcome constraints feasible &= oc(samples) <= 0 # proportion of feasibile samples for each of the elements of X # summation is done across feasible outcome samples p_feas = feasible.sum(0).float() / feasible.size(0) # proportion of features leading to the posterior outcome # satisfying the given outcome constraints # with at probability above a given threshold p_outcome = (p_feas >= threshold).sum().item() / X.size(0) return p_outcome def estimate_feasible_volume( bounds: Tensor, model: model.Model, outcome_constraints: List[Callable[[Tensor], Tensor]], inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, nsample_feature: int = 1000, nsample_outcome: int = 1000, threshold: float = 0.1, verbose: bool = False, seed: Optional[int] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Tuple[float, float]: r""" Monte Carlo estimate of the feasible volume with respect to feature constraints and outcome constraints. Args: bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`. model: The model used for sampling the outcomes. outcome_constraints: A list of callables, each mapping a Tensor of dimension `sample_shape x batch-shape x q x m` to a Tensor of dimension `sample_shape x batch-shape x q`, where negative values imply feasibility. inequality constraints: A list of tuples (indices, coefficients, rhs), with each tuple encoding an inequality constraint of the form `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`. nsample_feature: The number of feature samples satisfying the bounds. nsample_outcome: The number of outcome samples from the model posterior. threshold: A lower limit for the probability of outcome feasibility seed: The seed for both feature and outcome samplers. If omitted, use a random seed. verbose: An indicator for whether to log the results. Returns: 2-element tuple containing: - Estimated proportion of volume in feature space that is feasible wrt the bounds and the inequality constraints (linear). - Estimated proportion of feasible features for which posterior samples (outcome) satisfies the outcome constraints with probability above the given threshold. 
""" seed = seed if seed is not None else torch.randint(0, 1000000, (1,)).item() with manual_seed(seed=seed): box_samples = bounds[0] + (bounds[1] - bounds[0]) * torch.rand( (nsample_feature, bounds.size(1)), dtype=dtype, device=device ) features, p_feature = get_feasible_samples( samples=box_samples, inequality_constraints=inequality_constraints ) # each new feature sample is a row p_outcome = get_outcome_feasibility_probability( model=model, X=features.unsqueeze(-2), outcome_constraints=outcome_constraints, threshold=threshold, nsample_outcome=nsample_outcome, seed=seed, ) if verbose: # pragma: no cover logger.info( "Proportion of volume that satisfies linear constraints: " + f"{p_feature:.4e}" ) if p_feature <= 0.01: logger.warning( "The proportion of satisfying volume is very low and may lead to " + "very long run times. Consider making your constraints less " + "restrictive." ) logger.info( "Proportion of linear-feasible volume that also satisfies each " + f"outcome constraint with probability > 0.1: {p_outcome:.4e}" ) if p_outcome <= 0.001: logger.warning( "The proportion of volume that also satisfies the outcome constraint " + "is very low. Consider making your parameter and outcome constraints " + "less restrictive." ) return p_feature, p_outcome
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from typing import Optional, Sequence import torch from botorch.utils.probability.lin_ess import LinearEllipticalSliceSampler from botorch.utils.probability.mvnxpb import MVNXPB from botorch.utils.probability.utils import get_constants_like from torch import Tensor from torch.distributions.multivariate_normal import MultivariateNormal class TruncatedMultivariateNormal(MultivariateNormal): def __init__( self, loc: Tensor, covariance_matrix: Optional[Tensor] = None, precision_matrix: Optional[Tensor] = None, scale_tril: Optional[Tensor] = None, bounds: Tensor = None, solver: Optional[MVNXPB] = None, sampler: Optional[LinearEllipticalSliceSampler] = None, validate_args: Optional[bool] = None, ): r"""Initializes an instance of a TruncatedMultivariateNormal distribution. Let `x ~ N(0, K)` be an `n`-dimensional Gaussian random vector. This class represents the distribution of the truncated Multivariate normal random vector `x | a <= x <= b`. Args: loc: A mean vector for the distribution, `batch_shape x event_shape`. covariance_matrix: Covariance matrix distribution parameter. precision_matrix: Inverse covariance matrix distribution parameter. scale_tril: Lower triangular, square-root covariance matrix distribution parameter. bounds: A `batch_shape x event_shape x 2` tensor of strictly increasing bounds for `x` so that `bounds[..., 0] < bounds[..., 1]` everywhere. solver: A pre-solved MVNXPB instance used to approximate the log partition. sampler: A LinearEllipticalSliceSampler instance used for sample generation. validate_args: Optional argument to super().__init__. """ if bounds is None: raise SyntaxError("Missing required argument `bounds`.") elif bounds.shape[-1] != 2: raise ValueError( f"Expected bounds.shape[-1] to be 2 but bounds shape is {bounds.shape}" ) elif torch.gt(*bounds.unbind(dim=-1)).any(): raise ValueError("`bounds` must be strictly increasing along dim=-1.") super().__init__( loc=loc, covariance_matrix=covariance_matrix, precision_matrix=precision_matrix, scale_tril=scale_tril, validate_args=validate_args, ) self.bounds = bounds self._solver = solver self._sampler = sampler def log_prob(self, value: Tensor) -> Tensor: r"""Approximates the true log probability.""" neg_inf = get_constants_like(-float("inf"), value) inbounds = torch.logical_and( (self.bounds[..., 0] < value).all(-1), (self.bounds[..., 1] > value).all(-1), ) if inbounds.any(): return torch.where( inbounds, super().log_prob(value) - self.log_partition, neg_inf, ) return torch.full(value.shape[: -len(self.event_shape)], neg_inf) def rsample(self, sample_shape: torch.Size = torch.Size()) -> Tensor: # noqa: B008 r"""Draw samples from the Truncated Multivariate Normal. Args: sample_shape: The shape of the samples. Returns: The (sample_shape x batch_shape x event_shape) tensor of samples. 
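        Example:
            A minimal sketch (a 2-dim standard Normal truncated to the unit
            box; the values are illustrative):

            >>> tmvn = TruncatedMultivariateNormal(
            ...     loc=torch.zeros(2),
            ...     covariance_matrix=torch.eye(2),
            ...     bounds=torch.tensor([[0.0, 1.0], [0.0, 1.0]]),
            ... )
            >>> draws = tmvn.rsample(torch.Size([64]))  # 64 x 2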
""" num_samples = sample_shape.numel() if sample_shape else 1 return self.loc + self.sampler.draw(n=num_samples).view(*sample_shape, -1) @property def log_partition(self) -> Tensor: return self.solver.log_prob @property def solver(self) -> MVNXPB: if self._solver is None: self._solver = MVNXPB( covariance_matrix=self.covariance_matrix, bounds=self.bounds - self.loc.unsqueeze(-1), ) self._solver.solve() return self._solver @property def sampler(self) -> LinearEllipticalSliceSampler: if self._sampler is None: eye = torch.eye( self.scale_tril.shape[-1], dtype=self.scale_tril.dtype, device=self.scale_tril.device, ) A = torch.concat([-eye, eye]) b = torch.concat( [ self.loc - self.bounds[..., 0], self.bounds[..., 1] - self.loc, ], dim=-1, ).unsqueeze(-1) self._sampler = LinearEllipticalSliceSampler( inequality_constraints=(A, b), covariance_root=self.scale_tril, ) return self._sampler def expand( self, batch_shape: Sequence[int], _instance: TruncatedMultivariateNormal = None ) -> TruncatedMultivariateNormal: new = self._get_checked_instance(TruncatedMultivariateNormal, _instance) super().expand(batch_shape=batch_shape, _instance=new) new.bounds = self.bounds.expand(*new.batch_shape, *self.event_shape, 2) new._sampler = None # does not implement `expand` new._solver = ( None if self._solver is None else self._solver.expand(*batch_shape) ) return new def __repr__(self) -> str: return super().__repr__()[:-1] + f", bounds: {self.bounds.shape})"
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from inspect import getmembers from typing import Optional, Sequence, Union import torch from botorch.utils.probability.linalg import augment_cholesky, block_matrix_concat from botorch.utils.probability.mvnxpb import MVNXPB from botorch.utils.probability.truncated_multivariate_normal import ( TruncatedMultivariateNormal, ) from linear_operator.operators import LinearOperator from linear_operator.utils.errors import NotPSDError from torch import Tensor from torch.distributions.multivariate_normal import Distribution, MultivariateNormal from torch.distributions.utils import lazy_property from torch.nn.functional import pad class UnifiedSkewNormal(Distribution): arg_constraints = {} def __init__( self, trunc: TruncatedMultivariateNormal, gauss: MultivariateNormal, cross_covariance_matrix: Union[Tensor, LinearOperator], validate_args: Optional[bool] = None, ): r"""Unified Skew Normal distribution of `Y | a < X < b` for jointly Gaussian random vectors `X ∈ R^m` and `Y ∈ R^n`. Batch shapes `trunc.batch_shape` and `gauss.batch_shape` must be broadcastable. Care should be taken when choosing `trunc.batch_shape`. When `trunc` is of lower batch dimensionality than `gauss`, the user should consider expanding `trunc` to hasten `UnifiedSkewNormal.log_prob`. In these cases, it is suggested that the user invoke `trunc.solver` before calling `trunc.expand` to avoid paying for multiple, identical solves. Args: trunc: Distribution of `Z = (X | a < X < b) ∈ R^m`. gauss: Distribution of `Y ∈ R^n`. cross_covariance_matrix: Cross-covariance `Cov(X, Y) ∈ R^{m x n}`. validate_args: Optional argument to super().__init__. """ if len(trunc.event_shape) != len(gauss.event_shape): raise ValueError( f"{len(trunc.event_shape)}-dimensional `trunc` incompatible with" f"{len(gauss.event_shape)}-dimensional `gauss`." ) # LinearOperator currently doesn't support torch.linalg.solve_triangular, # so for the time being, we cast the operator to dense here if isinstance(cross_covariance_matrix, LinearOperator): cross_covariance_matrix = cross_covariance_matrix.to_dense() try: batch_shape = torch.broadcast_shapes(trunc.batch_shape, gauss.batch_shape) except RuntimeError as e: raise ValueError("Incompatible batch shapes") from e super().__init__( batch_shape=batch_shape, event_shape=gauss.event_shape, validate_args=validate_args, ) self.trunc = trunc self.gauss = gauss self.cross_covariance_matrix = cross_covariance_matrix if self._validate_args: try: # calling _orthogonalized_gauss first makes the following call # _orthogonalized_gauss.scale_tril which is used by self.rsample self._orthogonalized_gauss self.scale_tril except Exception as e: # error could be thrown by linalg.augment_cholesky (NotPSDError) # or torch.linalg.cholesky (with "positive-definite" in the message) if ( isinstance(e, NotPSDError) or "positive-definite" in str(e) or "PositiveDefinite" in str(e) ): e = ValueError( "UnifiedSkewNormal is only well-defined for positive definite" " joint covariance matrices." 
) raise e def log_prob(self, value: Tensor) -> Tensor: r"""Computes the log probability `ln p(Y = value | a < X < b)`.""" event_ndim = len(self.event_shape) if value.ndim < event_ndim or value.shape[-event_ndim:] != self.event_shape: raise ValueError( f"`value` with shape {value.shape} does not comply with the instance's" f"`event_shape` of {self.event_shape}." ) # Iterate with a fixed batch size to keep memory overhead in check i = 0 pre_shape = value.shape[: -len(self.event_shape) - len(self.batch_shape)] batch_size = self.batch_shape.numel() log_probs = torch.empty( pre_shape.numel() * batch_size, device=value.device, dtype=value.dtype ) for batch in value.view(-1, *value.shape[len(pre_shape) :]): log_probs[i : i + batch_size] = self._log_prob(batch).view(-1) i += batch_size return log_probs.view(pre_shape + self.batch_shape) def _log_prob(self, value: Tensor) -> Tensor: r"""Computes the log probability `ln p(Y = value | a < X < b)`.""" # Center by subtracting E[X | Y = value] from `bounds`. bounds = ( self.trunc.bounds - self.trunc.loc.unsqueeze(-1) - self._iKyy_Kyx.transpose(-2, -1) @ (value - self.gauss.loc).unsqueeze(-1) ) # Approximately solve for MVN CDF solver = MVNXPB(covariance_matrix=self._K_schur_Kyy, bounds=bounds) # p(Y = value | a < X < b) = P(a < X < b | Y = value)p(Y = value)/P(a < X < b) return solver.solve() + self.gauss.log_prob(value) - self.trunc.log_partition def rsample(self, sample_shape: torch.Size = torch.Size()) -> Tensor: # noqa: B008 r"""Draw samples from the Unified Skew Normal. Args: sample_shape: The shape of the samples. Returns: The (sample_shape x batch_shape x event_shape) tensor of samples. """ residuals = self._orthogonalized_gauss.rsample(sample_shape=sample_shape) trunc_rvs = self.trunc.rsample(sample_shape=sample_shape) - self.trunc.loc cond_expectations = self.gauss.loc + trunc_rvs @ self._iKxx_Kxy return cond_expectations + residuals def expand( self, batch_shape: Sequence[int], _instance: UnifiedSkewNormal = None ) -> UnifiedSkewNormal: new = self._get_checked_instance(UnifiedSkewNormal, _instance) super(UnifiedSkewNormal, new).__init__( batch_shape=batch_shape, event_shape=self.event_shape, validate_args=False ) new._validate_args = self._validate_args new.gauss = self.gauss.expand(batch_shape=batch_shape) new.trunc = self.trunc.expand(batch_shape=batch_shape) new.cross_covariance_matrix = self.cross_covariance_matrix.expand( batch_shape + self.cross_covariance_matrix.shape[-2:] ) # Expand cached properties for name, _ in getmembers( UnifiedSkewNormal, lambda x: isinstance(x, lazy_property) ): if name not in self.__dict__: continue obj = getattr(self, name) if isinstance(obj, Tensor): base = obj if (obj._base is None) else obj._base new_obj = obj.expand(batch_shape + base.shape) elif isinstance(obj, Distribution): new_obj = obj.expand(batch_shape=batch_shape) else: raise TypeError( f"Type {type(obj)} of UnifiedSkewNormal's lazy property " f"{name} not supported." 
) setattr(new, name, new_obj) return new def __repr__(self) -> str: args_string = ", ".join( ( f"trunc: {self.trunc}", f"gauss: {self.gauss}", f"cross_covariance_matrix: {self.cross_covariance_matrix.shape}", ) ) return self.__class__.__name__ + "(" + args_string + ")" @lazy_property def covariance_matrix(self) -> Tensor: Kxx = self.trunc.covariance_matrix Kxy = self.cross_covariance_matrix Kyy = self.gauss.covariance_matrix return block_matrix_concat(blocks=([Kxx, Kxy], [Kxy.transpose(-2, -1), Kyy])) @lazy_property def scale_tril(self) -> Tensor: Lxx = self.trunc.scale_tril Lyx = self._iLxx_Kxy.transpose(-2, -1) if "_orthogonalized_gauss" in self.__dict__: n = Lyx.shape[-2] Lyy = self._orthogonalized_gauss.scale_tril return block_matrix_concat(blocks=([pad(Lxx, (0, n))], [Lyx, Lyy])) return augment_cholesky(Laa=Lxx, Lba=Lyx, Kbb=self.gauss.covariance_matrix) @lazy_property def _orthogonalized_gauss(self) -> MultivariateNormal: r"""Distribution of `Y ⊥ X = Y - E[Y | X]`, where `Y ~ gauss` and `X ~ untrunc` is the untruncated version of `Z ~ trunc`.""" n = self.gauss.loc.shape[-1] parameters = {"loc": torch.zeros_like(self.gauss.loc)} if "scale_tril" in self.__dict__: parameters["scale_tril"] = self.scale_tril[..., -n:, -n:] else: beta = self._iLxx_Kxy parameters["covariance_matrix"] = ( self.gauss.covariance_matrix - beta.transpose(-1, -2) @ beta ) return MultivariateNormal(**parameters, validate_args=self._validate_args) @lazy_property def _iLyy_Kyx(self) -> Tensor: r"""Cov(Y, Y)^{-1/2}Cov(Y, X)`.""" return torch.linalg.solve_triangular( self.gauss.scale_tril, self.cross_covariance_matrix.transpose(-1, -2), upper=False, ) @lazy_property def _iKyy_Kyx(self) -> Tensor: r"""Cov(Y, Y)^{-1}Cov(Y, X)`.""" return torch.linalg.solve_triangular( self.gauss.scale_tril.transpose(-1, -2), self._iLyy_Kyx, upper=True, ) @lazy_property def _iLxx_Kxy(self) -> Tensor: r"""Cov(X, X)^{-1/2}Cov(X, Y)`.""" return torch.linalg.solve_triangular( self.trunc.scale_tril, self.cross_covariance_matrix, upper=False ) @lazy_property def _iKxx_Kxy(self) -> Tensor: r"""Cov(X, X)^{-1}Cov(X, Y)`.""" return torch.linalg.solve_triangular( self.trunc.scale_tril.transpose(-1, -2), self._iLxx_Kxy, upper=True, ) @lazy_property def _K_schur_Kyy(self) -> Tensor: r"""Cov(X, X) - Cov(X, Y)Cov(Y, Y)^{-1} Cov(Y, X)`.""" beta = self._iLyy_Kyx return self.trunc.covariance_matrix - beta.transpose(-1, -2) @ beta
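# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module API; the covariance values
# below are hypothetical but chosen so the joint matrix is positive definite):
#
#   >>> K = torch.tensor([[1.0, 0.5], [0.5, 1.0]])  # joint covariance of (X, Y)
#   >>> trunc = TruncatedMultivariateNormal(
#   ...     loc=torch.zeros(1),
#   ...     covariance_matrix=K[:1, :1],
#   ...     bounds=torch.tensor([[-1.0, 1.0]]),
#   ... )
#   >>> gauss = MultivariateNormal(torch.zeros(1), K[1:, 1:])
#   >>> usn = UnifiedSkewNormal(
#   ...     trunc=trunc, gauss=gauss, cross_covariance_matrix=K[:1, 1:]
#   ... )
#   >>> samples = usn.rsample(torch.Size([32]))  # 32 x 1
#   >>> logp = usn.log_prob(torch.zeros(1))
# ---------------------------------------------------------------------------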
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Linear Elliptical Slice Sampler. References .. [Gessner2020] A. Gessner, O. Kanjilal, and P. Hennig. Integrals over gaussians under linear domain constraints. AISTATS 2020. This implementation is based (with multiple changes / optimiations) on the following implementations based on the algorithm in [Gessner2020]_: - https://github.com/alpiges/LinConGauss - https://github.com/wjmaddox/pytorch_ess The implementation here differentiates itself from the original implementations with: 1) Support for fixed feature equality constraints. 2) Support for non-standard Normal distributions. 3) Numerical stability improvements, especially relevant for high-dimensional cases. Notably, this implementation does not rely on an adaptive `delta_theta` parameter in order to determine if two neighboring constraint intersection angles `theta` lead to a change in the feasibility of the sample. This both simplifies the implementation and makes it more robust to numerical imprecisions when two constraint intersection angles are close to each other. """ from __future__ import annotations import math from typing import List, Optional, Tuple, Union import torch from botorch.utils.sampling import PolytopeSampler from torch import Tensor _twopi = 2.0 * math.pi class LinearEllipticalSliceSampler(PolytopeSampler): r"""Linear Elliptical Slice Sampler. Ideas: - Add batch support, broadcasting over parallel chains. - Optimize computations if possible, potentially with torch.compile. - Extend fixed features constraint to general linear equality constraints. """ def __init__( self, inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None, bounds: Optional[Tensor] = None, interior_point: Optional[Tensor] = None, fixed_indices: Optional[Union[List[int], Tensor]] = None, mean: Optional[Tensor] = None, covariance_matrix: Optional[Tensor] = None, covariance_root: Optional[Tensor] = None, check_feasibility: bool = False, burnin: int = 0, thinning: int = 0, ) -> None: r"""Initialize LinearEllipticalSliceSampler. Args: inequality_constraints: Tensors `(A, b)` describing inequality constraints `A @ x <= b`, where `A` is an `n_ineq_con x d`-dim Tensor and `b` is an `n_ineq_con x 1`-dim Tensor, with `n_ineq_con` the number of inequalities and `d` the dimension of the sample space. If omitted, must provide `bounds` instead. bounds: A `2 x d`-dim tensor of box bounds. If omitted, must provide `inequality_constraints` instead. interior_point: A `d x 1`-dim Tensor presenting a point in the (relative) interior of the polytope. If omitted, an interior point is determined automatically by solving a Linear Program. Note: It is crucial that the point lie in the interior of the feasible set (rather than on the boundary), otherwise the sampler will produce invalid samples. fixed_indices: Integer list or `d`-dim Tensor representing the indices of dimensions that are constrained to be fixed to the values specified in the `interior_point`, which is required to be passed in conjunction with `fixed_indices`. mean: The `d x 1`-dim mean of the MVN distribution (if omitted, use zero). covariance_matrix: The `d x d`-dim covariance matrix of the MVN distribution (if omitted, use the identity). covariance_root: A `d x d`-dim root of the covariance matrix such that covariance_root @ covariance_root.T = covariance_matrix. 
NOTE: This matrix is assumed to be lower triangular. check_feasibility: If True, raise an error if the sampling results in an infeasible sample. This creates some overhead and so is switched off by default. burnin: Number of samples to generate upon initialization to warm up the sampler. thinning: Number of samples to skip before returning a sample in `draw`. This sampler samples from a multivariante Normal `N(mean, covariance_matrix)` subject to linear domain constraints `A x <= b` (intersected with box bounds, if provided). """ super().__init__( inequality_constraints=inequality_constraints, # TODO: Support equality constraints? interior_point=interior_point, bounds=bounds, ) tkwargs = {"device": self.x0.device, "dtype": self.x0.dtype} if covariance_matrix is not None and covariance_root is not None: raise ValueError( "Provide either covariance_matrix or covariance_root, not both." ) # can't unpack inequality constraints directly if bounds are passed A, b = self.A, self.b self._Az, self._bz = A, b self._is_fixed, self._not_fixed = None, None if fixed_indices is not None: mean, covariance_matrix = self._fixed_features_initialization( A=A, b=b, interior_point=interior_point, fixed_indices=fixed_indices, mean=mean, covariance_matrix=covariance_matrix, covariance_root=covariance_root, ) self._mean = mean # Have to delay factorization until after fixed features initialization. if covariance_matrix is not None: # implies root is None covariance_root, info = torch.linalg.cholesky_ex(covariance_matrix) not_psd = torch.any(info) if not_psd: raise ValueError( "Covariance matrix is not positive definite. " "Currently only non-degenerate distributions are supported." ) self._covariance_root = covariance_root # Rewrite the constraints as a system that constrains a standard Normal. self._standardization_initialization() # state of the sampler ("current point") self._x = self.x0.clone() self._z = self._transform(self._x) # We will need the following repeatedly, let's allocate them once self._zero = torch.zeros(1, **tkwargs) self._nan = torch.tensor(float("nan"), **tkwargs) self._full_angular_range = torch.tensor([0.0, _twopi], **tkwargs) self.check_feasibility = check_feasibility self._lifetime_samples = 0 if burnin > 0: self.thinning = 0 self.draw(burnin) self.thinning = thinning def _fixed_features_initialization( self, A: Tensor, b: Tensor, interior_point: Optional[Tensor], fixed_indices: Union[List[int], Tensor], mean: Optional[Tensor], covariance_matrix: Optional[Tensor], covariance_root: Optional[Tensor], ) -> Tuple[Optional[Tensor], Optional[Tensor]]: """Modifies the constraint system (A, b) due to fixed indices and assigns the modified constraints system to `self._Az`, `self._bz`. NOTE: Needs to be called prior to `self._standardization_initialization` in the constructor. Returns: Tuple of `mean` and `covariance_matrix` tensors of the non-fixed dimensions. """ if interior_point is None: raise ValueError( "If `fixed_indices` are provided, an interior point must also be " "provided in order to infer feasible values of the fixed features." ) if covariance_root is not None: raise ValueError( "Provide either covariance_root or fixed_indices, not both." 
) d = interior_point.shape[0] is_fixed, not_fixed = get_index_tensors(fixed_indices=fixed_indices, d=d) self._is_fixed = is_fixed self._not_fixed = not_fixed # Transforming constraint system to incorporate fixed features: # A @ x - b = (A[:, fixed] @ x[fixed] + A[:, not fixed] @ x[not fixed]) - b # = A[:, not fixed] @ x[not fixed] - (b - A[:, fixed] @ x[fixed]) # = Az @ z - bz self._Az = A[:, not_fixed] self._bz = b - A[:, is_fixed] @ interior_point[is_fixed] if mean is not None: mean = mean[not_fixed] if covariance_matrix is not None: # subselect active dimensions covariance_matrix = covariance_matrix[ not_fixed.unsqueeze(-1), not_fixed.unsqueeze(0) ] return mean, covariance_matrix def _standardization_initialization(self) -> None: """For non-standard mean and covariance, we're going to rewrite the problem as sampling from a standard normal distribution subject to modified constraints. A @ x - b = A @ (covar_root @ z + mean) - b = (A @ covar_root) @ z - (b - A @ mean) = _Az @ z - _bz NOTE: We need to standardize bz before Az in the following, because it relies on the untransformed Az. We can't simply use A instead because Az might have been subject to the fixed features transformation. """ if self._mean is not None: self._bz = self._bz - self._Az @ self._mean if self._covariance_root is not None: self._Az = self._Az @ self._covariance_root @property def lifetime_samples(self) -> int: """The total number of samples generated by the sampler during its lifetime.""" return self._lifetime_samples def draw(self, n: int = 1) -> Tuple[Tensor, Tensor]: r"""Draw samples. Args: n: The number of samples. Returns: A `n x d`-dim tensor of `n` samples. """ samples = [] for _ in range(n): for _ in range(self.thinning): self.step() samples.append(self.step()) return torch.cat(samples, dim=-1).transpose(-1, -2) def step(self) -> Tensor: r"""Take a step, return the new sample, update the internal state. Returns: A `d x 1`-dim sample from the domain. """ nu = torch.randn_like(self._z) theta = self._draw_angle(nu=nu) z = self._get_cart_coords(nu=nu, theta=theta) self._z[:] = z x = self._untransform(z) self._x[:] = x self._lifetime_samples += 1 if self.check_feasibility and (not self._is_feasible(self._x)): Axmb = self.A @ self._x - self.b violated_indices = Axmb > 0 raise RuntimeError( "Sampling resulted in infeasible point. \n\t- Number " f"of violated constraints: {violated_indices.sum()}." f"\n\t- Magnitude of violations: {Axmb[violated_indices]}" "\n\t- If the error persists, please report this bug on GitHub." ) return x def _draw_angle(self, nu: Tensor) -> Tensor: r"""Draw the rotation angle. Args: nu: A `d x 1`-dim tensor (the "new" direction, drawn from N(0, I)). Returns: A `1`-dim Tensor containing the rotation angle (radians). """ rot_angle, rot_slices = self._find_rotated_intersections(nu) rot_lengths = rot_slices[:, 1] - rot_slices[:, 0] cum_lengths = torch.cumsum(rot_lengths, dim=0) cum_lengths = torch.cat((self._zero, cum_lengths), dim=0) rnd_angle = cum_lengths[-1] * torch.rand( 1, device=cum_lengths.device, dtype=cum_lengths.dtype ) idx = torch.searchsorted(cum_lengths, rnd_angle) - 1 return (rot_slices[idx, 0] + rnd_angle + rot_angle) - cum_lengths[idx] def _get_cart_coords(self, nu: Tensor, theta: Tensor) -> Tensor: r"""Determine location on ellipsoid in cartesian coordinates. Args: nu: A `d x 1`-dim tensor (the "new" direction, drawn from N(0, I)). theta: A `k`-dim tensor of angles. Returns: A `d x k`-dim tensor of samples from the domain in cartesian coordinates. 
""" return self._z * torch.cos(theta) + nu * torch.sin(theta) def _find_rotated_intersections(self, nu: Tensor) -> Tuple[Tensor, Tensor]: r"""Finds rotated intersections. Rotates the intersections by the rotation angle and makes sure that all angles lie in [0, 2*pi]. Args: nu: A `d x 1`-dim tensor (the "new" direction, drawn from N(0, I)). Returns: A two-tuple containing rotation angle (scalar) and a `num_active / 2 x 2`-dim tensor of shifted angles. """ slices = self._find_active_intersections(nu) rot_angle = slices[0] slices = (slices - rot_angle).reshape(-1, 2) # Ensuring that we don't sample within numerical precision of the boundaries # due to resulting instabilities in the constraint satisfaction. eps = 1e-6 if slices.dtype == torch.float32 else 1e-12 eps = torch.tensor(eps, dtype=slices.dtype, device=slices.device) eps = eps.minimum(slices.diff(dim=-1).abs() / 4) slices = slices + torch.cat((eps, -eps), dim=-1) # NOTE: The remainder call relies on the epsilon contraction, since the # remainder of_twopi divided by _twopi is zero, not _twopi. return rot_angle, slices.remainder(_twopi) def _find_active_intersections(self, nu: Tensor) -> Tensor: """ Find angles of those intersections that are at the boundary of the integration domain by adding and subtracting a small angle and evaluating on the ellipse to see if we are on the boundary of the integration domain. Args: nu: A `d x 1`-dim tensor (the "new" direction, drawn from N(0, I)). Returns: A `num_active`-dim tensor containing the angles of active intersection in increasing order so that activation happens in positive direction. If a slice crosses `theta=0`, the first angle is appended at the end of the tensor. Every element of the returned tensor defines a slice for elliptical slice sampling. """ theta = self._find_intersection_angles(nu) theta_active, delta_active = self._active_theta_and_delta( nu=nu, theta=theta, ) if theta_active.numel() == 0: theta_active = self._full_angular_range # TODO: What about `self.ellipse_in_domain = False` in the original code? elif delta_active[0] == -1: # ensuring that the first interval is feasible theta_active = torch.cat((theta_active[1:], theta_active[:1])) return theta_active.view(-1) def _find_intersection_angles(self, nu: Tensor) -> Tensor: """Compute all of the up to 2*n_ineq_con intersections of the ellipse and the linear constraints. For background, see equation (2) in http://proceedings.mlr.press/v108/gessner20a/gessner20a.pdf Args: nu: A `d x 1`-dim tensor (the "new" direction, drawn from N(0, I)). Returns: An `M`-dim tensor, where `M <= 2 * n_ineq_con` (with `M = n_ineq_con` if all intermediate computations yield finite numbers). """ # Compared to the implementation in https://github.com/alpiges/LinConGauss # we need to flip the sign of A b/c the original algorithm considers # A @ x + b >= 0 feasible, whereas we consider A @ x - b <= 0 feasible. 
g1 = -self._Az @ self._z g2 = -self._Az @ nu r = torch.sqrt(g1**2 + g2**2) phi = 2 * torch.atan(g2 / (r + g1)).squeeze() arg = -(self._bz / r).squeeze() # Write NaNs if there is no intersection arg = torch.where(torch.absolute(arg) <= 1, arg, self._nan) # Two solutions per linear constraint, shape of theta: (n_ineq_con, 2) acos_arg = torch.arccos(arg) theta = torch.stack((phi + acos_arg, phi - acos_arg), dim=-1) theta = theta[torch.isfinite(theta)] # shape: `n_ineq_con - num_not_finite` theta = torch.where(theta < 0, theta + _twopi, theta) # in [0, 2*pi] return torch.sort(theta).values def _active_theta_and_delta(self, nu: Tensor, theta: Tensor) -> Tensor: r"""Determine active indices. Args: nu: A `d x 1`-dim tensor (the "new" direction, drawn from N(0, I)). theta: A sorted `M`-dim tensor of intersection angles in [0, 2pi]. Returns: A tuple of Tensors of active constraint intersection angles `theta_active`, and the change in the feasibility of the points on the ellipse on the left and right of the active intersection angles `delta_active`. `delta_active` is is negative if decreasing the angle renders the sample feasible, and positive if increasing the angle renders the sample feasible. """ # In order to determine if an angle that gives rise to an intersection with a # constraint boundary leads to a change in the feasibility of the solution, # we evaluate the constraints on the midpoint of the intersection angles. # This gets rid of the `delta_theta` parameter in the original implementation, # which cannot be set universally since it can be both 1) too large, when # the distance in adjacent intersection angles is small, and 2) too small, # when it approaches the numerical precision limit. # The implementation below solves both problems and gets rid of the parameter. if len(theta) < 2: # if we have no or only a tangential intersection theta_active = torch.tensor([], dtype=theta.dtype, device=theta.device) delta_active = torch.tensor([], dtype=int, device=theta.device) return theta_active, delta_active theta_mid = (theta[:-1] + theta[1:]) / 2 # midpoints of intersection angles last_mid = (theta[:1] + theta[-1:] + _twopi) / 2 last_mid = last_mid.where(last_mid < _twopi, last_mid - _twopi) theta_mid = torch.cat((last_mid, theta_mid, last_mid), dim=0) samples_mid = self._get_cart_coords(nu=nu, theta=theta_mid) delta_feasibility = ( self._is_feasible(samples_mid, transformed=True).to(dtype=int).diff() ) active_indices = delta_feasibility.nonzero() return theta[active_indices], delta_feasibility[active_indices] def _is_feasible(self, points: Tensor, transformed: bool = False) -> Tensor: r"""Returns a Boolean tensor indicating whether the `points` are feasible, i.e. they satisfy `A @ points <= b`, where `(A, b)` are the tensors passed as the `inequality_constraints` to the constructor of the sampler. Args: points: A `d x M`-dim tensor of points. transformed: Wether points are assumed to be transformed by a change of basis, which means feasibility should be computed based on the transformed constraint system (_Az, _bz), instead of (A, b). Returns: An `M`-dim binary tensor where `True` indicates that the associated point is feasible. """ A, b = (self._Az, self._bz) if transformed else (self.A, self.b) return (A @ points <= b).all(dim=0) def _transform(self, x: Tensor) -> Tensor: """Transforms the input so that it is equivalent to a standard Normal variable constrained with the modified system constraints (self._Az, self._bz). Args: x: The input tensor to be transformed, usually `d x 1`-dimensional. 
Returns: A `d x 1`-dimensional tensor of transformed values subject to the modified system of constraints. """ if self._not_fixed is not None: x = x[self._not_fixed] return self._standardize(x) def _untransform(self, z: Tensor) -> Tensor: """The inverse transform of the `_transform`, i.e. maps `z` back to the original space where it is subject to the original constraint system (self.A, self.b). Args: z: The transformed tensor to be un-transformed, usually `d x 1`-dimensional. Returns: A `d x 1`-dimensional tensor of un-transformed values subject to the original system of constraints. """ if self._is_fixed is None: return self._unstandardize(z) else: x = self._x.clone() # _x already contains the fixed values x[self._not_fixed] = self._unstandardize(z) return x def _standardize(self, x: Tensor) -> Tensor: """_transform helper standardizing the input `x`, which is assumed to be a `d x 1`-dim Tensor, or a `len(self._not_fixed) x 1`-dim if there are fixed indices. """ z = x if self._mean is not None: z = z - self._mean if self._covariance_root is not None: z = torch.linalg.solve_triangular(self._covariance_root, z, upper=False) return z def _unstandardize(self, z: Tensor) -> Tensor: """_untransform helper un-standardizing the input `z`, which is assumed to be a `d x 1`-dim Tensor, or a `len(self._not_fixed) x 1`-dim if there are fixed indices. """ x = z if self._covariance_root is not None: x = self._covariance_root @ x if self._mean is not None: x = x + self._mean return x def get_index_tensors( fixed_indices: Union[List[int], Tensor], d: int ) -> Tuple[Tensor, Tensor]: """Converts `fixed_indices` to a `d`-dim integral Tensor that is True at indices that are contained in `fixed_indices` and False otherwise. Args: fixed_indices: A list or Tensoro of integer indices to fix. d: The dimensionality of the Tensors to be indexed. Returns: A Tuple of integral Tensors partitioning [1, d] into indices that are fixed (first tensor) and non-fixed (second tensor). """ is_fixed = torch.as_tensor(fixed_indices) dtype, device = is_fixed.dtype, is_fixed.device dims = torch.arange(d, dtype=dtype, device=device) not_fixed = torch.tensor([i for i in dims if i not in is_fixed]) return is_fixed, not_fixed
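# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module API; the constraint values
# below are hypothetical):
#
#   Sample from a standard bivariate Normal restricted to the unit box
#   [0, 1]^2, written as A @ x <= b with A = [-I; I] and b = [0, 0, 1, 1]^T:
#
#   >>> A = torch.cat([-torch.eye(2), torch.eye(2)])
#   >>> b = torch.tensor([[0.0], [0.0], [1.0], [1.0]])
#   >>> sampler = LinearEllipticalSliceSampler(
#   ...     inequality_constraints=(A, b), burnin=50, thinning=2
#   ... )
#   >>> draws = sampler.draw(n=100)  # 100 x 2 tensor of feasible samples
# ---------------------------------------------------------------------------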
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.utils.probability.bvn import bvn, bvnmom from botorch.utils.probability.lin_ess import LinearEllipticalSliceSampler from botorch.utils.probability.mvnxpb import MVNXPB from botorch.utils.probability.truncated_multivariate_normal import ( TruncatedMultivariateNormal, ) from botorch.utils.probability.unified_skew_normal import UnifiedSkewNormal from botorch.utils.probability.utils import ndtr __all__ = [ "bvn", "bvnmom", "LinearEllipticalSliceSampler", "MVNXPB", "ndtr", "TruncatedMultivariateNormal", "UnifiedSkewNormal", ]
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Bivariate conditioning algorithm for approximating Gaussian probabilities, see [Genz2016numerical]_ and [Trinh2015bivariate]_. .. [Trinh2015bivariate] G. Trinh and A. Genz. Bivariate conditioning approximations for multivariate normal probabilities. Statistics and Computing, 2015. .. [Genz2016numerical] A. Genz and G. Tring. Numerical Computation of Multivariate Normal Probabilities using Bivariate Conditioning. Monte Carlo and Quasi-Monte Carlo Methods, 2016. .. [Gibson1994monte] GJ. Gibson, CA Galsbey, and DA Elston. Monte Carlo evaluation of multivariate normal integrals and sensitivity to variate ordering. Advances in Numerical Methods and Applications. 1994. """ from __future__ import annotations from typing import Any, Optional, TypedDict from warnings import warn import torch from botorch.utils.probability.bvn import bvn, bvnmom from botorch.utils.probability.linalg import ( augment_cholesky, block_matrix_concat, PivotedCholesky, ) from botorch.utils.probability.utils import ( case_dispatcher, get_constants_like, ndtr as Phi, phi, STANDARDIZED_RANGE, swap_along_dim_, ) from botorch.utils.safe_math import log as safe_log, mul as safe_mul from linear_operator.utils.cholesky import psd_safe_cholesky from linear_operator.utils.errors import NotPSDError from torch import LongTensor, Tensor from torch.nn.functional import pad class mvnxpbState(TypedDict): step: int perm: LongTensor bounds: Tensor piv_chol: PivotedCholesky plug_ins: Tensor log_prob: Tensor log_prob_extra: Optional[Tensor] class MVNXPB: r"""An algorithm for approximating Gaussian probabilities `P(X \in bounds)`, where `X ~ N(0, covariance_matrix)`. """ def __init__(self, covariance_matrix: Tensor, bounds: Tensor) -> None: r"""Initializes an MVNXPB instance. Args: covariance_matrix: Covariance matrices of shape `batch_shape x [n, n]`. bounds: Tensor of lower and upper bounds, `batch_shape x [n, 2]`. These bounds are standardized internally and clipped to STANDARDIZED_RANGE. """ *batch_shape, _, n = covariance_matrix.shape device = covariance_matrix.device dtype = covariance_matrix.dtype perm = torch.arange(0, n, device=device).expand(*batch_shape, n).contiguous() # Standardize covariance matrices and bounds var = covariance_matrix.diagonal(dim1=-2, dim2=-1).unsqueeze(-1) std = var.sqrt() istd = var.rsqrt() matrix = istd * covariance_matrix * istd.transpose(-1, -2) # Clip first to avoid differentiating through `istd * inf` bounds = istd * bounds.clip(*(std * lim for lim in STANDARDIZED_RANGE)) # Initialize partial pivoted Cholesky piv_chol = PivotedCholesky( step=0, perm=perm.clone(), diag=std.squeeze(-1).clone(), tril=matrix.tril(), ) self.step = 0 self.perm = perm self.bounds = bounds self.piv_chol = piv_chol self.plug_ins = torch.full( batch_shape + [n], float("nan"), device=device, dtype=dtype ) self.log_prob = torch.zeros(batch_shape, device=device, dtype=dtype) self.log_prob_extra: Optional[Tensor] = None @classmethod def build( cls, step: int, perm: Tensor, bounds: Tensor, piv_chol: PivotedCholesky, plug_ins: Tensor, log_prob: Tensor, log_prob_extra: Optional[Tensor] = None, ) -> MVNXPB: r"""Creates an MVNXPB instance from raw arguments. Unlike MVNXPB.__init__, this methods does not preprocess or copy terms. Args: step: Integer used to track the solver's progress. 
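            perm: A `batch_shape x n`-dim tensor of variable permutation indices,
                tracking the order in which random variables are pivoted and
                integrated out.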
bounds: Tensor of lower and upper bounds, `batch_shape x [n, 2]`. piv_chol: A PivotedCholesky instance for the system. plug_ins: Tensor of plug-in estimators used to update lower and upper bounds on random variables that have yet to be integrated out. log_prob: Tensor of log probabilities. log_prob_extra: Tensor of conditional log probabilities for the next random variable. Used when integrating over an odd number of random variables. """ new = cls.__new__(cls) new.step = step new.perm = perm new.bounds = bounds new.piv_chol = piv_chol new.plug_ins = plug_ins new.log_prob = log_prob new.log_prob_extra = log_prob_extra return new def solve(self, num_steps: Optional[int] = None, eps: float = 1e-10) -> Tensor: r"""Runs the MVNXPB solver instance for a fixed number of steps. Calculates a bivariate conditional approximation to P(X \in bounds), where X ~ N(0, Σ). For details, see [Genz2016numerical] or [Trinh2015bivariate]_. """ if self.step > self.piv_chol.step: raise ValueError("Invalid state: solver ran ahead of matrix decomposition.") # Unpack some terms start = self.step bounds = self.bounds piv_chol = self.piv_chol L = piv_chol.tril y = self.plug_ins # Subtract marginal log probability of final term from previous result if # it did not fit in a block. ndim = y.shape[-1] if ndim > start and start % 2: self.log_prob = self.log_prob - self.log_prob_extra self.log_prob_extra = None # Iteratively compute bivariate conditional approximation zero = get_constants_like(0, L) # needed when calling `torch.where` below num_steps = num_steps or ndim - start for i in range(start, start + num_steps): should_update_chol = self.step == piv_chol.step # Determine next pivot element if should_update_chol: pivot = self.select_pivot() else: # pivot using order specified by precomputed pivoted Cholesky step mask = self.perm[..., i:] == piv_chol.perm[..., i : i + 1] pivot = i + torch.nonzero(mask, as_tuple=True)[-1] if pivot is not None and torch.any(pivot > i): self.pivot_(pivot=pivot) # Compute whitened bounds conditional on preceding plug-ins Lii = L[..., i, i].clone() if should_update_chol: Lii = Lii.clip(min=0).sqrt() # conditional stddev inv_Lii = Lii.reciprocal() bounds_i = bounds[..., i, :].clone() if i != 0: bounds_i = bounds_i - torch.sum( L[..., i, :i].clone() * y[..., :i].clone(), dim=-1, keepdim=True ) lb, ub = (inv_Lii.unsqueeze(-1) * bounds_i).unbind(dim=-1) # Initialize `i`-th plug-in value as univariate conditional expectation Phi_i = Phi(ub) - Phi(lb) small = Phi_i <= i * eps y[..., i] = case_dispatcher( # used to select next pivot out=(phi(lb) - phi(ub)) / Phi_i, cases=( # fallback cases for enhanced numerical stability (lambda: small & (lb < -9), lambda m: ub[m]), (lambda: small & (lb > 9), lambda m: lb[m]), (lambda: small, lambda m: 0.5 * (lb[m] + ub[m])), ), ) # Maybe finalize the current block if i and i % 2: h = i - 1 blk = slice(h, i + 1) Lhh = L[..., h, h].clone() Lih = L[..., i, h].clone() std_i = (Lii.square() + Lih.square()).sqrt() istds = 1 / torch.stack([Lhh, std_i], -1) blk_bounds = bounds[..., blk, :].clone() if i > 1: blk_bounds = blk_bounds - ( L[..., blk, : i - 1].clone() @ y[..., : i - 1, None].clone() ) blk_lower, blk_upper = ( pair.unbind(-1) # pair of bounds for `yh` and `yi` for pair in safe_mul(istds.unsqueeze(-1), blk_bounds).unbind(-1) ) blk_corr = Lhh * Lih * istds.prod(-1) blk_prob = bvn(blk_corr, *blk_lower, *blk_upper) zh, zi = bvnmom(blk_corr, *blk_lower, *blk_upper, p=blk_prob) # Replace 1D expectations with 2D ones `L[blk, blk]^{-1} y[..., blk]` mask = blk_prob > 
zero y[..., h] = torch.where(mask, zh, zero) y[..., i] = torch.where(mask, inv_Lii * (std_i * zi - Lih * zh), zero) # Update running approximation to log probability self.log_prob = self.log_prob + safe_log(blk_prob) self.step += 1 if should_update_chol: piv_chol.update_(eps=eps) # Factor in univariate probability if final term fell outside of a block. if self.step % 2: self.log_prob_extra = safe_log(Phi_i) self.log_prob = self.log_prob + self.log_prob_extra return self.log_prob def select_pivot(self) -> Optional[LongTensor]: r"""GGE variable prioritization strategy from [Gibson1994monte]_. Returns the index of the random variable least likely to satisfy its bounds when conditioning on the previously integrated random variables `X[:t - 1]` attaining the values of plug-in estimators `y[:t - 1]`. Equivalently, ``` argmin_{i = t, ..., n} P(X[i] \in bounds[i] | X[:t-1] = y[:t -1]), ``` where `t` denotes the current step.""" i = self.piv_chol.step L = self.piv_chol.tril bounds = self.bounds if i: bounds = bounds[..., i:, :] - L[..., i:, :i] @ self.plug_ins[..., :i, None] inv_stddev = torch.diagonal(L, dim1=-2, dim2=-1)[..., i:].clip(min=0).rsqrt() probs_1d = Phi(inv_stddev.unsqueeze(-1) * bounds).diff(dim=-1).squeeze(-1) return i + torch.argmin(probs_1d, dim=-1) def pivot_(self, pivot: LongTensor) -> None: r"""Swap random variables at `pivot` and `step` positions.""" step = self.step if self.piv_chol.step == step: self.piv_chol.pivot_(pivot) elif self.step > self.piv_chol.step: raise ValueError for tnsr in (self.perm, self.bounds): swap_along_dim_(tnsr, i=self.step, j=pivot, dim=pivot.ndim) def __getitem__(self, key: Any) -> MVNXPB: return self.build( step=self.step, perm=self.perm[key], bounds=self.bounds[key], piv_chol=self.piv_chol[key], plug_ins=self.plug_ins[key], log_prob=self.log_prob[key], log_prob_extra=( None if self.log_prob_extra is None else self.log_prob_extra[key] ), ) def concat(self, other: MVNXPB, dim: int) -> MVNXPB: if not isinstance(other, MVNXPB): raise TypeError( f"Expected `other` to be {type(self)} typed but was {type(other)}." ) batch_ndim = self.log_prob.ndim if dim > batch_ndim or dim < -batch_ndim: raise ValueError(f"`dim={dim}` is not a valid batch dimension.") state_dict = self.asdict() for key, _other in other.asdict().items(): _self = state_dict.get(key) if _self is None and _other is None: continue if type(_self) is not type(_other): raise TypeError( f"Concatenation failed: `self.{key}` has type {type(_self)}, " f"but `other.{key}` is of type {type(_self)}." ) if isinstance(_self, PivotedCholesky): state_dict[key] = _self.concat(_other, dim=dim) elif isinstance(_self, Tensor): state_dict[key] = torch.concat((_self, _other), dim=dim) elif _self != _other: raise ValueError( f"Concatenation failed: `self.{key}` does not equal `other.{key}`." ) return self.build(**state_dict) def expand(self, *sizes: int) -> MVNXPB: state_dict = self.asdict() state_dict["piv_chol"] = state_dict["piv_chol"].expand(*sizes) for name, ndim in { "bounds": 2, "perm": 1, "plug_ins": 1, "log_prob": 0, "log_prob_extra": 0, }.items(): src = state_dict[name] if isinstance(src, Tensor): state_dict[name] = src.expand( sizes + src.shape[-ndim:] if ndim else sizes ) return self.build(**state_dict) def augment( self, covariance_matrix: Tensor, bounds: Tensor, cross_covariance_matrix: Tensor, disable_pivoting: bool = False, jitter: Optional[float] = None, max_tries: Optional[int] = None, ) -> MVNXPB: r"""Augment an `n`-dimensional MVNXPB instance to include `m` additional random variables. 
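        Args:
            covariance_matrix: `batch_shape x [m, m]` covariance matrix of the
                `m` new random variables.
            bounds: `batch_shape x [m, 2]` tensor of lower and upper bounds for
                the new random variables.
            cross_covariance_matrix: `batch_shape x [m, n]` cross-covariance
                between the new and the existing random variables.
            disable_pivoting: If True, the Cholesky block of the new variables
                is finalized immediately; otherwise it is left to be pivoted
                and factorized step-by-step.
            jitter: Optional nugget added to the diagonal of the new block when
                augmenting the Cholesky factor.
            max_tries: Passed to `psd_safe_cholesky` when falling back to a
                full re-factorization of the joint covariance matrix.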
""" n = self.perm.shape[-1] m = covariance_matrix.shape[-1] if n != self.piv_chol.step: raise NotImplementedError( "Augmentation of incomplete solutions not implemented yet." ) var = covariance_matrix.diagonal(dim1=-2, dim2=-1).unsqueeze(-1) std = var.sqrt() istd = var.rsqrt() Kmn = istd * cross_covariance_matrix if self.piv_chol.diag is None: diag = pad(std.squeeze(-1), (cross_covariance_matrix.shape[-1], 0), value=1) else: Kmn = Kmn * (1 / self.piv_chol.diag).unsqueeze(-2) diag = torch.concat([self.piv_chol.diag, std.squeeze(-1)], -1) # Augment partial pivoted Cholesky factor Kmm = istd * covariance_matrix * istd.transpose(-1, -2) Lnn = self.piv_chol.tril try: L = augment_cholesky(Laa=Lnn, Kba=Kmn, Kbb=Kmm, jitter=jitter) except NotPSDError: warn("Joint covariance matrix not positive definite, attempting recovery.") Knn = Lnn @ Lnn.transpose(-1, -2) Knm = Kmn.transpose(-1, -2) K = block_matrix_concat(blocks=((Knn, Knm), (Kmn, Kmm))) L = psd_safe_cholesky(K, jitter=jitter, max_tries=max_tries) if not disable_pivoting: Lmm = L[..., n:, n:].clone() L[..., n:, n:] = (Lmm @ Lmm.transpose(-2, -1)).tril() _bounds = istd * bounds.clip(*(std * lim for lim in STANDARDIZED_RANGE)) _perm = torch.arange(n, n + m, dtype=self.perm.dtype, device=self.perm.device) _perm = _perm.expand(covariance_matrix.shape[:-2] + (m,)) piv_chol = PivotedCholesky( step=n + m if disable_pivoting else n, tril=L.contiguous(), perm=torch.cat([self.piv_chol.perm, _perm], dim=-1).contiguous(), diag=diag, ) return self.build( step=self.step, perm=torch.cat([self.perm, _perm], dim=-1), bounds=torch.cat([self.bounds, _bounds], dim=-2), piv_chol=piv_chol, plug_ins=pad(self.plug_ins, (0, m), value=float("nan")), log_prob=self.log_prob, log_prob_extra=self.log_prob_extra, ) def detach(self) -> MVNXPB: state_dict = self.asdict() for key, obj in state_dict.items(): if isinstance(obj, (PivotedCholesky, Tensor)): state_dict[key] = obj.detach() return self.build(**state_dict) def clone(self) -> MVNXPB: state_dict = self.asdict() for key, obj in state_dict.items(): if isinstance(obj, (PivotedCholesky, Tensor)): state_dict[key] = obj.clone() return self.build(**state_dict) def asdict(self) -> mvnxpbState: return mvnxpbState( step=self.step, perm=self.perm, bounds=self.bounds, piv_chol=self.piv_chol, plug_ins=self.plug_ins, log_prob=self.log_prob, log_prob_extra=self.log_prob_extra, )
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from dataclasses import dataclass, InitVar from itertools import chain from typing import Any, Optional, Sequence import torch from botorch.utils.probability.utils import swap_along_dim_ from linear_operator.utils.errors import NotPSDError from torch import LongTensor, Tensor from torch.nn.functional import pad def block_matrix_concat(blocks: Sequence[Sequence[Tensor]]) -> Tensor: rows = [] shape = torch.broadcast_shapes(*(x.shape[:-2] for x in chain.from_iterable(blocks))) for tensors in blocks: parts = [x.expand(*shape, *x.shape[-2:]) for x in tensors] if len(parts) > 1: rows.append(torch.cat(parts, dim=-1)) else: rows.extend(parts) return torch.concat(rows, dim=-2) def augment_cholesky( Laa: Tensor, Kbb: Tensor, Kba: Optional[Tensor] = None, Lba: Optional[Tensor] = None, jitter: Optional[float] = None, ) -> Tensor: r"""Computes the Cholesky factor of a block matrix `K = [[Kaa, Kab], [Kba, Kbb]]` based on a precomputed Cholesky factor `Kaa = Laa Laa^T`. Args: Laa: Cholesky factor of K's upper left block. Kbb: Lower-right block of K. Kba: Lower-left block of K. Lba: Precomputed solve `Kba Laa^{-T}`. jitter: Optional nugget to be added to the diagonal of Kbb. """ if not (Kba is None) ^ (Lba is None): raise ValueError("One and only one of `Kba` or `Lba` must be provided.") if jitter is not None: Kbb = Kbb.clone() Kbb.diagonal(dim1=-2, dim2=-1).add_(jitter) if Lba is None: Lba = torch.linalg.solve_triangular( Laa.transpose(-2, -1), Kba, left=False, upper=True ) Lbb, info = torch.linalg.cholesky_ex(Kbb - Lba @ Lba.transpose(-2, -1)) if info.any(): raise NotPSDError( "Schur complement of `K` with respect to `Kaa` not PSD for the given " "Cholesky factor `Laa`" f"{'.' if jitter is None else f' and nugget jitter={jitter}.'}" ) n = Lbb.shape[-1] return block_matrix_concat(blocks=([pad(Laa, (0, n))], [Lba, Lbb])) @dataclass class PivotedCholesky: step: int tril: Tensor perm: LongTensor diag: Optional[Tensor] = None validate_init: InitVar[bool] = True def __post_init__(self, validate_init: bool = True): if not validate_init: return if self.tril.shape[-2] != self.tril.shape[-1]: raise ValueError( f"Expected square matrices but `matrix` has shape `{self.tril.shape}`." ) if self.perm.shape != self.tril.shape[:-1]: raise ValueError( f"`perm` of shape `{self.perm.shape}` incompatible with " f"`matrix` of shape `{self.tril.shape}`." ) if self.diag is not None and self.diag.shape != self.tril.shape[:-1]: raise ValueError( f"`diag` of shape `{self.diag.shape}` incompatible with " f"`matrix` of shape `{self.tril.shape}`." ) def __getitem__(self, key: Any) -> PivotedCholesky: return PivotedCholesky( step=self.step, tril=self.tril[key], perm=self.perm[key], diag=None if self.diag is None else self.diag[key], ) def update_(self, eps: float = 1e-10) -> None: r"""Performs a single matrix decomposition step.""" i = self.step L = self.tril Lii = self.tril[..., i, i].clone().clip(min=0).sqrt() # Finalize `i-th` row and column of Cholesky factor L[..., i, i] = Lii L[..., i, i + 1 :] = 0 L[..., i + 1 :, i] = L[..., i + 1 :, i].clone() / Lii.unsqueeze(-1) # Update `tril(L[i + 1:, i + 1:])` to be the lower triangular part # of the Schur complement of `cov` with respect to `cov[:i, :i]`. 
rank1 = L[..., i + 1 :, i : i + 1].clone() rank1 = (rank1 * rank1.transpose(-1, -2)).tril() L[..., i + 1 :, i + 1 :] = L[..., i + 1 :, i + 1 :].clone() - rank1 L[Lii <= i * eps, i:, i] = 0 # numerical stability clause self.step += 1 def pivot_(self, pivot: LongTensor) -> None: *batch_shape, _, size = self.tril.shape if pivot.shape != tuple(batch_shape): raise ValueError("Argument `pivot` does to match with batch shape`.") # Perform basic swaps for key in ("perm", "diag"): tnsr = getattr(self, key, None) if tnsr is not None: swap_along_dim_(tnsr, i=self.step, j=pivot, dim=tnsr.ndim - 1) # Perform matrix swaps; prealloacte buffers for row/column linear indices size2 = size**2 min_pivot = pivot.min() tkwargs = {"device": pivot.device, "dtype": pivot.dtype} buffer_col = torch.arange(size * (1 + min_pivot), size2, size, **tkwargs) buffer_row = torch.arange(0, max(self.step, pivot.max()), **tkwargs) head = buffer_row[: self.step] indices_v1 = [] indices_v2 = [] for i, piv in enumerate(pivot.view(-1, 1)): v1 = pad(piv, (1, 0), value=self.step).unsqueeze(-1) v2 = pad(piv, (0, 1), value=self.step).unsqueeze(-1) start = i * size2 indices_v1.extend((start + v1 + size * v1).ravel()) indices_v2.extend((start + v2 + size * v2).ravel()) indices_v1.extend((start + size * v1 + head).ravel()) indices_v2.extend((start + size * v2 + head).ravel()) tail = buffer_col[piv - min_pivot :] indices_v1.extend((start + v1 + tail).ravel()) indices_v2.extend((start + v2 + tail).ravel()) interior = buffer_row[min(piv, self.step + 1) : piv] indices_v1.extend(start + size * interior + self.step) indices_v2.extend(start + size * piv + interior) swap_along_dim_( self.tril.view(-1), i=torch.as_tensor(indices_v1, **tkwargs), j=torch.as_tensor(indices_v2, **tkwargs), dim=0, ) def expand(self, *sizes: int) -> PivotedCholesky: fields = {} for name, ndim in {"perm": 1, "diag": 1, "tril": 2}.items(): src = getattr(self, name) if src is not None: fields[name] = src.expand(sizes + src.shape[-ndim:]) return type(self)(step=self.step, **fields) def concat(self, other: PivotedCholesky, dim: int = 0) -> PivotedCholesky: if self.step != other.step: raise ValueError("Cannot conncatenate decompositions at different steps.") fields = {} for name in ("tril", "perm", "diag"): a = getattr(self, name) b = getattr(other, name) if type(a) is not type(b): raise NotImplementedError(f"Types of field {name} do not match.") if a is not None: fields[name] = torch.concat((a, b), dim=dim) return type(self)(step=self.step, **fields) def detach(self) -> PivotedCholesky: fields = {} for name in ("tril", "perm", "diag"): obj = getattr(self, name) if obj is not None: fields[name] = obj.detach() return type(self)(step=self.step, **fields) def clone(self) -> PivotedCholesky: fields = {} for name in ("tril", "perm", "diag"): obj = getattr(self, name) if obj is not None: fields[name] = obj.clone() return type(self)(step=self.step, **fields)
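# Small numerical check (hedged sketch) for the helpers above: factor the top-left
# block of an SPD matrix, extend that factor to the full matrix with
# `augment_cholesky`, and compare against the matrix it is meant to factorize.
# The import path is an assumption about where this module lives inside BoTorch.
import torch
from botorch.utils.probability.linalg import augment_cholesky  # assumed import path

torch.manual_seed(0)
size, n = 5, 3
A = torch.randn(size, size, dtype=torch.double)
K = A @ A.T + size * torch.eye(size, dtype=torch.double)  # well-conditioned SPD

Laa = torch.linalg.cholesky(K[:n, :n])  # factor of the upper-left `n x n` block
L = augment_cholesky(Laa=Laa, Kbb=K[n:, n:], Kba=K[n:, :n])
assert torch.allclose(L @ L.transpose(-2, -1), K)  # recovers the full matrix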
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations import math from functools import lru_cache from math import pi from numbers import Number from typing import Any, Callable, Iterable, Iterator, Optional, Tuple, Union import torch from botorch.utils.safe_math import logdiffexp from numpy.polynomial.legendre import leggauss as numpy_leggauss from torch import BoolTensor, LongTensor, Tensor CaseNd = Tuple[Callable[[], BoolTensor], Callable[[BoolTensor], Tensor]] _log_2 = math.log(2) _sqrt_pi = math.sqrt(pi) _inv_sqrt_pi = 1 / _sqrt_pi _inv_sqrt_2pi = 1 / math.sqrt(2 * pi) _inv_sqrt_2 = 1 / math.sqrt(2) _neg_inv_sqrt_2 = -_inv_sqrt_2 _log_sqrt_2pi = math.log(2 * pi) / 2 STANDARDIZED_RANGE: Tuple[float, float] = (-1e6, 1e6) _log_two_inv_sqrt_2pi = _log_2 - _log_sqrt_2pi # = log(2 / sqrt(2 * pi)) def case_dispatcher( out: Tensor, cases: Iterable[CaseNd] = (), default: Callable[[BoolTensor], Tensor] = None, ) -> Tensor: r"""Basic implementation of a tensorized switching case statement. Args: out: Tensor to which case outcomes are written. cases: Iterable of function pairs (pred, func), where `mask=pred()` specifies whether `func` is applicable for each entry in `out`. Note that cases are resolved first-come, first-serve. default: Optional `func` to which all unclaimed entries of `out` are dispatched. """ active = None for closure, func in cases: pred = closure() if not pred.any(): continue mask = pred if (active is None) else pred & active if not mask.any(): continue if mask.all(): # where possible, use Ellipsis to avoid indexing out[...] = func(...) return out out[mask] = func(mask) if active is None: active = ~mask else: active[mask] = False if not active.any(): break if default is not None: if active is None: out[...] = default(...) elif active.any(): out[active] = default(active) return out @lru_cache(maxsize=None) def get_constants( values: Union[Number, Iterator[Number]], device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Union[Tensor, Tuple[Tensor, ...]]: r"""Returns scalar-valued Tensors containing each of the given constants. Used to expedite tensor operations involving scalar arithmetic. 
Note that the returned Tensors should not be modified in-place.""" if isinstance(values, Number): return torch.full((), values, dtype=dtype, device=device) return tuple(torch.full((), val, dtype=dtype, device=device) for val in values) def get_constants_like( values: Union[Number, Iterator[Number]], ref: Tensor, ) -> Union[Tensor, Iterator[Tensor]]: return get_constants(values, device=ref.device, dtype=ref.dtype) def gen_positional_indices( shape: torch.Size, dim: int, device: Optional[torch.device] = None, ) -> Iterator[torch.LongTensor]: ndim = len(shape) _dim = ndim + dim if dim < 0 else dim if _dim >= ndim or _dim < 0: raise ValueError(f"dim={dim} invalid for shape {shape}.") cumsize = shape[_dim + 1 :].numel() for i, s in enumerate(reversed(shape[: _dim + 1])): yield torch.arange(0, s * cumsize, cumsize, device=device)[(...,) + i * (None,)] cumsize *= s def build_positional_indices( shape: torch.Size, dim: int, device: Optional[torch.device] = None, ) -> LongTensor: return sum(gen_positional_indices(shape=shape, dim=dim, device=device)) @lru_cache(maxsize=None) def leggauss(deg: int, **tkwargs: Any) -> Tuple[Tensor, Tensor]: x, w = numpy_leggauss(deg) return torch.as_tensor(x, **tkwargs), torch.as_tensor(w, **tkwargs) def ndtr(x: Tensor) -> Tensor: r"""Standard normal CDF.""" half, neg_inv_sqrt_2 = get_constants_like((0.5, _neg_inv_sqrt_2), x) return half * torch.erfc(neg_inv_sqrt_2 * x) def phi(x: Tensor) -> Tensor: r"""Standard normal PDF.""" inv_sqrt_2pi, neg_half = get_constants_like((_inv_sqrt_2pi, -0.5), x) return inv_sqrt_2pi * (neg_half * x.square()).exp() def log_phi(x: Tensor) -> Tensor: r"""Logarithm of standard normal pdf""" log_sqrt_2pi, neg_half = get_constants_like((_log_sqrt_2pi, -0.5), x) return neg_half * x.square() - log_sqrt_2pi def log_ndtr(x: Tensor) -> Tensor: """Implementation of log_ndtr that remedies problems of torch.special's version for large negative x, where the torch implementation yields Inf or NaN gradients. Args: x: An input tensor with dtype torch.float32 or torch.float64. Returns: A tensor of values of the same type and shape as x containing log(ndtr(x)). """ if not (x.dtype == torch.float32 or x.dtype == torch.float64): raise TypeError( f"log_Phi only supports torch.float32 and torch.float64 " f"dtypes, but received {x.dtype = }." ) neg_inv_sqrt_2, log_2 = get_constants_like((_neg_inv_sqrt_2, _log_2), x) return log_erfc(neg_inv_sqrt_2 * x) - log_2 def log_erfc(x: Tensor) -> Tensor: """Computes the logarithm of the complementary error function in a numerically stable manner. The GitHub issue https://github.com/pytorch/pytorch/issues/31945 tracks progress toward moving this feature into PyTorch in C++. Args: x: An input tensor with dtype torch.float32 or torch.float64. Returns: A tensor of values of the same type and shape as x containing log(erfc(x)). """ if not (x.dtype == torch.float32 or x.dtype == torch.float64): raise TypeError( f"log_erfc only supports torch.float32 and torch.float64 " f"dtypes, but received {x.dtype = }." ) is_pos = x > 0 x_pos = x.masked_fill(~is_pos, 0) x_neg = x.masked_fill(is_pos, 0) return torch.where( is_pos, torch.log(torch.special.erfcx(x_pos)) - x_pos.square(), torch.log(torch.special.erfc(x_neg)), ) def log_erfcx(x: Tensor) -> Tensor: """Computes the logarithm of the complementary scaled error function in a numerically stable manner. The GitHub issue tracks progress toward moving this feature into PyTorch in C++: https://github.com/pytorch/pytorch/issues/31945. 
Args: x: An input tensor with dtype torch.float32 or torch.float64. Returns: A tensor of values of the same type and shape as x containing log(erfcx(x)). """ is_pos = x > 0 x_pos = x.masked_fill(~is_pos, 0) x_neg = x.masked_fill(is_pos, 0) return torch.where( is_pos, torch.special.erfcx(x_pos).log(), torch.special.erfc(x_neg).log() + x.square(), ) def standard_normal_log_hazard(x: Tensor) -> Tensor: """Computes the logarithm of the hazard function of the standard normal distribution, i.e. `log(phi(x) / Phi(-x))`. Args: x: A tensor of any shape, with either float32 or float64 dtypes. Returns: A Tensor of the same shape `x`, containing the values of the logarithm of the hazard function evaluated at `x`. """ # NOTE: using _inv_sqrt_2 instead of _neg_inv_sqrt_2 means we are computing Phi(-x). a, b = get_constants_like((_log_two_inv_sqrt_2pi, _inv_sqrt_2), x) return a - log_erfcx(b * x) def log_prob_normal_in(a: Tensor, b: Tensor) -> Tensor: r"""Computes the probability that a standard normal random variable takes a value in \[a, b\], i.e. log(Phi(b) - Phi(a)), where Phi is the standard normal CDF. Returns accurate values and permits numerically stable backward passes for inputs in [-1e100, 1e100] for double precision and [-1e20, 1e20] for single precision. In contrast, a naive approach is not numerically accurate beyond [-10, 10]. Args: a: Tensor of lower integration bounds of the Gaussian probability measure. b: Tensor of upper integration bounds of the Gaussian probability measure. Returns: Tensor of the log probabilities. """ if not (a < b).all(): raise ValueError("Received input tensors a, b for which not all a < b.") # if abs(b) > abs(a), we use Phi(b) - Phi(a) = Phi(-a) - Phi(-b), since the # right tail converges to 0 from below, leading to digit cancellation issues, while # the left tail of log_ndtr is well behaved and results in large negative numbers rev_cond = b.abs() > a.abs() # condition for reversal of inputs if rev_cond.any(): c = torch.where(rev_cond, -b, a) b = torch.where(rev_cond, -a, b) a = c # after we updated b, can assign c to a return logdiffexp(log_a=log_ndtr(a), log_b=log_ndtr(b)) def swap_along_dim_( values: Tensor, i: Union[int, LongTensor], j: Union[int, LongTensor], dim: int, buffer: Optional[Tensor] = None, ) -> Tensor: r"""Swaps Tensor slices in-place along dimension `dim`. When passed as Tensors, `i` (and `j`) should be `dim`-dimensional tensors with the same shape as `values.shape[:dim]`. The xception to this rule occurs when `dim=0`, in which case `i` (and `j`) should be (at most) one-dimensional when passed as a Tensor. Args: values: Tensor whose values are to be swapped. i: Indices for slices along dimension `dim`. j: Indices for slices along dimension `dim`. dim: The dimension of `values` along which to swap slices. buffer: Optional buffer used internally to store copied values. Returns: The original `values` tensor. 
""" dim = values.ndim + dim if dim < 0 else dim if dim and ( (isinstance(i, Tensor) and i.ndim) or (isinstance(j, Tensor) and j.ndim) ): # Handle n-dimensional batches of heterogeneous swaps via linear indexing if isinstance(i, Tensor) and i.shape != values.shape[:dim]: raise ValueError("Batch shapes of `i` and `values` do not match.") if isinstance(j, Tensor) and j.shape != values.shape[:dim]: raise ValueError("Batch shapes of `j` and `values` do not match.") pidx = build_positional_indices( shape=values.shape[: dim + 1], dim=-2, device=values.device ) swap_along_dim_( values.view(-1, *values.shape[dim + 1 :]), i=(pidx + i).view(-1), j=(pidx + j).view(-1), dim=0, buffer=buffer, ) else: # Base cases: homogeneous swaps and 1-dimenensional heterogeneous swaps if isinstance(i, Tensor) and i.ndim > 1: raise ValueError("Tensor `i` must be at most 1-dimensional when `dim=0`.") if isinstance(j, Tensor) and j.ndim > 1: raise ValueError("Tensor `j` must be at most 1-dimensional when `dim=0`.") if dim: ctx = tuple(slice(None) for _ in range(dim)) i = ctx + (i,) j = ctx + (j,) if buffer is None: buffer = values[i].clone() else: buffer.copy_(values[i]) values[i] = values[j] values[j] = buffer return values
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Methods for computing bivariate normal probabilities and statistics. .. [Genz2004bvnt] A. Genz. Numerical computation of rectangular bivariate and trivariate normal and t probabilities. Statistics and Computing, 2004. .. [Muthen1990moments] B. Muthen. Moments of the censored and truncated bivariate normal distribution. British Journal of Mathematical and Statistical Psychology, 1990. """ from __future__ import annotations from math import pi as _pi from typing import Optional, Tuple import torch from botorch.exceptions import UnsupportedError from botorch.utils.probability.utils import ( case_dispatcher, get_constants_like, leggauss, ndtr as Phi, phi, STANDARDIZED_RANGE, ) from botorch.utils.safe_math import ( div as safe_div, exp as safe_exp, mul as safe_mul, sub as safe_sub, ) from torch import Tensor # Some useful constants _inf = float("inf") _2pi = 2 * _pi _sqrt_2pi = _2pi**0.5 _inv_2pi = 1 / _2pi def bvn(r: Tensor, xl: Tensor, yl: Tensor, xu: Tensor, yu: Tensor) -> Tensor: r"""A function for computing bivariate normal probabilities. Calculates `P(xl < x < xu, yl < y < yu)` where `x` and `y` are bivariate normal with unit variance and correlation coefficient `r`. See Section 2.4 of [Genz2004bvnt]_. This method uses a sign flip trick to improve numerical performance. Many of `bvnu`s internal branches rely on evaluations `Phi(-bound)`. For `a < b < 0`, the term `Phi(-a) - Phi(-b)` goes to zero faster than `Phi(b) - Phi(a)` because `finfo(dtype).epsneg` is typically much larger than `finfo(dtype).tiny`. In these cases, flipping the sign can prevent situations where `bvnu(...) - bvnu(...)` would otherwise be zero due to round-off error. Args: r: Tensor of correlation coefficients. xl: Tensor of lower bounds for `x`, same shape as `r`. yl: Tensor of lower bounds for `y`, same shape as `r`. xu: Tensor of upper bounds for `x`, same shape as `r`. yu: Tensor of upper bounds for `y`, same shape as `r`. Returns: Tensor of probabilities `P(xl < x < xu, yl < y < yu)`. """ if not (r.shape == xl.shape == xu.shape == yl.shape == yu.shape): raise UnsupportedError("Arguments to `bvn` must have the same shape.") # Sign flip trick _0, _1, _2 = get_constants_like(values=(0, 1, 2), ref=r) flip_x = xl.abs() > xu # is xl more negative than xu is positive? flip_y = yl.abs() > yu flip = (flip_x & (~flip_y | yu.isinf())) | (flip_y & (~flip_x | xu.isinf())) if flip.any(): # symmetric calls to `bvnu` below makes swapping bounds unnecessary sign = _1 - _2 * flip.to(dtype=r.dtype) xl = sign * xl # becomes `-xu` if flipped xu = sign * xu # becomes `-xl` yl = sign * yl # becomes `-yu` yu = sign * yu # becomes `-yl` p = bvnu(r, xl, yl) - bvnu(r, xu, yl) - bvnu(r, xl, yu) + bvnu(r, xu, yu) return p.clip(_0, _1) def bvnu(r: Tensor, h: Tensor, k: Tensor) -> Tensor: r"""Solves for `P(x > h, y > k)` where `x` and `y` are standard bivariate normal random variables with correlation coefficient `r`. In [Genz2004bvnt]_, this is (1) `L(h, k, r) = P(x < -h, y < -k) \ = 1/(a 2\pi) \int_{h}^{\infty} \int_{k}^{\infty} f(x, y, r) dy dx,` where `f(x, y, r) = e^{-1/(2a^2) (x^2 - 2rxy + y^2)}` and `a = (1 - r^2)^{1/2}`. 
[Genz2004bvnt]_ report the following integation scheme incurs a maximum of 5e-16 error when run in double precision: if `|r| >= 0.925`, use a 20-point quadrature rule on a 5th order Taylor expansion; else, numerically integrate in polar coordinates using no more than 20 quadrature points. Args: r: Tensor of correlation coefficients. h: Tensor of negative upper bounds for `x`, same shape as `r`. k: Tensor of negative upper bounds for `y`, same shape as `r`. Returns: A tensor of probabilities `P(x > h, y > k)`. """ if not (r.shape == h.shape == k.shape): raise UnsupportedError("Arguments to `bvnu` must have the same shape.") _0, _1, lower, upper = get_constants_like((0, 1) + STANDARDIZED_RANGE, r) x_free = h < lower y_free = k < lower return case_dispatcher( out=torch.empty_like(r), cases=( # Special cases admitting closed-form solutions (lambda: (h > upper) | (k > upper), lambda mask: _0), (lambda: x_free & y_free, lambda mask: _1), (lambda: x_free, lambda mask: Phi(-k[mask])), (lambda: y_free, lambda mask: Phi(-h[mask])), (lambda: r == _0, lambda mask: Phi(-h[mask]) * Phi(-k[mask])), ( # For |r| >= 0.925, use a Taylor approximation lambda: r.abs() >= get_constants_like(0.925, r), lambda m: _bvnu_taylor(r[m], h[m], k[m]), ), ), # For |r| < 0.925, integrate in polar coordinates. default=lambda mask: _bvnu_polar(r[mask], h[mask], k[mask]), ) def _bvnu_polar( r: Tensor, h: Tensor, k: Tensor, num_points: Optional[int] = None ) -> Tensor: r"""Solves for `P(x > h, y > k)` by integrating in polar coordinates as `L(h, k, r) = \Phi(-h)\Phi(-k) + 1/(2\pi) \int_{0}^{sin^{-1}(r)} f(t) dt \ f(t) = e^{-0.5 cos(t)^{-2} (h^2 + k^2 - 2hk sin(t))}` For details, see Section 2.2 of [Genz2004bvnt]_. """ if num_points is None: mar = r.abs().max() num_points = 6 if mar < 0.3 else 12 if mar < 0.75 else 20 _0, _1, _i2, _i2pi = get_constants_like(values=(0, 1, 0.5, _inv_2pi), ref=r) x, w = leggauss(num_points, dtype=r.dtype, device=r.device) x = x + _1 asin_r = _i2 * torch.asin(r) sin_asrx = (asin_r.unsqueeze(-1) * x).sin() _h = h.unsqueeze(-1) _k = k.unsqueeze(-1) vals = safe_exp( safe_sub(safe_mul(sin_asrx, _h * _k), _i2 * (_h.square() + _k.square())) / (_1 - sin_asrx.square()) ) probs = Phi(-h) * Phi(-k) + _i2pi * asin_r * (vals @ w) return probs.clip(min=_0, max=_1) # necessary due to "safe" handling of inf def _bvnu_taylor(r: Tensor, h: Tensor, k: Tensor, num_points: int = 20) -> Tensor: r"""Solves for `P(x > h, y > k)` via Taylor expansion. Per Section 2.3 of [Genz2004bvnt]_, the bvnu equation (1) may be rewritten as `L(h, k, r) = L(h, k, s) - s/(2\pi) \int_{0}^{a} f(x) dx \ f(x) = (1 - x^2){-1/2} e^{-0.5 ((h - sk)/ x)^2} e^{-shk/(1 + (1 - x^2)^{1/2})},` where `s = sign(r)` and `a = sqrt(1 - r^{2})`. The term `L(h, k, s)` is analytic. The second integral is approximated via Taylor expansion. See Sections 2.3 and 2.4 of [Genz2004bvnt]_. 
""" _0, _1, _ni2, _i2pi, _sq2pi = get_constants_like( values=(0, 1, -0.5, _inv_2pi, _sqrt_2pi), ref=r ) x, w = leggauss(num_points, dtype=r.dtype, device=r.device) x = x + _1 s = get_constants_like(2, r) * (r > _0).to(r) - _1 # sign of `r` where sign(0) := 1 sk = s * k skh = sk * h comp_r2 = _1 - r.square() a = comp_r2.clip(min=0).sqrt() b = safe_sub(h, sk) b2 = b.square() c = get_constants_like(1 / 8, r) * (get_constants_like(4, r) - skh) d = get_constants_like(1 / 80, r) * (get_constants_like(12, r) - skh) # ---- Solve for `L(h, k, s)` int_from_0_to_s = case_dispatcher( out=torch.empty_like(r), cases=[(lambda: r > _0, lambda mask: Phi(-torch.maximum(h[mask], k[mask])))], default=lambda mask: (Phi(sk[mask]) - Phi(h[mask])).clip(min=_0), ) # ---- Solve for `s/(2\pi) \int_{0}^{a} f(x) dx` # Analytic part _a0 = _ni2 * (safe_div(b2, comp_r2) + skh) _a1 = c * get_constants_like(1 / 3, r) * (_1 - d * b2) _a2 = _1 - b2 * _a1 abs_b = b.abs() analytic_part = torch.subtract( # analytic part of solution a * (_a2 + comp_r2 * _a1 + c * d * comp_r2.square()) * safe_exp(_a0), _sq2pi * Phi(safe_div(-abs_b, a)) * abs_b * _a2 * safe_exp(_ni2 * skh), ) # Quadrature part _b2 = b2.unsqueeze(-1) _skh = skh.unsqueeze(-1) _q0 = get_constants_like(0.25, r) * comp_r2.unsqueeze(-1) * x.square() _q1 = (_1 - _q0).sqrt() _q2 = _ni2 * (_b2 / _q0 + _skh) _b2 = b2.unsqueeze(-1) _c = c.unsqueeze(-1) _d = d.unsqueeze(-1) vals = (_ni2 * (_b2 / _q0 + _skh)).exp() * torch.subtract( _1 + _c * _q0 * (_1 + get_constants_like(5, r) * _d * _q0), safe_exp(_ni2 * _q0 / (_1 + _q1).square() * _skh) / _q1, ) mask = _q2 > get_constants_like(-100, r) if not mask.all(): vals[~mask] = _0 quadrature_part = _ni2 * a * (vals @ w) # Return `P(x > h, y > k)` int_from_0_to_a = _i2pi * s * (analytic_part + quadrature_part) return (int_from_0_to_s - int_from_0_to_a).clip(min=_0, max=_1) def bvnmom( r: Tensor, xl: Tensor, yl: Tensor, xu: Tensor, yu: Tensor, p: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor]: r"""Computes the expected values of truncated, bivariate normal random variables. Let `x` and `y` be a pair of standard bivariate normal random variables having correlation `r`. This function computes `E([x,y] \| [xl,yl] < [x,y] < [xu,yu])`. Following [Muthen1990moments]_ equations (4) and (5), we have `E(x \| [xl, yl] < [x, y] < [xu, yu]) \ = Z^{-1} \phi(xl) P(yl < y < yu \| x=xl) - \phi(xu) P(yl < y < yu \| x=xu),` where `Z = P([xl, yl] < [x, y] < [xu, yu])` and `\phi` is the standard normal PDF. Args: r: Tensor of correlation coefficients. xl: Tensor of lower bounds for `x`, same shape as `r`. xu: Tensor of upper bounds for `x`, same shape as `r`. yl: Tensor of lower bounds for `y`, same shape as `r`. yu: Tensor of upper bounds for `y`, same shape as `r`. p: Tensor of probabilities `P(xl < x < xu, yl < y < yu)`, same shape as `r`. Returns: `E(x \| [xl, yl] < [x, y] < [xu, yu])` and `E(y \| [xl, yl] < [x, y] < [xu, yu])`. 
""" if not (r.shape == xl.shape == xu.shape == yl.shape == yu.shape): raise UnsupportedError("Arguments to `bvn` must have the same shape.") if p is None: p = bvn(r=r, xl=xl, xu=xu, yl=yl, yu=yu) corr = r[..., None, None] istd = (1 - corr.square()).rsqrt() lower = torch.stack([xl, yl], -1) upper = torch.stack([xu, yu], -1) bounds = torch.stack([lower, upper], -1) deltas = safe_mul(corr, bounds) # Compute densities and conditional probabilities density_at_bounds = phi(bounds) prob_given_bounds = Phi( safe_mul(istd, safe_sub(upper.flip(-1).unsqueeze(-1), deltas)) ) - Phi(safe_mul(istd, safe_sub(lower.flip(-1).unsqueeze(-1), deltas))) # Evaluate Muthen's formula p_diffs = -(density_at_bounds * prob_given_bounds).diff().squeeze(-1) moments = (1 / p).unsqueeze(-1) * (p_diffs + r.unsqueeze(-1) * p_diffs.flip(-1)) return moments.unbind(-1)
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.utils.multi_objective.hypervolume import Hypervolume, infer_reference_point from botorch.utils.multi_objective.pareto import is_non_dominated from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization __all__ = [ "get_chebyshev_scalarization", "infer_reference_point", "is_non_dominated", "Hypervolume", ]
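# The module above only re-exports the most commonly used multi-objective helpers,
# so downstream code can rely on the shorter package-level path (hedged sketch):
from botorch.utils.multi_objective import (
    get_chebyshev_scalarization,
    infer_reference_point,
    is_non_dominated,
    Hypervolume,
)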
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations import torch from torch import Tensor # maximum tensor size for simple pareto computation MAX_BYTES = 5e6 def is_non_dominated(Y: Tensor, deduplicate: bool = True) -> Tensor: r"""Computes the non-dominated front. Note: this assumes maximization. For small `n`, this method uses a highly parallel methodology that compares all pairs of points in Y. However, this is memory intensive and slow for large `n`. For large `n` (or if Y is larger than 5MB), this method will dispatch to a loop-based approach that is faster and has a lower memory footprint. Args: Y: A `(batch_shape) x n x m`-dim tensor of outcomes. deduplicate: A boolean indicating whether to only return unique points on the pareto frontier. Returns: A `(batch_shape) x n`-dim boolean tensor indicating whether each point is non-dominated. """ n = Y.shape[-2] if n == 0: return torch.zeros(Y.shape[:-1], dtype=torch.bool, device=Y.device) el_size = 64 if Y.dtype == torch.double else 32 if n > 1000 or n**2 * Y.shape[:-2].numel() * el_size / 8 > MAX_BYTES: return _is_non_dominated_loop(Y) Y1 = Y.unsqueeze(-3) Y2 = Y.unsqueeze(-2) dominates = (Y1 >= Y2).all(dim=-1) & (Y1 > Y2).any(dim=-1) nd_mask = ~(dominates.any(dim=-1)) if deduplicate: # remove duplicates # find index of first occurrence of each unique element indices = (Y1 == Y2).all(dim=-1).long().argmax(dim=-1) keep = torch.zeros_like(nd_mask) keep.scatter_(dim=-1, index=indices, value=1.0) return nd_mask & keep return nd_mask def _is_non_dominated_loop(Y: Tensor, maximize: bool = True) -> Tensor: r"""Determine which points are non-dominated. Compared to `is_non_dominated`, this method is significantly faster for large `n` on a CPU and will significant reduce memory overhead. However, `is_non_dominated` is faster for smaller problems. Args: Y: A `(batch_shape) x n x m` Tensor of outcomes. maximize: A boolean indicating if the goal is maximization. Returns: A `(batch_shape) x n`-dim Tensor of booleans indicating whether each point is non-dominated. """ is_efficient = torch.ones(*Y.shape[:-1], dtype=bool, device=Y.device) for i in range(Y.shape[-2]): i_is_efficient = is_efficient[..., i] if i_is_efficient.any(): vals = Y[..., i : i + 1, :] if maximize: update = (Y > vals).any(dim=-1) else: update = (Y < vals).any(dim=-1) # If an element in Y[..., i, :] is efficient, mark it as efficient update[..., i] = i_is_efficient.clone() # Only include batches where Y[..., i, :] is efficient # Create a copy is_efficient2 = is_efficient.clone() if Y.ndim > 2: # Set all elements in all batches where Y[..., i, :] is not # efficient to False is_efficient2[~i_is_efficient] = False # Only include elements from in_efficient from the batches # where Y[..., i, :] is efficient is_efficient[is_efficient2] = update[is_efficient2] return is_efficient
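# Hedged usage sketch for `is_non_dominated` (maximization): dominated points are
# filtered out, and duplicates of Pareto points are dropped when `deduplicate=True`
# (the default). The import path is an assumption about the module location.
import torch
from botorch.utils.multi_objective.pareto import is_non_dominated  # assumed path

Y = torch.tensor(
    [
        [1.0, 3.0],  # Pareto optimal
        [2.0, 2.0],  # Pareto optimal
        [1.5, 1.5],  # dominated by (2, 2)
        [2.0, 2.0],  # duplicate of a Pareto point
    ]
)
assert is_non_dominated(Y).tolist() == [True, True, False, False]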
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Helper utilities for constructing scalarizations. References .. [Knowles2005] J. Knowles, "ParEGO: a hybrid algorithm with on-line landscape approximation for expensive multiobjective optimization problems," in IEEE Transactions on Evolutionary Computation, vol. 10, no. 1, pp. 50-66, Feb. 2006. """ from __future__ import annotations from typing import Callable, Optional import torch from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError from botorch.utils.transforms import normalize from torch import Tensor def get_chebyshev_scalarization( weights: Tensor, Y: Tensor, alpha: float = 0.05 ) -> Callable[[Tensor, Optional[Tensor]], Tensor]: r"""Construct an augmented Chebyshev scalarization. The augmented Chebyshev scalarization is given by g(y) = max_i(w_i * y_i) + alpha * sum_i(w_i * y_i) where the goal is to minimize g(y) in the setting where all objectives y_i are to be minimized. Since the default in BoTorch is to maximize all objectives, this method constructs a Chebyshev scalarization where the inputs are first multiplied by -1, so that all objectives are to be minimized. Then, it computes g(y) (which should be minimized), and returns -g(y), which should be maximized. Minimizing an objective is supported by passing a negative weight for that objective. To make all w * y's have the same sign such that they are comparable when computing max(w * y), outcomes of minimization objectives are shifted from [0,1] to [-1,0]. See [Knowles2005]_ for details. This scalarization can be used with qExpectedImprovement to implement q-ParEGO as proposed in [Daulton2020qehvi]_. Args: weights: A `m`-dim tensor of weights. Positive for maximization and negative for minimization. Y: A `n x m`-dim tensor of observed outcomes, which are used for scaling the outcomes to [0,1] or [-1,0]. If `n=0`, then outcomes are left unnormalized. alpha: Parameter governing the influence of the weighted sum term. The default value comes from [Knowles2005]_. Returns: Transform function using the objective weights. Example: >>> weights = torch.tensor([0.75, -0.25]) >>> transform = get_aug_chebyshev_scalarization(weights, Y) """ # the chebyshev_obj assumes all objectives should be minimized, so # multiply Y by -1 Y = -Y if weights.shape != Y.shape[-1:]: raise BotorchTensorDimensionError( "weights must be an `m`-dim tensor where Y is `... x m`." f"Got shapes {weights.shape} and {Y.shape}." ) elif Y.ndim > 2: raise NotImplementedError("Batched Y is not currently supported.") def chebyshev_obj(Y: Tensor, X: Optional[Tensor] = None) -> Tensor: product = weights * Y return product.max(dim=-1).values + alpha * product.sum(dim=-1) # A boolean mask indicating if minimizing an objective minimize = weights < 0 if Y.shape[-2] == 0: if minimize.any(): raise UnsupportedError( "negative weights (for minimization) are only supported if " "Y is provided." ) # If there are no observations, we do not need to normalize the objectives def obj(Y: Tensor, X: Optional[Tensor] = None) -> Tensor: # multiply the scalarization by -1, so that the scalarization should # be maximized return -chebyshev_obj(Y=-Y) return obj if Y.shape[-2] == 1: # If there is only one observation, set the bounds to be # [min(Y_m), min(Y_m) + 1] for each objective m. 
This ensures we do not # divide by zero Y_bounds = torch.cat([Y, Y + 1], dim=0) else: # Set the bounds to be [min(Y_m), max(Y_m)], for each objective m Y_bounds = torch.stack([Y.min(dim=-2).values, Y.max(dim=-2).values]) def obj(Y: Tensor, X: Optional[Tensor] = None) -> Tensor: # scale to [0,1] Y_normalized = normalize(-Y, bounds=Y_bounds) # If minimizing an objective, convert Y_normalized values to [-1,0], # such that min(w*y) makes sense, we want all w*y's to be positive Y_normalized[..., minimize] = Y_normalized[..., minimize] - 1 # multiply the scalarization by -1, so that the scalarization should # be maximized return -chebyshev_obj(Y=Y_normalized) return obj
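# Hedged usage sketch for `get_chebyshev_scalarization`: build a scalarization from
# previously observed outcomes (used only to set the normalization bounds) and apply
# it to new candidate outcomes. The returned values are to be maximized, e.g. inside
# a q-ParEGO style loop. The import path is an assumption about the module location.
import torch
from botorch.utils.multi_objective.scalarization import (  # assumed import path
    get_chebyshev_scalarization,
)

torch.manual_seed(0)
weights = torch.tensor([0.75, 0.25])
Y_observed = torch.rand(10, 2)  # `n x m` outcomes used for scaling to [0, 1]
scalarize = get_chebyshev_scalarization(weights=weights, Y=Y_observed)

Y_candidates = torch.rand(4, 2)
values = scalarize(Y_candidates)  # one scalarized value per candidate; larger is better
assert values.shape == torch.Size([4])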
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Hypervolume Utilities. References .. [Fonseca2006] C. M. Fonseca, L. Paquete, and M. Lopez-Ibanez. An improved dimension-sweep algorithm for the hypervolume indicator. In IEEE Congress on Evolutionary Computation, pages 1157-1163, Vancouver, Canada, July 2006. .. [Ishibuchi2011] H. Ishibuchi, N. Akedo, and Y. Nojima. A many-objective test problem for visually examining diversity maintenance behavior in a decision space. Proc. 13th Annual Conf. Genetic Evol. Comput., 2011. """ from __future__ import annotations from typing import List, Optional import torch from botorch.exceptions.errors import BotorchError, BotorchTensorDimensionError from torch import Tensor MIN_Y_RANGE = 1e-7 def infer_reference_point( pareto_Y: Tensor, max_ref_point: Optional[Tensor] = None, scale: float = 0.1, scale_max_ref_point: bool = False, ) -> Tensor: r"""Get reference point for hypervolume computations. This sets the reference point to be `ref_point = nadir - scale * range` when there is no `pareto_Y` that is better than `max_ref_point`. If there's `pareto_Y` better than `max_ref_point`, the reference point will be set to `max_ref_point - scale * range` if `scale_max_ref_point` is true and to `max_ref_point` otherwise. [Ishibuchi2011]_ find 0.1 to be a robust multiplier for scaling the nadir point. Note: this assumes maximization of all objectives. Args: pareto_Y: A `n x m`-dim tensor of Pareto-optimal points. max_ref_point: A `m` dim tensor indicating the maximum reference point. Some elements can be NaN, except when `pareto_Y` is empty, in which case these dimensions will be treated as if no `max_ref_point` was provided and set to `nadir - scale * range`. scale: A multiplier used to scale back the reference point based on the range of each objective. scale_max_ref_point: A boolean indicating whether to apply scaling to the max_ref_point based on the range of each objective. Returns: A `m`-dim tensor containing the reference point. """ if pareto_Y.shape[0] == 0: if max_ref_point is None: raise BotorchError("Empty pareto set and no max ref point provided") if max_ref_point.isnan().any(): raise BotorchError("Empty pareto set and max ref point includes NaN.") if scale_max_ref_point: return max_ref_point - scale * max_ref_point.abs() return max_ref_point if max_ref_point is not None: non_nan_idx = ~max_ref_point.isnan() # Count all points exceeding non-NaN reference point as being better. better_than_ref = (pareto_Y[:, non_nan_idx] > max_ref_point[non_nan_idx]).all( dim=-1 ) else: non_nan_idx = torch.ones( pareto_Y.shape[-1], dtype=torch.bool, device=pareto_Y.device ) better_than_ref = torch.ones( pareto_Y.shape[:1], dtype=torch.bool, device=pareto_Y.device ) if max_ref_point is not None and better_than_ref.any() and non_nan_idx.all(): Y_range = pareto_Y[better_than_ref].max(dim=0).values - max_ref_point if scale_max_ref_point: return max_ref_point - scale * Y_range return max_ref_point elif pareto_Y.shape[0] == 1: # no points better than max_ref_point and only a single observation # subtract MIN_Y_RANGE to handle the case that pareto_Y is a singleton # with objective value of 0. 
Y_range = pareto_Y.abs().clamp_min(MIN_Y_RANGE).view(-1) ref_point = pareto_Y.view(-1) - scale * Y_range else: # no points better than max_ref_point and multiple observations # make sure that each dimension of the nadir point is no greater than # the max_ref_point nadir = pareto_Y.min(dim=0).values if max_ref_point is not None: nadir[non_nan_idx] = torch.min( nadir[non_nan_idx], max_ref_point[non_nan_idx] ) ideal = pareto_Y.max(dim=0).values # handle case where all values for one objective are the same Y_range = (ideal - nadir).clamp_min(MIN_Y_RANGE) ref_point = nadir - scale * Y_range # Set not-nan indices - if any - to max_ref_point. if non_nan_idx.any() and not non_nan_idx.all() and better_than_ref.any(): if scale_max_ref_point: ref_point[non_nan_idx] = (max_ref_point - scale * Y_range)[non_nan_idx] else: ref_point[non_nan_idx] = max_ref_point[non_nan_idx] return ref_point class Hypervolume: r"""Hypervolume computation dimension sweep algorithm from [Fonseca2006]_. Adapted from Simon Wessing's implementation of the algorithm (Variant 3, Version 1.2) in [Fonseca2006]_ in PyMOO: https://github.com/msu-coinlab/pymoo/blob/master/pymoo/vendor/hv.py Maximization is assumed. TODO: write this in C++ for faster looping. """ def __init__(self, ref_point: Tensor) -> None: r"""Initialize hypervolume object. Args: ref_point: `m`-dim Tensor containing the reference point. """ self.ref_point = ref_point @property def ref_point(self) -> Tensor: r"""Get reference point (for maximization). Returns: A `m`-dim tensor containing the reference point. """ return -self._ref_point @ref_point.setter def ref_point(self, ref_point: Tensor) -> None: r"""Set the reference point for maximization Args: ref_point: A `m`-dim tensor containing the reference point. """ self._ref_point = -ref_point def compute(self, pareto_Y: Tensor) -> float: r"""Compute hypervolume. Args: pareto_Y: A `n x m`-dim tensor of pareto optimal outcomes Returns: The hypervolume. """ if pareto_Y.shape[-1] != self._ref_point.shape[0]: raise BotorchTensorDimensionError( "pareto_Y must have the same number of objectives as ref_point. " f"Got {pareto_Y.shape[-1]}, expected {self._ref_point.shape[0]}." ) elif pareto_Y.ndim != 2: raise BotorchTensorDimensionError( f"pareto_Y must have exactly two dimensions, got {pareto_Y.ndim}." ) # This assumes maximization, but internally flips the sign of the pareto front # and the reference point and computes hypervolume for the minimization problem. pareto_Y = -pareto_Y better_than_ref = (pareto_Y <= self._ref_point).all(dim=-1) pareto_Y = pareto_Y[better_than_ref] # shift the pareto front so that reference point is all zeros pareto_Y = pareto_Y - self._ref_point self._initialize_multilist(pareto_Y) bounds = torch.full_like(self._ref_point, float("-inf")) return self._hv_recursive( i=self._ref_point.shape[0] - 1, n_pareto=pareto_Y.shape[0], bounds=bounds ) def _hv_recursive(self, i: int, n_pareto: int, bounds: Tensor) -> float: r"""Recursive method for hypervolume calculation. This assumes minimization (internally). In contrast to the paper, this code assumes that the reference point is the origin. This enables pruning a few operations. Args: i: objective index n_pareto: number of pareto points bounds: objective bounds Returns: The hypervolume. 
""" hvol = torch.tensor(0.0, dtype=bounds.dtype, device=bounds.device) sentinel = self.list.sentinel if n_pareto == 0: # base case: one dimension return hvol.item() elif i == 0: # base case: one dimension return -sentinel.next[0].data[0].item() elif i == 1: # two dimensions, end recursion q = sentinel.next[1] h = q.data[0] p = q.next[1] while p is not sentinel: hvol += h * (q.data[1] - p.data[1]) if p.data[0] < h: h = p.data[0] q = p p = q.next[1] hvol += h * q.data[1] return hvol.item() else: p = sentinel q = p.prev[i] while q.data is not None: if q.ignore < i: q.ignore = 0 q = q.prev[i] q = p.prev[i] while n_pareto > 1 and ( q.data[i] > bounds[i] or q.prev[i].data[i] >= bounds[i] ): p = q self.list.remove(p, i, bounds) q = p.prev[i] n_pareto -= 1 q_prev = q.prev[i] if n_pareto > 1: hvol = q_prev.volume[i] + q_prev.area[i] * (q.data[i] - q_prev.data[i]) else: q.area[0] = 1 q.area[1 : i + 1] = q.area[:i] * -(q.data[:i]) q.volume[i] = hvol if q.ignore >= i: q.area[i] = q_prev.area[i] else: q.area[i] = self._hv_recursive(i - 1, n_pareto, bounds) if q.area[i] <= q_prev.area[i]: q.ignore = i while p is not sentinel: p_data = p.data[i] hvol += q.area[i] * (p_data - q.data[i]) bounds[i] = p_data self.list.reinsert(p, i, bounds) n_pareto += 1 q = p p = p.next[i] q.volume[i] = hvol if q.ignore >= i: q.area[i] = q.prev[i].area[i] else: q.area[i] = self._hv_recursive(i - 1, n_pareto, bounds) if q.area[i] <= q.prev[i].area[i]: q.ignore = i hvol -= q.area[i] * q.data[i] return hvol.item() def _initialize_multilist(self, pareto_Y: Tensor) -> None: r"""Sets up the multilist data structure needed for calculation. Note: this assumes minimization. Args: pareto_Y: A `n x m`-dim tensor of pareto optimal objectives. """ m = pareto_Y.shape[-1] nodes = [ Node(m=m, dtype=pareto_Y.dtype, device=pareto_Y.device, data=point) for point in pareto_Y ] self.list = MultiList(m=m, dtype=pareto_Y.dtype, device=pareto_Y.device) for i in range(m): sort_by_dimension(nodes, i) self.list.extend(nodes, i) def sort_by_dimension(nodes: List[Node], i: int) -> None: r"""Sorts the list of nodes in-place by the specified objective. Args: nodes: A list of Nodes i: The index of the objective to sort by """ # build a list of tuples of (point[i], node) decorated = [(node.data[i], index, node) for index, node in enumerate(nodes)] # sort by this value decorated.sort() # write back to original list nodes[:] = [node for (_, _, node) in decorated] class Node: r"""Node in the MultiList data structure.""" def __init__( self, m: int, dtype: torch.dtype, device: torch.device, data: Optional[Tensor] = None, ) -> None: r"""Initialize MultiList. Args: m: The number of objectives dtype: The dtype device: The device data: The tensor data to be stored in this Node. """ self.data = data self.next = [None] * m self.prev = [None] * m self.ignore = 0 self.area = torch.zeros(m, dtype=dtype, device=device) self.volume = torch.zeros_like(self.area) class MultiList: r"""A special data structure used in hypervolume computation. It consists of several doubly linked lists that share common nodes. Every node has multiple predecessors and successors, one in every list. """ def __init__(self, m: int, dtype: torch.dtype, device: torch.device) -> None: r"""Initialize `m` doubly linked lists. 
Args: m: number of doubly linked lists dtype: the dtype device: the device """ self.m = m self.sentinel = Node(m=m, dtype=dtype, device=device) self.sentinel.next = [self.sentinel] * m self.sentinel.prev = [self.sentinel] * m def append(self, node: Node, index: int) -> None: r"""Appends a node to the end of the list at the given index. Args: node: the new node index: the index where the node should be appended. """ last = self.sentinel.prev[index] node.next[index] = self.sentinel node.prev[index] = last # set the last element as the new one self.sentinel.prev[index] = node last.next[index] = node def extend(self, nodes: List[Node], index: int) -> None: r"""Extends the list at the given index with the nodes. Args: nodes: list of nodes to append at the given index. index: the index where the nodes should be appended. """ for node in nodes: self.append(node=node, index=index) def remove(self, node: Node, index: int, bounds: Tensor) -> Node: r"""Removes and returns 'node' from all lists in [0, 'index']. Args: node: The node to remove index: The upper bound on the range of indices bounds: A `2 x m`-dim tensor bounds on the objectives """ for i in range(index): predecessor = node.prev[i] successor = node.next[i] predecessor.next[i] = successor successor.prev[i] = predecessor bounds.data = torch.min(bounds, node.data) return node def reinsert(self, node: Node, index: int, bounds: Tensor) -> None: r"""Re-inserts the node at its original position. Re-inserts the node at its original position in all lists in [0, 'index'] before it was removed. This method assumes that the next and previous nodes of the node that is reinserted are in the list. Args: node: The node index: The upper bound on the range of indices bounds: A `2 x m`-dim tensor bounds on the objectives """ for i in range(index): node.prev[i].next[i] = node node.next[i].prev[i] = node bounds.data = torch.min(bounds, node.data)
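# Hedged usage sketch for the hypervolume utilities above. For the toy Pareto front
# {(1, 3), (2, 2), (3, 1)} with reference point (0, 0) under maximization, the
# dominated region is a union of boxes with total area 3 + 2 + 1 = 6. The import
# path is an assumption about the module location.
import torch
from botorch.utils.multi_objective.hypervolume import (  # assumed import path
    Hypervolume,
    infer_reference_point,
)

pareto_Y = torch.tensor([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]], dtype=torch.double)
hv = Hypervolume(ref_point=torch.zeros(2, dtype=torch.double))
assert abs(hv.compute(pareto_Y=pareto_Y) - 6.0) < 1e-12

# With no `max_ref_point`, the inferred reference point backs the nadir (1, 1) off
# by 0.1 times the objective ranges (2, 2), giving (0.8, 0.8).
ref = infer_reference_point(pareto_Y=pareto_Y)
assert torch.allclose(ref, torch.tensor([0.8, 0.8], dtype=torch.double))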
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Box decomposition algorithms. References .. [Lacour17] R. Lacour, K. Klamroth, C. Fonseca. A box decomposition algorithm to compute the hypervolume indicator. Computers & Operations Research, Volume 79, 2017. """ from __future__ import annotations from abc import ABC, abstractmethod from typing import Optional import torch from botorch.exceptions.errors import BotorchError from botorch.utils.multi_objective.box_decompositions.utils import ( _expand_ref_point, _pad_batch_pareto_frontier, update_local_upper_bounds_incremental, ) from botorch.utils.multi_objective.pareto import is_non_dominated from torch import Tensor from torch.nn import Module class BoxDecomposition(Module, ABC): r"""An abstract class for box decompositions. Note: Internally, we store the negative reference point (minimization). :meta private: """ def __init__( self, ref_point: Tensor, sort: bool, Y: Optional[Tensor] = None ) -> None: """Initialize BoxDecomposition. Args: ref_point: A `m`-dim tensor containing the reference point. sort: A boolean indicating whether to sort the Pareto frontier. Y: A `(batch_shape) x n x m`-dim tensor of outcomes. """ super().__init__() self._neg_ref_point = -ref_point self.sort = torch.tensor(sort, dtype=torch.bool) self.num_outcomes = ref_point.shape[-1] self.register_buffer("hypercell_bounds", None) if Y is not None: if Y.isnan().any(): raise ValueError( "NaN inputs are not supported. Got Y with " f"{Y.isnan().sum()} NaN values." ) self._neg_Y = -Y self._validate_inputs() self._neg_pareto_Y = self._compute_pareto_Y() self.partition_space() else: self._neg_Y = None self._neg_pareto_Y = None @property def pareto_Y(self) -> Tensor: r"""This returns the non-dominated set. Returns: A `n_pareto x m`-dim tensor of outcomes. """ if self._neg_pareto_Y is not None: return -self._neg_pareto_Y raise BotorchError("pareto_Y has not been initialized") @property def ref_point(self) -> Tensor: r"""Get the reference point. Returns: A `m`-dim tensor of outcomes. """ return -self._neg_ref_point @property def Y(self) -> Tensor: r"""Get the raw outcomes. Returns: A `n x m`-dim tensor of outcomes. """ if self._neg_Y is not None: return -self._neg_Y raise BotorchError("Y data has not been initialized") def _compute_pareto_Y(self) -> Tensor: if self._neg_Y is None: raise BotorchError("Y data has not been initialized") # is_non_dominated assumes maximization if self._neg_Y.shape[-2] == 0: return self._neg_Y # assumes maximization pareto_Y = -_pad_batch_pareto_frontier( Y=self.Y, ref_point=_expand_ref_point( ref_point=self.ref_point, batch_shape=self.batch_shape ), ) if not self.sort: return pareto_Y # sort by first objective if len(self.batch_shape) > 0: pareto_Y = pareto_Y.gather( index=torch.argsort(pareto_Y[..., :1], dim=-2).expand(pareto_Y.shape), dim=-2, ) else: pareto_Y = pareto_Y[torch.argsort(pareto_Y[:, 0])] return pareto_Y def _reset_pareto_Y(self) -> bool: r"""Update the non-dominated front. Returns: A boolean indicating whether the Pareto frontier has changed. 
""" pareto_Y = self._compute_pareto_Y() if (self._neg_pareto_Y is None) or not torch.equal( pareto_Y, self._neg_pareto_Y ): self._neg_pareto_Y = pareto_Y return True return False def partition_space(self) -> None: r"""Compute box decomposition.""" if self.num_outcomes == 2: try: self._partition_space_2d() except NotImplementedError: self._partition_space() else: self._partition_space() def _partition_space_2d(self) -> None: r"""Compute box decomposition for 2 objectives.""" raise NotImplementedError @abstractmethod def _partition_space(self) -> None: r"""Partition the non-dominated space into disjoint hypercells. This method supports an arbitrary number of outcomes, but is less efficient than `partition_space_2d` for the 2-outcome case. """ @abstractmethod def get_hypercell_bounds(self) -> Tensor: r"""Get the bounds of each hypercell in the decomposition. Returns: A `2 x num_cells x num_outcomes`-dim tensor containing the lower and upper vertices bounding each hypercell. """ def _update_neg_Y(self, Y: Tensor) -> bool: r"""Update the set of outcomes. Returns: A boolean indicating if _neg_Y was initialized. """ if Y.isnan().any(): raise ValueError( "NaN inputs are not supported. Got Y with " f"{Y.isnan().sum()} NaN values." ) # multiply by -1, since internally we minimize. if self._neg_Y is not None: self._neg_Y = torch.cat([self._neg_Y, -Y], dim=-2) return False self._neg_Y = -Y return True def update(self, Y: Tensor) -> None: r"""Update non-dominated front and decomposition. By default, the partitioning is recomputed. Subclasses can override this functionality. Args: Y: A `(batch_shape) x n x m`-dim tensor of new, incremental outcomes. """ self._update_neg_Y(Y=Y) self.reset() def _validate_inputs(self) -> None: self.batch_shape = self.Y.shape[:-2] self.num_outcomes = self.Y.shape[-1] if len(self.batch_shape) > 1: raise NotImplementedError( f"{type(self).__name__} only supports a single " f"batch dimension, but got {len(self.batch_shape)} " "batch dimensions." ) elif len(self.batch_shape) > 0 and self.num_outcomes > 2: raise NotImplementedError( f"{type(self).__name__} only supports a batched box " f"decompositions in the 2-objective setting." ) def reset(self) -> None: r"""Reset non-dominated front and decomposition.""" self._validate_inputs() is_new_pareto = self._reset_pareto_Y() # Update decomposition if the Pareto front changed if is_new_pareto: self.partition_space() @abstractmethod def _compute_hypervolume_if_y_has_data(self) -> Tensor: """Compute hypervolume for the case that there is data in self._neg_pareto_Y.""" def compute_hypervolume(self) -> Tensor: r"""Compute hypervolume that is dominated by the Pareto Froniter. Returns: A `(batch_shape)`-dim tensor containing the hypervolume dominated by each Pareto frontier. """ if self._neg_pareto_Y is None: return torch.tensor(0.0) if self._neg_pareto_Y.shape[-2] == 0: return torch.zeros( self._neg_pareto_Y.shape[:-2], dtype=self._neg_pareto_Y.dtype, device=self._neg_pareto_Y.device, ) return self._compute_hypervolume_if_y_has_data() class FastPartitioning(BoxDecomposition, ABC): r"""A class for partitioning the (non-)dominated space into hyper-cells. Note: this assumes maximization. Internally, it multiplies outcomes by -1 and performs the decomposition under minimization. This class is abstract to support to two applications of Alg 1 from [Lacour17]_: 1) partitioning the space that is dominated by the Pareto frontier and 2) partitioning the space that is not dominated by the Pareto frontier. 
:meta private: """ def __init__( self, ref_point: Tensor, Y: Optional[Tensor] = None, ) -> None: """ Args: ref_point: A `m`-dim tensor containing the reference point. Y: A `(batch_shape) x n x m`-dim tensor """ super().__init__(ref_point=ref_point, Y=Y, sort=ref_point.shape[-1] == 2) def update(self, Y: Tensor) -> None: r"""Update non-dominated front and decomposition. Args: Y: A `(batch_shape) x n x m`-dim tensor of new, incremental outcomes. """ if self._update_neg_Y(Y=Y): self.reset() else: if self.num_outcomes == 2 or self._neg_pareto_Y.shape[-2] == 0: # If there are two objective, recompute the box decomposition # because the partitions can be computed analytically. # If the current pareto set has no points, recompute the box # decomposition. self.reset() else: # only include points that are better than the reference point better_than_ref = (Y > self.ref_point).all(dim=-1) Y = Y[better_than_ref] Y_all = torch.cat([self._neg_pareto_Y, -Y], dim=-2) pareto_mask = is_non_dominated(-Y_all) # determine the number of points in Y that are Pareto optimal num_new_pareto = pareto_mask[-Y.shape[-2] :].sum() self._neg_pareto_Y = Y_all[pareto_mask] if num_new_pareto > 0: # update local upper bounds for the minimization problem self._U, self._Z = update_local_upper_bounds_incremental( # this assumes minimization new_pareto_Y=self._neg_pareto_Y[-num_new_pareto:], U=self._U, Z=self._Z, ) # use the negative local upper bounds as the new pareto # frontier for the minimization problem and perform # box decomposition on dominated space. self._get_partitioning() @abstractmethod def _get_single_cell(self) -> None: r"""Set the partitioning to be a single cell in the case of no Pareto points. This method should set self.hypercell_bounds """ pass # pragma: no cover def partition_space(self) -> None: if self._neg_pareto_Y.shape[-2] == 0: self._get_single_cell() else: super().partition_space() def _partition_space(self): r"""Partition the non-dominated space into disjoint hypercells. This method supports an arbitrary number of outcomes, but is less efficient than `partition_space_2d` for the 2-outcome case. """ if len(self.batch_shape) > 0: # this could be triggered when m=2 outcomes and # BoxDecomposition._partition_space_2d is not overridden. raise NotImplementedError( "_partition_space does not support batch dimensions." ) # this assumes minimization # initialize local upper bounds self.register_buffer("_U", self._neg_ref_point.unsqueeze(-2).clone()) # initialize defining points to be the dummy points \hat{z} that are # defined in Sec 2.1 in [Lacour17]_. Note that in [Lacour17]_, outcomes # are assumed to be between [0,1], so they used 0 rather than -inf. self._Z = torch.zeros( 1, self.num_outcomes, self.num_outcomes, dtype=self.Y.dtype, device=self.Y.device, ) for j in range(self.ref_point.shape[-1]): # use ref point for maximization as the ideal point for minimization. self._Z[0, j] = float("-inf") self._Z[0, j, j] = self._U[0, j] # incrementally update local upper bounds and defining points # for each new Pareto point self._U, self._Z = update_local_upper_bounds_incremental( new_pareto_Y=self._neg_pareto_Y, U=self._U, Z=self._Z, ) self._get_partitioning() @abstractmethod def _get_partitioning(self) -> None: r"""Compute partitioning given local upper bounds for the minimization problem. This method should set self.hypercell_bounds """ pass # pragma: no cover def get_hypercell_bounds(self) -> Tensor: r"""Get the bounds of each hypercell in the decomposition. 
Returns: A `2 x (batch_shape) x num_cells x m`-dim tensor containing the lower and upper vertices bounding each hypercell. """ return self.hypercell_bounds
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.utils.multi_objective.box_decompositions.box_decomposition_list import ( # noqa E501 BoxDecompositionList, ) from botorch.utils.multi_objective.box_decompositions.dominated import ( DominatedPartitioning, ) from botorch.utils.multi_objective.box_decompositions.non_dominated import ( FastNondominatedPartitioning, NondominatedPartitioning, ) from botorch.utils.multi_objective.box_decompositions.utils import ( compute_dominated_hypercell_bounds_2d, compute_non_dominated_hypercell_bounds_2d, ) __all__ = [ "compute_dominated_hypercell_bounds_2d", "compute_non_dominated_hypercell_bounds_2d", "BoxDecompositionList", "DominatedPartitioning", "FastNondominatedPartitioning", "NondominatedPartitioning", ]
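# Hedged usage sketch tying the abstract box decompositions above to the concrete
# classes re-exported here: `DominatedPartitioning` decomposes the region dominated
# by the Pareto front, so its hypervolume should match the `Hypervolume` utility
# (6.0 for the toy front below). The constructor is assumed to follow the
# `FastPartitioning.__init__(ref_point, Y)` signature shown earlier.
import torch
from botorch.utils.multi_objective.box_decompositions import DominatedPartitioning

Y = torch.tensor([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]], dtype=torch.double)
bd = DominatedPartitioning(ref_point=torch.zeros(2, dtype=torch.double), Y=Y)

cell_bounds = bd.get_hypercell_bounds()  # `2 x num_cells x m` lower/upper vertices
assert cell_bounds.shape[0] == 2 and cell_bounds.shape[-1] == 2
assert torch.isclose(bd.compute_hypervolume(), torch.tensor(6.0, dtype=torch.double))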
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Utilities for box decomposition algorithms.""" from typing import Optional, Tuple import torch from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError from botorch.utils.multi_objective.pareto import is_non_dominated from torch import Size, Tensor def _expand_ref_point(ref_point: Tensor, batch_shape: Size) -> Tensor: r"""Expand reference point to the proper batch_shape. Args: ref_point: A `(batch_shape) x m`-dim tensor containing the reference point. batch_shape: The batch shape. Returns: A `batch_shape x m`-dim tensor containing the expanded reference point """ if ref_point.shape[:-1] != batch_shape: if ref_point.ndim > 1: raise BotorchTensorDimensionError( "Expected ref_point to be a `batch_shape x m` or `m`-dim tensor, " f"but got {ref_point.shape}." ) ref_point = ref_point.view( *(1 for _ in batch_shape), ref_point.shape[-1] ).expand(batch_shape + ref_point.shape[-1:]) return ref_point def _pad_batch_pareto_frontier( Y: Tensor, ref_point: Tensor, is_pareto: bool = False, feasibility_mask: Optional[Tensor] = None, ) -> Tensor: r"""Get a batch Pareto frontier by padding the pareto frontier with repeated points. This assumes maximization. Args: Y: A `(batch_shape) x n x m`-dim tensor of points ref_point: a `(batch_shape) x m`-dim tensor containing the reference point is_pareto: a boolean indicating whether the points in Y are already non-dominated. feasibility_mask: A `(batch_shape) x n`-dim tensor of booleans indicating whether each point is feasible. Returns: A `(batch_shape) x max_num_pareto x m`-dim tensor of padded Pareto frontiers. """ tkwargs = {"dtype": Y.dtype, "device": Y.device} ref_point = ref_point.unsqueeze(-2) batch_shape = Y.shape[:-2] if len(batch_shape) > 1: raise UnsupportedError( "_pad_batch_pareto_frontier only supports a single " f"batch dimension, but got {len(batch_shape)} " "batch dimensions." ) if feasibility_mask is not None: # set infeasible points to be the reference point (corresponding to the batch) Y = torch.where(feasibility_mask.unsqueeze(-1), Y, ref_point) if not is_pareto: pareto_mask = is_non_dominated(Y) else: pareto_mask = torch.ones(Y.shape[:-1], dtype=torch.bool, device=Y.device) better_than_ref = (Y > ref_point).all(dim=-1) # is_non_dominated assumes maximization # TODO: filter out points that are worse than the reference point first here pareto_mask = pareto_mask & better_than_ref if len(batch_shape) == 0: return Y[pareto_mask] # Note: in the batch case, the Pareto frontier is padded by repeating # a Pareto point. This ensures that the padded box-decomposition has # the same number of points, which enables fast batch operations. max_n_pareto = pareto_mask.sum(dim=-1).max().item() pareto_Y = torch.empty(*batch_shape, max_n_pareto, Y.shape[-1], **tkwargs) for i, pareto_i in enumerate(pareto_mask): pareto_i = Y[i, pareto_mask[i]] n_pareto = pareto_i.shape[0] if n_pareto > 0: pareto_Y[i, :n_pareto] = pareto_i # pad pareto_Y, so that all batches have the same size Pareto set pareto_Y[i, n_pareto:] = pareto_i[-1] else: # if there are no pareto points in this batch, use the reference # point pareto_Y[i, :] = ref_point[i] return pareto_Y def compute_local_upper_bounds( U: Tensor, Z: Tensor, z: Tensor ) -> Tuple[Tensor, Tensor]: r"""Compute local upper bounds. Note: this assumes minimization. 
This uses the incremental algorithm (Alg. 1) from [Lacour17]_. Args: U: A `n x m`-dim tensor containing the local upper bounds. Z: A `n x m x m`-dim tensor containing the defining points. z: A `m`-dim tensor containing the new point. Returns: 2-element tuple containing: - A new `n' x m`-dim tensor local upper bounds. - A `n' x m x m`-dim tensor containing the defining points. """ num_outcomes = U.shape[-1] z_dominates_U = (U > z).all(dim=-1) # Select upper bounds that are dominated by z. # These are the search zones that contain z. if not z_dominates_U.any(): return U, Z A = U[z_dominates_U] A_Z = Z[z_dominates_U] P = [] P_Z = [] mask = torch.ones(num_outcomes, dtype=torch.bool, device=U.device) for j in range(num_outcomes): mask[j] = 0 z_uj_max = A_Z[:, mask, j].max(dim=-1).values.view(-1) add_z = z[j] >= z_uj_max if add_z.any(): u_j = A[add_z].clone() u_j[:, j] = z[j] P.append(u_j) A_Z_filtered = A_Z[add_z] Z_ku = A_Z_filtered[:, mask] lt_zj = Z_ku[..., j] <= z[j] P_uj = torch.zeros( u_j.shape[0], num_outcomes, num_outcomes, dtype=U.dtype, device=U.device ) P_uj[:, mask] = Z_ku[lt_zj].view(P_uj.shape[0], num_outcomes - 1, -1) P_uj[:, ~mask] = z P_Z.append(P_uj) mask[j] = 1 # filter out elements of U that are in A not_z_dominates_U = ~z_dominates_U U = U[not_z_dominates_U] # remaining indices Z = Z[not_z_dominates_U] if len(P) > 0: # add points from P_Z Z = torch.cat([Z, *P_Z], dim=0) # return elements in P or elements in (U that are not in A) U = torch.cat([U, *P], dim=-2) return U, Z def get_partition_bounds(Z: Tensor, U: Tensor, ref_point: Tensor) -> Tensor: r"""Get the cell bounds given the local upper bounds and the defining points. This implements Equation 2 in [Lacour17]_. Args: Z: A `n x m x m`-dim tensor containing the defining points. The first dimension corresponds to u_idx, the second dimension corresponds to j, and Z[u_idx, j] is the set of definining points Z^j(u) where u = U[u_idx]. U: A `n x m`-dim tensor containing the local upper bounds. ref_point: A `m`-dim tensor containing the reference point. Returns: A `2 x num_cells x m`-dim tensor containing the lower and upper vertices bounding each hypercell. """ bounds = torch.empty(2, U.shape[0], U.shape[-1], dtype=U.dtype, device=U.device) for u_idx in range(U.shape[0]): # z_1^1(u) bounds[0, u_idx, 0] = Z[u_idx, 0, 0] # z_1^r(u) bounds[1, u_idx, 0] = ref_point[0] for j in range(1, U.shape[-1]): bounds[0, u_idx, j] = Z[u_idx, :j, j].max() bounds[1, u_idx, j] = U[u_idx, j] # remove empty partitions # Note: the equality will evaluate as True if the lower and upper bound # are both (-inf), which could happen if the reference point is -inf. empty = (bounds[1] <= bounds[0]).any(dim=-1) return bounds[:, ~empty] def update_local_upper_bounds_incremental( new_pareto_Y: Tensor, U: Tensor, Z: Tensor ) -> Tuple[Tensor, Tensor]: r"""Update the current local upper with the new pareto points. This assumes minimization. Args: new_pareto_Y: A `n x m`-dim tensor containing the new Pareto points. U: A `n' x m`-dim tensor containing the local upper bounds. Z: A `n x m x m`-dim tensor containing the defining points. Returns: 2-element tuple containing: - A new `n' x m`-dim tensor local upper bounds. 
- A `n' x m x m`-dim tensor containing the defining points """ for i in range(new_pareto_Y.shape[-2]): U, Z = compute_local_upper_bounds(U=U, Z=Z, z=new_pareto_Y[i]) return U, Z def compute_non_dominated_hypercell_bounds_2d( pareto_Y_sorted: Tensor, ref_point: Tensor ) -> Tensor: r"""Compute an axis-aligned partitioning of the non-dominated space for 2 objectives. Args: pareto_Y_sorted: A `(batch_shape) x n_pareto x 2`-dim tensor of pareto outcomes that are sorted by the 0th dimension in increasing order. All points must be better than the reference point. ref_point: A `(batch_shape) x 2`-dim reference point. Returns: A `2 x (batch_shape) x n_pareto + 1 x m`-dim tensor of cell bounds. """ # add boundary point to each front # the boundary point is the extreme value in each outcome # (a single coordinate of reference point) batch_shape = pareto_Y_sorted.shape[:-2] if ref_point.ndim == pareto_Y_sorted.ndim - 1: expanded_boundary_point = ref_point.unsqueeze(-2) else: view_shape = torch.Size([1] * len(batch_shape)) + torch.Size([1, 2]) expanded_shape = batch_shape + torch.Size([1, 2]) expanded_boundary_point = ref_point.view(view_shape).expand(expanded_shape) # add the points (ref, y) and (x, ref) to the corresponding ends pareto_Y_sorted0, pareto_Y_sorted1 = torch.split(pareto_Y_sorted, 1, dim=-1) expanded_boundary_point0, expanded_boundary_point1 = torch.split( expanded_boundary_point, 1, dim=-1 ) left_end = torch.cat( [expanded_boundary_point0[..., :1, :], pareto_Y_sorted1[..., :1, :]], dim=-1 ) right_end = torch.cat( [pareto_Y_sorted0[..., -1:, :], expanded_boundary_point1[..., :1, :]], dim=-1 ) front = torch.cat([left_end, pareto_Y_sorted, right_end], dim=-2) # The top left corners of axis-aligned rectangles in dominated partitioning. # These are the bottom left corners of the non-dominated partitioning front0, front1 = torch.split(front, 1, dim=-1) bottom_lefts = torch.cat([front0[..., :-1, :], front1[..., 1:, :]], dim=-1) top_right_xs = torch.cat( [ front0[..., 1:-1, :], torch.full( bottom_lefts.shape[:-2] + torch.Size([1, 1]), float("inf"), dtype=front.dtype, device=front.device, ), ], dim=-2, ) top_rights = torch.cat( [ top_right_xs, torch.full( bottom_lefts.shape[:-1] + torch.Size([1]), float("inf"), dtype=front.dtype, device=front.device, ), ], dim=-1, ) return torch.stack([bottom_lefts, top_rights], dim=0) def compute_dominated_hypercell_bounds_2d( pareto_Y_sorted: Tensor, ref_point: Tensor ) -> Tensor: r"""Compute an axis-aligned partitioning of the dominated space for 2-objectives. Args: pareto_Y_sorted: A `(batch_shape) x n_pareto x 2`-dim tensor of pareto outcomes that are sorted by the 0th dimension in increasing order. ref_point: A `2`-dim reference point. Returns: A `2 x (batch_shape) x n_pareto x m`-dim tensor of cell bounds. 
""" # add boundary point to each front # the boundary point is the extreme value in each outcome # (a single coordinate of reference point) batch_shape = pareto_Y_sorted.shape[:-2] if ref_point.ndim == pareto_Y_sorted.ndim - 1: expanded_boundary_point = ref_point.unsqueeze(-2) else: view_shape = torch.Size([1] * len(batch_shape)) + torch.Size([1, 2]) expanded_shape = batch_shape + torch.Size([1, 2]) expanded_boundary_point = ref_point.view(view_shape).expand(expanded_shape) # add the points (ref, y) and (x, ref) to the corresponding ends pareto_Y_sorted0, pareto_Y_sorted1 = torch.split(pareto_Y_sorted, 1, dim=-1) expanded_boundary_point0, expanded_boundary_point1 = torch.split( expanded_boundary_point, 1, dim=-1 ) left_end = torch.cat( [expanded_boundary_point0[..., :1, :], pareto_Y_sorted0[..., :1, :]], dim=-1 ) right_end = torch.cat( [pareto_Y_sorted1[..., :1, :], expanded_boundary_point1[..., :1, :]], dim=-1 ) front = torch.cat([left_end, pareto_Y_sorted, right_end], dim=-2) # compute hypervolume by summing rectangles from min_x -> max_x top_rights = front[..., 1:-1, :] bottom_lefts = torch.cat( [ front[..., :-2, :1], expanded_boundary_point1.expand(*top_rights.shape[:-1], 1), ], dim=-1, ) return torch.stack([bottom_lefts, top_rights], dim=0)
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Algorithms for partitioning the dominated space into hyperrectangles.""" from __future__ import annotations from botorch.utils.multi_objective.box_decompositions.box_decomposition import ( FastPartitioning, ) from botorch.utils.multi_objective.box_decompositions.utils import ( compute_dominated_hypercell_bounds_2d, get_partition_bounds, ) from torch import Tensor class DominatedPartitioning(FastPartitioning): r"""Partition dominated space into axis-aligned hyperrectangles. This uses the Algorithm 1 from [Lacour17]_. Example: >>> bd = DominatedPartitioning(ref_point, Y) """ def _partition_space_2d(self) -> None: r"""Partition the non-dominated space into disjoint hypercells. This direct method works for `m=2` outcomes. """ cell_bounds = compute_dominated_hypercell_bounds_2d( # flip self.pareto_Y because it is sorted in decreasing order (since # self._pareto_Y was sorted in increasing order and we multiplied by -1) pareto_Y_sorted=self.pareto_Y.flip(-2), ref_point=self.ref_point, ) self.hypercell_bounds = cell_bounds def _get_partitioning(self) -> None: r"""Get the bounds of each hypercell in the decomposition.""" minimization_cell_bounds = get_partition_bounds( Z=self._Z, U=self._U, ref_point=self._neg_ref_point.view(-1) ) cell_bounds = -minimization_cell_bounds.flip(0) self.hypercell_bounds = cell_bounds def _compute_hypervolume_if_y_has_data(self) -> Tensor: r"""Compute hypervolume that is dominated by the Pareto Frontier. Returns: A `(batch_shape)`-dim tensor containing the hypervolume dominated by each Pareto frontier. """ return ( (self.hypercell_bounds[1] - self.hypercell_bounds[0]) .prod(dim=-1) .sum(dim=-1) ) def _get_single_cell(self) -> None: r"""Set the partitioning to be a single cell in the case of no Pareto points.""" # Set lower and upper bounds to be the reference point to define an empty cell cell_bounds = self.ref_point.expand( 2, *self._neg_pareto_Y.shape[:-2], 1, self.num_outcomes ).clone() self.hypercell_bounds = cell_bounds
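# --- Usage sketch (not part of the module above) ---
# Computing the hypervolume dominated by a 3-objective Pareto front with
# `DominatedPartitioning`, which exercises the m > 2 path based on Alg. 1 of
# [Lacour17]_; tensor values are illustrative only.
import torch

from botorch.utils.multi_objective.box_decompositions.dominated import (
    DominatedPartitioning,
)

ref_point = torch.zeros(3)
Y = torch.tensor([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0], [2.0, 3.0, 1.0]])
bd = DominatedPartitioning(ref_point=ref_point, Y=Y)
hv = bd.compute_hypervolume()  # tensor(12.) for these values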
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Box decomposition container.""" from __future__ import annotations from typing import List, Union import torch from botorch.exceptions.errors import BotorchTensorDimensionError from botorch.utils.multi_objective.box_decompositions.box_decomposition import ( BoxDecomposition, ) from torch import Tensor from torch.nn import Module, ModuleList class BoxDecompositionList(Module): r"""A list of box decompositions.""" def __init__(self, *box_decompositions: BoxDecomposition) -> None: r"""Initialize the box decomposition list. Args: *box_decompositions: An variable number of box decompositions Example: >>> bd1 = FastNondominatedPartitioning(ref_point, Y=Y1) >>> bd2 = FastNondominatedPartitioning(ref_point, Y=Y2) >>> bd = BoxDecompositionList(bd1, bd2) """ super().__init__() self.box_decompositions = ModuleList(box_decompositions) @property def pareto_Y(self) -> List[Tensor]: r"""This returns the non-dominated set. Note: Internally, we store the negative pareto set (minimization). Returns: A list where the ith element is the `n_pareto_i x m`-dim tensor of pareto optimal outcomes for each box_decomposition `i`. """ return [p.pareto_Y for p in self.box_decompositions] @property def ref_point(self) -> Tensor: r"""Get the reference point. Note: Internally, we store the negative reference point (minimization). Returns: A `n_box_decompositions x m`-dim tensor of outcomes. """ return torch.stack([p.ref_point for p in self.box_decompositions], dim=0) def get_hypercell_bounds(self) -> Tensor: r"""Get the bounds of each hypercell in the decomposition. Returns: A `2 x n_box_decompositions x num_cells x num_outcomes`-dim tensor containing the lower and upper vertices bounding each hypercell. """ bounds_list = [] max_num_cells = 0 for p in self.box_decompositions: bounds = p.get_hypercell_bounds() max_num_cells = max(max_num_cells, bounds.shape[-2]) bounds_list.append(bounds) # pad the decomposition with empty cells so that all # decompositions have the same number of cells for i, bounds in enumerate(bounds_list): num_missing = max_num_cells - bounds.shape[-2] if num_missing > 0: padding = torch.zeros( 2, num_missing, bounds.shape[-1], dtype=bounds.dtype, device=bounds.device, ) bounds_list[i] = torch.cat( [ bounds, padding, ], dim=-2, ) return torch.stack(bounds_list, dim=-3) def update(self, Y: Union[List[Tensor], Tensor]) -> None: r"""Update the partitioning. Args: Y: A `n_box_decompositions x n x num_outcomes`-dim tensor or a list where the ith element contains the new points for box_decomposition `i`. """ if ( torch.is_tensor(Y) and Y.ndim != 3 and Y.shape[0] != len(self.box_decompositions) ) or (isinstance(Y, List) and len(Y) != len(self.box_decompositions)): raise BotorchTensorDimensionError( "BoxDecompositionList.update requires either a batched tensor Y, " "with one batch per box decomposition or a list of tensors with " "one element per box decomposition." ) for i, p in enumerate(self.box_decompositions): p.update(Y[i]) def compute_hypervolume(self) -> Tensor: r"""Compute hypervolume that is dominated by the Pareto Froniter. Returns: A `(batch_shape)`-dim tensor containing the hypervolume dominated by each Pareto frontier. """ return torch.stack( [p.compute_hypervolume() for p in self.box_decompositions], dim=0 )
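# --- Usage sketch (not part of the module above) ---
# Maintaining one box decomposition per outcome set and batching their padded
# cell bounds via `BoxDecompositionList`; tensor values are illustrative only.
import torch

from botorch.utils.multi_objective.box_decompositions.box_decomposition_list import (
    BoxDecompositionList,
)
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    FastNondominatedPartitioning,
)

ref_point = torch.tensor([0.0, 0.0])
Y1 = torch.tensor([[1.0, 3.0], [3.0, 1.0]])
Y2 = torch.tensor([[2.0, 2.0]])

bd = BoxDecompositionList(
    FastNondominatedPartitioning(ref_point=ref_point, Y=Y1),
    FastNondominatedPartitioning(ref_point=ref_point, Y=Y2),
)
# Per-decomposition updates take a list (or a batched tensor) of new outcomes.
bd.update([torch.tensor([[2.5, 2.5]]), torch.tensor([[3.0, 0.5]])])
# 2 x n_box_decompositions x max_num_cells x m, zero-padded to a common size.
bounds = bd.get_hypercell_bounds()
hvs = bd.compute_hypervolume()  # one hypervolume per decomposition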
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Algorithms for partitioning the non-dominated space into rectangles. References .. [Couckuyt2012] I. Couckuyt, D. Deschrijver and T. Dhaene, "Towards Efficient Multiobjective Optimization: Multiobjective statistical criterions," 2012 IEEE Congress on Evolutionary Computation, Brisbane, QLD, 2012, pp. 1-8. """ from __future__ import annotations from typing import Optional import torch from botorch.utils.multi_objective.box_decompositions.box_decomposition import ( BoxDecomposition, FastPartitioning, ) from botorch.utils.multi_objective.box_decompositions.utils import ( _expand_ref_point, compute_non_dominated_hypercell_bounds_2d, get_partition_bounds, update_local_upper_bounds_incremental, ) from torch import Tensor class NondominatedPartitioning(BoxDecomposition): r"""A class for partitioning the non-dominated space into hyper-cells. Note: this assumes maximization. Internally, it multiplies outcomes by -1 and performs the decomposition under minimization. TODO: use maximization internally as well. Note: it is only feasible to use this algorithm to compute an exact decomposition of the non-dominated space for `m<5` objectives (alpha=0.0). The alpha parameter can be increased to obtain an approximate partitioning faster. The `alpha` is a fraction of the total hypervolume encapsuling the entire Pareto set. When a hypercell's volume divided by the total hypervolume is less than `alpha`, we discard the hypercell. See Figure 2 in [Couckuyt2012]_ for a visual representation. This PyTorch implementation of the binary partitioning algorithm ([Couckuyt2012]_) is adapted from numpy/tensorflow implementation at: https://github.com/GPflow/GPflowOpt/blob/master/gpflowopt/pareto.py. TODO: replace this with a more efficient decomposition. E.g. https://link.springer.com/content/pdf/10.1007/s10898-019-00798-7.pdf """ def __init__( self, ref_point: Tensor, Y: Optional[Tensor] = None, alpha: float = 0.0, ) -> None: """Initialize NondominatedPartitioning. Args: ref_point: A `m`-dim tensor containing the reference point. Y: A `(batch_shape) x n x m`-dim tensor. alpha: A thresold fraction of total volume used in an approximate decomposition. Example: >>> bd = NondominatedPartitioning(ref_point, Y=Y1) """ self.alpha = alpha super().__init__(ref_point=ref_point, sort=True, Y=Y) def _partition_space(self) -> None: r"""Partition the non-dominated space into disjoint hypercells. This method supports an arbitrary number of outcomes, but is less efficient than `partition_space_2d` for the 2-outcome case. """ # The binary parititoning algorithm uses indices the augmented Pareto front. # n_pareto + 2 x m aug_pareto_Y_idcs = self._get_augmented_pareto_front_indices() # Initialize one cell over entire pareto front cell = torch.zeros( 2, self.num_outcomes, dtype=torch.long, device=self._neg_Y.device ) cell[1] = aug_pareto_Y_idcs.shape[0] - 1 stack = [cell] # hypercells contains the indices of the (augmented) Pareto front # that specify that bounds of the each hypercell. 
# It is a `2 x num_cells x m`-dim tensor self.hypercells = torch.empty( 2, 0, self.num_outcomes, dtype=torch.long, device=self._neg_Y.device ) outcome_idxr = torch.arange( self.num_outcomes, dtype=torch.long, device=self._neg_Y.device ) # edge case: empty pareto set # use a single cell if self._neg_pareto_Y.shape[-2] == 0: # 2 x m cell_bounds_pareto_idcs = aug_pareto_Y_idcs[cell, outcome_idxr] self.hypercells = torch.cat( [self.hypercells, cell_bounds_pareto_idcs.unsqueeze(1)], dim=1 ) else: # Extend Pareto front with the ideal and anti-ideal point ideal_point = self._neg_pareto_Y.min(dim=0, keepdim=True).values - 1 anti_ideal_point = self._neg_pareto_Y.max(dim=0, keepdim=True).values + 1 # `n_pareto + 2 x m` aug_pareto_Y = torch.cat( [ideal_point, self._neg_pareto_Y, anti_ideal_point], dim=0 ) total_volume = (anti_ideal_point - ideal_point).prod() # Use binary partitioning while len(stack) > 0: # The following 3 tensors are all `2 x m` cell = stack.pop() cell_bounds_pareto_idcs = aug_pareto_Y_idcs[cell, outcome_idxr] cell_bounds_pareto_values = aug_pareto_Y[ cell_bounds_pareto_idcs, outcome_idxr ] # Check cell bounds # - if cell upper bound is better than Pareto front on all outcomes: # - accept the cell # - elif cell lower bound is better than Pareto front on all outcomes: # - this means the cell overlaps the Pareto front. Divide the cell # along its longest edge. if ( (cell_bounds_pareto_values[1] <= self._neg_pareto_Y) .any(dim=1) .all() ): # Cell is entirely non-dominated self.hypercells = torch.cat( [self.hypercells, cell_bounds_pareto_idcs.unsqueeze(1)], dim=1 ) elif ( (cell_bounds_pareto_values[0] <= self._neg_pareto_Y) .any(dim=1) .all() ): # The cell overlaps the pareto front # compute the distance (in integer indices) # This has shape `m` idx_dist = cell[1] - cell[0] any_not_adjacent = (idx_dist > 1).any() cell_volume = ( (cell_bounds_pareto_values[1] - cell_bounds_pareto_values[0]) .prod(dim=-1) .item() ) # Only divide a cell when it is not composed of adjacent indices # and the fraction of total volume is above the approximation # threshold fraction if ( any_not_adjacent and ((cell_volume / total_volume) > self.alpha).all() ): # Divide the test cell over its largest dimension # largest (by index length) length, longest_dim = torch.max(idx_dist, dim=0) length = length.item() longest_dim = longest_dim.item() new_length1 = int(round(length / 2.0)) new_length2 = length - new_length1 # Store divided cells # cell 1: subtract new_length1 from the upper bound of the cell # cell 2: add new_length2 to the lower bound of the cell for bound_idx, length_delta in ( (1, -new_length1), (0, new_length2), ): new_cell = cell.clone() new_cell[bound_idx, longest_dim] += length_delta stack.append(new_cell) def _partition_space_2d(self) -> None: r"""Partition the non-dominated space into disjoint hypercells. This direct method works for `m=2` outcomes. 
""" pf_ext_idx = self._get_augmented_pareto_front_indices() n_pf_plus_1 = self._neg_pareto_Y.shape[-2] + 1 view_shape = torch.Size([1] * len(self.batch_shape) + [n_pf_plus_1]) expand_shape = self.batch_shape + torch.Size([n_pf_plus_1]) range_pf_plus1 = torch.arange( n_pf_plus_1, dtype=torch.long, device=self._neg_pareto_Y.device ) range_pf_plus1_expanded = range_pf_plus1.view(view_shape).expand(expand_shape) lower = torch.stack( [range_pf_plus1_expanded, torch.zeros_like(range_pf_plus1_expanded)], dim=-1 ) upper = torch.stack( [1 + range_pf_plus1_expanded, pf_ext_idx[..., -range_pf_plus1 - 1, -1]], dim=-1, ) # 2 x batch_shape x n_cells x 2 self.hypercells = torch.stack([lower, upper], dim=0) def _get_augmented_pareto_front_indices(self) -> Tensor: r"""Get indices of augmented Pareto front.""" pf_idx = torch.argsort(self._neg_pareto_Y, dim=-2) return torch.cat( [ torch.zeros( *self.batch_shape, 1, self.num_outcomes, dtype=torch.long, device=self._neg_Y.device, ), # Add 1 because index zero is used for the ideal point pf_idx + 1, torch.full( torch.Size( [ *self.batch_shape, 1, self.num_outcomes, ] ), self._neg_pareto_Y.shape[-2] + 1, dtype=torch.long, device=self._neg_Y.device, ), ], dim=-2, ) def get_hypercell_bounds(self) -> Tensor: r"""Get the bounds of each hypercell in the decomposition. Args: ref_point: A `(batch_shape) x m`-dim tensor containing the reference point. Returns: A `2 x num_cells x m`-dim tensor containing the lower and upper vertices bounding each hypercell. """ ref_point = _expand_ref_point( ref_point=self.ref_point, batch_shape=self.batch_shape ) aug_pareto_Y = torch.cat( [ # -inf is the lower bound of the non-dominated space torch.full( torch.Size( [ *self.batch_shape, 1, self.num_outcomes, ] ), float("-inf"), dtype=self._neg_pareto_Y.dtype, device=self._neg_pareto_Y.device, ), self._neg_pareto_Y, # note: internally, this class minimizes, so use negative here -(ref_point.unsqueeze(-2)), ], dim=-2, ) minimization_cell_bounds = self._get_hypercell_bounds(aug_pareto_Y=aug_pareto_Y) # swap upper and lower bounds and multiply by -1 return -minimization_cell_bounds.flip(0) def _get_hypercell_bounds(self, aug_pareto_Y: Tensor) -> Tensor: r"""Get the bounds of each hypercell in the decomposition. Args: aug_pareto_Y: A `n_pareto + 2 x m`-dim tensor containing the augmented Pareto front. Returns: A `2 x (batch_shape) x num_cells x m`-dim tensor containing the lower and upper vertices bounding each hypercell. 
""" num_cells = self.hypercells.shape[-2] cells_times_outcomes = num_cells * self.num_outcomes outcome_idxr = ( torch.arange(self.num_outcomes, dtype=torch.long, device=self._neg_Y.device) .repeat(num_cells) .view( *(1 for _ in self.hypercells.shape[:-2]), cells_times_outcomes, ) .expand(*self.hypercells.shape[:-2], cells_times_outcomes) ) # this tensor is 2 x (num_cells * m) x 2 # the batch dim corresponds to lower/upper bound cell_bounds_idxr = torch.stack( [ self.hypercells.view(*self.hypercells.shape[:-2], -1), outcome_idxr, ], dim=-1, ).view(2, -1, 2) if len(self.batch_shape) > 0: # TODO: support multiple batch dimensions here batch_idxr = ( torch.arange( self.batch_shape[0], dtype=torch.long, device=self._neg_Y.device ) .unsqueeze(1) .expand(-1, cells_times_outcomes) .reshape(1, -1, 1) .expand(2, -1, 1) ) cell_bounds_idxr = torch.cat([batch_idxr, cell_bounds_idxr], dim=-1) cell_bounds_values = aug_pareto_Y[ cell_bounds_idxr.chunk(cell_bounds_idxr.shape[-1], dim=-1) ] view_shape = (2, *self.batch_shape, num_cells, self.num_outcomes) return cell_bounds_values.view(view_shape) def _compute_hypervolume_if_y_has_data(self) -> Tensor: ref_point = _expand_ref_point( ref_point=self.ref_point, batch_shape=self.batch_shape ) # internally we minimize ref_point = -ref_point.unsqueeze(-2) ideal_point = self._neg_pareto_Y.min(dim=-2, keepdim=True).values aug_pareto_Y = torch.cat([ideal_point, self._neg_pareto_Y, ref_point], dim=-2) cell_bounds_values = self._get_hypercell_bounds(aug_pareto_Y=aug_pareto_Y) total_volume = (ref_point - ideal_point).squeeze(-2).prod(dim=-1) non_dom_volume = ( (cell_bounds_values[1] - cell_bounds_values[0]).prod(dim=-1).sum(dim=-1) ) return total_volume - non_dom_volume class FastNondominatedPartitioning(FastPartitioning): r"""A class for partitioning the non-dominated space into hyper-cells. Note: this assumes maximization. Internally, it multiplies by -1 and performs the decomposition under minimization. This class is far more efficient than NondominatedPartitioning for exact box partitionings This class uses the two-step approach similar to that in [Yang2019]_, where: a) first, Alg 1 from [Lacour17]_ is used to find the local lower bounds for the maximization problem b) second, the local lower bounds are used as the Pareto frontier for the minimization problem, and [Lacour17]_ is applied again to partition the space dominated by that Pareto frontier. """ def __init__( self, ref_point: Tensor, Y: Optional[Tensor] = None, ) -> None: """Initialize FastNondominatedPartitioning. Args: ref_point: A `m`-dim tensor containing the reference point. Y: A `(batch_shape) x n x m`-dim tensor. Example: >>> bd = FastNondominatedPartitioning(ref_point, Y=Y1) """ super().__init__(ref_point=ref_point, Y=Y) def _get_single_cell(self) -> None: r"""Set the partitioning to be a single cell in the case of no Pareto points.""" cell_bounds = torch.full( (2, *self._neg_pareto_Y.shape[:-2], 1, self.num_outcomes), float("inf"), dtype=self._neg_pareto_Y.dtype, device=self._neg_pareto_Y.device, ) cell_bounds[0] = self.ref_point self.hypercell_bounds = cell_bounds def _get_partitioning(self) -> None: r"""Compute non-dominated partitioning. Given local upper bounds for the minimization problem (self._U), this computes the non-dominated partitioning for the maximization problem. Note that -self.U contains the local lower bounds for the maximization problem. 
Following [Yang2019]_, this treats -self.U as a *new* pareto frontier for a minimization problem with a reference point of [infinity]^m and computes a dominated partitioning for this minimization problem. """ new_ref_point = torch.full( torch.Size([1]) + self._neg_ref_point.shape, float("inf"), dtype=self._neg_ref_point.dtype, device=self._neg_ref_point.device, ) # initialize local upper bounds for the second minimization problem self._U2 = new_ref_point # initialize defining points for the second minimization problem # use ref point for maximization as the ideal point for minimization. self._Z2 = self.ref_point.expand( 1, self.num_outcomes, self.num_outcomes ).clone() for j in range(self._neg_ref_point.shape[-1]): self._Z2[0, j, j] = self._U2[0, j] # incrementally update local upper bounds and defining points # for each new Pareto point self._U2, self._Z2 = update_local_upper_bounds_incremental( new_pareto_Y=-self._U, U=self._U2, Z=self._Z2, ) cell_bounds = get_partition_bounds( Z=self._Z2, U=self._U2, ref_point=new_ref_point.view(-1) ) self.hypercell_bounds = cell_bounds def _partition_space_2d(self) -> None: r"""Partition the non-dominated space into disjoint hypercells. This direct method works for `m=2` outcomes. """ cell_bounds = compute_non_dominated_hypercell_bounds_2d( pareto_Y_sorted=self.pareto_Y.flip(-2), ref_point=self.ref_point, ) self.hypercell_bounds = cell_bounds def _compute_hypervolume_if_y_has_data(self) -> Tensor: ideal_point = self.pareto_Y.max(dim=-2, keepdim=True).values total_volume = ( (ideal_point.squeeze(-2) - self.ref_point).clamp_min(0.0).prod(dim=-1) ) finite_cell_bounds = torch.min(self.hypercell_bounds, ideal_point) non_dom_volume = ( (finite_cell_bounds[1] - finite_cell_bounds[0]) .clamp_min(0.0) .prod(dim=-1) .sum(dim=-1) ) return total_volume - non_dom_volume
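# --- Usage sketch (not part of the module above) ---
# Exact vs. approximate non-dominated partitioning for a 3-objective problem;
# tensor values are illustrative only.
import torch

from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    FastNondominatedPartitioning,
    NondominatedPartitioning,
)

ref_point = torch.zeros(3)
Y = torch.rand(20, 3)

# Exact partitioning via the two-step local-upper-bound approach
# (Alg. 1 of [Lacour17]_ applied twice); generally the preferred option.
fast_bd = FastNondominatedPartitioning(ref_point=ref_point, Y=Y)
exact_cells = fast_bd.get_hypercell_bounds()

# Binary partitioning of [Couckuyt2012]_; alpha > 0 discards cells whose volume
# is a small fraction of the total, trading accuracy for speed.
approx_bd = NondominatedPartitioning(ref_point=ref_point, Y=Y, alpha=0.05)
approx_cells = approx_bd.get_hypercell_bounds()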
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" References .. [Zhe2019hogp] S. Zhe, W. Xing, and R. M. Kirby. Scalable high-order gaussian process regression. Proceedings of Machine Learning Research, volume 89, Apr 2019. """ from __future__ import annotations import warnings from contextlib import ExitStack from typing import Any, List, Optional, Tuple, Union import torch from botorch.acquisition.objective import PosteriorTransform from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel from botorch.models.model import FantasizeMixin from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform, Standardize from botorch.models.utils import gpt_posterior_settings from botorch.models.utils.gpytorch_modules import ( get_gaussian_likelihood_with_gamma_prior, ) from botorch.posteriors import ( GPyTorchPosterior, HigherOrderGPPosterior, TransformedPosterior, ) from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import Kernel, MaternKernel from gpytorch.likelihoods import Likelihood from gpytorch.models import ExactGP from gpytorch.priors.torch_priors import GammaPrior, MultivariateNormalPrior from gpytorch.settings import fast_pred_var, skip_posterior_variances from linear_operator.operators import ( BatchRepeatLinearOperator, DiagLinearOperator, KroneckerProductLinearOperator, LinearOperator, ZeroLinearOperator, ) from linear_operator.settings import _fast_solves from torch import Tensor from torch.nn import ModuleList, Parameter, ParameterList class FlattenedStandardize(Standardize): r""" Standardize outcomes in a structured multi-output settings by reshaping the batched output dimensions to be a vector. Specifically, an output dimension of [a x b x c] will be squeezed to be a vector of [a * b * c]. """ def __init__( self, output_shape: torch.Size, batch_shape: torch.Size = None, min_stdv: float = 1e-8, ): r""" Args: output_shape: A `n x output_shape`-dim tensor of training targets. batch_shape: The batch_shape of the training targets. min_stddv: The minimum standard deviation for which to perform standardization (if lower, only de-mean the data). 
""" if batch_shape is None: batch_shape = torch.Size() super(FlattenedStandardize, self).__init__( m=1, outputs=None, batch_shape=batch_shape, min_stdv=min_stdv ) self.output_shape = output_shape self.batch_shape = batch_shape def _squeeze_to_single_output(self, tsr: Tensor) -> Tensor: dim_ct = tsr.ndim - len(self.output_shape) - 1 return tsr.reshape(*tsr.shape[:dim_ct], -1, 1) def _return_to_output_shape(self, tsr: Tensor) -> Tensor: out = tsr.reshape(*tsr.shape[:-2], -1, *self.output_shape) return out def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: Y = self._squeeze_to_single_output(Y) if Yvar is not None: Yvar = self._squeeze_to_single_output(Yvar) Y, Yvar = super().forward(Y, Yvar) Y_out = self._return_to_output_shape(Y) if Yvar is not None: Yvar_out = self._return_to_output_shape(Yvar) else: Yvar_out = None return Y_out, Yvar_out def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: Y = self._squeeze_to_single_output(Y) if Yvar is not None: Yvar = self._squeeze_to_single_output(Yvar) Y, Yvar = super().untransform(Y, Yvar) Y = self._return_to_output_shape(Y) if Yvar is not None: Yvar = self._return_to_output_shape(Yvar) return Y, Yvar def untransform_posterior( self, posterior: HigherOrderGPPosterior ) -> TransformedPosterior: # TODO: return a HigherOrderGPPosterior once rescaling constant # muls * LinearOperators won't force a dense decomposition rather than a # Kronecker structured one. return TransformedPosterior( posterior=posterior, sample_transform=lambda s: self._return_to_output_shape( self.means + self.stdvs * self._squeeze_to_single_output(s) ), mean_transform=lambda m, v: self._return_to_output_shape( self.means + self.stdvs * self._squeeze_to_single_output(m) ), variance_transform=lambda m, v: self._return_to_output_shape( self._stdvs_sq * self._squeeze_to_single_output(v) ), ) class HigherOrderGP(BatchedMultiOutputGPyTorchModel, ExactGP, FantasizeMixin): r""" A model for high-dimensional output regression. As described in [Zhe2019hogp]_. “Higher-order” means that the predictions are matrices (tensors) with at least two dimensions, such as images or grids of images, or measurements taken from a region of at least two dimensions. The posterior uses Matheron's rule [Doucet2010sampl]_ as described in [Maddox2021bohdo]_. `HigherOrderGP` differs from a "vector” multi-output model in that it uses Kronecker algebra to obtain parsimonious covariance matrices for these outputs (see `KroneckerMultiTaskGP` for more information). For example, imagine a 10 x 20 x 30 grid of images. If we were to vectorize the resulting 6,000 data points in order to use them in a non-higher-order GP, they would have a 6,000 x 6,000 covariance matrix, with 36 million entries. The Kronecker structure allows representing this as a product of 10x10, 20x20, and 30x30 covariance matrices, with only 1,400 entries. NOTE: This model requires the use of specialized Kronecker solves in linear operator, which are disabled by default in BoTorch. These are enabled by default in the `HigherOrderGP.posterior` call. However, they need to be manually enabled by the user during model fitting. 
Example: >>> from linear_operator.settings import _fast_solves >>> model = SingleTaskGP(train_X, train_Y) >>> mll = ExactMarginalLogLikelihood(model.likelihood, model) >>> with _fast_solves(True): >>> fit_gpytorch_mll_torch(mll) >>> samples = model.posterior(test_X).rsample() """ def __init__( self, train_X: Tensor, train_Y: Tensor, likelihood: Optional[Likelihood] = None, covar_modules: Optional[List[Kernel]] = None, num_latent_dims: Optional[List[int]] = None, learn_latent_pars: bool = True, latent_init: str = "default", outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, ): r""" Args: train_X: A `batch_shape x n x d`-dim tensor of training inputs. train_Y: A `batch_shape x n x output_shape`-dim tensor of training targets. likelihood: Gaussian likelihood for the model. covar_modules: List of kernels for each output structure. num_latent_dims: Sizes for the latent dimensions. learn_latent_pars: If true, learn the latent parameters. latent_init: [default or gp] how to initialize the latent parameters. """ if input_transform is not None: input_transform.to(train_X) # infer the dimension of `output_shape`. num_output_dims = train_Y.dim() - train_X.dim() + 1 batch_shape = train_X.shape[:-2] if len(batch_shape) > 1: raise NotImplementedError( "HigherOrderGP currently only supports 1-dim `batch_shape`." ) if outcome_transform is not None: if isinstance(outcome_transform, Standardize) and not isinstance( outcome_transform, FlattenedStandardize ): warnings.warn( "HigherOrderGP does not support the outcome_transform " "`Standardize`! Using `FlattenedStandardize` with `output_shape=" f"{train_Y.shape[- num_output_dims:]} and batch_shape=" f"{batch_shape} instead.", RuntimeWarning, ) outcome_transform = FlattenedStandardize( output_shape=train_Y.shape[-num_output_dims:], batch_shape=batch_shape, ) train_Y, _ = outcome_transform(train_Y) self._aug_batch_shape = batch_shape self._num_dimensions = num_output_dims + 1 self._num_outputs = train_Y.shape[0] if batch_shape else 1 self.target_shape = train_Y.shape[-num_output_dims:] self._input_batch_shape = batch_shape if likelihood is None: likelihood = get_gaussian_likelihood_with_gamma_prior( batch_shape=self._aug_batch_shape ) else: self._is_custom_likelihood = True super().__init__( train_X, train_Y.view(*self._aug_batch_shape, -1), likelihood=likelihood, ) if covar_modules is not None: self.covar_modules = ModuleList(covar_modules) else: self.covar_modules = ModuleList( [ MaternKernel( nu=2.5, lengthscale_prior=GammaPrior(3.0, 6.0), batch_shape=self._aug_batch_shape, ard_num_dims=1 if dim > 0 else train_X.shape[-1], ) for dim in range(self._num_dimensions) ] ) if num_latent_dims is None: num_latent_dims = [1] * (self._num_dimensions - 1) self.to(train_X) self._initialize_latents( latent_init=latent_init, num_latent_dims=num_latent_dims, learn_latent_pars=learn_latent_pars, device=train_Y.device, dtype=train_Y.dtype, ) if outcome_transform is not None: self.outcome_transform = outcome_transform if input_transform is not None: self.input_transform = input_transform def _initialize_latents( self, latent_init: str, num_latent_dims: List[int], learn_latent_pars: bool, device: torch.device, dtype: torch.dtype, ): self.latent_parameters = ParameterList() if latent_init == "default": for dim_num in range(len(self.covar_modules) - 1): self.latent_parameters.append( Parameter( torch.rand( *self._aug_batch_shape, self.target_shape[dim_num], num_latent_dims[dim_num], device=device, dtype=dtype, ), 
requires_grad=learn_latent_pars, ) ) elif latent_init == "gp": for dim_num, covar in enumerate(self.covar_modules[1:]): latent_covar = covar( torch.linspace( 0.0, 1.0, self.target_shape[dim_num], device=device, dtype=dtype, ) ).add_jitter(1e-4) latent_dist = MultivariateNormal( torch.zeros( *self._aug_batch_shape, self.target_shape[dim_num], device=device, dtype=dtype, ), latent_covar, ) sample_shape = torch.Size((num_latent_dims[dim_num],)) latent_sample = latent_dist.sample(sample_shape=sample_shape) latent_sample = latent_sample.reshape( *self._aug_batch_shape, self.target_shape[dim_num], num_latent_dims[dim_num], ) self.latent_parameters.append( Parameter( latent_sample, requires_grad=learn_latent_pars, ) ) self.register_prior( "latent_parameters_" + str(dim_num), MultivariateNormalPrior( latent_dist.loc, latent_dist.covariance_matrix.detach().clone(), transform=lambda x: x.squeeze(-1), ), lambda module, dim_num=dim_num: self.latent_parameters[dim_num], ) def forward(self, X: Tensor) -> MultivariateNormal: if self.training: X = self.transform_inputs(X) covariance_list = [] covariance_list.append(self.covar_modules[0](X)) for cm, param in zip(self.covar_modules[1:], self.latent_parameters): if not self.training: with torch.no_grad(): covariance_list.append(cm(param)) else: covariance_list.append(cm(param)) # check batch_shapes if covariance_list[0].batch_shape != covariance_list[1].batch_shape: for i in range(1, len(covariance_list)): cm = covariance_list[i] covariance_list[i] = BatchRepeatLinearOperator( cm, covariance_list[0].batch_shape ) kronecker_covariance = KroneckerProductLinearOperator(*covariance_list) # TODO: expand options for the mean module via batch shaping? mean = torch.zeros( *covariance_list[0].batch_shape, kronecker_covariance.shape[-1], device=kronecker_covariance.device, dtype=kronecker_covariance.dtype, ) return MultivariateNormal(mean, kronecker_covariance) def get_fantasy_model(self, inputs, targets, **kwargs): # we need to squeeze the targets in order to preserve the shaping inputs_batch_dims = len(inputs.shape[:-2]) target_shape = (*inputs.shape[:-2], -1) if (inputs_batch_dims + self._num_dimensions) < targets.ndim: target_shape = (targets.shape[0], *target_shape) reshaped_targets = targets.view(*target_shape) return super().get_fantasy_model(inputs, reshaped_targets, **kwargs) def condition_on_observations( self, X: Tensor, Y: Tensor, **kwargs: Any ) -> HigherOrderGP: r"""Condition the model on new observations. Args: X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of the feature space, `m` is the number of points per batch, and `batch_shape` is the batch shape (must be compatible with the batch shape of the model). Y: A `batch_shape' x n' x m_d`-dim Tensor, where `m_d` is the shaping of the model outputs, `n'` is the number of points per batch, and `batch_shape'` is the batch shape of the observations. `batch_shape'` must be broadcastable to `batch_shape` using standard broadcasting semantics. If `Y` has fewer batch dimensions than `X`, its is assumed that the missing batch dimensions are the same for all `Y`. Returns: A `BatchedMultiOutputGPyTorchModel` object of the same type with `n + n'` training examples, representing the original model conditioned on the new observations `(X, Y)` (and possibly noise observations passed in via kwargs). 
""" noise = kwargs.get("noise") if hasattr(self, "outcome_transform"): # we need to apply transforms before shifting batch indices around Y, noise = self.outcome_transform(Y, noise) self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False) # we don't need to do un-squeezing because Y already is batched # we don't support fixed noise here yet # if noise is not None: # kwargs.update({"noise": noise}) fantasy_model = super( BatchedMultiOutputGPyTorchModel, self ).condition_on_observations(X=X, Y=Y, **kwargs) fantasy_model._input_batch_shape = fantasy_model.train_targets.shape[ : (-1 if self._num_outputs == 1 else -2) ] fantasy_model._aug_batch_shape = fantasy_model.train_targets.shape[:-1] return fantasy_model def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> GPyTorchPosterior: self.eval() # make sure we're calling a posterior if posterior_transform is not None: # this could be very costly, disallow for now raise NotImplementedError( "Posterior transforms currently not supported for " f"{self.__class__.__name__}" ) # input transforms are applied at `posterior` in `eval` mode, and at # `model.forward()` at the training time X = self.transform_inputs(X) no_pred_variance = skip_posterior_variances._state with ExitStack() as es: es.enter_context(gpt_posterior_settings()) es.enter_context(fast_pred_var(True)) es.enter_context(_fast_solves(True)) # we need to skip posterior variances here es.enter_context(skip_posterior_variances(True)) mvn = self(X) if observation_noise is not False: # TODO: ensure that this still works for structured noise solves. mvn = self.likelihood(mvn, X) # lazy covariance matrix includes the interpolated version of the full # covariance matrix so we can actually grab that instead. if X.ndimension() > self.train_inputs[0].ndimension(): X_batch_shape = X.shape[:-2] train_inputs = self.train_inputs[0].reshape( *[1] * len(X_batch_shape), *self.train_inputs[0].shape ) train_inputs = train_inputs.repeat( *X_batch_shape, *[1] * self.train_inputs[0].ndimension() ) else: train_inputs = self.train_inputs[0] # we now compute the data covariances for the training data, the testing # data, the joint covariances, and the test train cross-covariance train_train_covar = self.prediction_strategy.lik_train_train_covar.detach() base_train_train_covar = train_train_covar.linear_op data_train_covar = base_train_train_covar.linear_ops[0] data_covar = self.covar_modules[0] data_train_test_covar = data_covar(X, train_inputs) data_test_test_covar = data_covar(X) data_joint_covar = data_train_covar.cat_rows( cross_mat=data_train_test_covar, new_mat=data_test_test_covar, ) # we detach the latents so that they don't cause gradient errors # TODO: Can we enable backprop through the latent covariances? 
batch_shape = data_train_test_covar.batch_shape latent_covar_list = [] for latent_covar in base_train_train_covar.linear_ops[1:]: if latent_covar.batch_shape != batch_shape: latent_covar = BatchRepeatLinearOperator(latent_covar, batch_shape) latent_covar_list.append(latent_covar.detach()) joint_covar = KroneckerProductLinearOperator( data_joint_covar, *latent_covar_list ) test_train_covar = KroneckerProductLinearOperator( data_train_test_covar, *latent_covar_list ) # compute the posterior variance if necessary if no_pred_variance: pred_variance = mvn.variance else: pred_variance = self.make_posterior_variances(joint_covar) # mean and variance get reshaped into the target shape new_mean = mvn.mean.reshape(*X.shape[:-1], *self.target_shape) if not no_pred_variance: new_variance = pred_variance.reshape(*X.shape[:-1], *self.target_shape) new_variance = DiagLinearOperator(new_variance) else: new_variance = ZeroLinearOperator( *X.shape[:-1], *self.target_shape, self.target_shape[-1] ) mvn = MultivariateNormal(new_mean, new_variance) # return a specialized Posterior to allow for sampling # cloning the full covar allows backpropagation through it posterior = HigherOrderGPPosterior( distribution=mvn, train_targets=self.train_targets.unsqueeze(-1), train_train_covar=train_train_covar, test_train_covar=test_train_covar, joint_covariance_matrix=joint_covar.clone(), output_shape=X.shape[:-1] + self.target_shape, num_outputs=self._num_outputs, ) if hasattr(self, "outcome_transform"): posterior = self.outcome_transform.untransform_posterior(posterior) return posterior def make_posterior_variances( self, joint_covariance_matrix: LinearOperator ) -> Tensor: r""" Computes the posterior variances given the data points X. As currently implemented, it computes another forwards call with the stacked data to get out the joint covariance across all data points. """ # TODO: use the exposed joint covariances from the prediction strategy data_joint_covariance = joint_covariance_matrix.linear_ops[0].evaluate_kernel() num_train = self.train_inputs[0].shape[-2] test_train_covar = data_joint_covariance[..., num_train:, :num_train] train_train_covar = data_joint_covariance[..., :num_train, :num_train] test_test_covar = data_joint_covariance[..., num_train:, num_train:] jcm_linops = joint_covariance_matrix.linear_ops[1:] full_train_train_covar = KroneckerProductLinearOperator( train_train_covar, *jcm_linops ) full_test_test_covar = KroneckerProductLinearOperator( test_test_covar, *jcm_linops ) full_test_train_covar_tuple = (test_train_covar,) + jcm_linops train_evals, train_evecs = full_train_train_covar.eigh() # (\kron \Lambda_i + \sigma^2 I)^{-1} train_inv_evals = DiagLinearOperator( 1.0 / (train_evals + self.likelihood.noise) ) # compute K_i S_i \hadamard K_i S_i test_train_hadamard = KroneckerProductLinearOperator( *[ lt1.matmul(lt2).to_dense() ** 2 for lt1, lt2 in zip(full_test_train_covar_tuple, train_evecs.linear_ops) ] ) # and compute the column sums of # (\kron K_i S_i * K_i S_i) \tilde{\Lambda}^{-1} test_train_pred_covar = test_train_hadamard.matmul(train_inv_evals).sum(dim=-1) pred_variances = full_test_test_covar.diagonal() - test_train_pred_covar return pred_variances
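# --- Usage sketch (not part of the module above) ---
# Fitting a `HigherOrderGP` on matrix-valued targets with the
# `FlattenedStandardize` outcome transform and Kronecker solves enabled during
# fitting. Tensor values are illustrative; the import paths for the model and
# `fit_gpytorch_mll_torch` are assumed from the standard BoTorch layout.
import torch

from botorch.models.higher_order_gp import FlattenedStandardize, HigherOrderGP
from botorch.optim.fit import fit_gpytorch_mll_torch
from gpytorch.mlls import ExactMarginalLogLikelihood
from linear_operator.settings import _fast_solves

train_X = torch.rand(16, 3)
train_Y = torch.randn(16, 4, 5)  # each observation is a 4 x 5 matrix

model = HigherOrderGP(
    train_X,
    train_Y,
    outcome_transform=FlattenedStandardize(output_shape=train_Y.shape[1:]),
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
# Kronecker solves must be enabled manually during fitting (see NOTE above).
with _fast_solves(True):
    fit_gpytorch_mll_torch(mll)

posterior = model.posterior(torch.rand(4, 3))
samples = posterior.rsample(torch.Size([8]))  # 8 x 4 x 4 x 5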
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Multi-task Gaussian Process Regression models with fully Bayesian inference. """ from typing import Any, Dict, List, Mapping, NoReturn, Optional, Tuple import pyro import torch from botorch.acquisition.objective import PosteriorTransform from botorch.models.fully_bayesian import ( matern52_kernel, MIN_INFERRED_NOISE_LEVEL, PyroModel, reshape_and_detach, SaasPyroModel, ) from botorch.models.multitask import MultiTaskGP from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform from botorch.posteriors.fully_bayesian import FullyBayesianPosterior, MCMC_DIM from botorch.utils.datasets import SupervisedDataset from gpytorch.distributions.multivariate_normal import MultivariateNormal from gpytorch.kernels import MaternKernel from gpytorch.kernels.kernel import Kernel from gpytorch.likelihoods.likelihood import Likelihood from gpytorch.means.mean import Mean from torch import Tensor from torch.nn.parameter import Parameter class MultitaskSaasPyroModel(SaasPyroModel): r""" Implementation of the multi-task sparse axis-aligned subspace priors (SAAS) model. The multi-task model uses an ICM kernel. The data kernel is same as the single task SAAS model in order to handle high-dimensional parameter spaces. The task kernel is a Matern-5/2 kernel using learned task embeddings as the input. """ def set_inputs( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor], task_feature: int, task_rank: Optional[int] = None, ): """Set the training data. Args: train_X: Training inputs (n x (d + 1)) train_Y: Training targets (n x 1) train_Yvar: Observed noise variance (n x 1). If None, we infer the noise. Note that the inferred noise is common across all tasks. task_feature: The index of the task feature (`-d <= task_feature <= d`). task_rank: The num of learned task embeddings to be used in the task kernel. If omitted, set it to be 1. """ super().set_inputs(train_X, train_Y, train_Yvar) # obtain a list of task indicies all_tasks = train_X[:, task_feature].unique().to(dtype=torch.long).tolist() self.task_feature = task_feature self.num_tasks = len(all_tasks) self.task_rank = task_rank or 1 # assume there is one column for task feature self.ard_num_dims = self.train_X.shape[-1] - 1 def sample(self) -> None: r"""Sample from the SAAS model. This samples the mean, noise variance, outputscale, and lengthscales according to the SAAS prior. 
""" tkwargs = {"dtype": self.train_X.dtype, "device": self.train_X.device} base_idxr = torch.arange(self.ard_num_dims, **{"device": tkwargs["device"]}) base_idxr[self.task_feature :] += 1 # exclude task feature task_indices = self.train_X[..., self.task_feature].to( device=tkwargs["device"], dtype=torch.long ) outputscale = self.sample_outputscale(concentration=2.0, rate=0.15, **tkwargs) mean = self.sample_mean(**tkwargs) noise = self.sample_noise(**tkwargs) lengthscale = self.sample_lengthscale(dim=self.ard_num_dims, **tkwargs) K = matern52_kernel(X=self.train_X[..., base_idxr], lengthscale=lengthscale) # compute task covar matrix task_latent_features = self.sample_latent_features(**tkwargs)[task_indices] task_lengthscale = self.sample_task_lengthscale(**tkwargs) task_covar = matern52_kernel( X=task_latent_features, lengthscale=task_lengthscale ) K = K.mul(task_covar) K = outputscale * K + noise * torch.eye(self.train_X.shape[0], **tkwargs) pyro.sample( "Y", pyro.distributions.MultivariateNormal( loc=mean.view(-1).expand(self.train_X.shape[0]), covariance_matrix=K, ), obs=self.train_Y.squeeze(-1), ) def sample_latent_features(self, **tkwargs: Any): return pyro.sample( "latent_features", pyro.distributions.Normal( torch.tensor(0.0, **tkwargs), torch.tensor(1.0, **tkwargs), ).expand(torch.Size([self.num_tasks, self.task_rank])), ) def sample_task_lengthscale( self, concentration: float = 6.0, rate: float = 3.0, **tkwargs: Any ): return pyro.sample( "task_lengthscale", pyro.distributions.Gamma( torch.tensor(concentration, **tkwargs), torch.tensor(rate, **tkwargs), ).expand(torch.Size([self.task_rank])), ) def load_mcmc_samples( self, mcmc_samples: Dict[str, Tensor] ) -> Tuple[Mean, Kernel, Likelihood, Kernel, Parameter]: r"""Load the MCMC samples into the mean_module, covar_module, and likelihood.""" tkwargs = {"device": self.train_X.device, "dtype": self.train_X.dtype} num_mcmc_samples = len(mcmc_samples["mean"]) batch_shape = torch.Size([num_mcmc_samples]) mean_module, covar_module, likelihood = super().load_mcmc_samples( mcmc_samples=mcmc_samples ) task_covar_module = MaternKernel( nu=2.5, ard_num_dims=self.task_rank, batch_shape=batch_shape, ).to(**tkwargs) task_covar_module.lengthscale = reshape_and_detach( target=task_covar_module.lengthscale, new_value=mcmc_samples["task_lengthscale"], ) latent_features = Parameter( torch.rand( batch_shape + torch.Size([self.num_tasks, self.task_rank]), requires_grad=True, **tkwargs, ) ) latent_features = reshape_and_detach( target=latent_features, new_value=mcmc_samples["latent_features"], ) return mean_module, covar_module, likelihood, task_covar_module, latent_features class SaasFullyBayesianMultiTaskGP(MultiTaskGP): r"""A fully Bayesian multi-task GP model with the SAAS prior. This model assumes that the inputs have been normalized to [0, 1]^d and that the output has been stratified standardized to have zero mean and unit variance for each task.The SAAS model [Eriksson2021saasbo]_ with a Matern-5/2 is used as data kernel by default. You are expected to use `fit_fully_bayesian_model_nuts` to fit this model as it isn't compatible with `fit_gpytorch_model`. 
Example: >>> X1, X2 = torch.rand(10, 2), torch.rand(20, 2) >>> i1, i2 = torch.zeros(10, 1), torch.ones(20, 1) >>> train_X = torch.cat([ >>> torch.cat([X1, i1], -1), torch.cat([X2, i2], -1), >>> ]) >>> train_Y = torch.cat(f1(X1), f2(X2)).unsqueeze(-1) >>> train_Yvar = 0.01 * torch.ones_like(train_Y) >>> mtsaas_gp = SaasFullyBayesianFixedNoiseMultiTaskGP( >>> train_X, train_Y, train_Yvar, task_feature=-1, >>> ) >>> fit_fully_bayesian_model_nuts(mtsaas_gp) >>> posterior = mtsaas_gp.posterior(test_X) """ def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor], task_feature: int, output_tasks: Optional[List[int]] = None, rank: Optional[int] = None, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, pyro_model: Optional[PyroModel] = None, ) -> None: r"""Initialize the fully Bayesian multi-task GP model. Args: train_X: Training inputs (n x (d + 1)) train_Y: Training targets (n x 1) train_Yvar: Observed noise variance (n x 1). If None, we infer the noise. Note that the inferred noise is common across all tasks. task_feature: The index of the task feature (`-d <= task_feature <= d`). output_tasks: A list of task indices for which to compute model outputs for. If omitted, return outputs for all task indices. rank: The num of learned task embeddings to be used in the task kernel. If omitted, set it to be 1. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). input_transform: An input transform that is applied to the inputs `X` in the model's forward pass. pyro_model: Optional `PyroModel`, defaults to `MultitaskSaasPyroModel`. 
""" if not ( train_X.ndim == train_Y.ndim == 2 and len(train_X) == len(train_Y) and train_Y.shape[-1] == 1 ): raise ValueError( "Expected train_X to have shape n x d and train_Y to have shape n x 1" ) if train_Yvar is not None and train_Y.shape != train_Yvar.shape: raise ValueError( "Expected train_Yvar to be None or have the same shape as train_Y" ) with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) if outcome_transform is not None: train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar) if train_Yvar is not None: # Clamp after transforming train_Yvar = train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL) super().__init__( train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar, task_feature=task_feature, output_tasks=output_tasks, ) self.to(train_X) self.mean_module = None self.covar_module = None self.likelihood = None self.task_covar_module = None self.register_buffer("latent_features", None) if pyro_model is None: pyro_model = MultitaskSaasPyroModel() pyro_model.set_inputs( train_X=transformed_X, train_Y=train_Y, train_Yvar=train_Yvar, task_feature=task_feature, task_rank=rank, ) self.pyro_model = pyro_model if outcome_transform is not None: self.outcome_transform = outcome_transform if input_transform is not None: self.input_transform = input_transform def train(self, mode: bool = True) -> None: r"""Puts the model in `train` mode.""" super().train(mode=mode) if mode: self.mean_module = None self.covar_module = None self.likelihood = None self.task_covar_module = None @property def median_lengthscale(self) -> Tensor: r"""Median lengthscales across the MCMC samples.""" self._check_if_fitted() lengthscale = self.covar_module.base_kernel.lengthscale.clone() return lengthscale.median(0).values.squeeze(0) @property def num_mcmc_samples(self) -> int: r"""Number of MCMC samples in the model.""" self._check_if_fitted() return len(self.covar_module.outputscale) @property def batch_shape(self) -> torch.Size: r"""Batch shape of the model, equal to the number of MCMC samples. Note that `SaasFullyBayesianMultiTaskGP` does not support batching over input data at this point. """ self._check_if_fitted() return torch.Size([self.num_mcmc_samples]) def fantasize(self, *args, **kwargs) -> NoReturn: raise NotImplementedError("Fantasize is not implemented!") def _check_if_fitted(self): r"""Raise an exception if the model hasn't been fitted.""" if self.covar_module is None: raise RuntimeError( "Model has not been fitted. You need to call " "`fit_fully_bayesian_model_nuts` to fit the model." ) def load_mcmc_samples(self, mcmc_samples: Dict[str, Tensor]) -> None: r"""Load the MCMC hyperparameter samples into the model. This method will be called by `fit_fully_bayesian_model_nuts` when the model has been fitted in order to create a batched MultiTaskGP model. """ ( self.mean_module, self.covar_module, self.likelihood, self.task_covar_module, self.latent_features, ) = self.pyro_model.load_mcmc_samples(mcmc_samples=mcmc_samples) def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: bool = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> FullyBayesianPosterior: r"""Computes the posterior over model outputs at the provided points. Returns: A `FullyBayesianPosterior` object. Includes observation noise if specified. 
""" self._check_if_fitted() posterior = super().posterior( X=X, output_indices=output_indices, observation_noise=observation_noise, posterior_transform=posterior_transform, **kwargs, ) posterior = FullyBayesianPosterior(distribution=posterior.distribution) return posterior def forward(self, X: Tensor) -> MultivariateNormal: self._check_if_fitted() X = X.unsqueeze(MCMC_DIM) x_basic, task_idcs = self._split_inputs(X) mean_x = self.mean_module(x_basic) covar_x = self.covar_module(x_basic) tsub_idcs = task_idcs.squeeze(-3).squeeze(-1) latent_features = self.latent_features[:, tsub_idcs, :] if X.ndim > 3: # batch eval mode # for X (batch_shape x num_samples x q x d), task_idcs[:,i,:,] are the same # reshape X to (batch_shape x num_samples x q x d) latent_features = latent_features.permute( [-i for i in range(X.ndim - 1, 2, -1)] + [0] + [-i for i in range(2, 0, -1)] ) # Combine the two in an ICM fashion covar_i = self.task_covar_module(latent_features) covar = covar_x.mul(covar_i) return MultivariateNormal(mean_x, covar) @classmethod def construct_inputs( cls, training_data: Dict[str, SupervisedDataset], task_feature: int, rank: Optional[int] = None, **kwargs: Any, ) -> Dict[str, Any]: r"""Construct `Model` keyword arguments from dictionary of `SupervisedDataset`. Args: training_data: Dictionary of `SupervisedDataset`. task_feature: Column index of embedded task indicator features. For details, see `parse_training_data`. rank: The rank of the cross-task covariance matrix. """ inputs = super().construct_inputs( training_data=training_data, task_feature=task_feature, rank=rank, **kwargs ) inputs.pop("task_covar_prior") if "train_Yvar" not in inputs: inputs["train_Yvar"] = None return inputs def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True): r"""Custom logic for loading the state dict. The standard approach of calling `load_state_dict` currently doesn't play well with the `SaasFullyBayesianMultiTaskGP` since the `mean_module`, `covar_module` and `likelihood` aren't initialized until the model has been fitted. The reason for this is that we don't know the number of MCMC samples until NUTS is called. Given the state dict, we can initialize a new model with some dummy samples and then load the state dict into this model. This currently only works for a `MultitaskSaasPyroModel` and supporting more Pyro models likely requires moving the model construction logic into the Pyro model itself. TODO: If this were to inherif from `SaasFullyBayesianSingleTaskGP`, we could simplify this method and eliminate some others. """ if not isinstance(self.pyro_model, MultitaskSaasPyroModel): raise NotImplementedError( # pragma: no cover "load_state_dict only works for MultitaskSaasPyroModel" ) raw_mean = state_dict["mean_module.raw_constant"] num_mcmc_samples = len(raw_mean) dim = self.pyro_model.train_X.shape[-1] - 1 # Removing 1 for the task feature. 
task_rank = self.pyro_model.task_rank tkwargs = {"device": raw_mean.device, "dtype": raw_mean.dtype} # Load some dummy samples mcmc_samples = { "mean": torch.ones(num_mcmc_samples, **tkwargs), "lengthscale": torch.ones(num_mcmc_samples, dim, **tkwargs), "outputscale": torch.ones(num_mcmc_samples, **tkwargs), "task_lengthscale": torch.ones(num_mcmc_samples, task_rank, **tkwargs), "latent_features": torch.ones( num_mcmc_samples, self._rank, task_rank, **tkwargs ), } if self.pyro_model.train_Yvar is None: mcmc_samples["noise"] = torch.ones(num_mcmc_samples, **tkwargs) ( self.mean_module, self.covar_module, self.likelihood, self.task_covar_module, self.latent_features, ) = self.pyro_model.load_mcmc_samples(mcmc_samples=mcmc_samples) # Load the actual samples from the state dict super().load_state_dict(state_dict=state_dict, strict=strict)
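
# A minimal sketch of the state-dict round trip that the custom `load_state_dict`
# above is designed to support: a fitted model is serialized and restored into a
# fresh, unfitted instance. The import path, toy data, and the deliberately tiny
# NUTS settings are illustrative assumptions, not part of the original module.
import torch
from botorch.fit import fit_fully_bayesian_model_nuts
from botorch.models.fully_bayesian_multitask import SaasFullyBayesianMultiTaskGP

torch.manual_seed(0)
X1 = torch.rand(8, 2, dtype=torch.double)
X2 = torch.rand(8, 2, dtype=torch.double)
i1 = torch.zeros(8, 1, dtype=torch.double)
i2 = torch.ones(8, 1, dtype=torch.double)
train_X = torch.cat([torch.cat([X1, i1], -1), torch.cat([X2, i2], -1)])
train_Y = torch.cat([X1.sum(-1, keepdim=True), X2.prod(-1, keepdim=True)])
train_Yvar = 0.01 * torch.ones_like(train_Y)

model = SaasFullyBayesianMultiTaskGP(train_X, train_Y, train_Yvar, task_feature=-1)
fit_fully_bayesian_model_nuts(model, warmup_steps=32, num_samples=16, thinning=8)

# A fresh instance starts with `mean_module` etc. set to None; `load_state_dict`
# first builds the batched modules from dummy MCMC samples and then overwrites
# them with the values stored in the state dict.
state = model.state_dict()
reloaded = SaasFullyBayesianMultiTaskGP(train_X, train_Y, train_Yvar, task_feature=-1)
reloaded.load_state_dict(state)
posterior = reloaded.posterior(torch.rand(4, 2, dtype=torch.double))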
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" References .. [Feng2020HDCPS] Q. Feng, B. Latham, H. Mao and E. Backshy. High-Dimensional Contextual Policy Search with Unknown Context Rewards using Bayesian Optimization. Advances in Neural Information Processing Systems 33, NeurIPS 2020. """ import warnings from typing import List, Optional import torch from botorch.models.multitask import MultiTaskGP from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform from gpytorch.constraints import Interval from gpytorch.distributions.multivariate_normal import MultivariateNormal from gpytorch.kernels.rbf_kernel import RBFKernel from linear_operator.operators import InterpolatedLinearOperator, LinearOperator from torch import Tensor from torch.nn import ModuleList class LCEMGP(MultiTaskGP): r"""The Multi-Task GP with the latent context embedding multioutput (LCE-M) kernel. See [Feng2020HDCPS]_ for a reference on the model and its use in Bayesian optimization. """ def __init__( self, train_X: Tensor, train_Y: Tensor, task_feature: int, train_Yvar: Optional[Tensor] = None, context_cat_feature: Optional[Tensor] = None, context_emb_feature: Optional[Tensor] = None, embs_dim_list: Optional[List[int]] = None, output_tasks: Optional[List[int]] = None, input_transform: Optional[InputTransform] = None, outcome_transform: Optional[OutcomeTransform] = None, ) -> None: r""" Args: train_X: (n x d) X training data. train_Y: (n x 1) Y training data. task_feature: Column index of train_X to get context indices. train_Yvar: An optional (n x 1) tensor of observed variances of each training Y. If None, we infer the noise. Note that the inferred noise is common across all tasks. context_cat_feature: (n_contexts x k) one-hot encoded context features. Rows are ordered by context indices, where k is the number of categorical variables. If None, task indices will be used and k = 1. context_emb_feature: (n_contexts x m) pre-given continuous embedding features. Rows are ordered by context indices. embs_dim_list: Embedding dimension for each categorical variable. The length equals k. If None, the embedding dimension is set to 1 for each categorical variable. output_tasks: A list of task indices for which to compute model outputs for. If omitted, return outputs for all task indices. 
""" super().__init__( train_X=train_X, train_Y=train_Y, task_feature=task_feature, train_Yvar=train_Yvar, output_tasks=output_tasks, input_transform=input_transform, outcome_transform=outcome_transform, ) self.device = train_X.device # context indices all_tasks = train_X[:, task_feature].unique() self.all_tasks = all_tasks.to(dtype=torch.long).tolist() self.all_tasks.sort() # unique in python does automatic sort; add for safety if context_cat_feature is None: context_cat_feature = all_tasks.unsqueeze(-1).to(device=self.device) self.context_cat_feature = context_cat_feature # row indices = context indices self.context_emb_feature = context_emb_feature # construct emb_dims based on categorical features if embs_dim_list is None: # set embedding_dim = 1 for each categorical variable embs_dim_list = [1 for _i in range(context_cat_feature.size(1))] n_embs = sum(embs_dim_list) self.emb_dims = [ (len(context_cat_feature[:, i].unique()), embs_dim_list[i]) for i in range(context_cat_feature.size(1)) ] # contruct embedding layer: need to handle multiple categorical features self.emb_layers = ModuleList( [ torch.nn.Embedding(num_embeddings=x, embedding_dim=y, max_norm=1.0) for x, y in self.emb_dims ] ) self.task_covar_module = RBFKernel( ard_num_dims=n_embs, lengthscale_constraint=Interval( 0.0, 2.0, transform=None, initial_value=1.0 ), ) self.to(train_X) def _eval_context_covar(self) -> LinearOperator: """obtain context covariance matrix (num_contexts x num_contexts)""" all_embs = self._task_embeddings() return self.task_covar_module(all_embs) def _task_embeddings(self) -> Tensor: """generate embedding features for all contexts.""" embeddings = [ emb_layer( self.context_cat_feature[:, i].to( dtype=torch.long, device=self.device ) # pyre-ignore ) for i, emb_layer in enumerate(self.emb_layers) ] embeddings = torch.cat(embeddings, dim=1) # add given embeddings if any if self.context_emb_feature is not None: embeddings = torch.cat( [embeddings, self.context_emb_feature.to(self.device)], dim=1, # pyre-ignore ) return embeddings def task_covar_matrix(self, task_idcs: Tensor) -> Tensor: r"""compute covariance matrix of a list of given context Args: task_idcs: (n x 1) or (b x n x 1) task indices tensor """ covar_matrix = self._eval_context_covar() return InterpolatedLinearOperator( base_linear_op=covar_matrix, left_interp_indices=task_idcs, right_interp_indices=task_idcs, ).to_dense() def forward(self, x: Tensor) -> MultivariateNormal: if self.training: x = self.transform_inputs(x) x_basic, task_idcs = self._split_inputs(x) # Compute base mean and covariance mean_x = self.mean_module(x_basic) covar_x = self.covar_module(x_basic) # Compute task covariances covar_i = self.task_covar_matrix(task_idcs) covar = covar_x.mul(covar_i) return MultivariateNormal(mean_x, covar) class FixedNoiseLCEMGP(LCEMGP): r"""The Multi-Task GP the latent context embedding multioutput (LCE-M) kernel, with known observation noise. DEPRECATED: Please use `LCEMGP` with `train_Yvar` instead. """ def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor, task_feature: int, context_cat_feature: Optional[Tensor] = None, context_emb_feature: Optional[Tensor] = None, embs_dim_list: Optional[List[int]] = None, output_tasks: Optional[List[int]] = None, ) -> None: r""" Args: train_X: (n x d) X training data. train_Y: (n x 1) Y training data. train_Yvar: (n x 1) Observed variances of each training Y. task_feature: Column index of train_X to get context indices. 
            context_cat_feature: (n_contexts x k) one-hot encoded context
                features. Rows are ordered by context indices, where k is the
                number of categorical variables. If None, task indices will
                be used and k = 1.
            context_emb_feature: (n_contexts x m) pre-given continuous
                embedding features. Rows are ordered by context indices.
            embs_dim_list: Embedding dimension for each categorical variable.
                The length equals k. If None, the embedding dimension is set to
                1 for each categorical variable.
            output_tasks: A list of task indices for which to compute model
                outputs. If omitted, return outputs for all task indices.
        """
        warnings.warn(
            "`FixedNoiseLCEMGP` has been deprecated and will be removed in a "
            "future release. Please use the `LCEMGP` model instead. "
            "When `train_Yvar` is specified, `LCEMGP` behaves the same "
            "as the `FixedNoiseLCEMGP`.",
            DeprecationWarning,
        )
        super().__init__(
            train_X=train_X,
            train_Y=train_Y,
            task_feature=task_feature,
            train_Yvar=train_Yvar,
            context_cat_feature=context_cat_feature,
            context_emb_feature=context_emb_feature,
            embs_dim_list=embs_dim_list,
            output_tasks=output_tasks,
        )
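
# A minimal usage sketch for LCEMGP (not part of the original module). Since the
# model is a MultiTaskGP, it is fit by maximizing the exact marginal log-likelihood;
# the toy data and the tiny problem sizes below are illustrative assumptions only.
import torch
from botorch.fit import fit_gpytorch_mll
from gpytorch.mlls import ExactMarginalLogLikelihood

torch.manual_seed(0)
num_contexts, n_per_context, d = 3, 8, 2
xs = torch.rand(num_contexts * n_per_context, d, dtype=torch.double)
# Long-format inputs: the last column holds the context (task) index 0, 1, 2.
context_idx = (
    torch.arange(num_contexts, dtype=torch.double)
    .repeat_interleave(n_per_context)
    .unsqueeze(-1)
)
train_X = torch.cat([xs, context_idx], dim=-1)
train_Y = xs.sum(dim=-1, keepdim=True) + 0.1 * context_idx

model = LCEMGP(train_X=train_X, train_Y=train_Y, task_feature=-1)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll)

# Test points without the task column produce outputs for all contexts.
posterior = model.posterior(torch.rand(4, d, dtype=torch.double))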
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Deterministic Models: Simple wrappers that allow the usage of deterministic mappings via the BoTorch Model and Posterior APIs. Deterministic models are useful for expressing known input-output relationships within the BoTorch Model API. This is useful e.g. for multi-objective optimization with known objective functions (e.g. the number of parameters of a Neural Network in the context of Neural Architecture Search is usually a known function of the architecture configuration), or to encode cost functions for cost-aware acquisition utilities. Cost-aware optimization is desirable when evaluations have a cost that is heterogeneous, either in the inputs `X` or in a particular fidelity parameter that directly encodes the fidelity of the observation. `GenericDeterministicModel` supports arbitrary deterministic functions, while `AffineFidelityCostModel` is a particular cost model for multi-fidelity optimization. Other use cases of deterministic models include representing approximate GP sample paths, e.g. random Fourier features obtained with `get_gp_samples`, which allows them to be substituted in acquisition functions or in other places where a `Model` is expected. """ from __future__ import annotations from abc import abstractmethod from typing import Callable, List, Optional, Union import torch from botorch.models.ensemble import EnsembleModel from botorch.models.model import Model from torch import Tensor class DeterministicModel(EnsembleModel): r""" Abstract base class for deterministic models. :meta private: """ @abstractmethod def forward(self, X: Tensor) -> Tensor: r"""Compute the (deterministic) model output at X. Args: X: A `batch_shape x n x d`-dim input tensor `X`. Returns: A `batch_shape x n x m`-dimensional output tensor (the outcome dimension `m` must be explicit if `m=1`). """ pass # pragma: no cover def _forward(self, X: Tensor) -> Tensor: r"""Compatibilizes the `DeterministicModel` with `EnsemblePosterior`""" return self.forward(X=X).unsqueeze(-3) class GenericDeterministicModel(DeterministicModel): r"""A generic deterministic model constructed from a callable. Example: >>> f = lambda x: x.sum(dim=-1, keep_dims=True) >>> model = GenericDeterministicModel(f) """ def __init__(self, f: Callable[[Tensor], Tensor], num_outputs: int = 1) -> None: r""" Args: f: A callable mapping a `batch_shape x n x d`-dim input tensor `X` to a `batch_shape x n x m`-dimensional output tensor (the outcome dimension `m` must be explicit, even if `m=1`). num_outputs: The number of outputs `m`. """ super().__init__() self._f = f self._num_outputs = num_outputs def subset_output(self, idcs: List[int]) -> GenericDeterministicModel: r"""Subset the model along the output dimension. Args: idcs: The output indices to subset the model to. Returns: The current model, subset to the specified output indices. """ def f_subset(X: Tensor) -> Tensor: return self._f(X)[..., idcs] return self.__class__(f=f_subset, num_outputs=len(idcs)) def forward(self, X: Tensor) -> Tensor: r"""Compute the (deterministic) model output at X. Args: X: A `batch_shape x n x d`-dim input tensor `X`. Returns: A `batch_shape x n x m`-dimensional output tensor. 
""" return self._f(X) class AffineDeterministicModel(DeterministicModel): r"""An affine deterministic model.""" def __init__(self, a: Tensor, b: Union[Tensor, float] = 0.01) -> None: r"""Affine deterministic model from weights and offset terms. A simple model of the form y[..., m] = b[m] + sum_{i=1}^d a[i, m] * X[..., i] Args: a: A `d x m`-dim tensor of linear weights, where `m` is the number of outputs (must be explicit if `m=1`) b: The affine (offset) term. Either a float (for single-output models or if the offset is shared), or a `m`-dim tensor (with different offset values for for the `m` different outputs). """ if not a.ndim == 2: raise ValueError("a must be two-dimensional") if not torch.is_tensor(b): b = torch.tensor([b]) if not b.ndim == 1: raise ValueError("b nust be one-dimensional") super().__init__() self.register_buffer("a", a) self.register_buffer("b", b.expand(a.size(-1))) self._num_outputs = a.size(-1) def subset_output(self, idcs: List[int]) -> AffineDeterministicModel: r"""Subset the model along the output dimension. Args: idcs: The output indices to subset the model to. Returns: The current model, subset to the specified output indices. """ a_sub = self.a.detach()[..., idcs].clone() b_sub = self.b.detach()[..., idcs].clone() return self.__class__(a=a_sub, b=b_sub) def forward(self, X: Tensor) -> Tensor: return self.b + torch.einsum("...d,dm", X, self.a) class PosteriorMeanModel(DeterministicModel): """A deterministic model that always returns the posterior mean.""" def __init__(self, model: Model) -> None: r""" Args: model: The base model. """ super().__init__() self.model = model def forward(self, X: Tensor) -> Tensor: return self.model.posterior(X).mean class FixedSingleSampleModel(DeterministicModel): r""" A deterministic model defined by a single sample `w`. Given a base model `f` and a fixed sample `w`, the model always outputs y = f_mean(x) + f_stddev(x) * w We assume the outcomes are uncorrelated here. """ def __init__( self, model: Model, w: Optional[Tensor] = None, dim: Optional[int] = None, jitter: Optional[float] = 1e-8, dtype: Optional[torch.dtype] = None, device: Optional[torch.dtype] = None, ) -> None: r""" Args: model: The base model. w: A 1-d tensor with length model.num_outputs. If None, draw it from a standard normal distribution. dim: dimensionality of w. If None and w is not provided, draw w samples of size model.num_outputs. jitter: jitter value to be added for numerical stability, 1e-8 by default. dtype: dtype for w if specified device: device for w if specified """ super().__init__() self.model = model self._num_outputs = model.num_outputs self.jitter = jitter if w is None: self.w = ( torch.randn(model.num_outputs, dtype=dtype, device=device) if dim is None else torch.randn(dim, dtype=dtype, device=device) ) else: self.w = w def forward(self, X: Tensor) -> Tensor: post = self.model.posterior(X) return post.mean + torch.sqrt(post.variance + self.jitter) * self.w.to(X)
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Abstract model class for all GPyTorch-based botorch models. To implement your own, simply inherit from both the provided classes and a GPyTorch Model class such as an ExactGP. """ from __future__ import annotations import itertools import warnings from abc import ABC from copy import deepcopy from typing import Any, List, Optional, Tuple, TYPE_CHECKING, Union import torch from botorch.acquisition.objective import PosteriorTransform from botorch.exceptions.errors import BotorchTensorDimensionError, InputDataError from botorch.exceptions.warnings import ( _get_single_precision_warning, BotorchTensorDimensionWarning, ) from botorch.models.model import Model, ModelList from botorch.models.utils import ( _make_X_full, add_output_dim, gpt_posterior_settings, mod_batch_shape, multioutput_to_batch_mode_transform, ) from botorch.posteriors.fully_bayesian import FullyBayesianPosterior from botorch.posteriors.gpytorch import GPyTorchPosterior from botorch.utils.transforms import is_fully_bayesian from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood from torch import Tensor if TYPE_CHECKING: from botorch.posteriors.posterior_list import PosteriorList # pragma: no cover from botorch.posteriors.transformed import TransformedPosterior # pragma: no cover from gpytorch.likelihoods import Likelihood # pragma: no cover class GPyTorchModel(Model, ABC): r"""Abstract base class for models based on GPyTorch models. The easiest way to use this is to subclass a model from a GPyTorch model class (e.g. an `ExactGP`) and this `GPyTorchModel`. See e.g. `SingleTaskGP`. :meta private: """ likelihood: Likelihood @staticmethod def _validate_tensor_args( X: Tensor, Y: Tensor, Yvar: Optional[Tensor] = None, strict: bool = True ) -> None: r"""Checks that `Y` and `Yvar` have an explicit output dimension if strict. Checks that the dtypes of the inputs match, and warns if using float. This also checks that `Yvar` has the same trailing dimensions as `Y`. Note we only infer that an explicit output dimension exists when `X` and `Y` have the same `batch_shape`. Args: X: A `batch_shape x n x d`-dim Tensor, where `d` is the dimension of the feature space, `n` is the number of points per batch, and `batch_shape` is the batch shape (potentially empty). Y: A `batch_shape' x n x m`-dim Tensor, where `m` is the number of model outputs, `n'` is the number of points per batch, and `batch_shape'` is the batch shape of the observations. Yvar: A `batch_shape' x n x m` tensor of observed measurement noise. Note: this will be None when using a model that infers the noise level (e.g. a `SingleTaskGP`). strict: A boolean indicating whether to check that `Y` and `Yvar` have an explicit output dimension. """ if X.dim() != Y.dim(): if (X.dim() - Y.dim() == 1) and (X.shape[:-1] == Y.shape): message = ( "An explicit output dimension is required for targets." f" Expected Y with dimension {X.dim()} (got {Y.dim()=})." ) else: message = ( "Expected X and Y to have the same number of dimensions" f" (got X with dimension {X.dim()} and Y with dimension" f" {Y.dim()})." ) if strict: raise BotorchTensorDimensionError(message) else: warnings.warn( "Non-strict enforcement of botorch tensor conventions. 
The " "following error would have been raised with strict enforcement: " f"{message}", BotorchTensorDimensionWarning, ) # Yvar may not have the same batch dimensions, but the trailing dimensions # of Yvar should be the same as the trailing dimensions of Y. if Yvar is not None and Y.shape[-(Yvar.dim()) :] != Yvar.shape: raise BotorchTensorDimensionError( "An explicit output dimension is required for observation noise." f" Expected Yvar with shape: {Y.shape[-Yvar.dim() :]} (got" f" {Yvar.shape})." ) # Check the dtypes. if X.dtype != Y.dtype or (Yvar is not None and Y.dtype != Yvar.dtype): raise InputDataError( "Expected all inputs to share the same dtype. Got " f"{X.dtype} for X, {Y.dtype} for Y, and " f"{Yvar.dtype if Yvar is not None else None} for Yvar." ) if X.dtype != torch.float64: # NOTE: Not using a BotorchWarning since those get ignored. warnings.warn( _get_single_precision_warning(str(X.dtype)), UserWarning, stacklevel=2 ) @property def batch_shape(self) -> torch.Size: r"""The batch shape of the model. This is a batch shape from an I/O perspective, independent of the internal representation of the model (as e.g. in BatchedMultiOutputGPyTorchModel). For a model with `m` outputs, a `test_batch_shape x q x d`-shaped input `X` to the `posterior` method returns a Posterior object over an output of shape `broadcast(test_batch_shape, model.batch_shape) x q x m`. """ return self.train_inputs[0].shape[:-2] @property def num_outputs(self) -> int: r"""The number of outputs of the model.""" return self._num_outputs # pyre-fixme[14]: Inconsistent override. # `botorch.models.gpytorch.GPyTorchModel.posterior` overrides method defined # in `Model` inconsistently. Could not find parameter `output_indices` in # overriding signature. def posterior( self, X: Tensor, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> Union[GPyTorchPosterior, TransformedPosterior]: r"""Computes the posterior over model outputs at the provided points. Args: X: A `(batch_shape) x q x d`-dim Tensor, where `d` is the dimension of the feature space and `q` is the number of points considered jointly. observation_noise: If True, add the observation noise from the likelihood to the posterior. If a Tensor, use it directly as the observation noise (must be of shape `(batch_shape) x q`). posterior_transform: An optional PosteriorTransform. Returns: A `GPyTorchPosterior` object, representing a batch of `b` joint distributions over `q` points. Includes observation noise if specified. """ self.eval() # make sure model is in eval mode # input transforms are applied at `posterior` in `eval` mode, and at # `model.forward()` at the training time X = self.transform_inputs(X) with gpt_posterior_settings(): mvn = self(X) if observation_noise is not False: if isinstance(observation_noise, torch.Tensor): # TODO: Make sure observation noise is transformed correctly self._validate_tensor_args(X=X, Y=observation_noise) if observation_noise.size(-1) == 1: observation_noise = observation_noise.squeeze(-1) mvn = self.likelihood(mvn, X, noise=observation_noise) else: mvn = self.likelihood(mvn, X) posterior = GPyTorchPosterior(distribution=mvn) if hasattr(self, "outcome_transform"): posterior = self.outcome_transform.untransform_posterior(posterior) if posterior_transform is not None: return posterior_transform(posterior) return posterior def condition_on_observations(self, X: Tensor, Y: Tensor, **kwargs: Any) -> Model: r"""Condition the model on new observations. 
Args: X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of the feature space, `n'` is the number of points per batch, and `batch_shape` is the batch shape (must be compatible with the batch shape of the model). Y: A `batch_shape' x n x m`-dim Tensor, where `m` is the number of model outputs, `n'` is the number of points per batch, and `batch_shape'` is the batch shape of the observations. `batch_shape'` must be broadcastable to `batch_shape` using standard broadcasting semantics. If `Y` has fewer batch dimensions than `X`, its is assumed that the missing batch dimensions are the same for all `Y`. Returns: A `Model` object of the same type, representing the original model conditioned on the new observations `(X, Y)` (and possibly noise observations passed in via kwargs). Example: >>> train_X = torch.rand(20, 2) >>> train_Y = torch.sin(train_X[:, 0]) + torch.cos(train_X[:, 1]) >>> model = SingleTaskGP(train_X, train_Y) >>> new_X = torch.rand(5, 2) >>> new_Y = torch.sin(new_X[:, 0]) + torch.cos(new_X[:, 1]) >>> model = model.condition_on_observations(X=new_X, Y=new_Y) """ Yvar = kwargs.get("noise", None) if hasattr(self, "outcome_transform"): # pass the transformed data to get_fantasy_model below # (unless we've already trasnformed if BatchedMultiOutputGPyTorchModel) if not isinstance(self, BatchedMultiOutputGPyTorchModel): Y, Yvar = self.outcome_transform(Y, Yvar) # validate using strict=False, since we cannot tell if Y has an explicit # output dimension self._validate_tensor_args(X=X, Y=Y, Yvar=Yvar, strict=False) if Y.size(-1) == 1: Y = Y.squeeze(-1) if Yvar is not None: kwargs.update({"noise": Yvar.squeeze(-1)}) # get_fantasy_model will properly copy any existing outcome transforms # (since it deepcopies the original model) return self.get_fantasy_model(inputs=X, targets=Y, **kwargs) # pyre-fixme[13]: uninitialized attributes _num_outputs, _input_batch_shape, # _aug_batch_shape class BatchedMultiOutputGPyTorchModel(GPyTorchModel): r"""Base class for batched multi-output GPyTorch models with independent outputs. This model should be used when the same training data is used for all outputs. Outputs are modeled independently by using a different batch for each output. :meta private: """ _num_outputs: int _input_batch_shape: torch.Size _aug_batch_shape: torch.Size @staticmethod def get_batch_dimensions( train_X: Tensor, train_Y: Tensor ) -> Tuple[torch.Size, torch.Size]: r"""Get the raw batch shape and output-augmented batch shape of the inputs. Args: train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of training features. train_Y: A `n x m` or `batch_shape x n x m` (batch mode) tensor of training observations. Returns: 2-element tuple containing - The `input_batch_shape` - The output-augmented batch shape: `input_batch_shape x (m)` """ input_batch_shape = train_X.shape[:-2] aug_batch_shape = input_batch_shape num_outputs = train_Y.shape[-1] if num_outputs > 1: aug_batch_shape += torch.Size([num_outputs]) return input_batch_shape, aug_batch_shape def _set_dimensions(self, train_X: Tensor, train_Y: Tensor) -> None: r"""Store the number of outputs and the batch shape. Args: train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of training features. train_Y: A `n x m` or `batch_shape x n x m` (batch mode) tensor of training observations. 
""" self._num_outputs = train_Y.shape[-1] self._input_batch_shape, self._aug_batch_shape = self.get_batch_dimensions( train_X=train_X, train_Y=train_Y ) @property def batch_shape(self) -> torch.Size: r"""The batch shape of the model. This is a batch shape from an I/O perspective, independent of the internal representation of the model (as e.g. in BatchedMultiOutputGPyTorchModel). For a model with `m` outputs, a `test_batch_shape x q x d`-shaped input `X` to the `posterior` method returns a Posterior object over an output of shape `broadcast(test_batch_shape, model.batch_shape) x q x m`. """ return self._input_batch_shape def _transform_tensor_args( self, X: Tensor, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Tensor, Optional[Tensor]]: r"""Transforms tensor arguments: for single output models, the output dimension is squeezed and for multi-output models, the output dimension is transformed into the left-most batch dimension. Args: X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of training features. Y: A `n x m` or `batch_shape x n x m` (batch mode) tensor of training observations. Yvar: A `n x m` or `batch_shape x n x m` (batch mode) tensor of observed measurement noise. Note: this will be None when using a model that infers the noise level (e.g. a `SingleTaskGP`). Returns: 3-element tuple containing - A `input_batch_shape x (m) x n x d` tensor of training features. - A `target_batch_shape x (m) x n` tensor of training observations. - A `target_batch_shape x (m) x n` tensor observed measurement noise (or None). """ if self._num_outputs > 1: return multioutput_to_batch_mode_transform( train_X=X, train_Y=Y, train_Yvar=Yvar, num_outputs=self._num_outputs ) return X, Y.squeeze(-1), None if Yvar is None else Yvar.squeeze(-1) def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> Union[GPyTorchPosterior, TransformedPosterior]: r"""Computes the posterior over model outputs at the provided points. Args: X: A `(batch_shape) x q x d`-dim Tensor, where `d` is the dimension of the feature space and `q` is the number of points considered jointly. output_indices: A list of indices, corresponding to the outputs over which to compute the posterior (if the model is multi-output). Can be used to speed up computation if only a subset of the model's outputs are required for optimization. If omitted, computes the posterior over all model outputs. observation_noise: If True, add the observation noise from the likelihood to the posterior. If a Tensor, use it directly as the observation noise (must be of shape `(batch_shape) x q x m`). posterior_transform: An optional PosteriorTransform. Returns: A `GPyTorchPosterior` object, representing `batch_shape` joint distributions over `q` points and the outputs selected by `output_indices` each. Includes observation noise if specified. 
""" self.eval() # make sure model is in eval mode # input transforms are applied at `posterior` in `eval` mode, and at # `model.forward()` at the training time X = self.transform_inputs(X) with gpt_posterior_settings(): # insert a dimension for the output dimension if self._num_outputs > 1: X, output_dim_idx = add_output_dim( X=X, original_batch_shape=self._input_batch_shape ) mvn = self(X) if observation_noise is not False: if torch.is_tensor(observation_noise): # TODO: Validate noise shape # make observation_noise `batch_shape x q x n` if self.num_outputs > 1: obs_noise = observation_noise.transpose(-1, -2) else: obs_noise = observation_noise.squeeze(-1) mvn = self.likelihood(mvn, X, noise=obs_noise) elif isinstance(self.likelihood, FixedNoiseGaussianLikelihood): # Use the mean of the previous noise values (TODO: be smarter here). noise = self.likelihood.noise.mean().expand(X.shape[:-1]) mvn = self.likelihood(mvn, X, noise=noise) else: mvn = self.likelihood(mvn, X) if self._num_outputs > 1: mean_x = mvn.mean covar_x = mvn.lazy_covariance_matrix output_indices = output_indices or range(self._num_outputs) mvns = [ MultivariateNormal( mean_x.select(dim=output_dim_idx, index=t), covar_x[(slice(None),) * output_dim_idx + (t,)], ) for t in output_indices ] mvn = MultitaskMultivariateNormal.from_independent_mvns(mvns=mvns) posterior = GPyTorchPosterior(distribution=mvn) if hasattr(self, "outcome_transform"): posterior = self.outcome_transform.untransform_posterior(posterior) if posterior_transform is not None: return posterior_transform(posterior) return posterior def condition_on_observations( self, X: Tensor, Y: Tensor, **kwargs: Any ) -> BatchedMultiOutputGPyTorchModel: r"""Condition the model on new observations. Args: X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of the feature space, `m` is the number of points per batch, and `batch_shape` is the batch shape (must be compatible with the batch shape of the model). Y: A `batch_shape' x n' x m`-dim Tensor, where `m` is the number of model outputs, `n'` is the number of points per batch, and `batch_shape'` is the batch shape of the observations. `batch_shape'` must be broadcastable to `batch_shape` using standard broadcasting semantics. If `Y` has fewer batch dimensions than `X`, its is assumed that the missing batch dimensions are the same for all `Y`. Returns: A `BatchedMultiOutputGPyTorchModel` object of the same type with `n + n'` training examples, representing the original model conditioned on the new observations `(X, Y)` (and possibly noise observations passed in via kwargs). 
Example: >>> train_X = torch.rand(20, 2) >>> train_Y = torch.cat( >>> [torch.sin(train_X[:, 0]), torch.cos(train_X[:, 1])], -1 >>> ) >>> model = SingleTaskGP(train_X, train_Y) >>> new_X = torch.rand(5, 2) >>> new_Y = torch.cat([torch.sin(new_X[:, 0]), torch.cos(new_X[:, 1])], -1) >>> model = model.condition_on_observations(X=new_X, Y=new_Y) """ noise = kwargs.get("noise") if hasattr(self, "outcome_transform"): # we need to apply transforms before shifting batch indices around Y, noise = self.outcome_transform(Y, noise) self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False) inputs = X if self._num_outputs > 1: inputs, targets, noise = multioutput_to_batch_mode_transform( train_X=X, train_Y=Y, num_outputs=self._num_outputs, train_Yvar=noise ) # `multioutput_to_batch_mode_transform` removes the output dimension, # which is necessary for `condition_on_observations` targets = targets.unsqueeze(-1) if noise is not None: noise = noise.unsqueeze(-1) else: inputs = X targets = Y if noise is not None: kwargs.update({"noise": noise}) fantasy_model = super().condition_on_observations(X=inputs, Y=targets, **kwargs) fantasy_model._input_batch_shape = fantasy_model.train_targets.shape[ : (-1 if self._num_outputs == 1 else -2) ] fantasy_model._aug_batch_shape = fantasy_model.train_targets.shape[:-1] return fantasy_model def subset_output(self, idcs: List[int]) -> BatchedMultiOutputGPyTorchModel: r"""Subset the model along the output dimension. Args: idcs: The output indices to subset the model to. Returns: The current model, subset to the specified output indices. """ try: subset_batch_dict = self._subset_batch_dict except AttributeError: raise NotImplementedError( "subset_output requires the model to define a `_subset_dict` attribute" ) m = len(idcs) new_model = deepcopy(self) subset_everything = self.num_outputs == m and idcs == list(range(m)) if subset_everything: return new_model tidxr = torch.tensor(idcs, device=new_model.train_targets.device) idxr = tidxr if m > 1 else idcs[0] new_tail_bs = torch.Size([m]) if m > 1 else torch.Size() new_model._num_outputs = m new_model._aug_batch_shape = new_model._aug_batch_shape[:-1] + new_tail_bs new_model.train_inputs = tuple( ti[..., idxr, :, :] for ti in new_model.train_inputs ) new_model.train_targets = new_model.train_targets[..., idxr, :] # adjust batch shapes of parameters/buffers if necessary for full_name, p in itertools.chain( new_model.named_parameters(), new_model.named_buffers() ): if full_name in subset_batch_dict: idx = subset_batch_dict[full_name] new_data = p.index_select(dim=idx, index=tidxr) if m == 1: new_data = new_data.squeeze(idx) p.data = new_data mod_name = full_name.split(".")[:-1] mod_batch_shape(new_model, mod_name, m if m > 1 else 0) # subset outcome transform if present try: subset_octf = new_model.outcome_transform.subset_output(idcs=idcs) new_model.outcome_transform = subset_octf except AttributeError: pass return new_model class ModelListGPyTorchModel(ModelList, GPyTorchModel, ABC): r"""Abstract base class for models based on multi-output GPyTorch models. This is meant to be used with a gpytorch ModelList wrapper for independent evaluation of submodels. :meta private: """ @property def batch_shape(self) -> torch.Size: r"""The batch shape of the model. This is a batch shape from an I/O perspective, independent of the internal representation of the model (as e.g. in BatchedMultiOutputGPyTorchModel). 
For a model with `m` outputs, a `test_batch_shape x q x d`-shaped input `X` to the `posterior` method returns a Posterior object over an output of shape `broadcast(test_batch_shape, model.batch_shape) x q x m`. """ batch_shapes = {m.batch_shape for m in self.models} if len(batch_shapes) > 1: msg = ( f"Component models of {self.__class__.__name__} have different " "batch shapes" ) try: broadcast_shape = torch.broadcast_shapes(*batch_shapes) warnings.warn(msg + ". Broadcasting batch shapes.") return broadcast_shape except RuntimeError: raise NotImplementedError(msg + " that are not broadcastble.") return next(iter(batch_shapes)) # pyre-fixme[15]: Inconsistent override in return types def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> Union[GPyTorchPosterior, PosteriorList]: r"""Computes the posterior over model outputs at the provided points. Args: X: A `b x q x d`-dim Tensor, where `d` is the dimension of the feature space, `q` is the number of points considered jointly, and `b` is the batch dimension. output_indices: A list of indices, corresponding to the outputs over which to compute the posterior (if the model is multi-output). Can be used to speed up computation if only a subset of the model's outputs are required for optimization. If omitted, computes the posterior over all model outputs. observation_noise: If True, add the observation noise from the respective likelihoods to the posterior. If a Tensor of shape `(batch_shape) x q x m`, use it directly as the observation noise (with `observation_noise[...,i]` added to the posterior of the `i`-th model). posterior_transform: An optional PosteriorTransform. Returns: - If no `posterior_transform` is provided and the component models have no `outcome_transform`, or if the component models only use linear outcome transforms like `Standardize` (i.e. not `Log`), returns a `GPyTorchPosterior` or `FullyBayesianPosterior` object, representing `batch_shape` joint distributions over `q` points and the outputs selected by `output_indices` each. Includes measurement noise if `observation_noise` is specified. - If no `posterior_transform` is provided and component models have nonlinear transforms like `Log`, returns a `PosteriorList` with sub-posteriors of type `TransformedPosterior` - If `posterior_transform` is provided, that posterior transform will be applied and will determine the return type. This could potentially be any subclass of `Posterior`, but common choices give a `GPyTorchPosterior`. """ # Nonlinear transforms untransform to a `TransformedPosterior`, # which can't be made into a `GPyTorchPosterior` returns_untransformed = any( hasattr(mod, "outcome_transform") and (not mod.outcome_transform._is_linear) for mod in self.models ) # NOTE: We're not passing in the posterior transform here. We'll apply it later. posterior = ModelList.posterior( self, X=X, output_indices=output_indices, observation_noise=observation_noise, **kwargs, ) if not returns_untransformed: mvns = [p.distribution for p in posterior.posteriors] # Combining MTMVNs into a single MTMVN is currently not supported. if not any(isinstance(m, MultitaskMultivariateNormal) for m in mvns): # Return the result as a GPyTorchPosterior/FullyBayesianPosterior. 
mvn = ( mvns[0] if len(mvns) == 1 else MultitaskMultivariateNormal.from_independent_mvns(mvns=mvns) ) if any(is_fully_bayesian(m) for m in self.models): # Mixing fully Bayesian and other GP models is currently # not supported. posterior = FullyBayesianPosterior(distribution=mvn) else: posterior = GPyTorchPosterior(distribution=mvn) if posterior_transform is not None: return posterior_transform(posterior) return posterior def condition_on_observations(self, X: Tensor, Y: Tensor, **kwargs: Any) -> Model: raise NotImplementedError() class MultiTaskGPyTorchModel(GPyTorchModel, ABC): r"""Abstract base class for multi-task models based on GPyTorch models. This class provides the `posterior` method to models that implement a "long-format" multi-task GP in the style of `MultiTaskGP`. :meta private: """ def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> Union[GPyTorchPosterior, TransformedPosterior]: r"""Computes the posterior over model outputs at the provided points. Args: X: A tensor of shape `batch_shape x q x d` or `batch_shape x q x (d + 1)`, where `d` is the dimension of the feature space (not including task indices) and `q` is the number of points considered jointly. The `+ 1` dimension is the optional task feature / index. If given, the model produces the outputs for the given task indices. If omitted, the model produces outputs for tasks in in `self._output_tasks` (specified as `output_tasks` while constructing the model), which can overwritten using `output_indices`. output_indices: A list of indices, corresponding to the tasks over which to compute the posterior. Only used if `X` does not include the task feature. If omitted, defaults to `self._output_tasks`. observation_noise: If True, add observation noise from the respective likelihoods. If a Tensor, specifies the observation noise levels to add. posterior_transform: An optional PosteriorTransform. Returns: A `GPyTorchPosterior` object, representing `batch_shape` joint distributions over `q` points. If the task features are included in `X`, the posterior will be single output. Otherwise, the posterior will be single or multi output corresponding to the tasks included in either the `output_indices` or `self._output_tasks`. """ includes_task_feature = X.shape[-1] == self.num_non_task_features + 1 if includes_task_feature: # Make sure all task feature values are valid. task_features = X[..., self._task_feature].unique() if not ( (task_features >= 0).all() and (task_features < self.num_tasks).all() ): raise ValueError( "Expected all task features in `X` to be between 0 and " f"self.num_tasks - 1. Got {task_features}." ) if output_indices is not None: raise ValueError( "`output_indices` must be None when `X` includes task features." ) num_outputs = 1 X_full = X else: # Add the task features to construct the full X for evaluation. if output_indices is None: output_indices = self._output_tasks num_outputs = len(output_indices) if not all(0 <= i < self.num_tasks for i in output_indices): raise ValueError( "Expected `output_indices` to be between 0 and self.num_tasks - 1. " f"Got {output_indices}." 
) X_full = _make_X_full( X=X, output_indices=output_indices, tf=self._task_feature ) self.eval() # make sure model is in eval mode # input transforms are applied at `posterior` in `eval` mode, and at # `model.forward()` at the training time X_full = self.transform_inputs(X_full) with gpt_posterior_settings(): mvn = self(X_full) if observation_noise is not False: raise NotImplementedError( "Specifying observation noise is not yet supported by " f"{self.__class__.__name__}." ) # If single-output, return the posterior of a single-output model if num_outputs == 1: posterior = GPyTorchPosterior(distribution=mvn) else: # Otherwise, make a MultitaskMultivariateNormal out of this mtmvn = MultitaskMultivariateNormal( mean=mvn.mean.view(*mvn.mean.shape[:-1], num_outputs, -1).transpose( -1, -2 ), covariance_matrix=mvn.lazy_covariance_matrix, interleaved=False, ) posterior = GPyTorchPosterior(distribution=mtmvn) if hasattr(self, "outcome_transform"): posterior = self.outcome_transform.untransform_posterior(posterior) if posterior_transform is not None: return posterior_transform(posterior) return posterior
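
# The abstract classes above are exercised through concrete models such as
# SingleTaskGP, which subclasses BatchedMultiOutputGPyTorchModel. The sketch
# below (not part of the original module) shows the `posterior` and
# `condition_on_observations` calls documented above; the toy data is an
# illustrative assumption.
import torch
from botorch.models import SingleTaskGP

torch.manual_seed(0)
train_X = torch.rand(20, 2, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True) + 0.05 * torch.randn(
    20, 1, dtype=torch.double
)
model = SingleTaskGP(train_X, train_Y)

test_X = torch.rand(5, 2, dtype=torch.double)
posterior = model.posterior(test_X)  # latent (noise-free) posterior
noisy_posterior = model.posterior(test_X, observation_noise=True)

# Conditioning appends new observations and returns a new model; the model
# typically needs at least one posterior evaluation first so that the GPyTorch
# prediction caches exist.
new_X = torch.rand(3, 2, dtype=torch.double)
new_Y = new_X.sum(dim=-1, keepdim=True)
fantasy = model.condition_on_observations(X=new_X, Y=new_Y)
print(fantasy.train_inputs[0].shape)  # expected: torch.Size([23, 2])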
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional from botorch.models.gp_regression import FixedNoiseGP from botorch.models.kernels.contextual_lcea import LCEAKernel from botorch.models.kernels.contextual_sac import SACKernel from torch import Tensor class SACGP(FixedNoiseGP): r"""A GP using a Structural Additive Contextual(SAC) kernel.""" def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor, decomposition: Dict[str, List[int]], ) -> None: r""" Args: train_X: (n x d) X training data. train_Y: (n x 1) Y training data. train_Yvar: (n x 1) Noise variances of each training Y. decomposition: Keys are context names. Values are the indexes of parameters belong to the context. The parameter indexes are in the same order across contexts. """ super().__init__(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar) self.covar_module = SACKernel( decomposition=decomposition, batch_shape=self._aug_batch_shape, device=train_X.device, ) self.decomposition = decomposition self.to(train_X) class LCEAGP(FixedNoiseGP): r"""A GP using a Latent Context Embedding Additive (LCE-A) Kernel. Note that the model does not support batch training. Input training data sets should have dim = 2. """ def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor, decomposition: Dict[str, List[int]], train_embedding: bool = True, cat_feature_dict: Optional[Dict] = None, embs_feature_dict: Optional[Dict] = None, embs_dim_list: Optional[List[int]] = None, context_weight_dict: Optional[Dict] = None, ) -> None: r""" Args: train_X: (n x d) X training data. train_Y: (n x 1) Y training data. train_Yvar: (n x 1) Noise variance of Y. decomposition: Keys are context names. Values are the indexes of parameters belong to the context. cat_feature_dict: Keys are context names and values are list of categorical features i.e. {"context_name" : [cat_0, ..., cat_k]}, where k is the number of categorical variables. If None, we use context names in the decomposition as the only categorical feature, i.e., k = 1. embs_feature_dict: Pre-trained continuous embedding features of each context. embs_dim_list: Embedding dimension for each categorical variable. The length equals the number of categorical features k. If None, the embedding dimension is set to 1 for each categorical variable. context_weight_dict: Known population weights of each context. """ super().__init__(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar) self.covar_module = LCEAKernel( decomposition=decomposition, batch_shape=self._aug_batch_shape, train_embedding=train_embedding, cat_feature_dict=cat_feature_dict, embs_feature_dict=embs_feature_dict, embs_dim_list=embs_dim_list, context_weight_dict=context_weight_dict, device=train_X.device, ) self.decomposition = decomposition self.to(train_X)
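
# A minimal usage sketch for SACGP with a parameter decomposition over two
# contexts (not part of the original module). The decomposition, toy data, and
# fitting routine are illustrative assumptions only.
import torch
from botorch.fit import fit_gpytorch_mll
from gpytorch.mlls import ExactMarginalLogLikelihood

torch.manual_seed(0)
# Each context owns two of the four parameters, listed in the same order.
decomposition = {"context_a": [0, 1], "context_b": [2, 3]}
train_X = torch.rand(10, 4, dtype=torch.double)
train_Y = train_X[:, :2].sum(dim=-1, keepdim=True)
train_Yvar = 0.01 * torch.ones_like(train_Y)

model = SACGP(train_X, train_Y, train_Yvar, decomposition)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll)
posterior = model.posterior(torch.rand(3, 4, dtype=torch.double))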
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Gaussian Process Regression models with fully Bayesian inference. Fully Bayesian models use Bayesian inference over model hyperparameters, such as lengthscales and noise variance, learning a posterior distribution for the hyperparameters using the No-U-Turn-Sampler (NUTS). This is followed by sampling a small set of hyperparameters (often ~16) from the posterior that we will use for model predictions and for computing acquisition function values. By contrast, our “standard” models (e.g. `SingleTaskGP`) learn only a single best value for each hyperparameter using MAP. The fully Bayesian method generally results in a better and more well-calibrated model, but is more computationally intensive. For a full description, see [Eriksson2021saasbo]. We use a lightweight PyTorch implementation of a Matern-5/2 kernel as there are some performance issues with running NUTS on top of standard GPyTorch models. The resulting hyperparameter samples are loaded into a batched GPyTorch model after fitting. References: .. [Eriksson2021saasbo] D. Eriksson, M. Jankowiak. High-Dimensional Bayesian Optimization with Sparse Axis-Aligned Subspaces. Proceedings of the Thirty- Seventh Conference on Uncertainty in Artificial Intelligence, 2021. """ import math from abc import abstractmethod from typing import Any, Dict, List, Mapping, Optional, Tuple import pyro import torch from botorch.acquisition.objective import PosteriorTransform from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform from botorch.models.utils import validate_input_scaling from botorch.models.utils.gpytorch_modules import MIN_INFERRED_NOISE_LEVEL from botorch.posteriors.fully_bayesian import FullyBayesianPosterior, MCMC_DIM from gpytorch.constraints import GreaterThan from gpytorch.distributions.multivariate_normal import MultivariateNormal from gpytorch.kernels import MaternKernel, ScaleKernel from gpytorch.kernels.kernel import dist, Kernel from gpytorch.likelihoods.gaussian_likelihood import ( FixedNoiseGaussianLikelihood, GaussianLikelihood, ) from gpytorch.likelihoods.likelihood import Likelihood from gpytorch.means.constant_mean import ConstantMean from gpytorch.means.mean import Mean from gpytorch.models.exact_gp import ExactGP from pyro.ops.integrator import register_exception_handler from torch import Tensor _sqrt5 = math.sqrt(5) def _handle_torch_linalg(exception: Exception) -> bool: return type(exception) is torch.linalg.LinAlgError def _handle_valerr_in_dist_init(exception: Exception) -> bool: if type(exception) is not ValueError: return False return "satisfy the constraint PositiveDefinite()" in str(exception) register_exception_handler("torch_linalg", _handle_torch_linalg) register_exception_handler("valerr_in_dist_init", _handle_valerr_in_dist_init) def matern52_kernel(X: Tensor, lengthscale: Tensor) -> Tensor: """Matern-5/2 kernel.""" dist = compute_dists(X=X, lengthscale=lengthscale) sqrt5_dist = _sqrt5 * dist return sqrt5_dist.add(1 + 5 / 3 * (dist**2)) * torch.exp(-sqrt5_dist) def compute_dists(X: Tensor, lengthscale: Tensor) -> Tensor: """Compute kernel distances.""" scaled_X = X / lengthscale return dist(scaled_X, scaled_X, x1_eq_x2=True) def reshape_and_detach(target: Tensor, new_value: Tensor) -> None: """Detach 
and reshape `new_value` to match `target`.""" return new_value.detach().clone().view(target.shape).to(target) class PyroModel: r""" Base class for a Pyro model; used to assist in learning hyperparameters. This class and its subclasses are not a standard BoTorch models; instead the subclasses are used as inputs to a `SaasFullyBayesianSingleTaskGP`, which should then have its hyperparameters fit with `fit_fully_bayesian_model_nuts`. (By default, its subclass `SaasPyroModel` is used). A `PyroModel`’s `sample` method should specify lightweight PyTorch functionality, which will be used for fast model fitting with NUTS. The utility of `PyroModel` is in enabling fast fitting with NUTS, since we would otherwise need to use GPyTorch, which is computationally infeasible in combination with Pyro. :meta private: """ def set_inputs( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor] = None ): """Set the training data. Args: train_X: Training inputs (n x d) train_Y: Training targets (n x 1) train_Yvar: Observed noise variance (n x 1). Inferred if None. """ self.train_X = train_X self.train_Y = train_Y self.train_Yvar = train_Yvar @abstractmethod def sample(self) -> None: r"""Sample from the model.""" pass # pragma: no cover @abstractmethod def postprocess_mcmc_samples( self, mcmc_samples: Dict[str, Tensor], **kwargs: Any ) -> Dict[str, Tensor]: """Post-process the final MCMC samples.""" pass # pragma: no cover @abstractmethod def load_mcmc_samples( self, mcmc_samples: Dict[str, Tensor] ) -> Tuple[Mean, Kernel, Likelihood]: pass # pragma: no cover class SaasPyroModel(PyroModel): r"""Implementation of the sparse axis-aligned subspace priors (SAAS) model. The SAAS model uses sparsity-inducing priors to identify the most important parameters. This model is suitable for high-dimensional BO with potentially hundreds of tunable parameters. See [Eriksson2021saasbo]_ for more details. `SaasPyroModel` is not a standard BoTorch model; instead, it is used as an input to `SaasFullyBayesianSingleTaskGP`. It is used as a default keyword argument, and end users are not likely to need to instantiate or modify a `SaasPyroModel` unless they want to customize its attributes (such as `covar_module`). """ def set_inputs( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor] = None ): super().set_inputs(train_X, train_Y, train_Yvar) self.ard_num_dims = self.train_X.shape[-1] def sample(self) -> None: r"""Sample from the SAAS model. This samples the mean, noise variance, outputscale, and lengthscales according to the SAAS prior. 
""" tkwargs = {"dtype": self.train_X.dtype, "device": self.train_X.device} outputscale = self.sample_outputscale(concentration=2.0, rate=0.15, **tkwargs) mean = self.sample_mean(**tkwargs) noise = self.sample_noise(**tkwargs) lengthscale = self.sample_lengthscale(dim=self.ard_num_dims, **tkwargs) K = matern52_kernel(X=self.train_X, lengthscale=lengthscale) K = outputscale * K + noise * torch.eye(self.train_X.shape[0], **tkwargs) pyro.sample( "Y", pyro.distributions.MultivariateNormal( loc=mean.view(-1).expand(self.train_X.shape[0]), covariance_matrix=K, ), obs=self.train_Y.squeeze(-1), ) def sample_outputscale( self, concentration: float = 2.0, rate: float = 0.15, **tkwargs: Any ) -> Tensor: r"""Sample the outputscale.""" return pyro.sample( "outputscale", pyro.distributions.Gamma( torch.tensor(concentration, **tkwargs), torch.tensor(rate, **tkwargs), ), ) def sample_mean(self, **tkwargs: Any) -> Tensor: r"""Sample the mean constant.""" return pyro.sample( "mean", pyro.distributions.Normal( torch.tensor(0.0, **tkwargs), torch.tensor(1.0, **tkwargs), ), ) def sample_noise(self, **tkwargs: Any) -> Tensor: r"""Sample the noise variance.""" if self.train_Yvar is None: return MIN_INFERRED_NOISE_LEVEL + pyro.sample( "noise", pyro.distributions.Gamma( torch.tensor(0.9, **tkwargs), torch.tensor(10.0, **tkwargs), ), ) else: return self.train_Yvar def sample_lengthscale( self, dim: int, alpha: float = 0.1, **tkwargs: Any ) -> Tensor: r"""Sample the lengthscale.""" tausq = pyro.sample( "kernel_tausq", pyro.distributions.HalfCauchy(torch.tensor(alpha, **tkwargs)), ) inv_length_sq = pyro.sample( "_kernel_inv_length_sq", pyro.distributions.HalfCauchy(torch.ones(dim, **tkwargs)), ) inv_length_sq = pyro.deterministic( "kernel_inv_length_sq", tausq * inv_length_sq ) lengthscale = pyro.deterministic( "lengthscale", inv_length_sq.rsqrt(), ) return lengthscale def postprocess_mcmc_samples( self, mcmc_samples: Dict[str, Tensor] ) -> Dict[str, Tensor]: r"""Post-process the MCMC samples. This computes the true lengthscales and removes the inverse lengthscales and tausq (global shrinkage). """ inv_length_sq = ( mcmc_samples["kernel_tausq"].unsqueeze(-1) * mcmc_samples["_kernel_inv_length_sq"] ) mcmc_samples["lengthscale"] = inv_length_sq.rsqrt() # Delete `kernel_tausq` and `_kernel_inv_length_sq` since they aren't loaded # into the final model. 
del mcmc_samples["kernel_tausq"], mcmc_samples["_kernel_inv_length_sq"] return mcmc_samples def load_mcmc_samples( self, mcmc_samples: Dict[str, Tensor] ) -> Tuple[Mean, Kernel, Likelihood]: r"""Load the MCMC samples into the mean_module, covar_module, and likelihood.""" tkwargs = {"device": self.train_X.device, "dtype": self.train_X.dtype} num_mcmc_samples = len(mcmc_samples["mean"]) batch_shape = torch.Size([num_mcmc_samples]) mean_module = ConstantMean(batch_shape=batch_shape).to(**tkwargs) covar_module = ScaleKernel( base_kernel=MaternKernel( ard_num_dims=self.ard_num_dims, batch_shape=batch_shape, ), batch_shape=batch_shape, ).to(**tkwargs) if self.train_Yvar is not None: likelihood = FixedNoiseGaussianLikelihood( # Reshape to shape `num_mcmc_samples x N` noise=self.train_Yvar.squeeze(-1).expand( num_mcmc_samples, len(self.train_Yvar) ), batch_shape=batch_shape, ).to(**tkwargs) else: likelihood = GaussianLikelihood( batch_shape=batch_shape, noise_constraint=GreaterThan(MIN_INFERRED_NOISE_LEVEL), ).to(**tkwargs) likelihood.noise_covar.noise = reshape_and_detach( target=likelihood.noise_covar.noise, new_value=mcmc_samples["noise"].clamp_min(MIN_INFERRED_NOISE_LEVEL), ) covar_module.base_kernel.lengthscale = reshape_and_detach( target=covar_module.base_kernel.lengthscale, new_value=mcmc_samples["lengthscale"], ) covar_module.outputscale = reshape_and_detach( target=covar_module.outputscale, new_value=mcmc_samples["outputscale"], ) mean_module.constant.data = reshape_and_detach( target=mean_module.constant.data, new_value=mcmc_samples["mean"], ) return mean_module, covar_module, likelihood class SaasFullyBayesianSingleTaskGP(ExactGP, BatchedMultiOutputGPyTorchModel): r"""A fully Bayesian single-task GP model with the SAAS prior. This model assumes that the inputs have been normalized to [0, 1]^d and that the output has been standardized to have zero mean and unit variance. You can either normalize and standardize the data before constructing the model or use an `input_transform` and `outcome_transform`. The SAAS model [Eriksson2021saasbo]_ with a Matern-5/2 kernel is used by default. You are expected to use `fit_fully_bayesian_model_nuts` to fit this model as it isn't compatible with `fit_gpytorch_model`. Example: >>> saas_gp = SaasFullyBayesianSingleTaskGP(train_X, train_Y) >>> fit_fully_bayesian_model_nuts(saas_gp) >>> posterior = saas_gp.posterior(test_X) """ def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor] = None, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, pyro_model: Optional[PyroModel] = None, ) -> None: r"""Initialize the fully Bayesian single-task GP model. Args: train_X: Training inputs (n x d) train_Y: Training targets (n x 1) train_Yvar: Observed noise variance (n x 1). Inferred if None. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). input_transform: An input transform that is applied in the model's forward pass. pyro_model: Optional `PyroModel`, defaults to `SaasPyroModel`. 
""" if not ( train_X.ndim == train_Y.ndim == 2 and len(train_X) == len(train_Y) and train_Y.shape[-1] == 1 ): raise ValueError( "Expected train_X to have shape n x d and train_Y to have shape n x 1" ) if train_Yvar is not None: if train_Y.shape != train_Yvar.shape: raise ValueError( "Expected train_Yvar to be None or have the same shape as train_Y" ) with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) if outcome_transform is not None: train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar) self._validate_tensor_args(X=transformed_X, Y=train_Y) validate_input_scaling( train_X=transformed_X, train_Y=train_Y, train_Yvar=train_Yvar ) self._num_outputs = train_Y.shape[-1] self._input_batch_shape = train_X.shape[:-2] if train_Yvar is not None: # Clamp after transforming train_Yvar = train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL) X_tf, Y_tf, _ = self._transform_tensor_args(X=train_X, Y=train_Y) super().__init__( train_inputs=X_tf, train_targets=Y_tf, likelihood=GaussianLikelihood() ) self.mean_module = None self.covar_module = None self.likelihood = None if pyro_model is None: pyro_model = SaasPyroModel() pyro_model.set_inputs( train_X=transformed_X, train_Y=train_Y, train_Yvar=train_Yvar ) self.pyro_model = pyro_model if outcome_transform is not None: self.outcome_transform = outcome_transform if input_transform is not None: self.input_transform = input_transform def _check_if_fitted(self): r"""Raise an exception if the model hasn't been fitted.""" if self.covar_module is None: raise RuntimeError( "Model has not been fitted. You need to call " "`fit_fully_bayesian_model_nuts` to fit the model." ) @property def median_lengthscale(self) -> Tensor: r"""Median lengthscales across the MCMC samples.""" self._check_if_fitted() lengthscale = self.covar_module.base_kernel.lengthscale.clone() return lengthscale.median(0).values.squeeze(0) @property def num_mcmc_samples(self) -> int: r"""Number of MCMC samples in the model.""" self._check_if_fitted() return len(self.covar_module.outputscale) @property def batch_shape(self) -> torch.Size: r"""Batch shape of the model, equal to the number of MCMC samples. Note that `SaasFullyBayesianSingleTaskGP` does not support batching over input data at this point.""" return torch.Size([self.num_mcmc_samples]) @property def _aug_batch_shape(self) -> torch.Size: r"""The batch shape of the model, augmented to include the output dim.""" aug_batch_shape = self.batch_shape if self.num_outputs > 1: aug_batch_shape += torch.Size([self.num_outputs]) return aug_batch_shape def train(self, mode: bool = True) -> None: r"""Puts the model in `train` mode.""" super().train(mode=mode) if mode: self.mean_module = None self.covar_module = None self.likelihood = None def load_mcmc_samples(self, mcmc_samples: Dict[str, Tensor]) -> None: r"""Load the MCMC hyperparameter samples into the model. This method will be called by `fit_fully_bayesian_model_nuts` when the model has been fitted in order to create a batched SingleTaskGP model. """ ( self.mean_module, self.covar_module, self.likelihood, ) = self.pyro_model.load_mcmc_samples(mcmc_samples=mcmc_samples) def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True): r"""Custom logic for loading the state dict. The standard approach of calling `load_state_dict` currently doesn't play well with the `SaasFullyBayesianSingleTaskGP` since the `mean_module`, `covar_module` and `likelihood` aren't initialized until the model has been fitted. 
The reason for this is that we don't know the number of MCMC samples until NUTS is called. Given the state dict, we can initialize a new model with some dummy samples and then load the state dict into this model. This currently only works for a `SaasPyroModel` and supporting more Pyro models likely requires moving the model construction logic into the Pyro model itself. """ if not isinstance(self.pyro_model, SaasPyroModel): raise NotImplementedError("load_state_dict only works for SaasPyroModel") raw_mean = state_dict["mean_module.raw_constant"] num_mcmc_samples = len(raw_mean) dim = self.pyro_model.train_X.shape[-1] tkwargs = {"device": raw_mean.device, "dtype": raw_mean.dtype} # Load some dummy samples mcmc_samples = { "mean": torch.ones(num_mcmc_samples, **tkwargs), "lengthscale": torch.ones(num_mcmc_samples, dim, **tkwargs), "outputscale": torch.ones(num_mcmc_samples, **tkwargs), } if self.pyro_model.train_Yvar is None: mcmc_samples["noise"] = torch.ones(num_mcmc_samples, **tkwargs) ( self.mean_module, self.covar_module, self.likelihood, ) = self.pyro_model.load_mcmc_samples(mcmc_samples=mcmc_samples) # Load the actual samples from the state dict super().load_state_dict(state_dict=state_dict, strict=strict) def forward(self, X: Tensor) -> MultivariateNormal: """ Unlike in other classes' `forward` methods, there is no `if self.training` block, because it ought to be unreachable: If `self.train()` has been called, then `self.covar_module` will be None, `check_if_fitted()` will fail, and the rest of this method will not run. """ self._check_if_fitted() x = X.unsqueeze(MCMC_DIM) mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) # pyre-ignore[14]: Inconsistent override def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: bool = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> FullyBayesianPosterior: r"""Computes the posterior over model outputs at the provided points. Args: X: A `(batch_shape) x q x d`-dim Tensor, where `d` is the dimension of the feature space and `q` is the number of points considered jointly. output_indices: A list of indices, corresponding to the outputs over which to compute the posterior (if the model is multi-output). Can be used to speed up computation if only a subset of the model's outputs are required for optimization. If omitted, computes the posterior over all model outputs. observation_noise: If True, add the observation noise from the likelihood to the posterior. If a Tensor, use it directly as the observation noise (must be of shape `(batch_shape) x q x m`). posterior_transform: An optional PosteriorTransform. Returns: A `FullyBayesianPosterior` object. Includes observation noise if specified. """ self._check_if_fitted() posterior = super().posterior( X=X, output_indices=output_indices, observation_noise=observation_noise, posterior_transform=posterior_transform, **kwargs, ) posterior = FullyBayesianPosterior(distribution=posterior.distribution) return posterior
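# ------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). It expands on the Example in the `SaasFullyBayesianSingleTaskGP`
# docstring above: fit the model with NUTS and query the MCMC-batched
# posterior. The synthetic data and the very short warmup/sample counts are
# assumptions chosen only to keep the sketch fast, not recommended settings.
# ------------------------------------------------------------------------
if __name__ == "__main__":
    from botorch.fit import fit_fully_bayesian_model_nuts

    torch.manual_seed(0)
    train_X = torch.rand(20, 3, dtype=torch.float64)  # inputs assumed in [0, 1]^d
    train_Y = torch.sin(train_X.sum(dim=-1, keepdim=True))
    train_Y = (train_Y - train_Y.mean()) / train_Y.std()  # standardized outcomes

    saas_gp = SaasFullyBayesianSingleTaskGP(train_X=train_X, train_Y=train_Y)
    fit_fully_bayesian_model_nuts(
        saas_gp, warmup_steps=32, num_samples=16, thinning=4, disable_progbar=True
    )

    posterior = saas_gp.posterior(torch.rand(5, 3, dtype=torch.float64))
    # The mean has a leading MCMC batch dim of size num_samples // thinning.
    print(posterior.mean.shape, saas_gp.median_lengthscale)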
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" References .. [burt2020svgp] David R. Burt and Carl Edward Rasmussen and Mark van der Wilk, Convergence of Sparse Variational Inference in Gaussian Process Regression, Journal of Machine Learning Research, 2020, http://jmlr.org/papers/v21/19-1015.html. .. [hensman2013svgp] James Hensman and Nicolo Fusi and Neil D. Lawrence, Gaussian Processes for Big Data, Proceedings of the 29th Conference on Uncertainty in Artificial Intelligence, 2013, https://arxiv.org/abs/1309.6835. .. [moss2023ipa] Henry B. Moss and Sebastian W. Ober and Victor Picheny, Inducing Point Allocation for Sparse Gaussian Processes in High-Throughput Bayesian Optimization,Proceedings of the 25th International Conference on Artificial Intelligence and Statistics, 2023, https://arxiv.org/pdf/2301.10123.pdf. """ from __future__ import annotations import copy import warnings from typing import Optional, Type, TypeVar, Union import torch from botorch.models.gpytorch import GPyTorchModel from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform from botorch.models.utils import validate_input_scaling from botorch.models.utils.gpytorch_modules import ( get_gaussian_likelihood_with_gamma_prior, get_matern_kernel_with_gamma_prior, ) from botorch.models.utils.inducing_point_allocators import ( GreedyVarianceReduction, InducingPointAllocator, ) from botorch.posteriors.gpytorch import GPyTorchPosterior from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import Kernel from gpytorch.likelihoods import ( GaussianLikelihood, Likelihood, MultitaskGaussianLikelihood, ) from gpytorch.means import ConstantMean, Mean from gpytorch.models import ApproximateGP from gpytorch.utils.memoize import clear_cache_hook from gpytorch.variational import ( _VariationalDistribution, _VariationalStrategy, CholeskyVariationalDistribution, IndependentMultitaskVariationalStrategy, VariationalStrategy, ) from torch import Tensor from torch.nn import Module TApproxModel = TypeVar("TApproxModel", bound="ApproximateGPyTorchModel") class ApproximateGPyTorchModel(GPyTorchModel): r""" Botorch wrapper class for various (variational) approximate GP models in GPyTorch. This can either include stochastic variational GPs (SVGPs) or variational implementations of weight space approximate GPs. """ def __init__( self, model: Optional[ApproximateGP] = None, likelihood: Optional[Likelihood] = None, num_outputs: int = 1, *args, **kwargs, ) -> None: r""" Args: model: Instance of gpytorch.approximate GP models. If omitted, constructs a `_SingleTaskVariationalGP`. likelihood: Instance of a GPyTorch likelihood. If omitted, uses a either a `GaussianLikelihood` (if `num_outputs=1`) or a `MultitaskGaussianLikelihood`(if `num_outputs>1`). num_outputs: Number of outputs expected for the GP model. args: Optional positional arguments passed to the `_SingleTaskVariationalGP` constructor if no model is provided. kwargs: Optional keyword arguments passed to the `_SingleTaskVariationalGP` constructor if no model is provided. 
""" super().__init__() self.model = ( _SingleTaskVariationalGP(num_outputs=num_outputs, *args, **kwargs) if model is None else model ) if likelihood is None: if num_outputs == 1: self.likelihood = GaussianLikelihood() else: self.likelihood = MultitaskGaussianLikelihood(num_tasks=num_outputs) else: self.likelihood = likelihood self._desired_num_outputs = num_outputs @property def num_outputs(self): return self._desired_num_outputs def eval(self: TApproxModel) -> TApproxModel: r"""Puts the model in `eval` mode.""" return Module.eval(self) def train(self: TApproxModel, mode: bool = True) -> TApproxModel: r"""Put the model in `train` mode. Args: mode: A boolean denoting whether to put in `train` or `eval` mode. If `False`, model is put in `eval` mode. """ return Module.train(self, mode=mode) def posterior( self, X, output_indices=None, observation_noise=False, *args, **kwargs ) -> GPyTorchPosterior: self.eval() # make sure model is in eval mode # input transforms are applied at `posterior` in `eval` mode, and at # `model.forward()` at the training time X = self.transform_inputs(X) # check for the multi-batch case for multi-outputs b/c this will throw # warnings X_ndim = X.ndim if self.num_outputs > 1 and X_ndim > 2: X = X.unsqueeze(-3).repeat(*[1] * (X_ndim - 2), self.num_outputs, 1, 1) dist = self.model(X) if observation_noise: dist = self.likelihood(dist, *args, **kwargs) posterior = GPyTorchPosterior(distribution=dist) if hasattr(self, "outcome_transform"): posterior = self.outcome_transform.untransform_posterior(posterior) return posterior def forward(self, X, *args, **kwargs) -> MultivariateNormal: if self.training: X = self.transform_inputs(X) return self.model(X) class _SingleTaskVariationalGP(ApproximateGP): """ Base class wrapper for a stochastic variational Gaussian Process (SVGP) model [hensman2013svgp]_. Uses by default pivoted Cholesky initialization for allocating inducing points, however, custom inducing point allocators can be provided. """ def __init__( self, train_X: Tensor, train_Y: Optional[Tensor] = None, num_outputs: int = 1, learn_inducing_points=True, covar_module: Optional[Kernel] = None, mean_module: Optional[Mean] = None, variational_distribution: Optional[_VariationalDistribution] = None, variational_strategy: Type[_VariationalStrategy] = VariationalStrategy, inducing_points: Optional[Union[Tensor, int]] = None, inducing_point_allocator: Optional[InducingPointAllocator] = None, ) -> None: r""" Args: train_X: Training inputs (due to the ability of the SVGP to sub-sample this does not have to be all of the training inputs). train_Y: Not used. num_outputs: Number of output responses per input. covar_module: Kernel function. If omitted, uses a `MaternKernel`. mean_module: Mean of GP model. If omitted, uses a `ConstantMean`. variational_distribution: Type of variational distribution to use (default: CholeskyVariationalDistribution), the properties of the variational distribution will encourage scalability or ease of optimization. variational_strategy: Type of variational strategy to use (default: VariationalStrategy). The default setting uses "whitening" of the variational distribution to make training easier. inducing_points: The number or specific locations of the inducing points. inducing_point_allocator: The `InducingPointAllocator` used to initialize the inducing point locations. If omitted, uses `GreedyVarianceReduction`. """ # We use the model subclass wrapper to deal with input / outcome transforms. 
# The number of outputs will be correct here due to the check in # SingleTaskVariationalGP. input_batch_shape = train_X.shape[:-2] aug_batch_shape = copy.deepcopy(input_batch_shape) if num_outputs > 1: aug_batch_shape += torch.Size((num_outputs,)) self._aug_batch_shape = aug_batch_shape if covar_module is None: covar_module = get_matern_kernel_with_gamma_prior( ard_num_dims=train_X.shape[-1], batch_shape=self._aug_batch_shape, ).to(train_X) self._subset_batch_dict = { "mean_module.constant": -2, "covar_module.raw_outputscale": -1, "covar_module.base_kernel.raw_lengthscale": -3, } if inducing_point_allocator is None: inducing_point_allocator = GreedyVarianceReduction() # initialize inducing points if they are not given if not isinstance(inducing_points, Tensor): if inducing_points is None: # number of inducing points is 25% the number of data points # as a heuristic inducing_points = int(0.25 * train_X.shape[-2]) inducing_points = inducing_point_allocator.allocate_inducing_points( inputs=train_X, covar_module=covar_module, num_inducing=inducing_points, input_batch_shape=input_batch_shape, ) if variational_distribution is None: variational_distribution = CholeskyVariationalDistribution( num_inducing_points=inducing_points.shape[-2], batch_shape=self._aug_batch_shape, ) variational_strategy_instance = variational_strategy( self, inducing_points=inducing_points, variational_distribution=variational_distribution, learn_inducing_locations=learn_inducing_points, ) # wrap variational models in independent multi-task variational strategy if num_outputs > 1: variational_strategy_instance = IndependentMultitaskVariationalStrategy( base_variational_strategy=variational_strategy_instance, num_tasks=num_outputs, task_dim=-1, ) super().__init__(variational_strategy=variational_strategy_instance) self.mean_module = ( ConstantMean(batch_shape=self._aug_batch_shape).to(train_X) if mean_module is None else mean_module ) self.covar_module = covar_module def forward(self, X) -> MultivariateNormal: mean_x = self.mean_module(X) covar_x = self.covar_module(X) latent_dist = MultivariateNormal(mean_x, covar_x) return latent_dist class SingleTaskVariationalGP(ApproximateGPyTorchModel): r"""A single-task variational GP model following [hensman2013svgp]_. By default, the inducing points are initialized though the `GreedyVarianceReduction` of [burt2020svgp]_, which is known to be effective for building globally accurate models. However, custom inducing point allocators designed for specific down-stream tasks can also be provided (see [moss2023ipa]_ for details), e.g. `GreedyImprovementReduction` when the goal is to build a model suitable for standard BO. A single-task variational GP using relatively strong priors on the Kernel hyperparameters, which work best when covariates are normalized to the unit cube and outcomes are standardized (zero mean, unit variance). This model works in batch mode (each batch having its own hyperparameters). When the training observations include multiple outputs, this model will use batching to model outputs independently. However, batches of multi-output models are not supported at this time, if you need to use those, please use a ModelListGP. Use this model if you have a lot of data or if your responses are non-Gaussian. To train this model, you should use gpytorch.mlls.VariationalELBO and not the exact marginal log likelihood. 
Example: >>> import torch >>> from botorch.models import SingleTaskVariationalGP >>> from gpytorch.mlls import VariationalELBO >>> >>> train_X = torch.rand(20, 2) >>> model = SingleTaskVariationalGP(train_X) >>> mll = VariationalELBO( >>> model.likelihood, model.model, num_data=train_X.shape[-2] >>> ) """ def __init__( self, train_X: Tensor, train_Y: Optional[Tensor] = None, likelihood: Optional[Likelihood] = None, num_outputs: int = 1, learn_inducing_points: bool = True, covar_module: Optional[Kernel] = None, mean_module: Optional[Mean] = None, variational_distribution: Optional[_VariationalDistribution] = None, variational_strategy: Type[_VariationalStrategy] = VariationalStrategy, inducing_points: Optional[Union[Tensor, int]] = None, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, inducing_point_allocator: Optional[InducingPointAllocator] = None, ) -> None: r""" Args: train_X: Training inputs (due to the ability of the SVGP to sub-sample this does not have to be all of the training inputs). train_Y: Training targets (optional). likelihood: Instance of a GPyTorch likelihood. If omitted, uses a either a `GaussianLikelihood` (if `num_outputs=1`) or a `MultitaskGaussianLikelihood`(if `num_outputs>1`). num_outputs: Number of output responses per input (default: 1). covar_module: Kernel function. If omitted, uses a `MaternKernel`. mean_module: Mean of GP model. If omitted, uses a `ConstantMean`. variational_distribution: Type of variational distribution to use (default: CholeskyVariationalDistribution), the properties of the variational distribution will encourage scalability or ease of optimization. variational_strategy: Type of variational strategy to use (default: VariationalStrategy). The default setting uses "whitening" of the variational distribution to make training easier. inducing_points: The number or specific locations of the inducing points. inducing_point_allocator: The `InducingPointAllocator` used to initialize the inducing point locations. If omitted, uses `GreedyVarianceReduction`. """ with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) if train_Y is not None: if outcome_transform is not None: train_Y, _ = outcome_transform(train_Y) self._validate_tensor_args(X=transformed_X, Y=train_Y) validate_input_scaling(train_X=transformed_X, train_Y=train_Y) if train_Y.shape[-1] != num_outputs: num_outputs = train_Y.shape[-1] self._num_outputs = num_outputs self._input_batch_shape = train_X.shape[:-2] aug_batch_shape = copy.deepcopy(self._input_batch_shape) if num_outputs > 1: aug_batch_shape += torch.Size([num_outputs]) self._aug_batch_shape = aug_batch_shape if likelihood is None: if num_outputs == 1: likelihood = get_gaussian_likelihood_with_gamma_prior( batch_shape=self._aug_batch_shape ) else: likelihood = MultitaskGaussianLikelihood(num_tasks=num_outputs) else: self._is_custom_likelihood = True if learn_inducing_points and (inducing_point_allocator is not None): warnings.warn( "After all the effort of specifying an inducing point allocator, " "you probably want to stop the inducing point locations " "being further optimized during the model fit. 
If so " "then set `learn_inducing_points` to False.", UserWarning, ) if inducing_point_allocator is None: self._inducing_point_allocator = GreedyVarianceReduction() else: self._inducing_point_allocator = inducing_point_allocator model = _SingleTaskVariationalGP( train_X=transformed_X, num_outputs=num_outputs, learn_inducing_points=learn_inducing_points, covar_module=covar_module, mean_module=mean_module, variational_distribution=variational_distribution, variational_strategy=variational_strategy, inducing_points=inducing_points, inducing_point_allocator=self._inducing_point_allocator, ) super().__init__(model=model, likelihood=likelihood, num_outputs=num_outputs) if outcome_transform is not None: self.outcome_transform = outcome_transform if input_transform is not None: self.input_transform = input_transform # for model fitting utilities # TODO: make this a flag? self.model.train_inputs = [transformed_X] if train_Y is not None: self.model.train_targets = train_Y.squeeze(-1) self.to(train_X) @property def batch_shape(self) -> torch.Size: r"""The batch shape of the model. This is a batch shape from an I/O perspective. For a model with `m` outputs, a `test_batch_shape x q x d`-shaped input `X` to the `posterior` method returns a Posterior object over an output of shape `broadcast(test_batch_shape, model.batch_shape) x q x m`. """ return self._input_batch_shape def init_inducing_points( self, inputs: Tensor, ) -> Tensor: r""" Reinitialize the inducing point locations in-place with the current kernel applied to `inputs` through the model's inducing point allocation strategy. The variational distribution and variational strategy caches are reset. Args: inputs: (\*batch_shape, n, d)-dim input data tensor. Returns: (\*batch_shape, m, d)-dim tensor of selected inducing point locations. """ var_strat = self.model.variational_strategy clear_cache_hook(var_strat) if hasattr(var_strat, "base_variational_strategy"): var_strat = var_strat.base_variational_strategy clear_cache_hook(var_strat) with torch.no_grad(): num_inducing = var_strat.inducing_points.size(-2) inducing_points = self._inducing_point_allocator.allocate_inducing_points( inputs=inputs, covar_module=self.model.covar_module, num_inducing=num_inducing, input_batch_shape=self._input_batch_shape, ) var_strat.inducing_points.copy_(inducing_points) var_strat.variational_params_initialized.fill_(0) return inducing_points
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Utilities for converting between different models. """ from __future__ import annotations from copy import deepcopy from typing import Dict, Optional, Set, Tuple import torch from botorch.exceptions import UnsupportedError from botorch.models.gp_regression import FixedNoiseGP, HeteroskedasticSingleTaskGP from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP from botorch.models.gp_regression_mixed import MixedSingleTaskGP from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel from botorch.models.model_list_gp_regression import ModelListGP from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform from torch import Tensor from torch.nn import Module def _get_module(module: Module, name: str) -> Module: """Recursively get a sub-module from a module. Args: module: A `torch.nn.Module`. name: The name of the submodule to return, in the form of a period-delinated string: `sub_module.subsub_module.[...].leaf_module`. Returns: The requested sub-module. Example: >>> gp = SingleTaskGP(train_X, train_Y) >>> noise_prior = _get_module(gp, "likelihood.noise_covar.noise_prior") """ current = module if name != "": for a in name.split("."): current = getattr(current, a) return current def _check_compatibility(models: ModelListGP) -> None: """Check if a ModelListGP can be converted.""" # Check that all submodules are of the same type. for modn, mod in models[0].named_modules(): mcls = mod.__class__ if not all(isinstance(_get_module(m, modn), mcls) for m in models[1:]): raise UnsupportedError( "Sub-modules must be of the same type across models." ) # Check that each model is a BatchedMultiOutputGPyTorchModel. if not all(isinstance(m, BatchedMultiOutputGPyTorchModel) for m in models): raise UnsupportedError( "All models must be of type BatchedMultiOutputGPyTorchModel." ) # TODO: Add support for HeteroskedasticSingleTaskGP. if any(isinstance(m, HeteroskedasticSingleTaskGP) for m in models): raise NotImplementedError( "Conversion of HeteroskedasticSingleTaskGP is currently unsupported." ) # TODO: Add support for custom likelihoods. if any(getattr(m, "_is_custom_likelihood", False) for m in models): raise NotImplementedError( "Conversion of models with custom likelihoods is currently unsupported." ) # TODO: Add support for outcome transforms. if any(getattr(m, "outcome_transform", None) is not None for m in models): raise UnsupportedError( "Conversion of models with outcome transforms is currently unsupported." 
) # check that each model is single-output if not all(m._num_outputs == 1 for m in models): raise UnsupportedError("All models must be single-output.") # check that training inputs are the same if not all( torch.equal(ti, tj) for m in models[1:] for ti, tj in zip(models[0].train_inputs, m.train_inputs) ): raise UnsupportedError("training inputs must agree for all sub-models.") # check that there are no batched input transforms default_size = torch.Size([]) for m in models: if hasattr(m, "input_transform"): if ( m.input_transform is not None and len(getattr(m.input_transform, "batch_shape", default_size)) != 0 ): raise UnsupportedError("Batched input_transforms are not supported.") # check that all models have the same input transforms if any(hasattr(m, "input_transform") for m in models): if not all( m.input_transform.equals(models[0].input_transform) for m in models[1:] ): raise UnsupportedError("All models must have the same input_transforms.") def model_list_to_batched(model_list: ModelListGP) -> BatchedMultiOutputGPyTorchModel: """Convert a ModelListGP to a BatchedMultiOutputGPyTorchModel. Args: model_list: The `ModelListGP` to be converted to the appropriate `BatchedMultiOutputGPyTorchModel`. All sub-models must be of the same type and have the shape (batch shape and number of training inputs). Returns: The model converted into a `BatchedMultiOutputGPyTorchModel`. Example: >>> list_gp = ModelListGP(gp1, gp2) >>> batch_gp = model_list_to_batched(list_gp) """ was_training = model_list.training model_list.train() models = model_list.models _check_compatibility(models) # if the list has only one model, we can just return a copy of that if len(models) == 1: return deepcopy(models[0]) # construct inputs train_X = deepcopy(models[0].train_inputs[0]) train_Y = torch.stack([m.train_targets.clone() for m in models], dim=-1) kwargs = {"train_X": train_X, "train_Y": train_Y} if isinstance(models[0], FixedNoiseGP): kwargs["train_Yvar"] = torch.stack( [m.likelihood.noise_covar.noise.clone() for m in models], dim=-1 ) if isinstance(models[0], SingleTaskMultiFidelityGP): init_args = models[0]._init_args if not all( v == m._init_args[k] for m in models[1:] for k, v in init_args.items() ): raise UnsupportedError("All models must have the same fidelity parameters.") kwargs.update(init_args) # add batched kernel, except if the model type is SingleTaskMultiFidelityGP, # which does not have a `covar_module` if not isinstance(models[0], SingleTaskMultiFidelityGP): batch_length = len(models) covar_module = _batched_kernel(models[0].covar_module, batch_length) kwargs["covar_module"] = covar_module # construct the batched GP model input_transform = getattr(models[0], "input_transform", None) batch_gp = models[0].__class__(input_transform=input_transform, **kwargs) adjusted_batch_keys, non_adjusted_batch_keys = _get_adjusted_batch_keys( batch_state_dict=batch_gp.state_dict(), input_transform=input_transform ) input_batch_dims = len(models[0]._input_batch_shape) # ensure scalars agree (TODO: Allow different priors for different outputs) for n in non_adjusted_batch_keys: v0 = _get_module(models[0], n) if not all(torch.equal(_get_module(m, n), v0) for m in models[1:]): raise UnsupportedError("All scalars must have the same value.") # ensure dimensions of all tensors agree for n in adjusted_batch_keys: shape0 = _get_module(models[0], n).shape if not all(_get_module(m, n).shape == shape0 for m in models[1:]): raise UnsupportedError("All tensors must have the same shape.") # now construct the batched state dict 
non_adjusted_batch_state_dict = { s: p.clone() for s, p in models[0].state_dict().items() if s in non_adjusted_batch_keys } adjusted_batch_state_dict = { t: ( torch.stack( [m.state_dict()[t].clone() for m in models], dim=input_batch_dims ) if "active_dims" not in t else models[0].state_dict()[t].clone() ) for t in adjusted_batch_keys } batch_state_dict = {**non_adjusted_batch_state_dict, **adjusted_batch_state_dict} # load the state dict into the new model batch_gp.load_state_dict(batch_state_dict) return batch_gp.train(mode=was_training) def _batched_kernel(kernel, batch_length: int): """Adds a batch dimension of size `batch_length` to all non-scalar Tensor parameters that govern the kernel function `kernel`. NOTE: prior or constraint parameters are excluded from batching. """ # copy just in case there are non-tensor parameters that are passed by reference kernel = deepcopy(kernel) search_str = "raw_outputscale" for key, attr in kernel.state_dict().items(): if isinstance(attr, Tensor) and ( attr.ndim > 0 or (search_str == key.rpartition(".")[-1]) ): attr = attr.unsqueeze(0).expand(batch_length, *attr.shape).clone() set_attribute(kernel, key, torch.nn.Parameter(attr)) return kernel # two helper functions for `batched_kernel` # like `setattr` and `getattr` for object hierarchies def set_attribute(obj, attr: str, val): """Like `setattr` but works with hierarchical attribute specification. E.g. if obj=Zoo(), and attr="tiger.age", set_attribute(obj, attr, 3), would set the Zoo's tiger's age to three. """ path_to_leaf, _, attr_name = attr.rpartition(".") leaf = get_attribute(obj, path_to_leaf) if path_to_leaf else obj setattr(leaf, attr_name, val) def get_attribute(obj, attr: str): """Like `getattr` but works with hierarchical attribute specification. E.g. if obj=Zoo(), and attr="tiger.age", get_attribute(obj, attr), would return the Zoo's tiger's age. """ attr_names = attr.split(".") while attr_names: obj = getattr(obj, attr_names.pop(0)) return obj def batched_to_model_list(batch_model: BatchedMultiOutputGPyTorchModel) -> ModelListGP: """Convert a BatchedMultiOutputGPyTorchModel to a ModelListGP. Args: batch_model: The `BatchedMultiOutputGPyTorchModel` to be converted to a `ModelListGP`. Returns: The model converted into a `ModelListGP`. Example: >>> train_X = torch.rand(5, 2) >>> train_Y = torch.rand(5, 2) >>> batch_gp = SingleTaskGP(train_X, train_Y) >>> list_gp = batched_to_model_list(batch_gp) """ was_training = batch_model.training batch_model.train() # TODO: Add support for HeteroskedasticSingleTaskGP. if isinstance(batch_model, HeteroskedasticSingleTaskGP): raise NotImplementedError( "Conversion of HeteroskedasticSingleTaskGP is currently not supported." ) if isinstance(batch_model, MixedSingleTaskGP): raise NotImplementedError( "Conversion of MixedSingleTaskGP is currently not supported." 
) input_transform = getattr(batch_model, "input_transform", None) outcome_transform = getattr(batch_model, "outcome_transform", None) batch_sd = batch_model.state_dict() adjusted_batch_keys, non_adjusted_batch_keys = _get_adjusted_batch_keys( batch_state_dict=batch_sd, input_transform=input_transform, outcome_transform=outcome_transform, ) input_bdims = len(batch_model._input_batch_shape) models = [] for i in range(batch_model._num_outputs): non_adjusted_batch_sd = { s: batch_sd[s].clone() for s in non_adjusted_batch_keys } adjusted_batch_sd = { t: ( batch_sd[t].select(input_bdims, i).clone() if "active_dims" not in t else batch_sd[t].clone() ) for t in adjusted_batch_keys } sd = {**non_adjusted_batch_sd, **adjusted_batch_sd} kwargs = { "train_X": batch_model.train_inputs[0].select(input_bdims, i).clone(), "train_Y": batch_model.train_targets.select(input_bdims, i) .clone() .unsqueeze(-1), } if isinstance(batch_model, FixedNoiseGP): noise_covar = batch_model.likelihood.noise_covar kwargs["train_Yvar"] = ( noise_covar.noise.select(input_bdims, i).clone().unsqueeze(-1) ) if isinstance(batch_model, SingleTaskMultiFidelityGP): kwargs.update(batch_model._init_args) # NOTE: Adding outcome transform to kwargs to avoid the multiple # values for same kwarg issue with SingleTaskMultiFidelityGP. if outcome_transform is not None: octf = outcome_transform.subset_output(idcs=[i]) kwargs["outcome_transform"] = octf # Update the outcome transform state dict entries. sd = { **sd, **{"outcome_transform." + k: v for k, v in octf.state_dict().items()}, } else: kwargs["outcome_transform"] = None model = batch_model.__class__(input_transform=input_transform, **kwargs) model.load_state_dict(sd) models.append(model) return ModelListGP(*models).train(mode=was_training) def batched_multi_output_to_single_output( batch_mo_model: BatchedMultiOutputGPyTorchModel, ) -> BatchedMultiOutputGPyTorchModel: """Convert a model from batched multi-output to a batched single-output. Note: the underlying GPyTorch GP does not change. The GPyTorch GP's batch_shape (referred to as `_aug_batch_shape`) is still `_input_batch_shape x num_outputs`. The only things that change are the attributes of the BatchedMultiOutputGPyTorchModel that are responsible the internal accounting of the number of outputs: namely, num_outputs, _input_batch_shape, and _aug_batch_shape. Initially for the batched MO models these are: `num_outputs = m`, `_input_batch_shape = train_X.batch_shape`, and `_aug_batch_shape = train_X.batch_shape + torch.Size([num_outputs])`. In the new SO model, these are: `num_outputs = 1`, `_input_batch_shape = train_X.batch_shape + torch.Size([num_outputs])`, and `_aug_batch_shape = train_X.batch_shape + torch.Size([num_outputs])`. This is a (hopefully) temporary measure until multi-output MVNs with independent outputs have better support in GPyTorch (see https://github.com/cornellius-gp/gpytorch/pull/1083). Args: batched_mo_model: The BatchedMultiOutputGPyTorchModel Returns: The model converted into a batch single-output model. Example: >>> train_X = torch.rand(5, 2) >>> train_Y = torch.rand(5, 2) >>> batch_mo_gp = SingleTaskGP(train_X, train_Y) >>> batch_so_gp = batched_multioutput_to_single_output(batch_gp) """ was_training = batch_mo_model.training batch_mo_model.train() # TODO: Add support for HeteroskedasticSingleTaskGP. if isinstance(batch_mo_model, HeteroskedasticSingleTaskGP): raise NotImplementedError( "Conversion of HeteroskedasticSingleTaskGP currently not supported." 
) elif not isinstance(batch_mo_model, BatchedMultiOutputGPyTorchModel): raise UnsupportedError("Only BatchedMultiOutputGPyTorchModels are supported.") # TODO: Add support for custom likelihoods. elif getattr(batch_mo_model, "_is_custom_likelihood", False): raise NotImplementedError( "Conversion of models with custom likelihoods is currently unsupported." ) input_transform = getattr(batch_mo_model, "input_transform", None) batch_sd = batch_mo_model.state_dict() # TODO: add support for outcome transforms. if hasattr(batch_mo_model, "outcome_transform"): raise NotImplementedError( "Converting batched multi-output models with outcome transforms " "is not currently supported." ) kwargs = { "train_X": batch_mo_model.train_inputs[0].clone(), "train_Y": batch_mo_model.train_targets.clone().unsqueeze(-1), } if isinstance(batch_mo_model, FixedNoiseGP): noise_covar = batch_mo_model.likelihood.noise_covar kwargs["train_Yvar"] = noise_covar.noise.clone().unsqueeze(-1) if isinstance(batch_mo_model, SingleTaskMultiFidelityGP): kwargs.update(batch_mo_model._init_args) single_outcome_model = batch_mo_model.__class__( input_transform=input_transform, **kwargs ) single_outcome_model.load_state_dict(batch_sd) return single_outcome_model.train(mode=was_training) def _get_adjusted_batch_keys( batch_state_dict: Dict[str, Tensor], input_transform: Optional[InputTransform], outcome_transform: Optional[OutcomeTransform] = None, ) -> Tuple[Set[str], Set[str]]: r"""Group the keys based on whether the value requires batch shape changes. Args: batch_state_dict: The state dict of the batch model. input_transform: The input transform. outcome_transform: The outcome transform. Returns: A two-element tuple containing: - The keys of the parameters/buffers that require a batch shape adjustment. - The keys of the parameters/buffers that do not require a batch shape adjustment. """ # These are the names of the params/buffers that need their batch shape adjusted. adjusted_batch_keys = {n for n, p in batch_state_dict.items() if len(p.shape) > 0} # Don't modify transform buffers, so add them to non-adjusted set and remove # them from tensors. for transform, transform_type in [ (input_transform, "input_transform."), (outcome_transform, "outcome_transform."), ]: if transform is not None: transform_keys = { transform_type + n for n, p in transform.state_dict().items() } adjusted_batch_keys = adjusted_batch_keys - transform_keys # These are the names of the parameters/buffers that don't need their # batch shape adjusted. non_adjusted_batch_keys = set(batch_state_dict) - adjusted_batch_keys return adjusted_batch_keys, non_adjusted_batch_keys
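# ------------------------------------------------------------------------
# Illustrative round-trip sketch (added for exposition; not part of the
# original module): convert a two-model `ModelListGP` into a batched
# single-task GP with `model_list_to_batched` and back with
# `batched_to_model_list`. The synthetic data are an assumption; the
# conversion requires the sub-models to be of the same type and to share
# their training inputs.
# ------------------------------------------------------------------------
if __name__ == "__main__":
    from botorch.models.gp_regression import SingleTaskGP

    torch.manual_seed(0)
    train_X = torch.rand(10, 2, dtype=torch.float64)
    gp1 = SingleTaskGP(train_X, torch.sin(train_X.sum(dim=-1, keepdim=True)))
    gp2 = SingleTaskGP(train_X, torch.cos(train_X.sum(dim=-1, keepdim=True)))

    list_gp = ModelListGP(gp1, gp2)
    batch_gp = model_list_to_batched(list_gp)  # one GP batched over two outputs
    round_trip = batched_to_model_list(batch_gp)  # back to a two-model list
    print(batch_gp.num_outputs, len(round_trip.models))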
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.models.approximate_gp import ( ApproximateGPyTorchModel, SingleTaskVariationalGP, ) from botorch.models.cost import AffineFidelityCostModel from botorch.models.deterministic import ( AffineDeterministicModel, GenericDeterministicModel, PosteriorMeanModel, ) from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP from botorch.models.fully_bayesian_multitask import SaasFullyBayesianMultiTaskGP from botorch.models.gp_regression import ( FixedNoiseGP, HeteroskedasticSingleTaskGP, SingleTaskGP, ) from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP from botorch.models.gp_regression_mixed import MixedSingleTaskGP from botorch.models.higher_order_gp import HigherOrderGP from botorch.models.model import ModelList from botorch.models.model_list_gp_regression import ModelListGP from botorch.models.multitask import ( FixedNoiseMultiTaskGP, KroneckerMultiTaskGP, MultiTaskGP, ) from botorch.models.pairwise_gp import PairwiseGP, PairwiseLaplaceMarginalLogLikelihood __all__ = [ "AffineDeterministicModel", "AffineFidelityCostModel", "ApproximateGPyTorchModel", "FixedNoiseGP", "FixedNoiseMultiTaskGP", "SaasFullyBayesianSingleTaskGP", "SaasFullyBayesianMultiTaskGP", "GenericDeterministicModel", "HeteroskedasticSingleTaskGP", "HigherOrderGP", "KroneckerMultiTaskGP", "MixedSingleTaskGP", "ModelList", "ModelListGP", "MultiTaskGP", "PairwiseGP", "PairwiseLaplaceMarginalLogLikelihood", "PosteriorMeanModel", "SingleTaskGP", "SingleTaskMultiFidelityGP", "SingleTaskVariationalGP", ]
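# ------------------------------------------------------------------------
# Illustrative note (added for exposition; not part of the original module):
# every name listed in `__all__` above is importable directly from
# `botorch.models`. The tiny demo below wraps an arbitrary callable as a
# deterministic model; the summing objective is purely an assumption.
# ------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    det_model = GenericDeterministicModel(f=lambda X: X.sum(dim=-1, keepdim=True))
    posterior = det_model.posterior(torch.rand(4, 3, dtype=torch.float64))
    print(posterior.mean.shape)  # a 4 x 1 mean, matching the callable's output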
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Preference Learning with Gaussian Process .. [Chu2005preference] Wei Chu, and Zoubin Ghahramani. Preference learning with Gaussian processes. Proceedings of the 22nd international conference on Machine learning. 2005. .. [Brochu2010tutorial] Eric Brochu, Vlad M. Cora, and Nando De Freitas. A tutorial on Bayesian optimization of expensive cost functions, with application to active user modeling and hierarchical reinforcement learning. arXiv preprint arXiv:1012.2599 (2010). """ from __future__ import annotations import warnings from copy import deepcopy from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np import torch from botorch.acquisition.objective import PosteriorTransform from botorch.exceptions import UnsupportedError from botorch.exceptions.warnings import _get_single_precision_warning, InputDataWarning from botorch.models.likelihoods.pairwise import ( PairwiseLikelihood, PairwiseProbitLikelihood, ) from botorch.models.model import FantasizeMixin, Model from botorch.models.transforms.input import InputTransform from botorch.models.utils.assorted import consolidate_duplicates from botorch.posteriors.gpytorch import GPyTorchPosterior from botorch.posteriors.posterior import Posterior from gpytorch import settings from gpytorch.constraints import GreaterThan, Interval from gpytorch.distributions.multivariate_normal import MultivariateNormal from gpytorch.kernels.rbf_kernel import RBFKernel from gpytorch.kernels.scale_kernel import ScaleKernel from gpytorch.means.constant_mean import ConstantMean from gpytorch.mlls import MarginalLogLikelihood from gpytorch.models.gp import GP from gpytorch.priors.smoothed_box_prior import SmoothedBoxPrior from gpytorch.priors.torch_priors import GammaPrior from linear_operator.operators import LinearOperator, RootLinearOperator from linear_operator.utils.cholesky import psd_safe_cholesky from linear_operator.utils.errors import NotPSDError from scipy import optimize from torch import float32, float64, Tensor from torch.nn.modules.module import _IncompatibleKeys # Helper functions def _check_strict_input( inputs: Iterable[Tensor], t_inputs: List[Tensor], target_or_inputs: str ): for input_, t_input in zip(inputs, t_inputs or (None,)): for attr in {"shape", "dtype", "device"}: expected_attr = getattr(t_input, attr, None) found_attr = getattr(input_, attr, None) if expected_attr != found_attr: msg = ( "Cannot modify {attr} of {t_or_i} " "(expected {e_attr}, found {f_attr})." 
) msg = msg.format( attr=attr, e_attr=expected_attr, f_attr=found_attr, t_or_i=target_or_inputs, ) raise RuntimeError(msg) def _scaled_psd_safe_cholesky( matrix: Tensor, scale: Tensor, jitter: Optional[float] = None ) -> Tensor: r"""scale matrix by 1/outputscale before cholesky for better numerical stability""" matrix = matrix / scale chol = psd_safe_cholesky(matrix, jitter=jitter) chol = chol * scale.sqrt() return chol def _ensure_psd_with_jitter( matrix: Tensor, scale: Union[float, Tensor] = 1.0, jitter: float = 1e-8, max_tries: int = 3, ) -> Tensor: scaled_matrix = matrix / scale new_jitter = 0 for i in range(max_tries): scaled_matrix = scaled_matrix + new_jitter * torch.diag_embed( torch.ones( scaled_matrix.shape[:-1], device=scaled_matrix.device, dtype=scaled_matrix.dtype, ) ) _, info = torch.linalg.cholesky_ex(scaled_matrix) psd = (info == 0).all() if psd: break else: new_jitter = jitter * (10**i) - new_jitter if not psd: raise NotPSDError( "Matrix not positive definite after repeatedly adding jitter " f"up to {jitter * (10**i):.1e}." ) return scaled_matrix * scale # Why we subclass GP even though it provides no functionality: # if this subclassing is removed, we get the following GPyTorch error: # "RuntimeError: All MarginalLogLikelihood objects must be given a GP object as # a model. If you are using a more complicated model involving a GP, pass the # underlying GP object as the model, not a full PyTorch module." class PairwiseGP(Model, GP, FantasizeMixin): r"""Probit GP for preference learning with Laplace approximation A probit-likelihood GP that learns via pairwise comparison data, using a Laplace approximation of the posterior of the estimated utility values. By default it uses a scaled RBF kernel. Implementation is based on [Chu2005preference]_. Also see [Brochu2010tutorial]_ for additional reference. Note that in [Chu2005preference]_ the likelihood of a pairwise comparison is :math:`\left(\frac{f(x_1) - f(x_2)}{\sqrt{2}\sigma}\right)`, i.e. a scale is used in the denominator. To maintain consistency with usage of kernels elsewhere in BoTorch, we instead do not include :math:`\sigma` in the code (implicitly setting it to 1) and use ScaleKernel to scale the function. In the example below, the user/decision maker has stated that they prefer the first item over the second item and the third item over the second item, generating comparisons [0, 1] and [2, 1]. Example: >>> from botorch.models import PairwiseGP >>> import torch >>> datapoints = torch.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> comparisons = torch.Tensor([[0, 1], [2, 1]]) >>> model = PairwiseGP(datapoints, comparisons) """ _buffer_names = [ "consolidated_datapoints", "consolidated_comparisons", "D", "DT", "utility", "covar_chol", "likelihood_hess", "hlcov_eye", "covar", "covar_inv", "unconsolidated_datapoints", "unconsolidated_comparisons", "consolidated_indices", ] def __init__( self, datapoints: Optional[Tensor], comparisons: Optional[Tensor], likelihood: Optional[PairwiseLikelihood] = None, covar_module: Optional[ScaleKernel] = None, input_transform: Optional[InputTransform] = None, *, jitter: float = 1e-6, xtol: Optional[float] = None, consolidate_rtol: float = 0.0, consolidate_atol: float = 1e-4, maxfev: Optional[int] = None, ) -> None: r""" Args: datapoints: Either `None` or a `batch_shape x n x d` tensor of training features. If either `datapoints` or `comparisons` is `None`, construct a prior-only model. 
comparisons: Either `None` or a `batch_shape x m x 2` tensor of training comparisons; comparisons[i] is a noisy indicator suggesting the utility value of comparisons[i, 0]-th is greater than comparisons[i, 1]-th. If either `comparisons` or `datapoints` is `None`, construct a prior-only model. likelihood: A PairwiseLikelihood. covar_module: Covariance module. input_transform: An input transform that is applied in the model's forward pass. jitter: Value added to diagonal for numerical stability in `psd_safe_cholesky`. xtol: Stopping creteria in scipy.optimize.fsolve used to find f_map in `PairwiseGP._update`. If None, default behavior is handled by `PairwiseGP._update`. consolidate_rtol: `rtol` passed to `consolidate_duplicates`. consolidate_atol: `atol` passed to `consolidate_duplicates`. maxfev: The maximum number of calls to the function in scipy.optimize.fsolve. If None, default behavior is handled by `PairwiseGP._update`. """ super().__init__() # Input data validation if datapoints is not None and datapoints.dtype == torch.float32: warnings.warn( _get_single_precision_warning(str(datapoints.dtype)), category=InputDataWarning, stacklevel=2, ) # Set optional parameters self._jitter = jitter self._xtol = xtol self._consolidate_rtol = consolidate_rtol self._consolidate_atol = consolidate_atol self._maxfev = maxfev if input_transform is not None: input_transform.to(datapoints) # input transformation is applied in set_train_data self.input_transform = input_transform # Compatibility variables with fit_gpytorch_*: Dummy likelihood # Likelihood is tightly tied with this model and # it doesn't make much sense to keep it separate self.likelihood = ( PairwiseProbitLikelihood() if likelihood is None else likelihood ) for key in self._buffer_names: self.register_buffer(key, None) self.train_inputs = [] self.train_targets = None self.utility = None self.pred_cov_fac_need_update = True self.dim = None self.unconsolidated_datapoints = None self.unconsolidated_comparisons = None self.consolidated_datapoints = None self.consolidated_comparisons = None self.consolidated_indices = None # See set_train_data for additional compatibility variables. # Not that the datapoints here are not transformed even if input_transform # is not None to avoid double transformation during model fitting. 
# self.transform_inputs is called in `forward` self.set_train_data(datapoints, comparisons, update_model=False) # Set hyperparameters # Do not set the batch_shape explicitly so mean_module can operate in both mode # once fsolve used in _update can run in batch mode, we should explicitly set # the bacth shape here self.mean_module = ConstantMean() # Do not optimize constant mean prior for param in self.mean_module.parameters(): param.requires_grad = False # set covariance module # the default outputscale here is only a rule of thumb, meant to keep # estimates away from scale value that would make Phi(f(x)) saturate # at 0 or 1 if covar_module is None: os_lb, os_ub = 1e-2, 1e2 ls_prior = GammaPrior(concentration=2.4, rate=2.7) ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate covar_module = ScaleKernel( RBFKernel( batch_shape=self.batch_shape, ard_num_dims=self.dim, lengthscale_prior=ls_prior, lengthscale_constraint=GreaterThan( lower_bound=1e-4, transform=None, initial_value=ls_prior_mode ), dtype=torch.float64, ), outputscale_prior=SmoothedBoxPrior(a=os_lb, b=os_ub), # make sure we won't get extreme values for the output scale outputscale_constraint=Interval( lower_bound=os_lb * 0.5, upper_bound=os_ub * 2.0, initial_value=1.0, ), dtype=torch.float64, ) if not isinstance(covar_module, ScaleKernel): raise UnsupportedError("PairwiseGP must be used with a ScaleKernel.") self.covar_module = covar_module self._x0 = None # will store temporary results for warm-starting if self.datapoints is not None and self.comparisons is not None: self.to(dtype=self.datapoints.dtype, device=self.datapoints.device) # Find f_map for initial parameters with transformed datapoints transformed_dp = self.transform_inputs(self.datapoints) self._update(transformed_dp) self.to(self.datapoints) def __deepcopy__(self, memo) -> PairwiseGP: attrs = ( "consolidated_datapoints", "consolidated_comparisons", "covar", "covar_inv", "covar_chol", "likelihood_hess", "utility", "hlcov_eye", "unconsolidated_datapoints", "unconsolidated_comparisons", "consolidated_indices", ) if any(getattr(self, attr) is not None for attr in attrs): # Temporarily remove non-leaf tensors so that pytorch allows deepcopy old_attr = {} for attr in attrs: old_attr[attr] = getattr(self, attr) setattr(self, attr, None) new_model = deepcopy(self, memo) # now set things back for attr in attrs: setattr(self, attr, old_attr[attr]) return new_model else: dcp = self.__deepcopy__ # make sure we don't fall into the infinite recursive loop self.__deepcopy__ = None new_model = deepcopy(self, memo) self.__deepcopy__ = dcp return new_model def _has_no_data(self): r"""Return true if the model does not have both datapoints and comparisons""" return ( self.datapoints is None or len(self.datapoints.size()) == 0 or self.comparisons is None ) def _calc_covar(self, X1: Tensor, X2: Tensor) -> Union[Tensor, LinearOperator]: r"""Calculate the covariance matrix given two sets of datapoints""" covar = self.covar_module(X1, X2).to_dense() # making sure covar is PSD when it's a covariance matrix if X1 is X2: scale = self.covar_module.outputscale.unsqueeze(-1).unsqueeze(-1).detach() covar = _ensure_psd_with_jitter( matrix=covar, scale=scale, jitter=self._jitter, ) return covar def _update_covar(self, datapoints: Tensor) -> None: r"""Update values derived from the data and hyperparameters covar, covar_chol, and covar_inv will be of shape batch_shape x n x n Args: datapoints: (Transformed) datapoints for finding f_max """ self.covar = self._calc_covar(datapoints, datapoints) 
scale = self.covar_module.outputscale.unsqueeze(-1).unsqueeze(-1).detach() self.covar_chol = _scaled_psd_safe_cholesky( matrix=self.covar, scale=scale, jitter=self._jitter, ) self.covar_inv = torch.cholesky_inverse(self.covar_chol) def _prior_mean(self, X: Tensor) -> Union[Tensor, LinearOperator]: r"""Return point prediction using prior only Args: X: A `batch_size x n' x d`-dim Tensor at which to evaluate prior Returns: Prior mean prediction """ return self.mean_module(X) def _prior_predict(self, X: Tensor) -> Tuple[Tensor, Tensor]: r"""Predict utility based on prior info only Args: X: A `batch_size x n' x d`-dim Tensor at which to evaluate prior Returns: pred_mean: predictive mean pred_covar: predictive covariance """ pred_mean = self._prior_mean(X) pred_covar = self._calc_covar(X, X) return pred_mean, pred_covar def _grad_posterior_f( self, utility: Union[Tensor, np.ndarray], datapoints: Tensor, D: Tensor, DT: Tensor, covar_chol: Tensor, covar_inv: Tensor, ret_np: bool = False, ) -> Union[Tensor, np.ndarray]: r"""Compute the gradient of S loss wrt to f/utility in [Chu2005preference]_. For finding f_map, which is negative of the log posterior, i.e., -log(p(f|D)) Derivative of (10) in [Chu2005preference]_. Also see [Brochu2010tutorial]_ page 26. This is needed for estimating f_map. Args: utility: A Tensor of shape `batch_size x n` datapoints: A Tensor of shape `batch_size x n x d` as in self.datapoints D: A Tensor of shape `batch_size x m x n` as in self.D DT: Transpose of D. A Tensor of shape `batch_size x n x m` as in self.DT covar_chol: A Tensor of shape `batch_size x n x n`, as in self.covar_chol covar_inv: A Tensor of shape `batch_size x n x n`, as in self.covar_inv ret_np: return a numpy array if true, otherwise a Tensor """ prior_mean = self._prior_mean(datapoints) if ret_np: utility = torch.tensor(utility, dtype=self.datapoints.dtype) prior_mean = prior_mean.cpu() # NOTE: During the optimization, it can occur that b, p, and g_ are NaNs, though # in the cases that occured during testing, the optimization routine escaped and # terminated successfully without NaNs in the result. b = self.likelihood.negative_log_gradient_sum(utility=utility, D=D) # g_ = covar_inv x (utility - pred_prior) p = (utility - prior_mean).unsqueeze(-1).to(covar_chol) g_ = torch.cholesky_solve(p, covar_chol).squeeze(-1) g = g_ + b if ret_np: return g.cpu().numpy() else: return g def _hess_posterior_f( self, utility: Union[Tensor, np.ndarray], datapoints: Tensor, D: Tensor, DT: Tensor, covar_chol: Tensor, covar_inv: Tensor, ret_np: bool = False, ) -> Union[Tensor, np.ndarray]: r"""Compute the hessian of S loss wrt utility for finding f_map. which is negative of the log posterior, i.e., -log(p(f|D)) Following [Chu2005preference]_ section 2.2.1. This is needed for estimating f_map Args: utility: A Tensor of shape `batch_size x n` datapoints: A Tensor of shape `batch_size x n x d` as in self.datapoints D: A Tensor of shape `batch_size x m x n` as in self.D DT: Transpose of D. 
A Tensor of shape `batch_size x n x m` as in self.DT covar_chol: A Tensor of shape `batch_size x n x n`, as in self.covar_chol covar_inv: A Tensor of shape `batch_size x n x n`, as in self.covar_inv ret_np: return a numpy array if true, otherwise a Tensor """ if ret_np: utility = torch.tensor(utility, dtype=self.datapoints.dtype) hl = self.likelihood.negative_log_hessian_sum(utility=utility, D=D) hess = hl + covar_inv return hess.numpy() if ret_np else hess def _update_utility_derived_values(self) -> None: r"""Calculate utility-derived values not needed during optimization Using subsitution method for better numerical stability Let `pred_cov_fac = (covar + hl^-1)`, which is needed for calculate predictive covariance = `K - k.T @ pred_cov_fac^-1 @ k` (Also see posterior mode in `forward`) Instead of inverting `pred_cov_fac`, let `hlcov_eye = (hl @ covar + I)` Then we can obtain `pred_cov_fac^-1 @ k` by solving for p in `(hl @ k) p = hlcov_eye` `hlcov_eye p = hl @ k` """ hl = self.likelihood_hess # "C" from page 27, [Brochu2010tutorial]_ hlcov = hl @ self.covar eye = torch.eye( hlcov.size(-1), dtype=self.datapoints.dtype, device=self.datapoints.device ).expand(hlcov.shape) self.hlcov_eye = hlcov + eye self.pred_cov_fac_need_update = False def _update(self, datapoints: Tensor, **kwargs) -> None: r"""Update the model by updating the covar matrix and MAP utility values Update the model by 1. Re-evaluating the covar matrix as the data or hyperparams may have changed 2. Approximating maximum a posteriori of the utility function f using fsolve Should be called after data or hyperparameters are changed to update f_map and related values self._xtol and self._maxfev are passed to fsolve as xtol and maxfev to control stopping criteria Args: datapoints: (transformed) datapoints for finding f_max """ xtol = 1e-6 if self._xtol is None else self._xtol maxfev = 100 if self._maxfev is None else self._maxfev # Using the latest param for covariance before calculating f_map self._update_covar(datapoints) # scipy newton raphson with torch.no_grad(): # warm start init_x0_size = self.batch_shape + torch.Size([self.n]) if self._x0 is None or torch.Size(self._x0.shape) != init_x0_size: sqrt_scale = ( self.covar_module.outputscale.sqrt() .unsqueeze(-1) .detach() .cpu() .numpy() ) # Heuristic intialization using winning count with perturbation # to avoid extreme or unprobable likelihood values win_count = self.D.sum(dim=-2).detach().cpu().numpy() wc_mean, wc_std = ( win_count.mean(axis=-1, keepdims=True), win_count.std(axis=-1, keepdims=True).clip(min=1e-6), ) x0 = (win_count - wc_mean) / wc_std # adding random perturbation to in case get stuck at strange init values x0 = x0 + 0.05 * np.random.standard_normal(init_x0_size) # scale x0 to be on roughly the right scale x0 = x0 * sqrt_scale else: x0 = self._x0 if len(self.batch_shape) > 0: # batch mode, do optimize.fsolve sequentially on CPU # TODO: enable vectorization/parallelization here x0 = x0.reshape(-1, self.n) dp_v = datapoints.view(-1, self.n, self.dim).cpu() D_v = self.D.view(-1, self.m, self.n).cpu() DT_v = self.DT.view(-1, self.n, self.m).cpu() ch_v = self.covar_chol.view(-1, self.n, self.n).cpu() ci_v = self.covar_inv.view(-1, self.n, self.n).cpu() x = np.empty(x0.shape) for i in range(x0.shape[0]): fsolve_args = (dp_v[i], D_v[i], DT_v[i], ch_v[i], ci_v[i], True) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=RuntimeWarning) x[i] = optimize.fsolve( x0=x0[i], func=self._grad_posterior_f, fprime=self._hess_posterior_f, xtol=xtol, 
maxfev=maxfev, args=fsolve_args, **kwargs, ) x = x.reshape(*init_x0_size) else: # fsolve only works on CPU fsolve_args = ( datapoints.cpu(), self.D.cpu(), self.DT.cpu(), self.covar_chol.cpu(), self.covar_inv.cpu(), True, ) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=RuntimeWarning) x = optimize.fsolve( x0=x0, func=self._grad_posterior_f, fprime=self._hess_posterior_f, xtol=xtol, maxfev=maxfev, args=fsolve_args, **kwargs, ) self._x0 = x.copy() # save for warm-starting f = torch.tensor(x, dtype=datapoints.dtype, device=datapoints.device) # To perform hyperparameter optimization, this needs to be recalculated # when calling forward() in order to obtain correct gradients # self.likelihood_hess is updated here is for the rare case where we # do not want to call forward() self.likelihood_hess = self.likelihood.negative_log_hessian_sum( utility=f, D=self.D ) # Lazy update hlcov_eye, which is used in calculating posterior during training self.pred_cov_fac_need_update = True # fill in dummy values for hlcov_eye so that load_state_dict can function hlcov_eye_size = torch.Size((*self.likelihood_hess.shape[:-2], self.n, self.n)) self.hlcov_eye = torch.empty(hlcov_eye_size) # Take two newton step on the posterior MAP point to fill # in gradients for pytorch. Using 2 instead of 1 since empirically sometimes # the first step results in gradients in the order of 1e-7 while the 2nd step # allows it go down further to the order of 1e-12 and stay there. self.utility = self._util_newton_updates( datapoints, f.clone().requires_grad_(True), max_iter=2 ) def _transform_batch_shape(self, X: Tensor, X_new: Tensor) -> Tuple[Tensor, Tensor]: r"""Transform X and X_new into the same shape Transform the batch shape of X to be compatible with `X_new` to calculate the posterior. If X has the same batch size as `X_new`, return it as is. If one is in batch mode and the other one is not, convert both into batch mode. If both are in batch mode, this will only work if X_batch_shape can propagate to X_new_batch_shape Args: X: A `batch_shape x q x d`-dim or `(1 x) q x d`-dim Tensor X_new: A `batch_shape x q x d`-dim Tensor Returns: Transformed X and X_new pair """ X_bs = X.shape[:-2] # X batch shape X_new_bs = X_new.shape[:-2] # X_new batch shape if X_new_bs == X_bs: # if batch shapes match, there's no need to transform # X_new may or may not have batch_shape dimensions return X, X_new elif len(X_new_bs) < len(X_bs): # if X_new has fewer dimension, try to expand it to X's shape return X, X_new.expand(X_bs + X_new.shape[-2:]) else: # if X has fewer dimension, try to expand it to X_new's shape return X.expand(X_new_bs + X.shape[-2:]), X_new def _util_newton_updates(self, dp, x0, max_iter=1, xtol=None) -> Tensor: r"""Make `max_iter` newton updates on utility. This is used in `forward` to calculate and fill in gradient into tensors. Instead of doing utility -= H^-1 @ g, use substition method. See more explanation in _update_utility_derived_values. By default only need to run one iteration just to fill the the gradients. Args: dp: (Transformed) datapoints. x0: A `batch_size x n` dimension tensor, initial values. max_iter: Max number of iterations. xtol: Stop creteria. If `None`, do not stop until finishing `max_iter` updates. 
""" xtol = float("-Inf") if xtol is None else xtol D, DT, ch, ci = ( self.D, self.DT, self.covar_chol, self.covar_inv, ) covar = self.covar diff = float("Inf") i = 0 x = x0 eye = None while i < max_iter and diff > xtol: hl = self.likelihood.negative_log_hessian_sum(utility=x, D=D) self.likelihood_hess = hl cov_hl = covar @ hl if eye is None: eye = torch.diag_embed( torch.ones( cov_hl.shape[:-1], device=cov_hl.device, dtype=cov_hl.dtype ) ) cov_hl = cov_hl + eye # add 1 to cov_hl g = self._grad_posterior_f(x, dp, D, DT, ch, ci) cov_g = covar @ g.unsqueeze(-1) x_update = torch.linalg.solve(cov_hl, cov_g).squeeze(-1) x_next = x - x_update diff = torch.linalg.norm(x - x_next) x = x_next i += 1 return x def _consolidate_duplicates( self, datapoints: Tensor, comparisons: Tensor ) -> Tuple[Tensor, Tensor]: """Consolidate and cache datapoints and comparisons""" # check if consolidated datapoints/comparisons are cached if ( (datapoints is not self.unconsolidated_datapoints) or (comparisons is not self.unconsolidated_comparisons) or (self.consolidated_datapoints is None) or (self.consolidated_comparisons is None) ): self.unconsolidated_datapoints, self.unconsolidated_comparisons = ( datapoints, comparisons, ) if len(datapoints.shape) > 2 or len(comparisons.shape) > 2: # Do not perform consolidation in batch mode as block design # cannot be guaranteed self.consolidated_datapoints = datapoints self.consolidated_comparisons = comparisons self.consolidated_indices = None else: ( self.consolidated_datapoints, self.consolidated_comparisons, self.consolidated_indices, ) = consolidate_duplicates( datapoints, comparisons, rtol=self._consolidate_rtol, atol=self._consolidate_atol, ) return self.consolidated_datapoints, self.consolidated_comparisons # ============== public APIs ============== @property def datapoints(self) -> Tensor: r"""Alias for consolidated datapoints""" return self.consolidated_datapoints @property def comparisons(self) -> Tensor: r"""Alias for consolidated comparisons""" return self.consolidated_comparisons @property def unconsolidated_utility(self) -> Tensor: r"""Utility of the unconsolidated datapoints""" if self.consolidated_indices is None: # self.consolidated_indices is None in batch mode return self.utility else: return self.utility[self.consolidated_indices] @property def num_outputs(self) -> int: r"""The number of outputs of the model.""" return self._num_outputs @property def batch_shape(self) -> torch.Size: r"""The batch shape of the model. This is a batch shape from an I/O perspective, independent of the internal representation of the model (as e.g. in BatchedMultiOutputGPyTorchModel). For a model with `m` outputs, a `test_batch_shape x q x d`-shaped input `X` to the `posterior` method returns a Posterior object over an output of shape `broadcast(test_batch_shape, model.batch_shape) x q x m`. """ if self.datapoints is None: # this could happen in prior mode return torch.Size() else: return self.datapoints.shape[:-2] def set_train_data( self, datapoints: Optional[Tensor] = None, comparisons: Optional[Tensor] = None, strict: bool = False, update_model: bool = True, ) -> None: r"""Set datapoints and comparisons and update model properties if needed Args: datapoints: Either `None` or a `batch_shape x n x d` dimension tensor X. If there are input transformations, assume the datapoints are not transformed. If either `datapoints` or `comparisons` is `None`, construct a prior-only model. comparisons: Either `None` or a tensor of size `batch_shape x m x 2`. 
(i, j) means f_i is preferred over f_j. If either `comparisons` or `datapoints` is `None`, construct a prior-only model. strict: `strict` argument as in gpytorch.models.exact_gp for compatibility when using fit_gpytorch_model with input_transform. update_model: True if we want to refit the model (see _update) after re-setting the data. """ # When datapoints and/or comparisons are None, we are constructing # a prior-only model if datapoints is None or comparisons is None: return # following gpytorch.models.exact_gp.set_train_data if datapoints is not None: if torch.is_tensor(datapoints): inputs = [datapoints] inputs = tuple( input_.unsqueeze(-1) if input_.ndimension() == 1 else input_ for input_ in inputs ) if strict: _check_strict_input(inputs, self.train_inputs, "inputs") datapoints = inputs[0] # Compatibility variables with fit_gpytorch_* # alias for datapoints ("train_inputs") self.train_inputs = inputs if comparisons is not None: if strict: _check_strict_input([comparisons], [self.train_targets], "targets") # convert to long so that it can be used as index and # compatible with Tensor.scatter_ comparisons = comparisons.long() # Compatibility variables with fit_gpytorch_* # alias for comparisons ("train_targets" here) self.train_targets = comparisons # self.datapoints and self.comparisons are being updated here self._consolidate_duplicates(datapoints, comparisons) # Compatibility variables with optimize_acqf self._dtype = self.datapoints.dtype self._num_outputs = 1 # 1 latent value output per observation self.dim = self.datapoints.shape[-1] # feature dimensions self.n = self.datapoints.shape[-2] # num datapoints self.m = self.comparisons.shape[-2] # num pairwise comparisons # D is batch_size x m x n or num_comparison x num_datapoints. # D_k_i is the s_k(x_i) value as in equation (6) in [Chu2005preference]_ # D will usually be very sparse as well # TODO swap out scatter_ so that comparisons could be int instead of long # TODO: make D a sparse matrix once pytorch has better support for # sparse tensors D_size = torch.Size((*(self.batch_shape), self.m, self.n)) self.D = torch.zeros( D_size, dtype=self.datapoints.dtype, device=self.datapoints.device ) comp_view = self.comparisons.view(-1, self.m, 2).long() for i, sub_D in enumerate(self.D.view(-1, self.m, self.n)): sub_D.scatter_(1, comp_view[i, :, [0]], 1) sub_D.scatter_(1, comp_view[i, :, [1]], -1) self.DT = self.D.transpose(-1, -2) if update_model: transformed_dp = self.transform_inputs(self.datapoints) self._update(transformed_dp) self.to(self.datapoints) def load_state_dict( self, state_dict: Dict[str, Tensor], strict: bool = False ) -> _IncompatibleKeys: r"""Removes data related buffers from the `state_dict` and calls `super().load_state_dict` with `strict=False`. Args: state_dict: The state dict. strict: Boolean specifying whether or not given and instance-bound state_dicts should have identical keys. Only implemented for `strict=False` since buffers will filters out when calling `_load_from_state_dict`. Returns: A named tuple `_IncompatibleKeys`, containing the `missing_keys` and `unexpected_keys`. 
""" if strict: raise UnsupportedError("Passing strict=True is not supported.") return super().load_state_dict(state_dict=state_dict, strict=False) def _load_from_state_dict( self, state_dict: Dict[str, Tensor], prefix: str, local_metadata: Dict[str, Any], strict: bool, missing_keys: List[str], unexpected_keys: List[str], error_msgs: List[str], ) -> None: super()._load_from_state_dict( state_dict={ k: v for k, v in state_dict.items() if k not in self._buffer_names }, prefix=prefix, local_metadata=local_metadata, strict=False, missing_keys=missing_keys, unexpected_keys=unexpected_keys, error_msgs=error_msgs, ) def forward(self, datapoints: Tensor) -> MultivariateNormal: r"""Calculate a posterior or prior prediction. During training mode, forward implemented solely for gradient-based hyperparam opt. Essentially what it does is to re-calculate the utility f using its analytical form at f_map so that we are able to obtain gradients of the hyperparameters. Args: datapoints: A `batch_shape x n x d` Tensor, should be the same as self.datapoints during training Returns: A MultivariateNormal object, being one of the followings: 1. Posterior centered at MAP points for training data (training mode) 2. Prior predictions (prior mode) 3. Predictive posterior (eval mode) """ # Training mode: optimizing if self.training: if self._has_no_data(): raise RuntimeError( "datapoints and comparisons cannot be None in training mode. " "Call .eval() for prior predictions, " "or call .set_train_data() to add training data." ) if datapoints is not self.unconsolidated_datapoints: raise RuntimeError("Must train on training data") # We pass in the untransformed datapoints into set_train_data # as we will be setting self.datapoints as the untransformed datapoints # self.transform_inputs will be called inside before calling _update() self.set_train_data( datapoints=datapoints, comparisons=self.unconsolidated_comparisons, update_model=True, ) transformed_dp = self.transform_inputs(self.datapoints) hl = self.likelihood_hess covar = self.covar # Apply matrix inversion lemma on eq. 
in page 27 of [Brochu2010tutorial]_ # (A + B)^-1 = A^-1 - A^-1 @ (I + BA^-1)^-1 @ BA^-1 # where A = covar_inv, B = hl hl_cov = hl @ covar eye = torch.eye( hl_cov.size(-1), dtype=self.datapoints.dtype, device=self.datapoints.device, ).expand(hl_cov.shape) hl_cov_I = hl_cov + eye # add I to hl_cov train_covar_map = covar - covar @ torch.linalg.solve(hl_cov_I, hl_cov) output_mean, output_covar = self.utility, train_covar_map # Prior mode elif settings.prior_mode.on() or self._has_no_data(): transformed_new_dp = self.transform_inputs(datapoints) # if we don't have any data yet, use prior GP to make predictions output_mean, output_covar = self._prior_predict(transformed_new_dp) # Posterior mode else: transformed_dp = self.transform_inputs(self.datapoints) transformed_new_dp = self.transform_inputs(datapoints).to(transformed_dp) # self.utility might be None if exception was raised and _update # was failed to be called during hyperparameter optimization # procedures (e.g., fit_gpytorch_mll_scipy) if self.utility is None: self._update(transformed_dp) if self.pred_cov_fac_need_update: self._update_utility_derived_values() X, X_new = self._transform_batch_shape(transformed_dp, transformed_new_dp) covar_chol, _ = self._transform_batch_shape(self.covar_chol, X_new) hl, _ = self._transform_batch_shape(self.likelihood_hess, X_new) hlcov_eye, _ = self._transform_batch_shape(self.hlcov_eye, X_new) # otherwise compute predictive mean and covariance covar_xnew_x = self._calc_covar(X_new, X) covar_x_xnew = covar_xnew_x.transpose(-1, -2) covar_xnew = self._calc_covar(X_new, X_new) p = self.utility - self._prior_mean(X) covar_inv_p = torch.cholesky_solve(p.unsqueeze(-1), covar_chol) pred_mean = (covar_xnew_x @ covar_inv_p).squeeze(-1) pred_mean = pred_mean + self._prior_mean(X_new) # [Brochu2010tutorial]_ page 27 # Preictive covariance fatcor: hlcov_eye = (K + C^-1) # fac = (K + C^-1)^-1 @ k = pred_cov_fac_inv @ covar_x_xnew # used substitution method here to calculate fac fac = torch.linalg.solve(hlcov_eye, hl @ covar_x_xnew) pred_covar = covar_xnew - (covar_xnew_x @ fac) output_mean, output_covar = pred_mean, pred_covar scale = self.covar_module.outputscale.unsqueeze(-1).unsqueeze(-1).detach() post = MultivariateNormal( mean=output_mean, # output_covar is sometimes non-PSD # perform a cholesky decomposition to check and amend covariance_matrix=RootLinearOperator( _scaled_psd_safe_cholesky( matrix=output_covar, scale=scale, jitter=self._jitter, ) ), ) return post # ============== botorch.models.model.Model interfaces ============== def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: bool = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> Posterior: r"""Computes the posterior over model outputs at the provided points. Args: X: A `batch_shape x q x d`-dim Tensor, where `d` is the dimension of the feature space and `q` is the number of points considered jointly. output_indices: As defined in parent Model class, not used for this model. observation_noise: Ignored (since noise is not identifiable from scale in probit models). posterior_transform: An optional PosteriorTransform. Returns: A `Posterior` object, representing joint distributions over `q` points. """ self.eval() # make sure model is in eval mode if output_indices is not None: raise RuntimeError( "output_indices is not None. PairwiseGP should not be a" "multi-output model." 
) post = self(X) posterior = GPyTorchPosterior(post) if posterior_transform is not None: return posterior_transform(posterior) else: return posterior def condition_on_observations(self, X: Tensor, Y: Tensor, **kwargs: Any) -> Model: r"""Condition the model on new observations. Note that unlike other BoTorch models, PairwiseGP requires Y to be pairwise comparisons Args: X: A `batch_shape x n x d` dimension tensor X Y: A tensor of size `batch_shape x m x 2`. (i, j) means f_i is preferred over f_j Returns: A (deepcopied) `Model` object of the same type, representing the original model conditioned on the new observations `(X, Y)`. """ new_model = deepcopy(self) if self._has_no_data(): # If the model previously has no data, set X and Y as the data directly new_model.set_train_data(X, Y, update_model=True) else: # Can only condition on pairwise comparisons instead of the directly # observed values. Raise a RuntimeError if Y is not a tensor presenting # pairwise comparisons if Y.dtype in (float32, float64) or Y.shape[-1] != 2: raise RuntimeError( "Conditioning on non-pairwise comparison observations." ) # Reshaping datapoints and comparisons by batches Y_new_batch_shape = Y.shape[:-2] new_datapoints = self.datapoints.expand( Y_new_batch_shape + self.datapoints.shape[-2:] ) new_comparisons = self.comparisons.expand( Y_new_batch_shape + self.comparisons.shape[-2:] ) # Reshape X since Y may have additional batch dim. from fantasy models X = X.expand(Y_new_batch_shape + X.shape[-2:]) new_datapoints = torch.cat((new_datapoints, X.to(new_datapoints)), dim=-2) shifted_comp = Y.to(new_comparisons) + self.n new_comparisons = torch.cat((new_comparisons, shifted_comp), dim=-2) # TODO: be smart about how we can update covar matrix here new_model.set_train_data(new_datapoints, new_comparisons, update_model=True) return new_model class PairwiseLaplaceMarginalLogLikelihood(MarginalLogLikelihood): r"""Laplace-approximated marginal log likelihood/evidence for PairwiseGP See (12) from [Chu2005preference]_. """ def __init__(self, likelihood, model: GP): """ Args: likelihood: Used as in args to GPyTorch MarginalLogLikelihood model: Used as in args to GPyTorch MarginalLogLikelihood """ super().__init__(likelihood, model) def forward(self, post: Posterior, comp: Tensor) -> Tensor: r"""Calculate approximated log evidence, i.e., log(P(D|theta)) Note that post will be based on the consolidated/deduped datapoints for numerical stability, but comp will still be the unconsolidated comparisons so that it's still compatible with fit_gpytorch_*. 
Args: post: training posterior distribution from self.model (after consolidation) comp: Comparisons pairs (before consolidation) Returns: The approximated evidence, i.e., the marginal log likelihood """ model = self.model likelihood = self.likelihood if comp is not model.unconsolidated_comparisons: raise RuntimeError("Must train on training data") f_map = post.mean.squeeze(-1) log_likelihood = likelihood.log_p(utility=f_map, D=model.D) neg_log_likelihood_sum = -(torch.sum(log_likelihood, dim=-1)) # 1/2 f_map^T @ covar_inv @ f_map inv_prod = torch.cholesky_solve(f_map.unsqueeze(-1), model.covar_chol) log_prior = 0.5 * (f_map.unsqueeze(-2) @ inv_prod).squeeze(-1).squeeze(-1) log_posterior = neg_log_likelihood_sum + log_prior # log_posterior is the S loss function in [Chu2005preference]_ log_posterior = -log_posterior.clamp(min=0) mll = model.covar @ model.likelihood_hess mll = mll + torch.diag_embed( torch.ones(mll.shape[:-1], device=mll.device, dtype=mll.dtype) ) mll = -0.5 * torch.logdet(mll) mll = mll + log_posterior # Sum up mll first so that when adding parameter prior probs it won't # propagate and double count mll = mll.sum() # Add log probs of priors on the (functions of) parameters for _, module, prior, closure, _ in self.named_priors(): mll = mll.add(prior.log_prob(closure(module)).sum()) return mll
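# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library source above): the PairwiseGP
# code consolidates datapoints, builds the sparse comparison matrix D in
# `set_train_data`, finds the MAP utility via fsolve/Newton updates in
# `_update`, and exposes a Laplace-approximated evidence through
# `PairwiseLaplaceMarginalLogLikelihood`. The snippet below shows one way this
# is typically wired together; the toy utility function, tensor shapes, and
# helper name `_toy_comparisons` are illustrative assumptions.

import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models.pairwise_gp import (
    PairwiseGP,
    PairwiseLaplaceMarginalLogLikelihood,
)


def _toy_comparisons(n: int = 20, d: int = 2, m: int = 15):
    """Generate datapoints and pairwise comparisons from a toy utility."""
    X = torch.rand(n, d, dtype=torch.double)
    utility = X.sum(dim=-1)  # assumed latent utility: larger sum is preferred
    pairs = torch.randint(0, n, (m, 2))
    pairs = pairs[pairs[:, 0] != pairs[:, 1]]  # drop self-comparisons
    # (i, j) means item i is preferred over item j, matching the docstrings above
    flip = utility[pairs[:, 0]] < utility[pairs[:, 1]]
    pairs[flip] = pairs[flip].flip(dims=[-1])
    return X, pairs.long()


if __name__ == "__main__":
    datapoints, comparisons = _toy_comparisons()
    model = PairwiseGP(datapoints, comparisons)
    mll = PairwiseLaplaceMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_mll(mll)  # maximizes the Laplace-approximated evidence
    # posterior over latent utilities at new candidates
    test_X = torch.rand(5, 2, dtype=torch.double)
    post = model.posterior(test_X)
    print(post.mean.shape, post.variance.shape)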
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Gaussian Process Regression models based on GPyTorch models. These models are often a good starting point and are further documented in the tutorials. `SingleTaskGP`, `FixedNoiseGP`, and `HeteroskedasticSingleTaskGP` are all single-task exact GP models, differing in how they treat noise. They use relatively strong priors on the Kernel hyperparameters, which work best when covariates are normalized to the unit cube and outcomes are standardized (zero mean, unit variance). These models all work in batch mode (each batch having its own hyperparameters). When the training observations include multiple outputs, these models use batching to model outputs independently. These models all support multiple outputs. However, as single-task models, `SingleTaskGP`, `FixedNoiseGP`, and `HeteroskedasticSingleTaskGP` should be used only when the outputs are independent and all use the same training data. If outputs are independent and outputs have different training data, use the `ModelListGP`. When modeling correlations between outputs, use a multi-task model like `MultiTaskGP`. """ from __future__ import annotations from typing import Any, List, NoReturn, Optional, Union import torch from botorch import settings from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel from botorch.models.model import FantasizeMixin from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import Log, OutcomeTransform from botorch.models.utils import fantasize as fantasize_flag, validate_input_scaling from botorch.models.utils.gpytorch_modules import ( get_gaussian_likelihood_with_gamma_prior, get_matern_kernel_with_gamma_prior, MIN_INFERRED_NOISE_LEVEL, ) from botorch.sampling.base import MCSampler from gpytorch.constraints.constraints import GreaterThan from gpytorch.distributions.multivariate_normal import MultivariateNormal from gpytorch.likelihoods.gaussian_likelihood import ( _GaussianLikelihoodBase, FixedNoiseGaussianLikelihood, GaussianLikelihood, ) from gpytorch.likelihoods.likelihood import Likelihood from gpytorch.likelihoods.noise_models import HeteroskedasticNoise from gpytorch.means.constant_mean import ConstantMean from gpytorch.means.mean import Mean from gpytorch.mlls.noise_model_added_loss_term import NoiseModelAddedLossTerm from gpytorch.models.exact_gp import ExactGP from gpytorch.module import Module from gpytorch.priors.smoothed_box_prior import SmoothedBoxPrior from torch import Tensor class SingleTaskGP(BatchedMultiOutputGPyTorchModel, ExactGP, FantasizeMixin): r"""A single-task exact GP model. A single-task exact GP using relatively strong priors on the Kernel hyperparameters, which work best when covariates are normalized to the unit cube and outcomes are standardized (zero mean, unit variance). This model works in batch mode (each batch having its own hyperparameters). When the training observations include multiple outputs, this model will use batching to model outputs independently. Use this model when you have independent output(s) and all outputs use the same training data. If outputs are independent and outputs have different training data, use the ModelListGP. When modeling correlations between outputs, use the MultiTaskGP. 
Example: >>> train_X = torch.rand(20, 2) >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True) >>> model = SingleTaskGP(train_X, train_Y) """ def __init__( self, train_X: Tensor, train_Y: Tensor, likelihood: Optional[Likelihood] = None, covar_module: Optional[Module] = None, mean_module: Optional[Mean] = None, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, ) -> None: r""" Args: train_X: A `batch_shape x n x d` tensor of training features. train_Y: A `batch_shape x n x m` tensor of training observations. likelihood: A likelihood. If omitted, use a standard GaussianLikelihood with inferred noise level. covar_module: The module computing the covariance (Kernel) matrix. If omitted, use a `MaternKernel`. mean_module: The mean function to be used. If omitted, use a `ConstantMean`. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). input_transform: An input transform that is applied in the model's forward pass. """ with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) if outcome_transform is not None: train_Y, _ = outcome_transform(train_Y) self._validate_tensor_args(X=transformed_X, Y=train_Y) ignore_X_dims = getattr(self, "_ignore_X_dims_scaling_check", None) validate_input_scaling( train_X=transformed_X, train_Y=train_Y, ignore_X_dims=ignore_X_dims ) self._set_dimensions(train_X=train_X, train_Y=train_Y) train_X, train_Y, _ = self._transform_tensor_args(X=train_X, Y=train_Y) if likelihood is None: likelihood = get_gaussian_likelihood_with_gamma_prior( batch_shape=self._aug_batch_shape ) else: self._is_custom_likelihood = True ExactGP.__init__( self, train_inputs=train_X, train_targets=train_Y, likelihood=likelihood ) if mean_module is None: mean_module = ConstantMean(batch_shape=self._aug_batch_shape) self.mean_module = mean_module if covar_module is None: covar_module = get_matern_kernel_with_gamma_prior( ard_num_dims=transformed_X.shape[-1], batch_shape=self._aug_batch_shape, ) self._subset_batch_dict = { "likelihood.noise_covar.raw_noise": -2, "mean_module.raw_constant": -1, "covar_module.raw_outputscale": -1, "covar_module.base_kernel.raw_lengthscale": -3, } self.covar_module = covar_module # TODO: Allow subsetting of other covar modules if outcome_transform is not None: self.outcome_transform = outcome_transform if input_transform is not None: self.input_transform = input_transform self.to(train_X) def forward(self, x: Tensor) -> MultivariateNormal: if self.training: x = self.transform_inputs(x) mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class FixedNoiseGP(BatchedMultiOutputGPyTorchModel, ExactGP): r"""A single-task exact GP model using fixed noise levels. A single-task exact GP that uses fixed observation noise levels, differing from `SingleTaskGP` only in that noise levels are provided rather than inferred. This model also uses relatively strong priors on the Kernel hyperparameters, which work best when covariates are normalized to the unit cube and outcomes are standardized (zero mean, unit variance). This model works in batch mode (each batch having its own hyperparameters). 
An example of a case in which noise levels are known is online experimentation, where noise can be measured using the variability of different observations from the same arm, or provided by outside software. Another use case is simulation optimization, where the evaluation can provide variance estimates, perhaps from bootstrapping. In any case, these noise levels must be provided to `FixedNoiseGP` as `train_Yvar`. `FixedNoiseGP` is also commonly used when the observations are known to be noise-free. Noise-free observations can be modeled using arbitrarily small noise values, such as `train_Yvar=torch.full_like(train_Y, 1e-6)`. `FixedNoiseGP` cannot predict noise levels out of sample. If this is needed, use `HeteroskedasticSingleTaskGP`, which will create another model for the observation noise. Example: >>> train_X = torch.rand(20, 2) >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True) >>> train_Yvar = torch.full_like(train_Y, 0.2) >>> model = FixedNoiseGP(train_X, train_Y, train_Yvar) """ def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor, covar_module: Optional[Module] = None, mean_module: Optional[Mean] = None, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, ) -> None: r""" Args: train_X: A `batch_shape x n x d` tensor of training features. train_Y: A `batch_shape x n x m` tensor of training observations. train_Yvar: A `batch_shape x n x m` tensor of observed measurement noise. covar_module: The module computing the covariance (Kernel) matrix. If omitted, use a `MaternKernel`. mean_module: The mean function to be used. If omitted, use a `ConstantMean`. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). input_transform: An input transfrom that is applied in the model's forward pass. """ with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) if outcome_transform is not None: train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar) self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar) validate_input_scaling( train_X=transformed_X, train_Y=train_Y, train_Yvar=train_Yvar ) self._set_dimensions(train_X=train_X, train_Y=train_Y) train_X, train_Y, train_Yvar = self._transform_tensor_args( X=train_X, Y=train_Y, Yvar=train_Yvar ) likelihood = FixedNoiseGaussianLikelihood( noise=train_Yvar, batch_shape=self._aug_batch_shape ) ExactGP.__init__( self, train_inputs=train_X, train_targets=train_Y, likelihood=likelihood ) if mean_module is None: mean_module = ConstantMean(batch_shape=self._aug_batch_shape) self.mean_module = mean_module if covar_module is None: covar_module = get_matern_kernel_with_gamma_prior( ard_num_dims=transformed_X.shape[-1], batch_shape=self._aug_batch_shape, ) self._subset_batch_dict = { "mean_module.raw_constant": -1, "covar_module.raw_outputscale": -1, "covar_module.base_kernel.raw_lengthscale": -3, } self.covar_module = covar_module # TODO: Allow subsetting of other covar modules if input_transform is not None: self.input_transform = input_transform if outcome_transform is not None: self.outcome_transform = outcome_transform self.to(train_X) def fantasize( self, X: Tensor, sampler: MCSampler, observation_noise: Union[bool, Tensor] = True, **kwargs: Any, ) -> FixedNoiseGP: r"""Construct a fantasy model. 
Constructs a fantasy model in the following fashion: (1) compute the model posterior at `X` (if `observation_noise=True`, this includes observation noise taken as the mean across the observation noise in the training data. If `observation_noise` is a Tensor, use it directly as the observation noise to add). (2) sample from this posterior (using `sampler`) to generate "fake" observations. (3) condition the model on the new fake observations. Args: X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of the feature space, `n'` is the number of points per batch, and `batch_shape` is the batch shape (must be compatible with the batch shape of the model). sampler: The sampler used for sampling from the posterior at `X`. observation_noise: If True, include the mean across the observation noise in the training data as observation noise in the posterior from which the samples are drawn. If a Tensor, use it directly as the specified measurement noise. Returns: The constructed fantasy model. """ propagate_grads = kwargs.pop("propagate_grads", False) with fantasize_flag(): with settings.propagate_grads(propagate_grads): post_X = self.posterior( X, observation_noise=observation_noise, **kwargs ) Y_fantasized = sampler(post_X) # num_fantasies x batch_shape x n' x m # Use the mean of the previous noise values (TODO: be smarter here). # noise should be batch_shape x q x m when X is batch_shape x q x d, and # Y_fantasized is num_fantasies x batch_shape x q x m. noise_shape = Y_fantasized.shape[1:] noise = self.likelihood.noise.mean().expand(noise_shape) return self.condition_on_observations( X=self.transform_inputs(X), Y=Y_fantasized, noise=noise ) def forward(self, x: Tensor) -> MultivariateNormal: # TODO: reduce redundancy with the 'forward' method of # SingleTaskGP, which is identical if self.training: x = self.transform_inputs(x) mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) def subset_output(self, idcs: List[int]) -> BatchedMultiOutputGPyTorchModel: r"""Subset the model along the output dimension. Args: idcs: The output indices to subset the model to. Returns: The current model, subset to the specified output indices. """ new_model = super().subset_output(idcs=idcs) full_noise = new_model.likelihood.noise_covar.noise new_noise = full_noise[..., idcs if len(idcs) > 1 else idcs[0], :] new_model.likelihood.noise_covar.noise = new_noise return new_model class HeteroskedasticSingleTaskGP(BatchedMultiOutputGPyTorchModel, ExactGP): r"""A single-task exact GP model using a heteroskedastic noise model. This model differs from `SingleTaskGP` in that noise levels are provided rather than inferred, and differs from `FixedNoiseGP` in that it can predict noise levels out of sample, because it internally wraps another GP (a SingleTaskGP) to model the observation noise. Noise levels must be provided to `HeteroskedasticSingleTaskGP` as `train_Yvar`. Examples of cases in which noise levels are known include online experimentation and simulation optimization. 
Example: >>> train_X = torch.rand(20, 2) >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True) >>> se = torch.linalg.norm(train_X, dim=1, keepdim=True) >>> train_Yvar = 0.1 + se * torch.rand_like(train_Y) >>> model = HeteroskedasticSingleTaskGP(train_X, train_Y, train_Yvar) """ def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, ) -> None: r""" Args: train_X: A `batch_shape x n x d` tensor of training features. train_Y: A `batch_shape x n x m` tensor of training observations. train_Yvar: A `batch_shape x n x m` tensor of observed measurement noise. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). Note that the noise model internally log-transforms the variances, which will happen after this transform is applied. input_transform: An input transfrom that is applied in the model's forward pass. """ if outcome_transform is not None: train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar) self._validate_tensor_args(X=train_X, Y=train_Y, Yvar=train_Yvar) validate_input_scaling(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar) self._set_dimensions(train_X=train_X, train_Y=train_Y) noise_likelihood = GaussianLikelihood( noise_prior=SmoothedBoxPrior(-3, 5, 0.5, transform=torch.log), batch_shape=self._aug_batch_shape, noise_constraint=GreaterThan( MIN_INFERRED_NOISE_LEVEL, transform=None, initial_value=1.0 ), ) noise_model = SingleTaskGP( train_X=train_X, train_Y=train_Yvar, likelihood=noise_likelihood, outcome_transform=Log(), input_transform=input_transform, ) likelihood = _GaussianLikelihoodBase(HeteroskedasticNoise(noise_model)) # This is hacky -- this class used to inherit from SingleTaskGP, but it # shouldn't so this is a quick fix to enable getting rid of that # inheritance SingleTaskGP.__init__( # pyre-fixme[6]: Incompatible parameter type self, train_X=train_X, train_Y=train_Y, likelihood=likelihood, input_transform=input_transform, ) self.register_added_loss_term("noise_added_loss") self.update_added_loss_term( "noise_added_loss", NoiseModelAddedLossTerm(noise_model) ) if outcome_transform is not None: self.outcome_transform = outcome_transform self.to(train_X) # pyre-fixme[15]: Inconsistent override def condition_on_observations(self, *_, **__) -> NoReturn: raise NotImplementedError # pyre-fixme[15]: Inconsistent override def subset_output(self, idcs) -> NoReturn: raise NotImplementedError def forward(self, x: Tensor) -> MultivariateNormal: if self.training: x = self.transform_inputs(x) mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x)
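# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library source above): the docstrings
# note that SingleTaskGP/FixedNoiseGP priors work best when covariates lie in
# the unit cube and outcomes are standardized. One common way to satisfy that
# is to attach `Normalize`/`Standardize` transforms at construction time, as
# sketched below. The synthetic training data is an illustrative assumption.

import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood

train_X = 5.0 * torch.rand(20, 2, dtype=torch.double)  # not in the unit cube
train_Y = torch.sin(train_X).sum(dim=-1, keepdim=True)

# Inferred-noise model; the transforms handle input/outcome scaling internally
model = SingleTaskGP(
    train_X,
    train_Y,
    input_transform=Normalize(d=2),
    outcome_transform=Standardize(m=1),
)
fit_gpytorch_mll(ExactMarginalLogLikelihood(model.likelihood, model))

# Fixed-noise variant: observation noise is supplied rather than inferred
train_Yvar = torch.full_like(train_Y, 0.01)
fixed_model = FixedNoiseGP(
    train_X, train_Y, train_Yvar, input_transform=Normalize(d=2)
)
fit_gpytorch_mll(ExactMarginalLogLikelihood(fixed_model.likelihood, fixed_model))

test_X = 5.0 * torch.rand(4, 2, dtype=torch.double)
post = model.posterior(test_X)  # predictions back on the original outcome scale
print(post.mean.shape, post.variance.shape)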
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Abstract base module for all BoTorch models. This module contains `Model`, the abstract base class for all BoTorch models, and `ModelList`, a container for a list of Models. """ from __future__ import annotations import warnings from abc import ABC, abstractmethod from collections import defaultdict from copy import deepcopy from typing import ( Any, Callable, Dict, Hashable, List, Mapping, Optional, Set, TYPE_CHECKING, TypeVar, Union, ) import numpy as np import torch from botorch import settings from botorch.exceptions.errors import BotorchTensorDimensionError, InputDataError from botorch.logging import shape_to_str from botorch.models.utils.assorted import fantasize as fantasize_flag from botorch.posteriors import Posterior, PosteriorList from botorch.sampling.base import MCSampler from botorch.sampling.list_sampler import ListSampler from botorch.utils.datasets import SupervisedDataset from botorch.utils.transforms import is_fully_bayesian from torch import Tensor from torch.nn import Module, ModuleDict, ModuleList if TYPE_CHECKING: from botorch.acquisition.objective import PosteriorTransform # pragma: no cover TFantasizeMixin = TypeVar("TFantasizeMixin", bound="FantasizeMixin") class Model(Module, ABC): r"""Abstract base class for BoTorch models. The `Model` base class cannot be used directly; it only defines an API for other BoTorch models. `Model` subclasses `torch.nn.Module`. While a `Module` is most typically encountered as a representation of a neural network layer, it can be used more generally: see `documentation <https://pytorch.org/tutorials/beginner/examples_nn/polynomial_module.html>`_ on custom NN Modules. `Module` provides several pieces of useful functionality: A `Model`'s attributes of `Tensor` or `Module` type are automatically registered so they can be moved and/or cast with the `to` method, automatically differentiated, and used with CUDA. Args: _has_transformed_inputs: A boolean denoting whether `train_inputs` are currently stored as transformed or not. _original_train_inputs: A Tensor storing the original train inputs for use in `_revert_to_original_inputs`. Note that this is necessary since transform / untransform cycle introduces numerical errors which lead to upstream errors during training. """ # noqa: E501 _has_transformed_inputs: bool = False _original_train_inputs: Optional[Tensor] = None @abstractmethod def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: bool = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> Posterior: r"""Computes the posterior over model outputs at the provided points. Note: The input transforms should be applied here using `self.transform_inputs(X)` after the `self.eval()` call and before any `model.forward` or `model.likelihood` calls. Args: X: A `b x q x d`-dim Tensor, where `d` is the dimension of the feature space, `q` is the number of points considered jointly, and `b` is the batch dimension. output_indices: A list of indices, corresponding to the outputs over which to compute the posterior (if the model is multi-output). Can be used to speed up computation if only a subset of the model's outputs are required for optimization. If omitted, computes the posterior over all model outputs. observation_noise: If True, add observation noise to the posterior. 
posterior_transform: An optional PosteriorTransform. Returns: A `Posterior` object, representing a batch of `b` joint distributions over `q` points and `m` outputs each. """ pass # pragma: no cover @property def batch_shape(self) -> torch.Size: r"""The batch shape of the model. This is a batch shape from an I/O perspective, independent of the internal representation of the model (as e.g. in BatchedMultiOutputGPyTorchModel). For a model with `m` outputs, a `test_batch_shape x q x d`-shaped input `X` to the `posterior` method returns a Posterior object over an output of shape `broadcast(test_batch_shape, model.batch_shape) x q x m`. """ cls_name = self.__class__.__name__ raise NotImplementedError(f"{cls_name} does not define batch_shape property") @property def num_outputs(self) -> int: r"""The number of outputs of the model.""" cls_name = self.__class__.__name__ raise NotImplementedError(f"{cls_name} does not define num_outputs property") def subset_output(self, idcs: List[int]) -> Model: r"""Subset the model along the output dimension. Args: idcs: The output indices to subset the model to. Returns: A `Model` object of the same type and with the same parameters as the current model, subset to the specified output indices. """ raise NotImplementedError def condition_on_observations(self, X: Tensor, Y: Tensor, **kwargs: Any) -> Model: r"""Condition the model on new observations. Args: X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of the feature space, `n'` is the number of points per batch, and `batch_shape` is the batch shape (must be compatible with the batch shape of the model). Y: A `batch_shape' x n' x m`-dim Tensor, where `m` is the number of model outputs, `n'` is the number of points per batch, and `batch_shape'` is the batch shape of the observations. `batch_shape'` must be broadcastable to `batch_shape` using standard broadcasting semantics. If `Y` has fewer batch dimensions than `X`, it is assumed that the missing batch dimensions are the same for all `Y`. Returns: A `Model` object of the same type, representing the original model conditioned on the new observations `(X, Y)` (and possibly noise observations passed in via kwargs). """ raise NotImplementedError( f"`condition_on_observations` not implemented for {self.__class__.__name__}" ) @classmethod def construct_inputs( cls, training_data: Union[SupervisedDataset, Dict[Hashable, SupervisedDataset]], **kwargs: Any, ) -> Dict[str, Any]: r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`.""" from botorch.models.utils.parse_training_data import parse_training_data return parse_training_data(cls, training_data, **kwargs) def transform_inputs( self, X: Tensor, input_transform: Optional[Module] = None, ) -> Tensor: r"""Transform inputs. Args: X: A tensor of inputs input_transform: A Module that performs the input transformation. 
Returns: A tensor of transformed inputs """ if input_transform is not None: input_transform.to(X) return input_transform(X) try: return self.input_transform(X) except AttributeError: return X def _set_transformed_inputs(self) -> None: r"""Update training inputs with transformed inputs.""" if hasattr(self, "input_transform") and not self._has_transformed_inputs: if hasattr(self, "train_inputs"): self._original_train_inputs = self.train_inputs[0] with torch.no_grad(): X_tf = self.input_transform.preprocess_transform( self.train_inputs[0] ) self.set_train_data(X_tf, strict=False) self._has_transformed_inputs = True else: warnings.warn( "Could not update `train_inputs` with transformed inputs " f"since {self.__class__.__name__} does not have a `train_inputs` " "attribute. Make sure that the `input_transform` is applied to " "both the train inputs and test inputs.", RuntimeWarning, ) def _revert_to_original_inputs(self) -> None: r"""Revert training inputs back to original.""" if hasattr(self, "input_transform") and self._has_transformed_inputs: self.set_train_data(self._original_train_inputs, strict=False) self._has_transformed_inputs = False def eval(self) -> Model: r"""Puts the model in `eval` mode and sets the transformed inputs.""" self._set_transformed_inputs() return super().eval() def train(self, mode: bool = True) -> Model: r"""Put the model in `train` mode. Reverts to the original inputs if in `train` mode (`mode=True`) or sets transformed inputs if in `eval` mode (`mode=False`). Args: mode: A boolean denoting whether to put in `train` or `eval` mode. If `False`, model is put in `eval` mode. """ if mode: self._revert_to_original_inputs() else: self._set_transformed_inputs() return super().train(mode=mode) @property def dtypes_of_buffers(self) -> Set[torch.dtype]: return {t.dtype for t in self.buffers() if t is not None} class FantasizeMixin(ABC): """ Mixin to add a `fantasize` method to a `Model`. Example: class BaseModel: def __init__(self, ...): def condition_on_observations(self, ...): def posterior(self, ...): def transform_inputs(self, ...): class ModelThatCanFantasize(BaseModel, FantasizeMixin): def __init__(self, args): super().__init__(args) model = ModelThatCanFantasize(...) model.fantasize(X) """ @abstractmethod def condition_on_observations( self: TFantasizeMixin, X: Tensor, Y: Tensor, **kwargs: Any ) -> TFantasizeMixin: """ Classes that inherit from `FantasizeMixin` must implement a `condition_on_observations` method. """ @abstractmethod def posterior( self, X: Tensor, *args, observation_noise: bool = False, **kwargs: Any, ) -> Posterior: """ Classes that inherit from `FantasizeMixin` must implement a `posterior` method. """ @abstractmethod def transform_inputs( self, X: Tensor, input_transform: Optional[Module] = None, ) -> Tensor: """ Classes that inherit from `FantasizeMixin` must implement a `transform_inputs` method. """ # When Python 3.11 arrives we can start annotating return types like # this as # 'Self', but at this point the verbose 'T...' syntax is needed. def fantasize( self: TFantasizeMixin, # TODO: see if any of these can be imported only if TYPE_CHECKING X: Tensor, sampler: MCSampler, observation_noise: bool = True, **kwargs: Any, ) -> TFantasizeMixin: r"""Construct a fantasy model. Constructs a fantasy model in the following fashion: (1) compute the model posterior at `X` (including observation noise if `observation_noise=True`). (2) sample from this posterior (using `sampler`) to generate "fake" observations. 
(3) condition the model on the new fake observations. Args: X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of the feature space, `n'` is the number of points per batch, and `batch_shape` is the batch shape (must be compatible with the batch shape of the model). sampler: The sampler used for sampling from the posterior at `X`. observation_noise: If True, include observation noise. kwargs: Will be passed to `model.condition_on_observations` Returns: The constructed fantasy model. """ # if the inputs are empty, expand the inputs if X.shape[-2] == 0: output_shape = ( sampler.sample_shape + X.shape[:-2] + self.batch_shape + torch.Size([0, self.num_outputs]) ) return self.condition_on_observations( X=self.transform_inputs(X), Y=torch.empty(output_shape, dtype=X.dtype, device=X.device), **kwargs, ) propagate_grads = kwargs.pop("propagate_grads", False) with fantasize_flag(): with settings.propagate_grads(propagate_grads): post_X = self.posterior(X, observation_noise=observation_noise) Y_fantasized = sampler(post_X) # num_fantasies x batch_shape x n' x m return self.condition_on_observations( X=self.transform_inputs(X), Y=Y_fantasized, **kwargs ) class ModelList(Model): r"""A multi-output Model represented by a list of independent models. All BoTorch models are acceptable as inputs. The cost of this flexibility is that `ModelList` does not support all methods that may be implemented by its component models. One use case for `ModelList` is combining a regression model and a deterministic model in one multi-output container model, e.g. for cost-aware or multi-objective optimization where one of the outcomes is a deterministic function of the inputs. """ def __init__(self, *models: Model) -> None: r""" Args: *models: A variable number of models. Example: >>> m_1 = SingleTaskGP(train_X, train_Y) >>> m_2 = GenericDeterministicModel(lambda x: x.sum(dim=-1)) >>> m_12 = ModelList(m_1, m_2) >>> m_12.posterior(test_X) """ super().__init__() self.models = ModuleList(models) def _get_group_subset_indices( self, idcs: Optional[List[int]] ) -> Dict[int, List[int]]: r"""Convert global subset indices to indices for the individual models. Args: idcs: A list of indices to which the `ModelList` model is to be subset to. Returns: A dictionary mapping model indices to subset indices of the respective model in the `ModelList`. """ if idcs is None: return {i: None for i in range(len(self.models))} output_sizes = [model.num_outputs for model in self.models] cum_output_sizes = np.cumsum(output_sizes) idcs = [idx % cum_output_sizes[-1] for idx in idcs] group_indices: Dict[int, List[int]] = defaultdict(list) for idx in idcs: grp_idx = int(np.argwhere(idx < cum_output_sizes)[0]) sub_idx = idx - int(np.sum(output_sizes[:grp_idx])) group_indices[grp_idx].append(sub_idx) return group_indices def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[Callable[[PosteriorList], Posterior]] = None, **kwargs: Any, ) -> Posterior: r"""Computes the posterior over model outputs at the provided points. Note: The input transforms should be applied here using `self.transform_inputs(X)` after the `self.eval()` call and before any `model.forward` or `model.likelihood` calls. Args: X: A `b x q x d`-dim Tensor, where `d` is the dimension of the feature space, `q` is the number of points considered jointly, and `b` is the batch dimension. 
output_indices: A list of indices, corresponding to the outputs over which to compute the posterior (if the model is multi-output). Can be used to speed up computation if only a subset of the model's outputs are required for optimization. If omitted, computes the posterior over all model outputs. observation_noise: If True, add the observation noise from the respective likelihoods to the posterior. If a Tensor of shape `(batch_shape) x q x m`, use it directly as the observation noise (with `observation_noise[...,i]` added to the posterior of the `i`-th model). posterior_transform: An optional PosteriorTransform. Returns: A `Posterior` object, representing a batch of `b` joint distributions over `q` points and `m` outputs each. """ group_indices = self._get_group_subset_indices(idcs=output_indices) posteriors = [] for i, idcs in group_indices.items(): if isinstance(observation_noise, Tensor): if idcs is None: start_idx = sum(m.num_outputs for m in self.models[:i]) end_idx = start_idx + self.models[i].num_outputs idcs = list(range(start_idx, end_idx)) obs_noise = observation_noise[..., idcs] else: obs_noise = observation_noise posteriors.append( self.models[i].posterior( X=X, output_indices=idcs, observation_noise=obs_noise ) ) posterior = PosteriorList(*posteriors) if posterior_transform is not None: posterior = posterior_transform(posterior) return posterior @property def batch_shape(self) -> torch.Size: r"""The batch shape of the model. This is a batch shape from an I/O perspective, independent of the internal representation of the model (as e.g. in BatchedMultiOutputGPyTorchModel). For a model with `m` outputs, a `test_batch_shape x q x d`-shaped input `X` to the `posterior` method returns a Posterior object over an output of shape `broadcast(test_batch_shape, model.batch_shape) x q x m`. """ batch_shape = self.models[0].batch_shape if all(batch_shape == m.batch_shape for m in self.models[1:]): return batch_shape # TODO: Allow broadcasting of model batch shapes raise NotImplementedError( f"`{self.__class__.__name__}.batch_shape` is only supported if all " "constituent models have the same `batch_shape`." ) @property def num_outputs(self) -> int: r"""The number of outputs of the model. Equal to the sum of the number of outputs of the individual models in the ModelList. """ return sum(model.num_outputs for model in self.models) def subset_output(self, idcs: List[int]) -> Model: r"""Subset the model along the output dimension. Args: idcs: The output indices to subset the model to. Relative to the overall number of outputs of the model. Returns: A `Model` (either a `ModelList` or one of the submodels) with the outputs subset to the indices in `idcs`. Internally, this drops (if single-output) or subsets (if multi-output) the constitutent models and returns them as a `ModelList`. If the result is a single (possibly subset) model from the list, returns this model (instead of forming a degenerate singe-model `ModelList`). For instance, if `m = ModelList(m1, m2)` with `m1` a two-output model and `m2` a single-output model, then `m.subset_output([1]) ` will return the model `m1` subset to its second output. """ group_indices = self._get_group_subset_indices(idcs=idcs) subset_models = [ deepcopy(self.models[grp_idx].subset_output(idcs=sub_idcs)) for grp_idx, sub_idcs in group_indices.items() ] if len(subset_models) == 1: return subset_models[0] return self.__class__(*subset_models) def transform_inputs(self, X: Tensor) -> List[Tensor]: r"""Individually transform the inputs for each model. 
Args: X: A tensor of inputs. Returns: A list of tensors of transformed inputs. """ transformed_X_list = [] for model in self.models: try: transformed_X_list.append(model.input_transform(X)) except AttributeError: transformed_X_list.append(X) return transformed_X_list def load_state_dict( self, state_dict: Mapping[str, Any], strict: bool = True ) -> None: """Initialize the fully Bayesian models before loading the state dict.""" for i, m in enumerate(self.models): if is_fully_bayesian(m): filtered_dict = { k.replace(f"models.{i}.", ""): v for k, v in state_dict.items() if k.startswith(f"models.{i}.") } m.load_state_dict(filtered_dict) super().load_state_dict(state_dict=state_dict, strict=strict) def fantasize( self, X: Tensor, sampler: MCSampler, observation_noise: bool = True, evaluation_mask: Optional[Tensor] = None, **kwargs: Any, ) -> Model: r"""Construct a fantasy model. Constructs a fantasy model in the following fashion: (1) compute the model posterior at `X` (including observation noise if `observation_noise=True`). (2) sample from this posterior (using `sampler`) to generate "fake" observations. (3) condition the model on the new fake observations. Args: X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of the feature space, `n'` is the number of points per batch, and `batch_shape` is the batch shape (must be compatible with the batch shape of the model). sampler: The sampler used for sampling from the posterior at `X`. If evaluation_mask is not None, this must be a `ListSampler`. observation_noise: If True, include observation noise. evaluation_mask: A `n' x m`-dim tensor of booleans indicating which outputs should be fantasized for a given design. This uses the same evaluation mask for all batches. Returns: The constructed fantasy model. """ if evaluation_mask is not None: if evaluation_mask.ndim != 2 or evaluation_mask.shape != torch.Size( [X.shape[-2], self.num_outputs] ): raise BotorchTensorDimensionError( f"Expected evaluation_mask of shape `{X.shape[0]} " f"x {self.num_outputs}`, but got " f"{shape_to_str(evaluation_mask.shape)}." ) if not isinstance(sampler, ListSampler): raise ValueError("Decoupled fantasization requires a list of samplers.") fant_models = [] X_i = X for i in range(self.num_outputs): # get the inputs to fantasize at for output i if evaluation_mask is not None: mask_i = evaluation_mask[:, i] X_i = X[..., mask_i, :] # TODO (T158701749): implement a QMC DecoupledSampler that draws all # samples from a single Sobol sequence or consider requiring that the # sampling is IID to ensure good coverage. sampler_i = sampler.samplers[i] else: sampler_i = sampler fant_model = self.models[i].fantasize( X=X_i, sampler=sampler_i, observation_noise=observation_noise, **kwargs, ) fant_models.append(fant_model) return self.__class__(*fant_models) class ModelDict(ModuleDict): r"""A lightweight container mapping model names to models.""" def __init__(self, **models: Model) -> None: r"""Initialize a `ModelDict`. Args: models: An arbitrary number of models. Each model can be any type of BoTorch `Model`, including multi-output models and `ModelList`. """ if any(not isinstance(m, Model) for m in models.values()): raise InputDataError( f"Expected all models to be a BoTorch `Model`. Got {models}." ) super().__init__(modules=models)
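# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library source above): `ModelList`
# fans `posterior` calls out to its component models and collects the results
# in a `PosteriorList`. Mirroring the example in its docstring, the sketch
# below pairs a regression model with a deterministic cost model; the data and
# the lambda are illustrative assumptions.

import torch
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model import ModelList

train_X = torch.rand(10, 2, dtype=torch.double)
train_Y = torch.sin(train_X).sum(dim=-1, keepdim=True)

m_objective = SingleTaskGP(train_X, train_Y)
m_cost = GenericDeterministicModel(lambda X: X.sum(dim=-1, keepdim=True))
m_both = ModelList(m_objective, m_cost)

test_X = torch.rand(3, 2, dtype=torch.double)
post = m_both.posterior(test_X)  # PosteriorList over both outputs
print(m_both.num_outputs, post.mean.shape)  # 2 outputs, means stacked last

# `subset_output` maps global output indices back onto the component models;
# selecting only index 1 here returns just the deterministic cost model.
cost_only = m_both.subset_output([1])
print(type(cost_only).__name__)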
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Ensemble Models: Simple wrappers that allow the usage of ensembles via the BoTorch Model and Posterior APIs. """ from __future__ import annotations from abc import ABC, abstractmethod from typing import Any, List, Optional from botorch.acquisition.objective import PosteriorTransform from botorch.exceptions.errors import UnsupportedError from botorch.models.model import Model from botorch.posteriors.ensemble import EnsemblePosterior from torch import Tensor class EnsembleModel(Model, ABC): r""" Abstract base class for ensemble models. :meta private: """ @abstractmethod def forward(self, X: Tensor) -> Tensor: r"""Compute the (ensemble) model output at X. Args: X: A `batch_shape x n x d`-dim input tensor `X`. Returns: A `batch_shape x s x n x m`-dimensional output tensor where `s` is the size of the ensemble. """ pass # pragma: no cover def _forward(self, X: Tensor) -> Tensor: return self.forward(X=X) @property def num_outputs(self) -> int: r"""The number of outputs of the model.""" return self._num_outputs def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> EnsemblePosterior: r"""Compute the ensemble posterior at X. Args: X: A `batch_shape x q x d`-dim input tensor `X`. output_indices: A list of indices, corresponding to the outputs over which to compute the posterior. If omitted, computes the posterior over all model outputs. posterior_transform: An optional PosteriorTransform. Returns: An `EnsemblePosterior` object, representing `batch_shape` joint posteriors over `n` points and the outputs selected by `output_indices`. """ # Apply the input transforms in `eval` mode. self.eval() X = self.transform_inputs(X) # Note: we use a Tensor instance check so that `observation_noise = True` # just gets ignored. This avoids having to do a bunch of case distinctions # when using a ModelList. if isinstance(kwargs.get("observation_noise"), Tensor): # TODO: Consider returning an MVN here instead raise UnsupportedError("Ensemble models do not support observation noise.") values = self._forward(X) # NOTE: The `outcome_transform` `untransform`s the predictions rather than the # `posterior` (as is done in GP models). This is more general since it works # even if the transform doesn't support `untransform_posterior`. if hasattr(self, "outcome_transform"): values, _ = self.outcome_transform.untransform(values) if output_indices is not None: values = values[..., output_indices] posterior = EnsemblePosterior(values=values) if posterior_transform is not None: return posterior_transform(posterior) else: return posterior
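# ---------------------------------------------------------------------------
# Hedged sketch (not part of the library source above): `EnsembleModel` is
# abstract, so a concrete subclass only needs to set `_num_outputs` and return
# a `batch_shape x s x n x m` tensor from `forward`; `posterior` then wraps
# those member predictions in an `EnsemblePosterior`. The toy ensemble of
# shifted regressors below is an illustrative assumption, not a library class.

import torch
from torch import Tensor

from botorch.models.ensemble import EnsembleModel


class RandomShiftEnsemble(EnsembleModel):
    """Toy ensemble: each member predicts the input sum plus a fixed offset."""

    def __init__(self, ensemble_size: int = 8) -> None:
        super().__init__()
        self._num_outputs = 1
        # registered as a buffer so `.to()` moves it together with the model
        self.register_buffer("offsets", torch.randn(ensemble_size, 1, 1))

    def forward(self, X: Tensor) -> Tensor:
        # X: `batch_shape x n x d` -> `batch_shape x s x n x 1`
        base = X.sum(dim=-1, keepdim=True).unsqueeze(-3)
        return base + self.offsets


if __name__ == "__main__":
    model = RandomShiftEnsemble()
    X = torch.rand(5, 3)
    post = model.posterior(X)  # EnsemblePosterior over the 8 members
    print(post.mean.shape)  # mean taken across the ensemble dimension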
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Multi-Task GP models. References .. [Bonilla2007MTGP] E. Bonilla, K. Chai and C. Williams. Multi-task Gaussian Process Prediction. Advances in Neural Information Processing Systems 20, NeurIPS 2007. .. [Swersky2013MTBO] K. Swersky, J. Snoek and R. Adams. Multi-Task Bayesian Optimization. Advances in Neural Information Processing Systems 26, NeurIPS 2013. .. [Doucet2010sampl] A. Doucet. A Note on Efficient Conditional Simulation of Gaussian Distributions. http://www.stats.ox.ac.uk/~doucet/doucet_simulationconditionalgaussian.pdf, Apr 2010. .. [Maddox2021bohdo] W. Maddox, M. Balandat, A. Wilson, and E. Bakshy. Bayesian Optimization with High-Dimensional Outputs. https://arxiv.org/abs/2106.12997, Jun 2021. """ from __future__ import annotations import math import warnings from typing import Any, Dict, List, Optional, Tuple, Union import torch from botorch.acquisition.objective import PosteriorTransform from botorch.models.gpytorch import GPyTorchModel, MultiTaskGPyTorchModel from botorch.models.model import FantasizeMixin from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform from botorch.models.utils.gpytorch_modules import ( get_matern_kernel_with_gamma_prior, MIN_INFERRED_NOISE_LEVEL, ) from botorch.posteriors.multitask import MultitaskGPPosterior from botorch.utils.datasets import SupervisedDataset from gpytorch.constraints import GreaterThan from gpytorch.distributions.multitask_multivariate_normal import ( MultitaskMultivariateNormal, ) from gpytorch.distributions.multivariate_normal import MultivariateNormal from gpytorch.kernels.index_kernel import IndexKernel from gpytorch.kernels.matern_kernel import MaternKernel from gpytorch.kernels.multitask_kernel import MultitaskKernel from gpytorch.likelihoods.gaussian_likelihood import ( FixedNoiseGaussianLikelihood, GaussianLikelihood, ) from gpytorch.likelihoods.likelihood import Likelihood from gpytorch.likelihoods.multitask_gaussian_likelihood import ( MultitaskGaussianLikelihood, ) from gpytorch.means import MultitaskMean from gpytorch.means.constant_mean import ConstantMean from gpytorch.models.exact_gp import ExactGP from gpytorch.module import Module from gpytorch.priors.lkj_prior import LKJCovariancePrior from gpytorch.priors.prior import Prior from gpytorch.priors.smoothed_box_prior import SmoothedBoxPrior from gpytorch.priors.torch_priors import GammaPrior from gpytorch.settings import detach_test_caches from gpytorch.utils.errors import CachingError from gpytorch.utils.memoize import cached, pop_from_cache from linear_operator.operators import ( BatchRepeatLinearOperator, CatLinearOperator, DiagLinearOperator, KroneckerProductDiagLinearOperator, KroneckerProductLinearOperator, RootLinearOperator, to_linear_operator, ) from torch import Tensor class MultiTaskGP(ExactGP, MultiTaskGPyTorchModel, FantasizeMixin): r"""Multi-Task exact GP model using an ICM (intrinsic co-regionalization model) kernel. See [Bonilla2007MTGP]_ and [Swersky2013MTBO]_ for a reference on the model and its use in Bayesian optimization. The model can be single-output or multi-output, determined by the `output_tasks`. 
This model uses relatively strong priors on the base Kernel hyperparameters, which work best when covariates are normalized to the unit cube and outcomes are standardized (zero mean, unit variance). If the `train_Yvar` is None, this model infers the noise level. If you have known observation noise, you can set `train_Yvar` to a tensor containing the noise variance measurements. WARNING: This currently does not support different noise levels for the different tasks. """ def __init__( self, train_X: Tensor, train_Y: Tensor, task_feature: int, train_Yvar: Optional[Tensor] = None, mean_module: Optional[Module] = None, covar_module: Optional[Module] = None, likelihood: Optional[Likelihood] = None, task_covar_prior: Optional[Prior] = None, output_tasks: Optional[List[int]] = None, rank: Optional[int] = None, input_transform: Optional[InputTransform] = None, outcome_transform: Optional[OutcomeTransform] = None, ) -> None: r"""Multi-Task GP model using an ICM kernel. Args: train_X: A `n x (d + 1)` or `b x n x (d + 1)` (batch mode) tensor of training data. One of the columns should contain the task features (see `task_feature` argument). train_Y: A `n x 1` or `b x n x 1` (batch mode) tensor of training observations. task_feature: The index of the task feature (`-d <= task_feature <= d`). train_Yvar: An optional `n` or `b x n` (batch mode) tensor of observed measurement noise. If None, we infer the noise. Note that the inferred noise is common across all tasks. mean_module: The mean function to be used. Defaults to `ConstantMean`. covar_module: The module for computing the covariance matrix between the non-task features. Defaults to `MaternKernel`. likelihood: A likelihood. The default is selected based on `train_Yvar`. If `train_Yvar` is None, a standard `GaussianLikelihood` with inferred noise level is used. Otherwise, a FixedNoiseGaussianLikelihood is used. output_tasks: A list of task indices for which to compute model outputs for. If omitted, return outputs for all task indices. rank: The rank to be used for the index kernel. If omitted, use a full rank (i.e. number of tasks) kernel. task_covar_prior : A Prior on the task covariance matrix. Must operate on p.s.d. matrices. A common prior for this is the `LKJ` prior. input_transform: An input transform that is applied in the model's forward pass. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). 
Example: >>> X1, X2 = torch.rand(10, 2), torch.rand(20, 2) >>> i1, i2 = torch.zeros(10, 1), torch.ones(20, 1) >>> train_X = torch.cat([ >>> torch.cat([X1, i1], -1), torch.cat([X2, i2], -1), >>> ]) >>> train_Y = torch.cat(f1(X1), f2(X2)).unsqueeze(-1) >>> model = MultiTaskGP(train_X, train_Y, task_feature=-1) """ with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar) all_tasks, task_feature, self.num_non_task_features = self.get_all_tasks( transformed_X, task_feature, output_tasks ) self.num_tasks = len(all_tasks) if outcome_transform is not None: train_Y, train_Yvar = outcome_transform(Y=train_Y, Yvar=train_Yvar) # squeeze output dim train_Y = train_Y.squeeze(-1) if output_tasks is None: output_tasks = all_tasks else: if set(output_tasks) - set(all_tasks): raise RuntimeError("All output tasks must be present in input data.") self._output_tasks = output_tasks self._num_outputs = len(output_tasks) # TODO (T41270962): Support task-specific noise levels in likelihood if likelihood is None: if train_Yvar is None: likelihood = GaussianLikelihood(noise_prior=GammaPrior(1.1, 0.05)) else: likelihood = FixedNoiseGaussianLikelihood(noise=train_Yvar.squeeze(-1)) # construct indexer to be used in forward self._task_feature = task_feature self._base_idxr = torch.arange(self.num_non_task_features) self._base_idxr[task_feature:] += 1 # exclude task feature super().__init__( train_inputs=train_X, train_targets=train_Y, likelihood=likelihood ) self.mean_module = mean_module or ConstantMean() if covar_module is None: self.covar_module = get_matern_kernel_with_gamma_prior( ard_num_dims=self.num_non_task_features ) else: self.covar_module = covar_module self._rank = rank if rank is not None else self.num_tasks self.task_covar_module = IndexKernel( num_tasks=self.num_tasks, rank=self._rank, prior=task_covar_prior ) if input_transform is not None: self.input_transform = input_transform if outcome_transform is not None: self.outcome_transform = outcome_transform self.to(train_X) def _split_inputs(self, x: Tensor) -> Tuple[Tensor, Tensor]: r"""Extracts base features and task indices from input data. Args: x: The full input tensor with trailing dimension of size `d + 1`. Should be of float/double data type. Returns: 2-element tuple containing - A `q x d` or `b x q x d` (batch mode) tensor with trailing dimension made up of the `d` non-task-index columns of `x`, arranged in the order as specified by the indexer generated during model instantiation. - A `q` or `b x q` (batch mode) tensor of long data type containing the task indices. 
""" batch_shape, d = x.shape[:-2], x.shape[-1] x_basic = x[..., self._base_idxr].view(batch_shape + torch.Size([-1, d - 1])) task_idcs = ( x[..., self._task_feature] .view(batch_shape + torch.Size([-1, 1])) .to(dtype=torch.long) ) return x_basic, task_idcs def forward(self, x: Tensor) -> MultivariateNormal: if self.training: x = self.transform_inputs(x) x_basic, task_idcs = self._split_inputs(x) # Compute base mean and covariance mean_x = self.mean_module(x_basic) covar_x = self.covar_module(x_basic) # Compute task covariances covar_i = self.task_covar_module(task_idcs) # Combine the two in an ICM fashion covar = covar_x.mul(covar_i) return MultivariateNormal(mean_x, covar) @classmethod def get_all_tasks( cls, train_X: Tensor, task_feature: int, output_tasks: Optional[List[int]] = None, ) -> Tuple[List[int], int, int]: if train_X.ndim != 2: # Currently, batch mode MTGPs are blocked upstream in GPyTorch raise ValueError(f"Unsupported shape {train_X.shape} for train_X.") d = train_X.shape[-1] - 1 if not (-d <= task_feature <= d): raise ValueError(f"Must have that -{d} <= task_feature <= {d}") task_feature = task_feature % (d + 1) all_tasks = train_X[:, task_feature].unique().to(dtype=torch.long).tolist() return all_tasks, task_feature, d @classmethod def construct_inputs( cls, training_data: Dict[str, SupervisedDataset], task_feature: int, output_tasks: Optional[List[int]] = None, task_covar_prior: Optional[Prior] = None, prior_config: Optional[dict] = None, rank: Optional[int] = None, **kwargs, ) -> Dict[str, Any]: r"""Construct `Model` keyword arguments from dictionary of `SupervisedDataset`. Args: training_data: Dictionary of `SupervisedDataset`. task_feature: Column index of embedded task indicator features. For details, see `parse_training_data`. output_tasks: A list of task indices for which to compute model outputs for. If omitted, return outputs for all task indices. task_covar_prior: A GPyTorch `Prior` object to use as prior on the cross-task covariance matrix, prior_config: Configuration for inter-task covariance prior. Should only be used if `task_covar_prior` is not passed directly. Must contain `use_LKJ_prior` indicator and should contain float value `eta`. rank: The rank of the cross-task covariance matrix. """ if task_covar_prior is not None and prior_config is not None: raise ValueError( "Only one of `task_covar_prior` and `prior_config` arguments expected." ) if prior_config is not None: if not prior_config.get("use_LKJ_prior"): raise ValueError("Currently only config for LKJ prior is supported.") num_tasks = len(training_data) sd_prior = GammaPrior(1.0, 0.15) sd_prior._event_shape = torch.Size([num_tasks]) eta = prior_config.get("eta", 0.5) if not isinstance(eta, float) and not isinstance(eta, int): raise ValueError(f"eta must be a real number, your eta was {eta}.") task_covar_prior = LKJCovariancePrior(num_tasks, eta, sd_prior) base_inputs = super().construct_inputs( training_data=training_data, task_feature=task_feature, **kwargs ) return { **base_inputs, "task_feature": task_feature, "output_tasks": output_tasks, "task_covar_prior": task_covar_prior, "rank": rank, } class FixedNoiseMultiTaskGP(MultiTaskGP): r"""Multi-Task GP model using an ICM kernel, with known observation noise. DEPRECATED: Please use `MultiTaskGP` with `train_Yvar` instead. 
""" def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor, task_feature: int, covar_module: Optional[Module] = None, task_covar_prior: Optional[Prior] = None, output_tasks: Optional[List[int]] = None, rank: Optional[int] = None, input_transform: Optional[InputTransform] = None, outcome_transform: Optional[OutcomeTransform] = None, ) -> None: r""" Args: train_X: A `n x (d + 1)` or `b x n x (d + 1)` (batch mode) tensor of training data. One of the columns should contain the task features (see `task_feature` argument). train_Y: A `n x 1` or `b x n x 1` (batch mode) tensor of training observations. train_Yvar: A `n` or `b x n` (batch mode) tensor of observed measurement noise. task_feature: The index of the task feature (`-d <= task_feature <= d`). task_covar_prior : A Prior on the task covariance matrix. Must operate on p.s.d. matrices. A common prior for this is the `LKJ` prior. output_tasks: A list of task indices for which to compute model outputs for. If omitted, return outputs for all task indices. rank: The rank to be used for the index kernel. If omitted, use a full rank (i.e. number of tasks) kernel. input_transform: An input transform that is applied in the model's forward pass. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). Example: >>> X1, X2 = torch.rand(10, 2), torch.rand(20, 2) >>> i1, i2 = torch.zeros(10, 1), torch.ones(20, 1) >>> train_X = torch.cat([ >>> torch.cat([X1, i1], -1), torch.cat([X2, i2], -1), >>> ], dim=0) >>> train_Y = torch.cat(f1(X1), f2(X2)) >>> train_Yvar = 0.1 + 0.1 * torch.rand_like(train_Y) >>> model = FixedNoiseMultiTaskGP(train_X, train_Y, train_Yvar, -1) """ warnings.warn( "`FixedNoiseMultiTaskGP` has been deprecated and will be removed in a " "future release. Please use the `MultiTaskGP` model instead. " "When `train_Yvar` is specified, `MultiTaskGP` behaves the same " "as the `FixedNoiseMultiTaskGP`.", DeprecationWarning, ) super().__init__( train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar, covar_module=covar_module, task_feature=task_feature, output_tasks=output_tasks, rank=rank, task_covar_prior=task_covar_prior, input_transform=input_transform, outcome_transform=outcome_transform, ) class KroneckerMultiTaskGP(ExactGP, GPyTorchModel, FantasizeMixin): """Multi-task GP with Kronecker structure, using an ICM kernel. This model assumes the "block design" case, i.e., it requires that all tasks are observed at all data points. For posterior sampling, this model uses Matheron's rule [Doucet2010sampl] to compute the posterior over all tasks as in [Maddox2021bohdo] by exploiting Kronecker structure. When a multi-fidelity model has Kronecker structure, this means there is one covariance kernel over the fidelity features (call it `K_f`) and another over the rest of the input parameters (call it `K_i`), and the resulting covariance across inputs and fidelities is given by the Kronecker product of the two covariance matrices. This is equivalent to saying the covariance between two input and feature pairs is given by K((parameter_1, fidelity_1), (parameter_2, fidelity_2)) = K_f(fidelity_1, fidelity_2) * K_i(parameter_1, parameter_2). 
Then the covariance matrix of `n_i` parameters and `n_f` fidelities can be codified as a Kronecker product of an `n_i x n_i` matrix and an `n_f x n_f` matrix, which is far more parsimonious than specifying the whole `(n_i * n_f) x (n_i * n_f)` covariance matrix. Example: >>> train_X = torch.rand(10, 2) >>> train_Y = torch.cat([f_1(X), f_2(X)], dim=-1) >>> model = KroneckerMultiTaskGP(train_X, train_Y) """ def __init__( self, train_X: Tensor, train_Y: Tensor, likelihood: Optional[MultitaskGaussianLikelihood] = None, data_covar_module: Optional[Module] = None, task_covar_prior: Optional[Prior] = None, rank: Optional[int] = None, input_transform: Optional[InputTransform] = None, outcome_transform: Optional[OutcomeTransform] = None, **kwargs: Any, ) -> None: r""" Args: train_X: A `batch_shape x n x d` tensor of training features. train_Y: A `batch_shape x n x m` tensor of training observations. likelihood: A `MultitaskGaussianLikelihood`. If omitted, uses a `MultitaskGaussianLikelihood` with a `GammaPrior(1.1, 0.05)` noise prior. data_covar_module: The module computing the covariance (Kernel) matrix in data space. If omitted, use a `MaternKernel`. task_covar_prior : A Prior on the task covariance matrix. Must operate on p.s.d. matrices. A common prior for this is the `LKJ` prior. If omitted, uses `LKJCovariancePrior` with `eta` parameter as specified in the keyword arguments (if not specified, use `eta=1.5`). rank: The rank of the ICM kernel. If omitted, use a full rank kernel. kwargs: Additional arguments to override default settings of priors, including: - eta: The eta parameter on the default LKJ task_covar_prior. A value of 1.0 is uninformative, values <1.0 favor stronger correlations (in magnitude), correlations vanish as eta -> inf. - sd_prior: A scalar prior over nonnegative numbers, which is used for the default LKJCovariancePrior task_covar_prior. - likelihood_rank: The rank of the task covariance matrix to fit. Defaults to 0 (which corresponds to a diagonal covariance matrix). 
""" with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) if outcome_transform is not None: train_Y, _ = outcome_transform(train_Y) self._validate_tensor_args(X=transformed_X, Y=train_Y) self._num_outputs = train_Y.shape[-1] batch_shape, ard_num_dims = train_X.shape[:-2], train_X.shape[-1] num_tasks = train_Y.shape[-1] if rank is None: rank = num_tasks if likelihood is None: noise_prior = GammaPrior(1.1, 0.05) noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate likelihood = MultitaskGaussianLikelihood( num_tasks=num_tasks, batch_shape=batch_shape, noise_prior=noise_prior, noise_constraint=GreaterThan( MIN_INFERRED_NOISE_LEVEL, transform=None, initial_value=noise_prior_mode, ), rank=kwargs.get("likelihood_rank", 0), ) if task_covar_prior is None: task_covar_prior = LKJCovariancePrior( n=num_tasks, eta=torch.tensor(kwargs.get("eta", 1.5)).to(train_X), sd_prior=kwargs.get( "sd_prior", SmoothedBoxPrior(math.exp(-6), math.exp(1.25), 0.05), ), ) super().__init__(train_X, train_Y, likelihood) self.mean_module = MultitaskMean( base_means=ConstantMean(batch_shape=batch_shape), num_tasks=num_tasks ) if data_covar_module is None: data_covar_module = MaternKernel( nu=2.5, ard_num_dims=ard_num_dims, lengthscale_prior=GammaPrior(3.0, 6.0), batch_shape=batch_shape, ) else: data_covar_module = data_covar_module self.covar_module = MultitaskKernel( data_covar_module=data_covar_module, num_tasks=num_tasks, rank=rank, batch_shape=batch_shape, task_covar_prior=task_covar_prior, ) if outcome_transform is not None: self.outcome_transform = outcome_transform if input_transform is not None: self.input_transform = input_transform self.to(train_X) def forward(self, X: Tensor) -> MultitaskMultivariateNormal: if self.training: X = self.transform_inputs(X) mean_x = self.mean_module(X) covar_x = self.covar_module(X) return MultitaskMultivariateNormal(mean_x, covar_x) @property def _task_covar_matrix(self): res = self.covar_module.task_covar_module.covar_matrix if detach_test_caches.on(): res = res.detach() return res @property @cached(name="train_full_covar") def train_full_covar(self): train_x = self.transform_inputs(self.train_inputs[0]) # construct Kxx \otimes Ktt train_full_covar = self.covar_module(train_x).evaluate_kernel() if detach_test_caches.on(): train_full_covar = train_full_covar.detach() return train_full_covar @property @cached(name="predictive_mean_cache") def predictive_mean_cache(self): train_x = self.transform_inputs(self.train_inputs[0]) train_noise = self.likelihood._shaped_noise_covar(train_x.shape) if detach_test_caches.on(): train_noise = train_noise.detach() train_diff = self.train_targets - self.mean_module(train_x) train_solve = (self.train_full_covar + train_noise).solve( train_diff.reshape(*train_diff.shape[:-2], -1) ) if detach_test_caches.on(): train_solve = train_solve.detach() return train_solve def posterior( self, X: Tensor, output_indices: Optional[List[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> MultitaskGPPosterior: self.eval() if posterior_transform is not None: # this could be very costly, disallow for now raise NotImplementedError( "Posterior transforms currently not supported for " f"{self.__class__.__name__}" ) X = self.transform_inputs(X) train_x = self.transform_inputs(self.train_inputs[0]) # construct Ktt task_covar = self._task_covar_matrix task_rootlt = self._task_covar_matrix.root_decomposition( 
method="diagonalization" ) task_root = task_rootlt.root if task_covar.batch_shape != X.shape[:-2]: task_covar = BatchRepeatLinearOperator( task_covar, batch_repeat=X.shape[:-2] ) task_root = BatchRepeatLinearOperator( to_linear_operator(task_root), batch_repeat=X.shape[:-2] ) task_covar_rootlt = RootLinearOperator(task_root) # construct RR' \approx Kxx data_data_covar = self.train_full_covar.linear_ops[0] # populate the diagonalziation caches for the root and inverse root # decomposition data_data_evals, data_data_evecs = data_data_covar.diagonalization() # pad the eigenvalue and eigenvectors with zeros if we are using lanczos if data_data_evecs.shape[-1] < data_data_evecs.shape[-2]: cols_to_add = data_data_evecs.shape[-2] - data_data_evecs.shape[-1] zero_evecs = torch.zeros( *data_data_evecs.shape[:-1], cols_to_add, dtype=data_data_evals.dtype, device=data_data_evals.device, ) zero_evals = torch.zeros( *data_data_evecs.shape[:-2], cols_to_add, dtype=data_data_evals.dtype, device=data_data_evals.device, ) data_data_evecs = CatLinearOperator( data_data_evecs, to_linear_operator(zero_evecs), dim=-1, output_device=data_data_evals.device, ) data_data_evals = torch.cat((data_data_evals, zero_evals), dim=-1) # construct K_{xt, x} test_data_covar = self.covar_module.data_covar_module(X, train_x) # construct K_{xt, xt} test_test_covar = self.covar_module.data_covar_module(X) # now update root so that \tilde{R}\tilde{R}' \approx K_{(x,xt), (x,xt)} # cloning preserves the gradient history updated_linear_op = data_data_covar.cat_rows( cross_mat=test_data_covar.clone(), new_mat=test_test_covar, method="diagonalization", ) updated_root = updated_linear_op.root_decomposition().root # occasionally, there's device errors so enforce this comes out right updated_root = updated_root.to(data_data_covar.device) # build a root decomposition of the joint train/test covariance matrix # construct (\tilde{R} \otimes M)(\tilde{R} \otimes M)' \approx # (K_{(x,xt), (x,xt)} \otimes Ktt) joint_covar = RootLinearOperator( KroneckerProductLinearOperator( updated_root, task_covar_rootlt.root.detach() ) ) # construct K_{xt, x} \otimes Ktt test_obs_kernel = KroneckerProductLinearOperator(test_data_covar, task_covar) # collect y - \mu(x) and \mu(X) train_diff = self.train_targets - self.mean_module(train_x) if detach_test_caches.on(): train_diff = train_diff.detach() test_mean = self.mean_module(X) train_noise = self.likelihood._shaped_noise_covar(train_x.shape) diagonal_noise = isinstance(train_noise, DiagLinearOperator) if detach_test_caches.on(): train_noise = train_noise.detach() test_noise = ( self.likelihood._shaped_noise_covar(X.shape) if observation_noise else None ) # predictive mean and variance for the mvn # first the predictive mean pred_mean = ( test_obs_kernel.matmul(self.predictive_mean_cache).reshape_as(test_mean) + test_mean ) # next the predictive variance, assume diagonal noise test_var_term = KroneckerProductLinearOperator( test_test_covar, task_covar ).diagonal() if diagonal_noise: task_evals, task_evecs = self._task_covar_matrix.diagonalization() # TODO: make this be the default KPMatmulLT diagonal method in gpytorch full_data_inv_evals = ( KroneckerProductDiagLinearOperator( DiagLinearOperator(data_data_evals), DiagLinearOperator(task_evals) ) + train_noise ).inverse() test_train_hadamard = KroneckerProductLinearOperator( test_data_covar.matmul(data_data_evecs).to_dense() ** 2, task_covar.matmul(task_evecs).to_dense() ** 2, ) data_var_term = test_train_hadamard.matmul(full_data_inv_evals).sum(dim=-1) 
else: # if non-diagonal noise (but still kronecker structured), we have to pull # across the noise because the inverse is not closed form # should be a kronecker lt, R = \Sigma_X^{-1/2} \kron \Sigma_T^{-1/2} # TODO: enforce the diagonalization to return a KPLT for all shapes in # gpytorch or dense linear algebra for small shapes data_noise, task_noise = train_noise.linear_ops data_noise_root = data_noise.root_inv_decomposition( method="diagonalization" ) task_noise_root = task_noise.root_inv_decomposition( method="diagonalization" ) # ultimately we need to compute the diagonal of # (K_{x* X} \kron K_T)(K_{XX} \kron K_T + \Sigma_X \kron \Sigma_T)^{-1} # (K_{x* X} \kron K_T)^T # = (K_{x* X} \Sigma_X^{-1/2} Q_R)(\Lambda_R + I)^{-1} # (K_{x* X} \Sigma_X^{-1/2} Q_R)^T # where R = (\Sigma_X^{-1/2T}K_{XX}\Sigma_X^{-1/2} \kron # \Sigma_T^{-1/2T}K_{T}\Sigma_T^{-1/2}) # first we construct the components of R's eigen-decomposition # TODO: make this be the default KPMatmulLT diagonal method in gpytorch whitened_data_covar = ( data_noise_root.transpose(-1, -2) .matmul(data_data_covar) .matmul(data_noise_root) ) w_data_evals, w_data_evecs = whitened_data_covar.diagonalization() whitened_task_covar = ( task_noise_root.transpose(-1, -2) .matmul(self._task_covar_matrix) .matmul(task_noise_root) ) w_task_evals, w_task_evecs = whitened_task_covar.diagonalization() # we add one to the eigenvalues as above (not just for stability) full_data_inv_evals = ( KroneckerProductDiagLinearOperator( DiagLinearOperator(w_data_evals), DiagLinearOperator(w_task_evals) ) .add_jitter(1.0) .inverse() ) test_data_comp = ( test_data_covar.matmul(data_noise_root).matmul(w_data_evecs).to_dense() ** 2 ) task_comp = ( task_covar.matmul(task_noise_root).matmul(w_task_evecs).to_dense() ** 2 ) test_train_hadamard = KroneckerProductLinearOperator( test_data_comp, task_comp ) data_var_term = test_train_hadamard.matmul(full_data_inv_evals).sum(dim=-1) pred_variance = test_var_term - data_var_term specialized_mvn = MultitaskMultivariateNormal( pred_mean, DiagLinearOperator(pred_variance) ) if observation_noise: specialized_mvn = self.likelihood(specialized_mvn) posterior = MultitaskGPPosterior( distribution=specialized_mvn, joint_covariance_matrix=joint_covar, test_train_covar=test_obs_kernel, train_diff=train_diff, test_mean=test_mean, train_train_covar=self.train_full_covar, train_noise=train_noise, test_noise=test_noise, ) if hasattr(self, "outcome_transform"): posterior = self.outcome_transform.untransform_posterior(posterior) return posterior def train(self, val=True, *args, **kwargs): if val: fixed_cache_names = ["data_data_roots", "train_full_covar", "task_root"] for name in fixed_cache_names: try: pop_from_cache(self, name) except CachingError: pass return super().train(val, *args, **kwargs)
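# --- Illustrative usage sketch (assumed toy data, not library code): fitting a
# `KroneckerMultiTaskGP` on block-design data (every task observed at every
# input) and sampling from the joint posterior computed via Matheron's rule.
import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models.multitask import KroneckerMultiTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood

train_X = torch.rand(10, 2, dtype=torch.double)
train_Y = torch.stack([train_X.sin().sum(-1), train_X.cos().sum(-1)], dim=-1)
model = KroneckerMultiTaskGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll)
posterior = model.posterior(torch.rand(4, 2, dtype=torch.double))
samples = posterior.rsample(torch.Size([32]))  # sample_shape x q x num_tasks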
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Cost models to be used with multi-fidelity optimization. Cost are useful for defining known cost functions when the cost of an evaluation is heterogeneous in fidelity. For a full worked example, see the `tutorial <https://botorch.org/tutorials/multi_fidelity_bo>`_ on continuous multi-fidelity Bayesian Optimization. """ from __future__ import annotations from typing import Dict, Optional import torch from botorch.models.deterministic import DeterministicModel from torch import Tensor class AffineFidelityCostModel(DeterministicModel): r"""Deterministic, affine cost model operating on fidelity parameters. For each (q-batch) element of a candidate set `X`, this module computes a cost of the form cost = fixed_cost + sum_j weights[j] * X[fidelity_dims[j]] For a full worked example, see the `tutorial <https://botorch.org/tutorials/multi_fidelity_bo>`_ on continuous multi-fidelity Bayesian Optimization. Example: >>> from botorch.models import AffineFidelityCostModel >>> from botorch.acquisition.cost_aware import InverseCostWeightedUtility >>> cost_model = AffineFidelityCostModel( >>> fidelity_weights={6: 1.0}, fixed_cost=5.0 >>> ) >>> cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model) """ def __init__( self, fidelity_weights: Optional[Dict[int, float]] = None, fixed_cost: float = 0.01, ) -> None: r""" Args: fidelity_weights: A dictionary mapping a subset of columns of `X` (the fidelity parameters) to its associated weight in the affine cost expression. If omitted, assumes that the last column of `X` is the fidelity parameter with a weight of 1.0. fixed_cost: The fixed cost of running a single candidate point (i.e. an element of a q-batch). """ if fidelity_weights is None: fidelity_weights = {-1: 1.0} super().__init__() self.fidelity_dims = sorted(fidelity_weights) self.fixed_cost = fixed_cost weights = torch.tensor([fidelity_weights[i] for i in self.fidelity_dims]) self.register_buffer("weights", weights) self._num_outputs = 1 def forward(self, X: Tensor) -> Tensor: r"""Evaluate the cost on a candidate set X. Computes a cost of the form cost = fixed_cost + sum_j weights[j] * X[fidelity_dims[j]] for each element of the q-batch Args: X: A `batch_shape x q x d'`-dim tensor of candidate points. Returns: A `batch_shape x q x 1`-dim tensor of costs. """ # TODO: Consider different aggregation (i.e. max) across q-batch lin_cost = torch.einsum( "...f,f", X[..., self.fidelity_dims], self.weights.to(X) ) return self.fixed_cost + lin_cost.unsqueeze(-1)
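# --- Illustrative sketch (assumed toy inputs): evaluating the affine cost model
# on a q-batch whose last column (index 2) is the fidelity parameter.
import torch
from botorch.models.cost import AffineFidelityCostModel

cost_model = AffineFidelityCostModel(fidelity_weights={2: 1.0}, fixed_cost=5.0)
X = torch.tensor([[[0.3, 0.7, 0.25], [0.1, 0.9, 1.00]]])  # 1 x 2 x 3 candidates
cost = cost_model(X)  # 1 x 2 x 1 tensor: 5.0 + fidelity, i.e. 5.25 and 6.00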
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Model List GP Regression models. """ from __future__ import annotations from copy import deepcopy from typing import Any, List from botorch.exceptions.errors import BotorchTensorDimensionError from botorch.models.gpytorch import GPyTorchModel, ModelListGPyTorchModel from botorch.models.model import FantasizeMixin from gpytorch.models import IndependentModelList from torch import Tensor class ModelListGP(IndependentModelList, ModelListGPyTorchModel, FantasizeMixin): r"""A multi-output GP model with independent GPs for the outputs. This model supports different-shaped training inputs for each of its sub-models. It can be used with any number of single-output `GPyTorchModel`\s and the models can be of different types. Use this model when you have independent outputs with different training data. When modeling correlations between outputs, use `MultiTaskGP`. Internally, this model is just a list of individual models, but it implements the same input/output interface as all other BoTorch models. This makes it very flexible and convenient to work with. The sequential evaluation comes at a performance cost though - if you are using a block design (i.e. the same number of training example for each output, and a similar model structure, you should consider using a batched GP model instead, such as `SingleTaskGP` with batched inputs). """ def __init__(self, *gp_models: GPyTorchModel) -> None: r""" Args: *gp_models: A number of single-output `GPyTorchModel`\s. If models have input/output transforms, these are honored individually for each model. Example: >>> model1 = SingleTaskGP(train_X1, train_Y1) >>> model2 = SingleTaskGP(train_X2, train_Y2) >>> model = ModelListGP(model1, model2) """ super().__init__(*gp_models) # pyre-fixme[14]: Inconsistent override. Here `X` is a List[Tensor], but in the # parent method it's a Tensor. def condition_on_observations( self, X: List[Tensor], Y: Tensor, **kwargs: Any ) -> ModelListGP: r"""Condition the model on new observations. Args: X: A `m`-list of `batch_shape x n' x d`-dim Tensors, where `d` is the dimension of the feature space, `n'` is the number of points per batch, and `batch_shape` is the batch shape (must be compatible with the batch shape of the model). Y: A `batch_shape' x n' x m`-dim Tensor, where `m` is the number of model outputs, `n'` is the number of points per batch, and `batch_shape'` is the batch shape of the observations. `batch_shape'` must be broadcastable to `batch_shape` using standard broadcasting semantics. If `Y` has fewer batch dimensions than `X`, its is assumed that the missing batch dimensions are the same for all `Y`. kwargs: Keyword arguments passed to `IndependentModelList.get_fantasy_model`. Returns: A `ModelListGP` representing the original model conditioned on the new observations `(X, Y)` (and possibly noise observations passed in via kwargs). Here the `i`-th model has `n_i + n'` training examples, where the `n'` training examples have been added and all test-time caches have been updated. """ if Y.shape[-1] != self.num_outputs: raise BotorchTensorDimensionError( "Incorrect number of outputs for observations. Received " f"{Y.shape[-1]} observation outputs, but model has " f"{self.num_outputs} outputs." 
) targets = [Y[..., i] for i in range(Y.shape[-1])] for i, model in enumerate(self.models): if hasattr(model, "outcome_transform"): noise = kwargs.get("noise") targets[i], noise = model.outcome_transform(targets[i], noise) # This should never trigger, posterior call would fail. assert len(targets) == len(X) if "noise" in kwargs: noise = kwargs.pop("noise") if noise.shape != Y.shape[-noise.dim() :]: raise BotorchTensorDimensionError( "The shape of observation noise does not agree with the outcomes. " f"Received {noise.shape} noise with {Y.shape} outcomes." ) kwargs_ = {**kwargs, "noise": [noise[..., i] for i in range(Y.shape[-1])]} else: kwargs_ = kwargs return super().get_fantasy_model(X, targets, **kwargs_) def subset_output(self, idcs: List[int]) -> ModelListGP: r"""Subset the model along the output dimension. Args: idcs: The output indices to subset the model to. Returns: The current model, subset to the specified output indices. """ return self.__class__(*[deepcopy(self.models[i]) for i in idcs]) def _set_transformed_inputs(self) -> None: r"""Update training inputs with transformed inputs.""" for m in self.models: m._set_transformed_inputs() def _revert_to_original_inputs(self) -> None: r"""Revert training inputs back to original.""" for m in self.models: m._revert_to_original_inputs()
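# --- Illustrative usage sketch (assumed toy data): a `ModelListGP` over two
# single-output GPs with different training sets, conditioned on one new
# observation per output.
import torch
from botorch.models import ModelListGP, SingleTaskGP

X1 = torch.rand(10, 2, dtype=torch.double)
X2 = torch.rand(20, 2, dtype=torch.double)
model = ModelListGP(
    SingleTaskGP(X1, X1.sum(-1, keepdim=True)),
    SingleTaskGP(X2, X2.prod(-1, keepdim=True)),
)

new_X = torch.rand(1, 2, dtype=torch.double)
new_Y = torch.cat(
    [new_X.sum(-1, keepdim=True), new_X.prod(-1, keepdim=True)], dim=-1
)  # 1 x 2: one observation for each of the two outputs
model.posterior(new_X)  # populate test caches before conditioning (see above)
conditioned = model.condition_on_observations(X=[new_X, new_X], Y=new_Y)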
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Multi-Fidelity Gaussian Process Regression models based on GPyTorch models. For more on Multi-Fidelity BO, see the `tutorial <https://botorch.org/tutorials/discrete_multi_fidelity_bo>`__. A common use case of multi-fidelity regression modeling is optimizing a "high-fidelity" function that is expensive to simulate when you have access to one or more cheaper "lower-fidelity" versions that are not fully accurate but are correlated with the high-fidelity function. The multi-fidelity model models both the low- and high-fidelity functions together, including the correlation between them, which can help you predict and optimize the high-fidelity function without having to do too many expensive high-fidelity evaluations. .. [Wu2019mf] J. Wu, S. Toscano-Palmerin, P. I. Frazier, and A. G. Wilson. Practical multi-fidelity bayesian optimization for hyperparameter tuning. ArXiv 2019. """ from __future__ import annotations import warnings from typing import Any, Dict, List, Optional, Tuple, Union import torch from botorch.exceptions.errors import UnsupportedError from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP from botorch.models.kernels.downsampling import DownsamplingKernel from botorch.models.kernels.exponential_decay import ExponentialDecayKernel from botorch.models.kernels.linear_truncated_fidelity import ( LinearTruncatedFidelityKernel, ) from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform from botorch.utils.datasets import SupervisedDataset from gpytorch.kernels.kernel import ProductKernel from gpytorch.kernels.rbf_kernel import RBFKernel from gpytorch.kernels.scale_kernel import ScaleKernel from gpytorch.likelihoods.likelihood import Likelihood from gpytorch.priors.torch_priors import GammaPrior from torch import Tensor class SingleTaskMultiFidelityGP(SingleTaskGP): r"""A single task multi-fidelity GP model. A SingleTaskGP model using a DownsamplingKernel for the data fidelity parameter (if present) and an ExponentialDecayKernel for the iteration fidelity parameter (if present). This kernel is described in [Wu2019mf]_. Example: >>> train_X = torch.rand(20, 4) >>> train_Y = train_X.pow(2).sum(dim=-1, keepdim=True) >>> model = SingleTaskMultiFidelityGP(train_X, train_Y, data_fidelities=[3]) """ def __init__( self, train_X: Tensor, train_Y: Tensor, iteration_fidelity: Optional[int] = None, data_fidelities: Optional[Union[List[int], Tuple[int]]] = None, data_fidelity: Optional[int] = None, linear_truncated: bool = True, nu: float = 2.5, likelihood: Optional[Likelihood] = None, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, ) -> None: r""" Args: train_X: A `batch_shape x n x (d + s)` tensor of training features, where `s` is the dimension of the fidelity parameters (either one or two). train_Y: A `batch_shape x n x m` tensor of training observations. iteration_fidelity: The column index for the training iteration fidelity parameter (optional). data_fidelities: The column indices for the downsampling fidelity parameter. If a list/tuple of indices is provided, a kernel will be constructed for each index (optional). data_fidelity: The column index for the downsampling fidelity parameter (optional). Deprecated in favor of `data_fidelities`. 
linear_truncated: If True, use a `LinearTruncatedFidelityKernel` instead of the default kernel. nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or 5/2. Only used when `linear_truncated=True`. likelihood: A likelihood. If omitted, use a standard GaussianLikelihood with inferred noise level. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). input_transform: An input transform that is applied in the model's forward pass. """ if data_fidelity is not None: warnings.warn( "The `data_fidelity` argument is deprecated and will be removed in " "a future release. Please use `data_fidelities` instead.", DeprecationWarning, ) if data_fidelities is not None: raise ValueError( "Cannot specify both `data_fidelity` and `data_fidelities`." ) data_fidelities = [data_fidelity] self._init_args = { "iteration_fidelity": iteration_fidelity, "data_fidelities": data_fidelities, "linear_truncated": linear_truncated, "nu": nu, "outcome_transform": outcome_transform, } if iteration_fidelity is None and data_fidelities is None: raise UnsupportedError( "SingleTaskMultiFidelityGP requires at least one fidelity parameter." ) with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) self._set_dimensions(train_X=transformed_X, train_Y=train_Y) covar_module, subset_batch_dict = _setup_multifidelity_covar_module( dim=transformed_X.size(-1), aug_batch_shape=self._aug_batch_shape, iteration_fidelity=iteration_fidelity, data_fidelities=data_fidelities, linear_truncated=linear_truncated, nu=nu, ) super().__init__( train_X=train_X, train_Y=train_Y, likelihood=likelihood, covar_module=covar_module, outcome_transform=outcome_transform, input_transform=input_transform, ) self._subset_batch_dict = { "likelihood.noise_covar.raw_noise": -2, "mean_module.raw_constant": -1, "covar_module.raw_outputscale": -1, **subset_batch_dict, } self.to(train_X) @classmethod def construct_inputs( cls, training_data: SupervisedDataset, fidelity_features: List[int], **kwargs, ) -> Dict[str, Any]: r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`. Args: training_data: Dictionary of `SupervisedDataset`. fidelity_features: Index of fidelity parameter as input columns. """ inputs = super().construct_inputs(training_data=training_data, **kwargs) inputs["data_fidelities"] = fidelity_features return inputs class FixedNoiseMultiFidelityGP(FixedNoiseGP): r"""A single task multi-fidelity GP model using fixed noise levels. A FixedNoiseGP model analogue to SingleTaskMultiFidelityGP, using a DownsamplingKernel for the data fidelity parameter (if present) and an ExponentialDecayKernel for the iteration fidelity parameter (if present). This kernel is described in [Wu2019mf]_. 
Example: >>> train_X = torch.rand(20, 4) >>> train_Y = train_X.pow(2).sum(dim=-1, keepdim=True) >>> train_Yvar = torch.full_like(train_Y) * 0.01 >>> model = FixedNoiseMultiFidelityGP( >>> train_X, >>> train_Y, >>> train_Yvar, >>> data_fidelities=[3], >>> ) """ def __init__( self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor, iteration_fidelity: Optional[int] = None, data_fidelities: Optional[Union[List[int], Tuple[int]]] = None, data_fidelity: Optional[int] = None, linear_truncated: bool = True, nu: float = 2.5, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, ) -> None: r""" Args: train_X: A `batch_shape x n x (d + s)` tensor of training features, where `s` is the dimension of the fidelity parameters (either one or two). train_Y: A `batch_shape x n x m` tensor of training observations. train_Yvar: A `batch_shape x n x m` tensor of observed measurement noise. iteration_fidelity: The column index for the training iteration fidelity parameter (optional). data_fidelities: The column indices for the downsampling fidelity parameter. If a list of indices is provided, a kernel will be constructed for each index (optional). data_fidelity: The column index for the downsampling fidelity parameter (optional). Deprecated in favor of `data_fidelities`. linear_truncated: If True, use a `LinearTruncatedFidelityKernel` instead of the default kernel. nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or 5/2. Only used when `linear_truncated=True`. outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). input_transform: An input transform that is applied in the model's forward pass. """ if data_fidelity is not None: warnings.warn( "The `data_fidelity` argument is deprecated and will be removed in " "a future release. Please use `data_fidelities` instead.", DeprecationWarning, ) if data_fidelities is not None: raise ValueError( "Cannot specify both `data_fidelity` and `data_fidelities`." ) data_fidelities = [data_fidelity] self._init_args = { "iteration_fidelity": iteration_fidelity, "data_fidelities": data_fidelities, "linear_truncated": linear_truncated, "nu": nu, "outcome_transform": outcome_transform, } if iteration_fidelity is None and data_fidelities is None: raise UnsupportedError( "FixedNoiseMultiFidelityGP requires at least one fidelity parameter." ) with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) self._set_dimensions(train_X=transformed_X, train_Y=train_Y) covar_module, subset_batch_dict = _setup_multifidelity_covar_module( dim=transformed_X.size(-1), aug_batch_shape=self._aug_batch_shape, iteration_fidelity=iteration_fidelity, data_fidelities=data_fidelities, linear_truncated=linear_truncated, nu=nu, ) super().__init__( train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar, covar_module=covar_module, outcome_transform=outcome_transform, input_transform=input_transform, ) self._subset_batch_dict = { "likelihood.noise_covar.raw_noise": -2, "mean_module.raw_constant": -1, "covar_module.raw_outputscale": -1, **subset_batch_dict, } self.to(train_X) @classmethod def construct_inputs( cls, training_data: SupervisedDataset, fidelity_features: List[int], **kwargs, ) -> Dict[str, Any]: r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`. 
Args: training_data: Dictionary of `SupervisedDataset`. fidelity_features: Column indices of fidelity features. """ inputs = super().construct_inputs(training_data=training_data, **kwargs) inputs["data_fidelities"] = fidelity_features return inputs def _setup_multifidelity_covar_module( dim: int, aug_batch_shape: torch.Size, iteration_fidelity: Optional[int], data_fidelities: Optional[List[int]], linear_truncated: bool, nu: float, ) -> Tuple[ScaleKernel, Dict]: """Helper function to get the covariance module and associated subset_batch_dict for the multifidelity setting. Args: dim: The dimensionality of the training data. aug_batch_shape: The output-augmented batch shape as defined in `BatchedMultiOutputGPyTorchModel`. iteration_fidelity: The column index for the training iteration fidelity parameter (optional). data_fidelities: The column indices for the downsampling fidelity parameters (optional). linear_truncated: If True, use a `LinearTruncatedFidelityKernel` instead of the default kernel. nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or 5/2. Only used when `linear_truncated=True`. Returns: The covariance module and subset_batch_dict. """ if iteration_fidelity is not None and iteration_fidelity < 0: iteration_fidelity = dim + iteration_fidelity if data_fidelities is not None: for i in range(len(data_fidelities)): if data_fidelities[i] < 0: data_fidelities[i] = dim + data_fidelities[i] kernels = [] if linear_truncated: leading_dims = [iteration_fidelity] if iteration_fidelity is not None else [] trailing_dims = ( [[i] for i in data_fidelities] if data_fidelities is not None else [[]] ) for tdims in trailing_dims: kernels.append( LinearTruncatedFidelityKernel( fidelity_dims=leading_dims + tdims, dimension=dim, nu=nu, batch_shape=aug_batch_shape, power_prior=GammaPrior(3.0, 3.0), ) ) else: non_active_dims = set(data_fidelities or []) if iteration_fidelity is not None: non_active_dims.add(iteration_fidelity) active_dimsX = sorted(set(range(dim)) - non_active_dims) kernels.append( RBFKernel( ard_num_dims=len(active_dimsX), batch_shape=aug_batch_shape, lengthscale_prior=GammaPrior(3.0, 6.0), active_dims=active_dimsX, ) ) if iteration_fidelity is not None: kernels.append( ExponentialDecayKernel( batch_shape=aug_batch_shape, lengthscale_prior=GammaPrior(3.0, 6.0), offset_prior=GammaPrior(3.0, 6.0), power_prior=GammaPrior(3.0, 6.0), active_dims=[iteration_fidelity], ) ) if data_fidelities is not None: for data_fidelity in data_fidelities: kernels.append( DownsamplingKernel( batch_shape=aug_batch_shape, offset_prior=GammaPrior(3.0, 6.0), power_prior=GammaPrior(3.0, 6.0), active_dims=[data_fidelity], ) ) kernel = ProductKernel(*kernels) covar_module = ScaleKernel( kernel, batch_shape=aug_batch_shape, outputscale_prior=GammaPrior(2.0, 0.15) ) key_prefix = "covar_module.base_kernel.kernels" if linear_truncated: subset_batch_dict = {} for i in range(len(kernels)): subset_batch_dict.update( { f"{key_prefix}.{i}.raw_power": -2, f"{key_prefix}.{i}.covar_module_unbiased.raw_lengthscale": -3, f"{key_prefix}.{i}.covar_module_biased.raw_lengthscale": -3, } ) else: subset_batch_dict = { f"{key_prefix}.0.raw_lengthscale": -3, } if iteration_fidelity is not None: subset_batch_dict.update( { f"{key_prefix}.1.raw_power": -2, f"{key_prefix}.1.raw_offset": -2, f"{key_prefix}.1.raw_lengthscale": -3, } ) if data_fidelities is not None: start_idx = 2 if iteration_fidelity is not None else 1 for i in range(start_idx, len(data_fidelities) + start_idx): subset_batch_dict.update( { 
f"{key_prefix}.{i}.raw_power": -2, f"{key_prefix}.{i}.raw_offset": -2, } ) return covar_module, subset_batch_dict
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations import warnings from typing import Any, Callable, Dict, List, Optional import torch from botorch.exceptions.warnings import InputDataWarning from botorch.models.gp_regression import SingleTaskGP from botorch.models.kernels.categorical import CategoricalKernel from botorch.models.transforms.input import InputTransform from botorch.models.transforms.outcome import OutcomeTransform from botorch.utils.datasets import SupervisedDataset from botorch.utils.transforms import normalize_indices from gpytorch.constraints import GreaterThan from gpytorch.kernels.kernel import Kernel from gpytorch.kernels.matern_kernel import MaternKernel from gpytorch.kernels.scale_kernel import ScaleKernel from gpytorch.likelihoods.gaussian_likelihood import GaussianLikelihood from gpytorch.likelihoods.likelihood import Likelihood from gpytorch.priors import GammaPrior from torch import Tensor class MixedSingleTaskGP(SingleTaskGP): r"""A single-task exact GP model for mixed search spaces. This model is similar to `SingleTaskGP`, but supports mixed search spaces, which combine discrete and continuous features, as well as solely discrete spaces. It uses a kernel that combines a CategoricalKernel (based on Hamming distances) and a regular kernel into a kernel of the form K((x1, c1), (x2, c2)) = K_cont_1(x1, x2) + K_cat_1(c1, c2) + K_cont_2(x1, x2) * K_cat_2(c1, c2) where `xi` and `ci` are the continuous and categorical features of the input, respectively. The suffix `_i` indicates that we fit different lengthscales for the kernels in the sum and product terms. Since this model does not provide gradients for the categorical features, optimization of the acquisition function will need to be performed in a mixed fashion, i.e., treating the categorical features properly as discrete optimization variables. We recommend using `optimize_acqf_mixed.` Example: >>> train_X = torch.cat( [torch.rand(20, 2), torch.randint(3, (20, 1))], dim=-1) ) >>> train_Y = ( torch.sin(train_X[..., :-1]).sum(dim=1, keepdim=True) + train_X[..., -1:] ) >>> model = MixedSingleTaskGP(train_X, train_Y, cat_dims=[-1]) """ def __init__( self, train_X: Tensor, train_Y: Tensor, cat_dims: List[int], cont_kernel_factory: Optional[ Callable[[torch.Size, int, List[int]], Kernel] ] = None, likelihood: Optional[Likelihood] = None, outcome_transform: Optional[OutcomeTransform] = None, # TODO input_transform: Optional[InputTransform] = None, # TODO ) -> None: r"""A single-task exact GP model supporting categorical parameters. Args: train_X: A `batch_shape x n x d` tensor of training features. train_Y: A `batch_shape x n x m` tensor of training observations. cat_dims: A list of indices corresponding to the columns of the input `X` that should be considered categorical features. cont_kernel_factory: A method that accepts `batch_shape`, `ard_num_dims`, and `active_dims` arguments and returns an instantiated GPyTorch `Kernel` object to be used as the base kernel for the continuous dimensions. If omitted, this model uses a Matern-2.5 kernel as the kernel for the ordinal parameters. likelihood: A likelihood. If omitted, use a standard GaussianLikelihood with inferred noise level. 
outcome_transform: An outcome transform that is applied to the training data during instantiation and to the posterior during inference (that is, the `Posterior` obtained by calling `.posterior` on the model will be on the original scale). input_transform: An input transform that is applied in the model's forward pass. Only input transforms are allowed which do not transform the categorical dimensions. If you want to use it for example in combination with a `OneHotToNumeric` input transform one has to instantiate the transform with `transform_on_train` == False and pass in the already transformed input. """ if len(cat_dims) == 0: raise ValueError( "Must specify categorical dimensions for MixedSingleTaskGP" ) self._ignore_X_dims_scaling_check = cat_dims _, aug_batch_shape = self.get_batch_dimensions(train_X=train_X, train_Y=train_Y) if cont_kernel_factory is None: def cont_kernel_factory( batch_shape: torch.Size, ard_num_dims: int, active_dims: List[int], ) -> MaternKernel: return MaternKernel( nu=2.5, batch_shape=batch_shape, ard_num_dims=ard_num_dims, active_dims=active_dims, lengthscale_constraint=GreaterThan(1e-04), ) if likelihood is None: # This Gamma prior is quite close to the Horseshoe prior min_noise = 1e-5 if train_X.dtype == torch.float else 1e-6 likelihood = GaussianLikelihood( batch_shape=aug_batch_shape, noise_constraint=GreaterThan( min_noise, transform=None, initial_value=1e-3 ), noise_prior=GammaPrior(0.9, 10.0), ) d = train_X.shape[-1] cat_dims = normalize_indices(indices=cat_dims, d=d) ord_dims = sorted(set(range(d)) - set(cat_dims)) if len(ord_dims) == 0: covar_module = ScaleKernel( CategoricalKernel( batch_shape=aug_batch_shape, ard_num_dims=len(cat_dims), lengthscale_constraint=GreaterThan(1e-06), ) ) else: sum_kernel = ScaleKernel( cont_kernel_factory( batch_shape=aug_batch_shape, ard_num_dims=len(ord_dims), active_dims=ord_dims, ) + ScaleKernel( CategoricalKernel( batch_shape=aug_batch_shape, ard_num_dims=len(cat_dims), active_dims=cat_dims, lengthscale_constraint=GreaterThan(1e-06), ) ) ) prod_kernel = ScaleKernel( cont_kernel_factory( batch_shape=aug_batch_shape, ard_num_dims=len(ord_dims), active_dims=ord_dims, ) * CategoricalKernel( batch_shape=aug_batch_shape, ard_num_dims=len(cat_dims), active_dims=cat_dims, lengthscale_constraint=GreaterThan(1e-06), ) ) covar_module = sum_kernel + prod_kernel super().__init__( train_X=train_X, train_Y=train_Y, likelihood=likelihood, covar_module=covar_module, outcome_transform=outcome_transform, input_transform=input_transform, ) @classmethod def construct_inputs( cls, training_data: SupervisedDataset, categorical_features: List[int], likelihood: Optional[Likelihood] = None, **kwargs: Any, ) -> Dict[str, Any]: r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`. Args: training_data: A `SupervisedDataset` containing the training data. categorical_features: Column indices of categorical features. likelihood: Optional likelihood used to constuct the model. """ base_inputs = super().construct_inputs(training_data=training_data, **kwargs) if base_inputs.pop("train_Yvar", None) is not None: # TODO: Remove when SingleTaskGP supports optional Yvar [T162925473]. warnings.warn( "`MixedSingleTaskGP` only supports inferred noise at the moment. " "Ignoring the provided `train_Yvar` observations.", InputDataWarning, ) return { **base_inputs, "cat_dims": categorical_features, "likelihood": likelihood, }
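# --- Illustrative usage sketch (assumed toy data): a `MixedSingleTaskGP` with
# two continuous dimensions and one categorical dimension (3 levels) in the
# last column.
import torch
from botorch.models import MixedSingleTaskGP

cont = torch.rand(20, 2, dtype=torch.double)
cat = torch.randint(3, (20, 1)).to(torch.double)
train_X = torch.cat([cont, cat], dim=-1)
train_Y = cont.sin().sum(-1, keepdim=True) + cat
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=[-1])
posterior = model.posterior(train_X[:5])  # prior hyperparameters, not yet fitted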
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from typing import Optional import torch from gpytorch.constraints import Interval, Positive from gpytorch.kernels import Kernel from gpytorch.priors import Prior from torch import Tensor class DownsamplingKernel(Kernel): r"""GPyTorch Downsampling Kernel. Computes a covariance matrix based on the down sampling kernel between inputs `x_1` and `x_2` (we expect `d = 1`): K(\mathbf{x_1}, \mathbf{x_2}) = c + (1 - x_1)^(1 + delta) * (1 - x_2)^(1 + delta). where `c` is an offset parameter, and `delta` is a power parameter. """ def __init__( self, power_prior: Optional[Prior] = None, offset_prior: Optional[Prior] = None, power_constraint: Optional[Interval] = None, offset_constraint: Optional[Interval] = None, **kwargs, ): r""" Args: power_constraint: Constraint to place on power parameter. Default is `Positive`. power_prior: Prior over the power parameter. offset_constraint: Constraint to place on offset parameter. Default is `Positive`. active_dims: List of data dimensions to operate on. `len(active_dims)` should equal `num_dimensions`. """ super().__init__(**kwargs) if power_constraint is None: power_constraint = Positive() if offset_constraint is None: offset_constraint = Positive() self.register_parameter( name="raw_power", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)), ) self.register_parameter( name="raw_offset", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)), ) if power_prior is not None: self.register_prior( "power_prior", power_prior, lambda m: m.power, lambda m, v: m._set_power(v), ) self.register_constraint("raw_power", power_constraint) if offset_prior is not None: self.register_prior( "offset_prior", offset_prior, lambda m: m.offset, lambda m, v: m._set_offset(v), ) self.register_constraint("raw_offset", offset_constraint) @property def power(self) -> Tensor: return self.raw_power_constraint.transform(self.raw_power) @power.setter def power(self, value: Tensor) -> None: self._set_power(value) def _set_power(self, value: Tensor) -> None: if not torch.is_tensor(value): value = torch.as_tensor(value).to(self.raw_power) self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value)) @property def offset(self) -> Tensor: return self.raw_offset_constraint.transform(self.raw_offset) @offset.setter def offset(self, value: Tensor) -> None: self._set_offset(value) def _set_offset(self, value: Tensor) -> None: if not torch.is_tensor(value): value = torch.as_tensor(value).to(self.raw_offset) self.initialize(raw_offset=self.raw_offset_constraint.inverse_transform(value)) def forward( self, x1: Tensor, x2: Tensor, diag: Optional[bool] = False, last_dim_is_batch: Optional[bool] = False, **params, ) -> Tensor: offset = self.offset exponent = 1 + self.power if last_dim_is_batch: x1 = x1.transpose(-1, -2).unsqueeze(-1) x2 = x2.transpose(-1, -2).unsqueeze(-1) x1_ = 1 - x1 x2_ = 1 - x2 if diag: return offset + (x1_ * x2_).sum(dim=-1).pow(exponent) offset = offset.unsqueeze(-1) # unsqueeze enables batch evaluation exponent = exponent.unsqueeze(-1) # unsqueeze enables batch evaluation return offset + x1_.pow(exponent) @ x2_.transpose(-2, -1).pow(exponent)
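# --- Illustrative sketch (assumed toy inputs): evaluating the downsampling
# kernel on one-dimensional fidelity values in [0, 1].
import torch
from botorch.models.kernels.downsampling import DownsamplingKernel

kernel = DownsamplingKernel()
x1, x2 = torch.rand(5, 1), torch.rand(3, 1)
# 5 x 3 covariance matrix: offset + (1 - x1)^(1 + power) * (1 - x2)^(1 + power)
K = kernel(x1, x2).to_dense()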
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from gpytorch.kernels.kernel import Kernel
from torch import Tensor


class CategoricalKernel(Kernel):
    r"""A Kernel for categorical features.

    Computes `exp(-dist(x1, x2) / lengthscale)`, where
    `dist(x1, x2)` is zero if `x1 == x2` and one if `x1 != x2`.
    If the last dimension is not a batch dimension, the scaled
    distances are averaged over the feature dimension.

    Note: This kernel is NOT differentiable w.r.t. the inputs.
    """

    has_lengthscale = True

    def forward(
        self,
        x1: Tensor,
        x2: Tensor,
        diag: bool = False,
        last_dim_is_batch: bool = False,
        **kwargs,
    ) -> Tensor:
        delta = x1.unsqueeze(-2) != x2.unsqueeze(-3)
        dists = delta / self.lengthscale.unsqueeze(-2)
        if last_dim_is_batch:
            dists = dists.transpose(-3, -1)
        else:
            dists = dists.mean(-1)
        res = torch.exp(-dists)
        if diag:
            res = torch.diagonal(res, dim1=-1, dim2=-2)
        return res
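# --- Illustrative usage sketch (not part of the library source) ---
# CategoricalKernel on integer-coded categorical inputs with one lengthscale
# per column (ARD). Matching categories contribute zero distance, mismatches
# contribute one (before scaling and averaging). Values are arbitrary.
import torch
from botorch.models.kernels.categorical import CategoricalKernel

x = torch.tensor([[0.0, 1.0], [2.0, 1.0], [0.0, 0.0]])
kernel = CategoricalKernel(ard_num_dims=2)
covar = kernel(x).to_dense()  # 3 x 3 matrix of exp(-mean scaled Hamming distance)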
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import List, Optional, Tuple import numpy import torch from botorch.exceptions.errors import UnsupportedError from gpytorch.constraints import Interval, Positive from gpytorch.kernels import Kernel from torch import nn, Tensor _positivity_constraint = Positive() class OrthogonalAdditiveKernel(Kernel): r"""Orthogonal Additive Kernels (OAKs) were introduced in [Lu2022additive]_, though only for the case of Gaussian base kernels with a Gaussian input data distribution. The implementation here generalizes OAKs to arbitrary base kernels by using a Gauss-Legendre quadrature approximation to the required one-dimensional integrals involving the base kernels. .. [Lu2022additive] X. Lu, A. Boukouvalas, and J. Hensman. Additive Gaussian processes revisited. Proceedings of the 39th International Conference on Machine Learning. Jul 2022. """ def __init__( self, base_kernel: Kernel, dim: int, quad_deg: int = 32, second_order: bool = False, batch_shape: Optional[torch.Size] = None, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, coeff_constraint: Interval = _positivity_constraint, ): """ Args: base_kernel: The kernel which to orthogonalize and evaluate in `forward`. dim: Input dimensionality of the kernel. quad_deg: Number of integration nodes for orthogonalization. second_order: Toggles second order interactions. If true, both the time and space complexity of evaluating the kernel are quadratic in `dim`. batch_shape: Optional batch shape for the kernel and its parameters. dtype: Initialization dtype for required Tensors. device: Initialization device for required Tensors. coeff_constraint: Constraint on the coefficients of the additive kernel. """ super().__init__(batch_shape=batch_shape) self.base_kernel = base_kernel # integration nodes, weights for [0, 1] tkwargs = {"dtype": dtype, "device": device} z, w = leggauss(deg=quad_deg, a=0, b=1, **tkwargs) self.z = z.unsqueeze(-1).expand(quad_deg, dim) # deg x dim self.w = w.unsqueeze(-1) self.register_parameter( name="raw_offset", parameter=nn.Parameter(torch.zeros(self.batch_shape, **tkwargs)), ) log_d = math.log(dim) self.register_parameter( name="raw_coeffs_1", parameter=nn.Parameter( torch.zeros(*self.batch_shape, dim, **tkwargs) - log_d ), ) self.register_parameter( name="raw_coeffs_2", parameter=nn.Parameter( torch.zeros(*self.batch_shape, int(dim * (dim - 1) / 2), **tkwargs) - 2 * log_d ) if second_order else None, ) if second_order: self._rev_triu_indices = torch.tensor( _reverse_triu_indices(dim), device=device, dtype=int, ) # zero tensor for construction of upper-triangular coefficient matrix self._quad_zero = torch.zeros( tuple(1 for _ in range(len(batch_shape) + 1)), **tkwargs ).expand(*batch_shape, 1) self.coeff_constraint = coeff_constraint self.dim = dim def k(self, x1, x2) -> Tensor: """Evaluates the kernel matrix base_kernel(x1, x2) on each input dimension independently. Args: x1: `batch_shape x n1 x d`-dim Tensor in [0, 1]^dim. x2: `batch_shape x n2 x d`-dim Tensor in [0, 1]^dim. Returns: A `batch_shape x d x n1 x n2`-dim Tensor of kernel matrices. 
""" return self.base_kernel(x1, x2, last_dim_is_batch=True).to_dense() @property def offset(self) -> Tensor: """Returns the `batch_shape`-dim Tensor of zeroth-order coefficients.""" return self.coeff_constraint.transform(self.raw_offset) @property def coeffs_1(self) -> Tensor: """Returns the `batch_shape x d`-dim Tensor of first-order coefficients.""" return self.coeff_constraint.transform(self.raw_coeffs_1) @property def coeffs_2(self) -> Optional[Tensor]: """Returns the upper-triangular tensor of second-order coefficients. NOTE: We only keep track of the upper triangular part of raw second order coefficients since the effect of the lower triangular part is identical and exclude the diagonal, since it is associated with first-order effects only. While we could further exploit this structure in the forward pass, the associated indexing and temporary allocations make it significantly less efficient than the einsum-based implementation below. Returns: `batch_shape x d x d`-dim Tensor of second-order coefficients. """ if self.raw_coeffs_2 is not None: C2 = self.coeff_constraint.transform(self.raw_coeffs_2) C2 = torch.cat((C2, self._quad_zero), dim=-1) # batch_shape x (d(d-1)/2+1) C2 = C2.index_select(-1, self._rev_triu_indices) return C2.reshape(*self.batch_shape, self.dim, self.dim) else: return None def forward( self, x1: Tensor, x2: Tensor, diag: bool = False, last_dim_is_batch: bool = False, ) -> Tensor: """Computes the kernel matrix k(x1, x2). Args: x1: `batch_shape x n1 x d`-dim Tensor in [0, 1]^dim. x2: `batch_shape x n2 x d`-dim Tensor in [0, 1]^dim. diag: If True, only returns the diagonal of the kernel matrix. last_dim_is_batch: Not supported by this kernel. Returns: A `batch_shape x n1 x n2`-dim Tensor of kernel matrices. """ if last_dim_is_batch: raise UnsupportedError( "OrthogonalAdditiveKernel does not support `last_dim_is_batch`." ) K_ortho = self._orthogonal_base_kernels(x1, x2) # batch_shape x d x n1 x n2 # contracting over d, leading to `batch_shape x n x n`-dim tensor, i.e.: # K1 = torch.sum(self.coeffs_1[..., None, None] * K_ortho, dim=-3) K1 = torch.einsum(self.coeffs_1, [..., 0], K_ortho, [..., 0, 1, 2], [..., 1, 2]) # adding the non-batch dimensions to offset K = K1 + self.offset[..., None, None] if self.coeffs_2 is not None: # Computing the tensor of second order interactions K2. # NOTE: K2 here is equivalent to: # K2 = K_ortho.unsqueeze(-4) * K_ortho.unsqueeze(-3) # d x d x n x n # K2 = (self.coeffs_2[..., None, None] * K2).sum(dim=(-4, -3)) # but avoids forming the `batch_shape x d x d x n x n`-dim tensor in memory. # Reducing over the dimensions with the O(d^2) quadratic terms: K2 = torch.einsum( K_ortho, [..., 0, 2, 3], K_ortho, [..., 1, 2, 3], self.coeffs_2, [..., 0, 1], [..., 2, 3], # i.e. contracting over the first two non-batch dims ) K = K + K2 return K if not diag else K.diag() # poor man's diag (TODO) def _orthogonal_base_kernels(self, x1: Tensor, x2: Tensor) -> Tensor: """Evaluates the set of `d` orthogonalized base kernels on (x1, x2). Note that even if the base kernel is positive, the orthogonalized versions can - and usually do - take negative values. Args: x1: `batch_shape x n1 x d`-dim inputs to the kernel. x2: `batch_shape x n2 x d`-dim inputs to the kernel. Returns: A `batch_shape x d x n1 x n2`-dim Tensor. 
""" _check_hypercube(x1, "x1") if x1 is not x2: _check_hypercube(x2, "x2") Kx1x2 = self.k(x1, x2) # d x n x n # Overwriting allocated quadrature tensors with fitting dtype and device # self.z, self.w = self.z.to(x1), self.w.to(x1) # include normalization constant in weights w = self.w / self.normalizer().sqrt() Skx1 = self.k(x1, self.z) @ w # batch_shape x d x n Skx2 = Skx1 if (x1 is x2) else self.k(x2, self.z) @ w # d x n # this is a tensor of kernel matrices of orthogonal 1d kernels K_ortho = (Kx1x2 - Skx1 @ Skx2.transpose(-2, -1)).to_dense() # d x n x n return K_ortho def normalizer(self, eps: float = 1e-6) -> Tensor: """Integrates the `d` orthogonalized base kernels over `[0, 1] x [0, 1]`. NOTE: If the module is in train mode, this needs to re-compute the normalizer each time because the underlying parameters might have changed. Args: eps: Minimum value constraint on the normalizers. Avoids division by zero. Returns: A `d`-dim tensor of normalization constants. """ if self.train() or getattr(self, "_normalizer", None) is None: self._normalizer = (self.w.T @ self.k(self.z, self.z) @ self.w).clamp(eps) return self._normalizer def leggauss( deg: int, a: float = -1.0, b: float = 1.0, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, ) -> Tuple[Tensor, Tensor]: """Computes Gauss-Legendre quadrature nodes and weights. Wraps `numpy.polynomial.legendre.leggauss` and returns Torch Tensors. Args: deg: Number of sample points and weights. Integrates poynomials of degree `2 * deg + 1` exactly. a, b: Lower and upper bound of integration domain. dtype: Desired floating point type of the return Tensors. device: Desired device type of the return Tensors. Returns: A tuple of Gauss-Legendre quadrature nodes and weights of length deg. """ dtype = dtype if dtype is not None else torch.get_default_dtype() x, w = numpy.polynomial.legendre.leggauss(deg=deg) x = torch.as_tensor(x, dtype=dtype, device=device) w = torch.as_tensor(w, dtype=dtype, device=device) if not (a == -1 and b == 1): # need to normalize for different domain x = (b - a) * (x + 1) / 2 + a w = w * ((b - a) / 2) return x, w def _check_hypercube(x: Tensor, name: str) -> None: """Raises a `ValueError` if an element `x` is not in [0, 1]. Args: x: Tensor to be checked. name: Name of the Tensor for the error message. """ if (x < 0).any() or (x > 1).any(): raise ValueError(name + " is not in hypercube [0, 1]^d.") def _reverse_triu_indices(d: int) -> List[int]: """Computes a list of indices which, upon indexing a `d * (d - 1) / 2 + 1`-dim Tensor whose last element is zero, will lead to a vectorized representation of an upper-triangular matrix, whose diagonal is set to zero and whose super-diagonal elements are set to the `d * (d - 1) / 2` values in the original tensor. NOTE: This is a helper function for Orthogonal Additive Kernels, and allows the implementation to only register `d * (d - 1) / 2` parameters to model the second order interactions, instead of the full d^2 redundant terms. Args: d: Dimensionality that gives rise to the `d * (d - 1) / 2` quadratic terms. Returns: A list of integer indices in `[0, d * (d - 1) / 2]`. See above for details. """ indices = [] j = 0 d2 = int(d * (d - 1) / 2) for i in range(d): indices.extend(d2 for _ in range(i + 1)) # indexing zero (sub-diagonal) indices.extend(range(j, j + d - i - 1)) # indexing coeffs (super-diagonal) j += d - i - 1 return indices
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.models.kernels.categorical import CategoricalKernel from botorch.models.kernels.downsampling import DownsamplingKernel from botorch.models.kernels.exponential_decay import ExponentialDecayKernel from botorch.models.kernels.linear_truncated_fidelity import ( LinearTruncatedFidelityKernel, ) __all__ = [ "CategoricalKernel", "DownsamplingKernel", "ExponentialDecayKernel", "LinearTruncatedFidelityKernel", ]
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Dict, List, Optional import torch from gpytorch.kernels.kernel import Kernel from gpytorch.kernels.matern_kernel import MaternKernel from gpytorch.kernels.scale_kernel import ScaleKernel from gpytorch.priors.torch_priors import GammaPrior from linear_operator.operators.sum_linear_operator import SumLinearOperator from torch import Tensor from torch.nn import ModuleDict # pyre-ignore class SACKernel(Kernel): r"""The structural additive contextual(SAC) kernel. The kernel is used for contextual BO without oberseving context breakdowns. There are d parameters and M contexts. In total, the dimension of parameter space is d*M and input x can be written as x=[x_11, ..., x_1d, x_21, ..., x_2d, ..., x_M1, ..., x_Md]. The kernel uses the parameter decomposition and assumes an additive structure across contexts. Each context compponent is assumed to be independent. .. math:: \begin{equation*} k(\mathbf{x}, \mathbf{x'}) = k_1(\mathbf{x_(1)}, \mathbf{x'_(1)}) + \cdots + k_M(\mathbf{x_(M)}, \mathbf{x'_(M)}) \end{equation*} where * :math: M is the number of partitions of parameter space. Each partition contains same number of parameters d. Each kernel `k_i` acts only on d parameters of ith partition i.e. `\mathbf{x}_(i)`. Each kernel `k_i` is a scaled Matern kernel with same lengthscales but different outputscales. """ def __init__( self, decomposition: Dict[str, List[int]], batch_shape: torch.Size, device: Optional[torch.device] = None, ) -> None: r""" Args: decomposition: Keys are context names. Values are the indexes of parameters belong to the context. The parameter indexes are in the same order across contexts. batch_shape: Batch shape as usual for gpytorch kernels. device: The torch device. """ super().__init__(batch_shape=batch_shape) self.decomposition = decomposition self._device = device num_param = len(next(iter(decomposition.values()))) for active_parameters in decomposition.values(): # check number of parameters are same in each decomp if len(active_parameters) != num_param: raise ValueError( "num of parameters needs to be same across all contexts" ) self._indexers = { context: torch.tensor(active_params, device=self.device) for context, active_params in self.decomposition.items() } self.base_kernel = MaternKernel( nu=2.5, ard_num_dims=num_param, batch_shape=batch_shape, lengthscale_prior=GammaPrior(3.0, 6.0), ) self.kernel_dict = {} # scaled kernel for each parameter space partition for context in list(decomposition.keys()): self.kernel_dict[context] = ScaleKernel( base_kernel=self.base_kernel, outputscale_prior=GammaPrior(2.0, 15.0) ) self.kernel_dict = ModuleDict(self.kernel_dict) @property def device(self) -> Optional[torch.device]: return self._device def forward( self, x1: Tensor, x2: Tensor, diag: bool = False, last_dim_is_batch: bool = False, **params: Any, ) -> Tensor: """ iterate across each partition of parameter space and sum the covariance matrices together """ # same lengthscale for all the components covars = [ self.kernel_dict[context]( x1=x1.index_select(dim=-1, index=active_params), # pyre-ignore x2=x2.index_select(dim=-1, index=active_params), diag=diag, ) for context, active_params in self._indexers.items() ] if diag: res = sum(covars) else: res = SumLinearOperator(*covars) return res
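# --- Illustrative usage sketch (not part of the library source) ---
# SACKernel with two contexts, each owning two of four input dimensions, so the
# covariance is the sum of per-context scaled Matern kernels with shared
# lengthscales. The decomposition and data below are arbitrary.
import torch
from botorch.models.kernels.contextual_sac import SACKernel

decomposition = {"context_a": [0, 1], "context_b": [2, 3]}
kernel = SACKernel(decomposition=decomposition, batch_shape=torch.Size([]))
X = torch.rand(6, 4)  # columns ordered as [x_a1, x_a2, x_b1, x_b2]
K = kernel(X).to_dense()  # 6 x 6 additive covariance across contexts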
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from typing import Any, List, Optional import torch from botorch.exceptions import UnsupportedError from gpytorch.constraints import Interval, Positive from gpytorch.kernels import Kernel from gpytorch.kernels.matern_kernel import MaternKernel from gpytorch.priors import Prior from gpytorch.priors.torch_priors import GammaPrior from torch import Tensor class LinearTruncatedFidelityKernel(Kernel): r"""GPyTorch Linear Truncated Fidelity Kernel. Computes a covariance matrix based on the Linear truncated kernel between inputs `x_1` and `x_2` for up to two fidelity parmeters: K(x_1, x_2) = k_0 + c_1(x_1, x_2)k_1 + c_2(x_1,x_2)k_2 + c_3(x_1,x_2)k_3 where - `k_i(i=0,1,2,3)` are Matern kernels calculated between non-fidelity parameters of `x_1` and `x_2` with different priors. - `c_1=(1 - x_1[f_1])(1 - x_2[f_1]))(1 + x_1[f_1] x_2[f_1])^p` is the kernel of the the bias term, which can be decomposed into a determistic part and a polynomial kernel. Here `f_1` is the first fidelity dimension and `p` is the order of the polynomial kernel. - `c_3` is the same as `c_1` but is calculated for the second fidelity dimension `f_2`. - `c_2` is the interaction term with four deterministic terms and the polynomial kernel between `x_1[..., [f_1, f_2]]` and `x_2[..., [f_1, f_2]]`. Example: >>> x = torch.randn(10, 5) >>> # Non-batch: Simple option >>> covar_module = LinearTruncatedFidelityKernel() >>> covar = covar_module(x) # Output: LinearOperator of size (10 x 10) >>> >>> batch_x = torch.randn(2, 10, 5) >>> # Batch: Simple option >>> covar_module = LinearTruncatedFidelityKernel(batch_shape = torch.Size([2])) >>> covar = covar_module(x) # Output: LinearOperator of size (2 x 10 x 10) """ def __init__( # noqa C901 self, fidelity_dims: List[int], dimension: Optional[int] = None, power_prior: Optional[Prior] = None, power_constraint: Optional[Interval] = None, nu: float = 2.5, lengthscale_prior_unbiased: Optional[Prior] = None, lengthscale_prior_biased: Optional[Prior] = None, lengthscale_constraint_unbiased: Optional[Interval] = None, lengthscale_constraint_biased: Optional[Interval] = None, covar_module_unbiased: Optional[Kernel] = None, covar_module_biased: Optional[Kernel] = None, **kwargs: Any, ) -> None: """ Args: fidelity_dims: A list containing either one or two indices specifying the fidelity parameters of the input. dimension: The dimension of `x`. Unused if `active_dims` is specified. power_prior: Prior for the power parameter of the polynomial kernel. Default is `None`. power_constraint: Constraint on the power parameter of the polynomial kernel. Default is `Positive`. nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or 5/2. Unused if both `covar_module_unbiased` and `covar_module_biased` are specified. lengthscale_prior_unbiased: Prior on the lengthscale parameter of Matern kernel `k_0`. Default is `Gamma(1.1, 1/20)`. lengthscale_constraint_unbiased: Constraint on the lengthscale parameter of the Matern kernel `k_0`. Default is `Positive`. lengthscale_prior_biased: Prior on the lengthscale parameter of Matern kernels `k_i(i>0)`. Default is `Gamma(5, 1/20)`. lengthscale_constraint_biased: Constraint on the lengthscale parameter of the Matern kernels `k_i(i>0)`. Default is `Positive`. covar_module_unbiased: Specify a custom kernel for `k_0`. 
If omitted, use a `MaternKernel`. covar_module_biased: Specify a custom kernel for the biased parts `k_i(i>0)`. If omitted, use a `MaternKernel`. batch_shape: If specified, use a separate lengthscale for each batch of input data. If `x1` is a `batch_shape x n x d` tensor, this should be `batch_shape`. active_dims: Compute the covariance of a subset of input dimensions. The numbers correspond to the indices of the dimensions. """ if dimension is None and kwargs.get("active_dims") is None: raise UnsupportedError( "Must specify dimension when not specifying active_dims." ) n_fidelity = len(fidelity_dims) if len(set(fidelity_dims)) != n_fidelity: raise ValueError("fidelity_dims must not have repeated elements") if n_fidelity not in {1, 2}: raise UnsupportedError( "LinearTruncatedFidelityKernel accepts either one or two" "fidelity parameters." ) if nu not in {0.5, 1.5, 2.5}: raise ValueError("nu must be one of 0.5, 1.5, or 2.5") super().__init__(**kwargs) self.fidelity_dims = fidelity_dims if power_constraint is None: power_constraint = Positive() if lengthscale_prior_unbiased is None: lengthscale_prior_unbiased = GammaPrior(3, 6) if lengthscale_prior_biased is None: lengthscale_prior_biased = GammaPrior(6, 2) if lengthscale_constraint_unbiased is None: lengthscale_constraint_unbiased = Positive() if lengthscale_constraint_biased is None: lengthscale_constraint_biased = Positive() self.register_parameter( name="raw_power", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)), ) self.register_constraint("raw_power", power_constraint) if power_prior is not None: self.register_prior( "power_prior", power_prior, lambda m: m.power, lambda m, v: m._set_power(v), ) if self.active_dims is not None: dimension = len(self.active_dims) if covar_module_unbiased is None: covar_module_unbiased = MaternKernel( nu=nu, batch_shape=self.batch_shape, lengthscale_prior=lengthscale_prior_unbiased, ard_num_dims=dimension - n_fidelity, lengthscale_constraint=lengthscale_constraint_unbiased, ) if covar_module_biased is None: covar_module_biased = MaternKernel( nu=nu, batch_shape=self.batch_shape, lengthscale_prior=lengthscale_prior_biased, ard_num_dims=dimension - n_fidelity, lengthscale_constraint=lengthscale_constraint_biased, ) self.covar_module_unbiased = covar_module_unbiased self.covar_module_biased = covar_module_biased @property def power(self) -> Tensor: return self.raw_power_constraint.transform(self.raw_power) @power.setter def power(self, value: Tensor) -> None: self._set_power(value) def _set_power(self, value: Tensor) -> None: if not torch.is_tensor(value): value = torch.as_tensor(value).to(self.raw_power) self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value)) def forward(self, x1: Tensor, x2: Tensor, diag: bool = False, **params) -> Tensor: if params.get("last_dim_is_batch", False): raise NotImplementedError( "last_dim_is_batch not yet supported by LinearTruncatedFidelityKernel" ) power = self.power.view(*self.batch_shape, 1, 1) active_dimsM = torch.tensor( [i for i in range(x1.size(-1)) if i not in self.fidelity_dims], device=x1.device, ) if len(active_dimsM) == 0: raise RuntimeError( "Input to LinearTruncatedFidelityKernel must have at least one " "non-fidelity dimension." 
) x1_ = x1.index_select(dim=-1, index=active_dimsM) x2_ = x2.index_select(dim=-1, index=active_dimsM) covar_unbiased = self.covar_module_unbiased(x1_, x2_, diag=diag) covar_biased = self.covar_module_biased(x1_, x2_, diag=diag) # clamp to avoid numerical issues fd_idxr0 = torch.full( (1,), self.fidelity_dims[0], dtype=torch.long, device=x1.device ) x11_ = x1.index_select(dim=-1, index=fd_idxr0).clamp(0, 1) x21t_ = x2.index_select(dim=-1, index=fd_idxr0).clamp(0, 1) if not diag: x21t_ = x21t_.transpose(-1, -2) cross_term_1 = (1 - x11_) * (1 - x21t_) bias_factor = cross_term_1 * (1 + x11_ * x21t_).pow(power) if len(self.fidelity_dims) > 1: # clamp to avoid numerical issues fd_idxr1 = torch.full( (1,), self.fidelity_dims[1], dtype=torch.long, device=x1.device ) x12_ = x1.index_select(dim=-1, index=fd_idxr1).clamp(0, 1) x22t_ = x2.index_select(dim=-1, index=fd_idxr1).clamp(0, 1) x1b_ = torch.cat([x11_, x12_], dim=-1) if diag: x2bt_ = torch.cat([x21t_, x22t_], dim=-1) k = (1 + (x1b_ * x2bt_).sum(dim=-1, keepdim=True)).pow(power) else: x22t_ = x22t_.transpose(-1, -2) x2bt_ = torch.cat([x21t_, x22t_], dim=-2) k = (1 + x1b_ @ x2bt_).pow(power) cross_term_2 = (1 - x12_) * (1 - x22t_) bias_factor += cross_term_2 * (1 + x12_ * x22t_).pow(power) bias_factor += cross_term_2 * cross_term_1 * k if diag: bias_factor = bias_factor.view(covar_biased.shape) return covar_unbiased + bias_factor * covar_biased
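# --- Illustrative usage sketch (not part of the library source) ---
# LinearTruncatedFidelityKernel on 5-d inputs whose last column is a fidelity
# parameter in [0, 1]; the remaining four columns are ordinary parameters.
# Dimensions and data are arbitrary.
import torch
from botorch.models.kernels.linear_truncated_fidelity import (
    LinearTruncatedFidelityKernel,
)

kernel = LinearTruncatedFidelityKernel(fidelity_dims=[4], dimension=5)
X = torch.rand(10, 5)
K = kernel(X).to_dense()  # 10 x 10 covariance combining unbiased and biased terms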
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Dict, List, Optional import torch from gpytorch.constraints import Positive from gpytorch.kernels.kernel import Kernel from gpytorch.kernels.matern_kernel import MaternKernel from gpytorch.priors.torch_priors import GammaPrior from linear_operator.operators import DiagLinearOperator from linear_operator.operators.dense_linear_operator import DenseLinearOperator from torch import Tensor from torch.nn import ModuleList def get_order(indices: List[int]) -> List[int]: r"""Get the order indices as integers ranging from 0 to the number of indices. Args: indices: A list of parameter indices. Returns: A list of integers ranging from 0 to the number of indices. """ return [i % len(indices) for i in indices] def is_contiguous(indices: List[int]) -> bool: r"""Check if the list of integers is contiguous. Args: indices: A list of parameter indices. Returns: A boolean indicating whether the indices are contiguous. """ min_idx = min(indices) return set(indices) == set(range(min_idx, min_idx + len(indices))) def get_permutation(decomposition: Dict[str, List[int]]) -> Optional[List[int]]: """Construct permutation to reorder the parameters such that: 1) the parameters for each context are contiguous. 2) The parameters for each context are in the same order Args: decomposition: A dictionary mapping context names to a list of parameters. Returns: A permutation to reorder the parameters for (1) and (2). Returning `None` means that ordering specified in `decomposition` satisfies (1) and (2). """ permutation = None if not all( is_contiguous(indices=active_parameters) for active_parameters in decomposition.values() ): permutation = _create_new_permutation(decomposition=decomposition) else: same_order = True expected_order = get_order(indices=next(iter(decomposition.values()))) for active_parameters in decomposition.values(): order = get_order(indices=active_parameters) if order != expected_order: same_order = False break if not same_order: permutation = _create_new_permutation(decomposition=decomposition) return permutation def _create_new_permutation(decomposition: Dict[str, List[int]]) -> List[int]: # make contiguous and ordered permutation = [] for active_parameters in decomposition.values(): sorted_indices = sorted(active_parameters) permutation.extend(sorted_indices) return permutation class LCEAKernel(Kernel): r"""The Latent Context Embedding Additive (LCE-A) Kernel. This kernel is similar to the SACKernel, and is used when context breakdowns are unbserverable. It assumes the same additive structure and a spatial kernel shared across contexts. Rather than assuming independence, LCEAKernel models the correlation in the latent functions for each context through learning context embeddings. """ def __init__( self, decomposition: Dict[str, List[int]], batch_shape: torch.Size, train_embedding: bool = True, cat_feature_dict: Optional[Dict] = None, embs_feature_dict: Optional[Dict] = None, embs_dim_list: Optional[List[int]] = None, context_weight_dict: Optional[Dict] = None, device: Optional[torch.device] = None, ) -> None: r""" Args: decomposition: Keys index context names. Values are the indexes of parameters belong to the context. batch_shape: Batch shape as usual for gpytorch kernels. Model does not support batch training. 
When batch_shape is non-empty, it is used for loading hyper-parameter values generated from MCMC sampling. train_embedding: A boolean indictor of whether to learn context embeddings. cat_feature_dict: Keys are context names and values are list of categorical features i.e. {"context_name" : [cat_0, ..., cat_k]}. k equals the number of categorical variables. If None, uses context names in the decomposition as the only categorical feature, i.e., k = 1. embs_feature_dict: Pre-trained continuous embedding features of each context. embs_dim_list: Embedding dimension for each categorical variable. The length equals to num of categorical features k. If None, the embedding dimension is set to 1 for each categorical variable. context_weight_dict: Known population weights of each context. """ super().__init__(batch_shape=batch_shape) self.batch_shape = batch_shape self.train_embedding = train_embedding self._device = device self.num_param = len(next(iter(decomposition.values()))) self.context_list = list(decomposition.keys()) self.num_contexts = len(self.context_list) # get parameter space decomposition for active_parameters in decomposition.values(): # check number of parameters are same in each decomp if len(active_parameters) != self.num_param: raise ValueError( "The number of parameters needs to be same across all contexts." ) # reorder the parameter list based on decomposition such that # parameters for each context are contiguous and in the same order for each # context self.permutation = get_permutation(decomposition=decomposition) # get context features and set emb dim self.context_cat_feature = None self.context_emb_feature = None self.n_embs = 0 self.emb_weight_matrix_list = None self.emb_dims = None self._set_context_features( cat_feature_dict=cat_feature_dict, embs_feature_dict=embs_feature_dict, embs_dim_list=embs_dim_list, ) # contruct embedding layer if train_embedding: self._set_emb_layers() # task covariance matrix self.task_covar_module = MaternKernel( nu=2.5, ard_num_dims=self.n_embs, batch_shape=batch_shape, lengthscale_prior=GammaPrior(3.0, 6.0), ) # base kernel self.base_kernel = MaternKernel( nu=2.5, ard_num_dims=self.num_param, batch_shape=batch_shape, lengthscale_prior=GammaPrior(3.0, 6.0), ) # outputscales for each context (note this is like sqrt of outputscale) self.context_weight = None if context_weight_dict is None: outputscale_list = torch.zeros( *batch_shape, self.num_contexts, device=self.device ) else: outputscale_list = torch.zeros(*batch_shape, 1, device=self.device) self.context_weight = torch.tensor( [context_weight_dict[c] for c in self.context_list], device=self.device ) self.register_parameter( name="raw_outputscale_list", parameter=torch.nn.Parameter(outputscale_list) ) self.register_prior( "outputscale_list_prior", GammaPrior(2.0, 15.0), lambda m: m.outputscale_list, lambda m, v: m._set_outputscale_list(v), ) self.register_constraint("raw_outputscale_list", Positive()) @property def device(self) -> Optional[torch.device]: return self._device @property def outputscale_list(self) -> Tensor: return self.raw_outputscale_list_constraint.transform(self.raw_outputscale_list) @outputscale_list.setter def outputscale_list(self, value: Tensor) -> None: self._set_outputscale_list(value) def _set_outputscale_list(self, value: Tensor) -> None: if not torch.is_tensor(value): value = torch.as_tensor(value).to(self.raw_outputscale_list) self.initialize( raw_outputscale_list=self.raw_outputscale_list_constraint.inverse_transform( value ) ) def _set_context_features( self, 
cat_feature_dict: Optional[Dict] = None, embs_feature_dict: Optional[Dict] = None, embs_dim_list: Optional[List[int]] = None, ) -> None: """Set context categorical features and continuous embedding features. If cat_feature_dict is None, context indices will be used; If embs_dim_list is None, we use 1-d embedding for each categorical features. """ # get context categorical features if cat_feature_dict is None: self.context_cat_feature = torch.arange( self.num_contexts, device=self.device ).unsqueeze(-1) else: self.context_cat_feature = torch.tensor( [cat_feature_dict[c] for c in self.context_list] ) # construct emb_dims based on categorical features if embs_dim_list is None: # set embedding_dim = 1 for each categorical variable embs_dim_list = [1 for _i in range(self.context_cat_feature.size(1))] self.emb_dims = [ (len(self.context_cat_feature[:, i].unique()), embs_dim_list[i]) for i in range(self.context_cat_feature.size(1)) ] if self.train_embedding: self.n_embs = sum(embs_dim_list) # total num of emb features # get context embedding features if embs_feature_dict is not None: self.context_emb_feature = torch.tensor( [embs_feature_dict[c] for c in self.context_list], device=self.device ) self.n_embs += self.context_emb_feature.size(1) def _set_emb_layers(self) -> None: """Construct embedding layers. If model is non-batch, we use nn.Embedding to learn emb weights. If model is batched (sef.batch_shape is non-empty), we load emb weights posterior samples and construct a parameter list that each parameter is the emb weight of each layer. The shape of weight matrices are ns x num_contexts x emb_dim. """ self.emb_layers = ModuleList( [ torch.nn.Embedding(num_embeddings=x, embedding_dim=y, max_norm=1.0) for x, y in self.emb_dims ] ) # use posterior of emb weights if len(self.batch_shape) > 0: self.emb_weight_matrix_list = torch.nn.ParameterList( [ torch.nn.Parameter( torch.zeros( self.batch_shape + emb_layer.weight.shape, device=self.device, ) ) for emb_layer in self.emb_layers ] ) def _eval_context_covar(self) -> Tensor: """Compute context covariance matrix. Returns: A (ns) x num_contexts x num_contexts tensor. """ if len(self.batch_shape) > 0: # broadcast - (ns x num_contexts x k) all_embs = self._task_embeddings_batch() else: all_embs = self._task_embeddings() # no broadcast - (num_contexts x k) context_covar = self.task_covar_module(all_embs).to_dense() if self.context_weight is None: context_outputscales = self.outputscale_list else: context_outputscales = self.outputscale_list * self.context_weight context_covar = ( (context_outputscales.unsqueeze(-2)) # (ns) x 1 x num_contexts .mul(context_covar) .mul(context_outputscales.unsqueeze(-1)) # (ns) x num_contexts x 1 ) return context_covar def _task_embeddings(self) -> Tensor: """Generate embedding features of contexts when model is non-batch. Returns: a (num_contexts x n_embs) tensor. n_embs is the sum of embedding dimensions i.e. 
sum(embs_dim_list) """ if self.train_embedding is False: return self.context_emb_feature # use pre-trained embedding only context_features = torch.stack( [self.context_cat_feature[i, :] for i in range(self.num_contexts)], dim=0 ) embeddings = [ emb_layer(context_features[:, i].to(device=self.device, dtype=torch.long)) for i, emb_layer in enumerate(self.emb_layers) ] embeddings = torch.cat(embeddings, dim=1) # add given embeddings if any if self.context_emb_feature is not None: embeddings = torch.cat([embeddings, self.context_emb_feature], dim=1) return embeddings def _task_embeddings_batch(self) -> Tensor: """Generate embedding features of contexts when model has multiple batches. Returns: a (ns) x num_contexts x n_embs tensor. ns is the batch size i.e num of posterior samples; n_embs is the sum of embedding dimensions i.e. sum(embs_dim_list). """ context_features = torch.cat( [ self.context_cat_feature[i, :].unsqueeze(0) for i in range(self.num_contexts) ] ) embeddings = [] for b in range(self.batch_shape.numel()): # pyre-ignore for i in range(len(self.emb_weight_matrix_list)): # loop over emb layer and concat embs from each layer embeddings.append( torch.cat( [ torch.nn.functional.embedding( context_features[:, 0].to( dtype=torch.long, device=self.device ), self.emb_weight_matrix_list[i][b, :], ).unsqueeze(0) ], dim=1, ) ) embeddings = torch.cat(embeddings, dim=0) # add given embeddings if any if self.context_emb_feature is not None: embeddings = torch.cat( [ embeddings, self.context_emb_feature.expand( *self.batch_shape + self.context_emb_feature.shape ), ], dim=-1, ) return embeddings def train(self, mode: bool = True) -> None: super().train(mode=mode) if not mode: self.register_buffer("_context_covar", self._eval_context_covar()) def forward( self, x1: Tensor, x2: Tensor, diag: bool = False, last_dim_is_batch: bool = False, **params: Any, ) -> Tensor: """Iterate across each partition of parameter space and sum the covariance matrices together """ # context covar matrix context_covar = ( self._eval_context_covar() if self.training else self._context_covar ) base_covar_perm = self._eval_base_covar_perm(x1, x2) # expand context_covar to match base_covar_perm if base_covar_perm.dim() > context_covar.dim(): context_covar = context_covar.expand(base_covar_perm.shape) # then weight by the context kernel # compute the base kernel on the d parameters einsum_str = "...nnki, ...nnki -> ...n" if diag else "...ki, ...ki -> ..." covar_dense = torch.einsum(einsum_str, context_covar, base_covar_perm) if diag: return DiagLinearOperator(covar_dense) return DenseLinearOperator(covar_dense) def _eval_base_covar_perm(self, x1: Tensor, x2: Tensor) -> Tensor: """Computes the base covariance matrix on x1, x2, applying permutations and reshaping the kernel matrix as required by `forward`. NOTE: Using the notation n = num_observations, k = num_contexts, d = input_dim, the input tensors have to have the following shapes. Args: x1: `batch_shape x n x (k*d)`-dim Tensor of kernel inputs. x2: `batch_shape x n x (k*d)`-dim Tensor of kernel inputs. Returns: `batch_shape x n x n x k x k`-dim Tensor of base covariance values. """ if self.permutation is not None: x1 = x1[..., self.permutation] x2 = x2[..., self.permutation] # turn last two dimensions of n x (k*d) into (n*k) x d. 
x1_exp = x1.reshape(*x1.shape[:-2], -1, self.num_param) x2_exp = x2.reshape(*x2.shape[:-2], -1, self.num_param) # batch shape x n*k x n*k base_covar = self.base_kernel(x1_exp, x2_exp) # batch shape x n x n x k x k view_shape = x1.shape[:-2] + torch.Size( [ x1.shape[-2], self.num_contexts, x2.shape[-2], self.num_contexts, ] ) base_covar_perm = ( base_covar.to_dense() .view(view_shape) .permute(*list(range(x1.ndim - 2)), -4, -2, -3, -1) ) return base_covar_perm
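# --- Illustrative usage sketch (not part of the library source) ---
# LCEAKernel with two contexts and a learned 1-d context embedding (the default
# when no categorical features or embedding dimensions are supplied). The
# decomposition and data are arbitrary.
import torch
from botorch.models.kernels.contextual_lcea import LCEAKernel

decomposition = {"context_a": [0, 1], "context_b": [2, 3]}
kernel = LCEAKernel(decomposition=decomposition, batch_shape=torch.Size([]))
X = torch.rand(6, 4)  # columns ordered as [x_a1, x_a2, x_b1, x_b2]
K = kernel(X).to_dense()  # 6 x 6 covariance weighted by the context covariance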
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from typing import Optional import torch from gpytorch.constraints import Interval, Positive from gpytorch.kernels import Kernel from gpytorch.priors import Prior from torch import Tensor class ExponentialDecayKernel(Kernel): r"""GPyTorch Exponential Decay Kernel. Computes a covariance matrix based on the exponential decay kernel between inputs `x_1` and `x_2` (we expect `d = 1`): K(x_1, x_2) = w + beta^alpha / (x_1 + x_2 + beta)^alpha. where `w` is an offset parameter, `beta` is a lenthscale parameter, and `alpha` is a power parameter. """ has_lengthscale = True def __init__( self, power_prior: Optional[Prior] = None, offset_prior: Optional[Prior] = None, power_constraint: Optional[Interval] = None, offset_constraint: Optional[Interval] = None, **kwargs, ): r""" Args: lengthscale_constraint: Constraint to place on lengthscale parameter. Default is `Positive`. lengthscale_prior: Prior over the lengthscale parameter. power_constraint: Constraint to place on power parameter. Default is `Positive`. power_prior: Prior over the power parameter. offset_constraint: Constraint to place on offset parameter. Default is `Positive`. active_dims: List of data dimensions to operate on. `len(active_dims)` should equal `num_dimensions`. """ super().__init__(**kwargs) if power_constraint is None: power_constraint = Positive() if offset_constraint is None: offset_constraint = Positive() self.register_parameter( name="raw_power", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)), ) self.register_parameter( name="raw_offset", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)), ) if power_prior is not None: self.register_prior( "power_prior", power_prior, lambda m: m.power, lambda m, v: m._set_power(v), ) self.register_constraint("raw_power", offset_constraint) if offset_prior is not None: self.register_prior( "offset_prior", offset_prior, lambda m: m.offset, lambda m, v: m._set_offset(v), ) self.register_constraint("raw_offset", offset_constraint) @property def power(self) -> Tensor: return self.raw_power_constraint.transform(self.raw_power) @power.setter def power(self, value: Tensor) -> None: self._set_power(value) def _set_power(self, value: Tensor) -> None: if not torch.is_tensor(value): value = torch.as_tensor(value).to(self.raw_power) self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value)) @property def offset(self) -> Tensor: return self.raw_offset_constraint.transform(self.raw_offset) @offset.setter def offset(self, value: Tensor) -> None: self._set_offset(value) def _set_offset(self, value: Tensor) -> None: if not torch.is_tensor(value): value = torch.as_tensor(value).to(self.raw_offset) self.initialize(raw_offset=self.raw_offset_constraint.inverse_transform(value)) def forward(self, x1: Tensor, x2: Tensor, **params) -> Tensor: offset = self.offset power = self.power if not params.get("diag", False): offset = offset.unsqueeze(-1) # unsqueeze enables batch evaluation power = power.unsqueeze(-1) # unsqueeze enables batch evaluation x1_ = x1.div(self.lengthscale) x2_ = x2.div(self.lengthscale) diff = self.covar_dist(x1_, -x2_, **params) res = offset + (diff + 1).pow(-power) return res
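# --- Illustrative usage sketch (not part of the library source) ---
# ExponentialDecayKernel on one-dimensional inputs (e.g. training-iteration
# counts), as expected by the kernel (d = 1). Values are arbitrary.
import torch
from botorch.models.kernels.exponential_decay import ExponentialDecayKernel

kernel = ExponentialDecayKernel()
x1 = torch.rand(4, 1)
x2 = torch.rand(3, 1)
covar = kernel(x1, x2).to_dense()  # 4 x 3 covariance matrix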
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional import torch from gpytorch.constraints.constraints import GreaterThan from gpytorch.kernels import MaternKernel, ScaleKernel from gpytorch.likelihoods.gaussian_likelihood import GaussianLikelihood from gpytorch.priors.torch_priors import GammaPrior MIN_INFERRED_NOISE_LEVEL = 1e-4 def get_matern_kernel_with_gamma_prior( ard_num_dims: int, batch_shape: Optional[torch.Size] = None ) -> ScaleKernel: r"""Constructs the Scale-Matern kernel that is used by default by several models. This uses a Gamma(3.0, 6.0) prior for the lengthscale and a Gamma(2.0, 0.15) prior for the output scale. """ return ScaleKernel( base_kernel=MaternKernel( nu=2.5, ard_num_dims=ard_num_dims, batch_shape=batch_shape, lengthscale_prior=GammaPrior(3.0, 6.0), ), batch_shape=batch_shape, outputscale_prior=GammaPrior(2.0, 0.15), ) def get_gaussian_likelihood_with_gamma_prior( batch_shape: Optional[torch.Size] = None, ) -> GaussianLikelihood: r"""Constructs the GaussianLikelihood that is used by default by several models. This uses a Gamma(1.1, 0.05) prior and constrains the noise level to be greater than MIN_INFERRED_NOISE_LEVEL (=1e-4). """ batch_shape = torch.Size() if batch_shape is None else batch_shape noise_prior = GammaPrior(1.1, 0.05) noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate return GaussianLikelihood( noise_prior=noise_prior, batch_shape=batch_shape, noise_constraint=GreaterThan( MIN_INFERRED_NOISE_LEVEL, transform=None, initial_value=noise_prior_mode, ), )
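# --- Illustrative usage sketch (not part of the library source) ---
# Building the default Scale-Matern kernel and Gaussian likelihood for a
# non-batched model on 3-d inputs; these are the same helpers several BoTorch
# models call internally.
import torch
from botorch.models.utils.gpytorch_modules import (
    get_gaussian_likelihood_with_gamma_prior,
    get_matern_kernel_with_gamma_prior,
)

covar_module = get_matern_kernel_with_gamma_prior(
    ard_num_dims=3, batch_shape=torch.Size([])
)
likelihood = get_gaussian_likelihood_with_gamma_prior()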
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Functionality for allocating the inducing points of sparse Gaussian process models. References .. [chen2018dpp] Laming Chen and Guoxin Zhang and Hanning Zhou, Fast greedy MAP inference for determinantal point process to improve recommendation diversity, Proceedings of the 32nd International Conference on Neural Information Processing Systems, 2018, https://arxiv.org/abs/1709.05135. """ from __future__ import annotations from abc import ABC, abstractmethod from typing import Union import torch from botorch.exceptions.errors import UnsupportedError from botorch.models.model import Model from botorch.utils.probability.utils import ndtr as Phi, phi from gpytorch.module import Module from linear_operator.operators import LinearOperator from torch import Tensor NEG_INF = torch.tensor(float("-inf")) class InducingPointAllocator(ABC): r""" This class provides functionality to initialize the inducing point locations of an inducing point-based model, e.g. a `SingleTaskVariationalGP`. """ @abstractmethod def _get_quality_function( self, ) -> QualityFunction: """ Build the quality function required for this inducing point allocation strategy. Returns: A quality function. """ def allocate_inducing_points( self, inputs: Tensor, covar_module: Module, num_inducing: int, input_batch_shape: torch.Size, ) -> Tensor: r""" Initialize the `num_inducing` inducing point locations according to a specific initialization strategy. todo say something about quality Args: inputs: A (\*batch_shape, n, d)-dim input data tensor. covar_module: GPyTorch Module returning a LinearOperator kernel matrix. num_inducing: The maximun number (m) of inducing points (m <= n). input_batch_shape: The non-task-related batch shape. Returns: A (\*batch_shape, m, d)-dim tensor of inducing point locations. """ quality_function = self._get_quality_function() covar_module = covar_module.to(inputs.device) # We use 'no_grad' here because `inducing_points` are not # auto-differentiable with respect to the kernel hyper-parameters, # because `_pivoted_cholesky_init` does in-place operations. 
with torch.no_grad(): # Evaluate lazily because this may only be needed to figure out what # case we are in possibly_lazy_kernel = covar_module(inputs) base_case = possibly_lazy_kernel.ndimension() == 2 multi_task_case = ( possibly_lazy_kernel.ndimension() == 3 and len(input_batch_shape) == 0 ) if base_case or multi_task_case: train_train_kernel = possibly_lazy_kernel.evaluate_kernel() if base_case: quality_scores = quality_function(inputs) inducing_points = _pivoted_cholesky_init( train_inputs=inputs, kernel_matrix=train_train_kernel, max_length=num_inducing, quality_scores=quality_scores, ) return inducing_points if multi_task_case: input_element = inputs[0] if inputs.ndimension() == 3 else inputs kernel_element = train_train_kernel[0] quality_scores = quality_function(input_element) inducing_points = _pivoted_cholesky_init( train_inputs=input_element, kernel_matrix=kernel_element, max_length=num_inducing, quality_scores=quality_scores, ) return inducing_points # batched input cases batched_inputs = ( inputs.expand(*input_batch_shape, -1, -1) if inputs.ndimension() == 2 else inputs ) reshaped_inputs = batched_inputs.flatten(end_dim=-3) inducing_points = [] for input_element in reshaped_inputs: # the extra kernel evals are a little wasteful but make it # easier to infer the task batch size # We use 'no_grad' here because `inducing_points` are not # auto-differentiable with respect to the kernel hyper-parameters, # because `_pivoted_cholesky_init` does in-place operations. with torch.no_grad(): kernel_element = covar_module(input_element).evaluate_kernel() # handle extra task batch dimension kernel_element = ( kernel_element[0] if kernel_element.ndimension() == 3 else kernel_element ) quality_scores = quality_function(input_element) inducing_points.append( _pivoted_cholesky_init( train_inputs=input_element, kernel_matrix=kernel_element, max_length=num_inducing, quality_scores=quality_scores, ) ) inducing_points = torch.stack(inducing_points).view( *input_batch_shape, num_inducing, -1 ) return inducing_points class QualityFunction(ABC): """A function that scores inputs with respect to a specific criterion.""" @abstractmethod def __call__(self, inputs: Tensor) -> Tensor: # [n, d] -> [n] """ Args: inputs: inputs (of shape n x d) Returns: A tensor of quality scores for each input, of shape [n] """ class UnitQualityFunction(QualityFunction): """ A function returning ones for each element. Using this quality function for inducing point allocation corresponds to allocating inducing points with the sole aim of minimizing predictive variance, i.e. the approach of [burt2020svgp]_. """ @torch.no_grad() def __call__(self, inputs: Tensor) -> Tensor: # [n, d]-> [n] """ Args: inputs: inputs (of shape n x d) Returns: A tensor of ones for each input, of shape [n] """ return torch.ones([inputs.shape[0]], device=inputs.device, dtype=inputs.dtype) class ExpectedImprovementQualityFunction(QualityFunction): """ A function measuring the quality of input points as their expected improvement with respect to a conservative baseline. Expectations are according to the model from the previous BO step. See [moss2023ipa]_ for details and justification. """ def __init__(self, model: Model, maximize: bool): r""" Args: model: The model fitted during the previous BO step. For now, this must be a single task model (i.e. num_outputs=1). maximize: Set True if we are performing function maximization, else set False. """ if model.num_outputs != 1: raise NotImplementedError( "Multi-output models are currently not supported. 
" ) self._model = model self._maximize = maximize @torch.no_grad() def __call__(self, inputs: Tensor) -> Tensor: # [n, d] -> [n] """ Args: inputs: inputs (of shape n x d) Returns: A tensor of quality scores for each input, of shape [n] """ posterior = self._model.posterior(inputs) mean = posterior.mean.squeeze(-2).squeeze(-1) # removing redundant dimensions sigma = posterior.variance.clamp_min(1e-12).sqrt().view(mean.shape) best_f = torch.max(mean) if self._maximize else torch.min(mean) u = (mean - best_f) / sigma if self._maximize else -(mean - best_f) / sigma return sigma * (phi(u) + u * Phi(u)) class GreedyVarianceReduction(InducingPointAllocator): r""" The inducing point allocator proposed by [burt2020svgp]_, that greedily chooses inducing point locations with maximal (conditional) predictive variance. """ def _get_quality_function( self, ) -> QualityFunction: """ Build the unit quality function required for the greedy variance reduction inducing point allocation strategy. Returns: A quality function. """ return UnitQualityFunction() class GreedyImprovementReduction(InducingPointAllocator): r""" An inducing point allocator that greedily chooses inducing points with large predictive variance and that are in promising regions of the search space (according to the model form the previous BO step), see [moss2023ipa]_. """ def __init__(self, model: Model, maximize: bool): r""" Args: model: The model fitted during the previous BO step. maximize: Set True if we are performing function maximization, else set False. """ self._model = model self._maximize = maximize def _get_quality_function( self, ) -> QualityFunction: """ Build the improvement-based quality function required for the greedy improvement reduction inducing point allocation strategy. Returns: A quality function. """ return ExpectedImprovementQualityFunction(self._model, self._maximize) def _pivoted_cholesky_init( train_inputs: Tensor, kernel_matrix: Union[Tensor, LinearOperator], max_length: int, quality_scores: Tensor, epsilon: float = 1e-6, ) -> Tensor: r""" A pivoted Cholesky initialization method for the inducing points, originally proposed in [burt2020svgp]_ with the algorithm itself coming from [chen2018dpp]_. Code is a PyTorch version from [chen2018dpp]_, based on https://github.com/laming-chen/fast-map-dpp/blob/master/dpp.py but with a small modification to allow the underlying DPP to be defined through its diversity-quality decomposition,as discussed by [moss2023ipa]_. This method returns a greedy approximation of the MAP estimate of the specified DPP, i.e. its returns a set of points that are highly diverse (according to the provided kernel_matrix) and have high quality (according to the provided quality_scores). Args: train_inputs: training inputs (of shape n x d) kernel_matrix: kernel matrix on the training inputs max_length: number of inducing points to initialize quality_scores: scores representing the quality of each candidate input (of shape [n]) epsilon: numerical jitter for stability. Returns: max_length x d tensor of the training inputs corresponding to the top max_length pivots of the training kernel matrix """ # this is numerically equivalent to iteratively performing a pivoted cholesky # while storing the diagonal pivots at each iteration # TODO: use gpytorch's pivoted cholesky instead once that gets an exposed list # TODO: ensure this works in batch mode, which it does not currently. 
# todo test for shape of quality function if quality_scores.shape[0] != train_inputs.shape[0]: raise ValueError( "_pivoted_cholesky_init requires a quality score for each of train_inputs" ) if kernel_matrix.requires_grad: raise UnsupportedError( "`_pivoted_cholesky_init` does not support using a `kernel_matrix` " "with `requires_grad=True`." ) item_size = kernel_matrix.shape[-2] cis = torch.zeros( (max_length, item_size), device=kernel_matrix.device, dtype=kernel_matrix.dtype ) di2s = kernel_matrix.diagonal() scores = di2s * torch.square(quality_scores) selected_item = torch.argmax(scores) selected_items = [selected_item] while len(selected_items) < max_length: k = len(selected_items) - 1 ci_optimal = cis[:k, selected_item] di_optimal = torch.sqrt(di2s[selected_item]) elements = kernel_matrix[..., selected_item, :] eis = (elements - torch.matmul(ci_optimal, cis[:k, :])) / di_optimal cis[k, :] = eis di2s = di2s - eis.pow(2.0) di2s[selected_item] = NEG_INF scores = di2s * torch.square(quality_scores) selected_item = torch.argmax(scores) if di2s[selected_item] < epsilon: break selected_items.append(selected_item) ind_points = train_inputs[torch.stack(selected_items)] return ind_points[:max_length, :]
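# --- Illustrative usage sketch (not part of the library source) ---
# Greedily selecting 5 inducing points from 50 candidate inputs with a
# Scale-Matern kernel, using the variance-reduction allocator defined above.
# The kernel choice and data are arbitrary.
import torch
from botorch.models.utils.inducing_point_allocators import GreedyVarianceReduction
from gpytorch.kernels import MaternKernel, ScaleKernel

inputs = torch.rand(50, 2)
covar_module = ScaleKernel(MaternKernel(nu=2.5, ard_num_dims=2))
allocator = GreedyVarianceReduction()
inducing_points = allocator.allocate_inducing_points(
    inputs=inputs,
    covar_module=covar_module,
    num_inducing=5,
    input_batch_shape=torch.Size([]),
)  # 5 x 2 tensor of selected input locations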
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Parsing rules for BoTorch datasets.""" from __future__ import annotations from typing import Any, Dict, Hashable, Type, Union import torch from botorch.exceptions import UnsupportedError from botorch.models.model import Model from botorch.models.multitask import FixedNoiseMultiTaskGP, MultiTaskGP from botorch.models.pairwise_gp import PairwiseGP from botorch.utils.datasets import RankingDataset, SupervisedDataset from botorch.utils.dispatcher import Dispatcher from torch import cat, Tensor from torch.nn.functional import pad def _encoder(arg: Any) -> Type: # Allow type variables to be passed as arguments at runtime return arg if isinstance(arg, type) else type(arg) dispatcher = Dispatcher("parse_training_data", encoder=_encoder) def parse_training_data( consumer: Any, training_data: Union[SupervisedDataset, Dict[Hashable, SupervisedDataset]], **kwargs: Any, ) -> Dict[str, Tensor]: r"""Prepares a (collection of) datasets for consumption by a given object. Args: training_datas: A SupervisedDataset or dictionary thereof. consumer: The object that will consume the parsed data, or type thereof. Returns: A dictionary containing the extracted information. """ return dispatcher(consumer, training_data, **kwargs) @dispatcher.register(Model, SupervisedDataset) def _parse_model_supervised( consumer: Model, dataset: SupervisedDataset, **ignore: Any ) -> Dict[str, Tensor]: parsed_data = {"train_X": dataset.X, "train_Y": dataset.Y} if dataset.Yvar is not None: parsed_data["train_Yvar"] = dataset.Yvar return parsed_data @dispatcher.register(PairwiseGP, RankingDataset) def _parse_pairwiseGP_ranking( consumer: PairwiseGP, dataset: RankingDataset, **ignore: Any ) -> Dict[str, Tensor]: # TODO: [T163045056] Not sure what the point of the special container is if we have # to further process it here. We should move this logic into RankingDataset. datapoints = dataset._X.values comparisons = dataset._X.indices comp_order = dataset.Y comparisons = torch.gather(input=comparisons, dim=-1, index=comp_order) return { "datapoints": datapoints, "comparisons": comparisons, } @dispatcher.register(Model, dict) def _parse_model_dict( consumer: Model, training_data: Dict[Hashable, SupervisedDataset], **kwargs: Any, ) -> Dict[str, Tensor]: if len(training_data) != 1: raise UnsupportedError( "Default training data parsing logic does not support " "passing multiple datasets to single task models." ) return dispatcher(consumer, next(iter(training_data.values()))) @dispatcher.register((MultiTaskGP, FixedNoiseMultiTaskGP), dict) def _parse_multitask_dict( consumer: Model, training_data: Dict[Hashable, SupervisedDataset], *, task_feature: int = 0, task_feature_container: Hashable = "train_X", **kwargs: Any, ) -> Dict[str, Tensor]: cache = {} for task_id, dataset in enumerate(training_data.values()): parse = parse_training_data(consumer, dataset, **kwargs) if task_feature_container not in parse: raise ValueError(f"Missing required term `{task_feature_container}`.") if cache and cache.keys() != parse.keys(): raise UnsupportedError( "Cannot combine datasets with heterogeneous parsed formats." 
) # Add task indicator features to specified container X = parse[task_feature_container] d = X.shape[-1] i = d + task_feature + 1 if task_feature < 0 else task_feature if i < 0 or d < i: raise ValueError("Invalid `task_feature`: out-of-bounds.") if i == 0: X = pad(X, (1, 0), value=task_id) elif i == d: X = pad(X, (0, 1), value=task_id) else: A, B = X.split([i, d - i], dim=-1) X = cat([pad(A, (0, 1), value=task_id), B], dim=-1) parse[task_feature_container] = X if cache: for key, val in parse.items(): cache[key].append(val) else: cache = {key: [val] for key, val in parse.items()} return {key: cat(tensors, dim=0) for key, tensors in cache.items()}
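# --- Illustrative usage sketch (not part of the library source) ---
# Parsing a SupervisedDataset into the keyword arguments expected by a
# single-task model. Note the SupervisedDataset constructor differs across
# BoTorch versions; `feature_names`/`outcome_names` are required by recent
# releases and may be omitted in older ones.
import torch
from botorch.models import SingleTaskGP
from botorch.models.utils.parse_training_data import parse_training_data
from botorch.utils.datasets import SupervisedDataset

X = torch.rand(10, 2, dtype=torch.double)
Y = torch.rand(10, 1, dtype=torch.double)
dataset = SupervisedDataset(
    X=X, Y=Y, feature_names=["x1", "x2"], outcome_names=["y"]
)
model_kwargs = parse_training_data(SingleTaskGP, dataset)
# model_kwargs == {"train_X": X, "train_Y": Y}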
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.models.utils.assorted import ( _make_X_full, add_output_dim, check_min_max_scaling, check_no_nans, check_standardization, consolidate_duplicates, detect_duplicates, fantasize, gpt_posterior_settings, mod_batch_shape, multioutput_to_batch_mode_transform, validate_input_scaling, ) # # TODO: Omitted to avoid circular dependency created by `Model.construct_inputs` # from botorch.models.utils.parse_training_data import parse_training_data __all__ = [ "_make_X_full", "add_output_dim", "check_no_nans", "check_min_max_scaling", "check_standardization", "fantasize", "gpt_posterior_settings", "multioutput_to_batch_mode_transform", "mod_batch_shape", "validate_input_scaling", "detect_duplicates", "consolidate_duplicates", ]
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Assorted helper methods and objects for working with BoTorch models.""" from __future__ import annotations import warnings from contextlib import contextmanager, ExitStack from typing import Iterator, List, Optional, Tuple import torch from botorch import settings from botorch.exceptions import InputDataError, InputDataWarning from botorch.settings import _Flag from gpytorch import settings as gpt_settings from gpytorch.module import Module from torch import Tensor def _make_X_full(X: Tensor, output_indices: List[int], tf: int) -> Tensor: r"""Helper to construct input tensor with task indices. Args: X: The raw input tensor (without task information). output_indices: The output indices to generate (passed in via `posterior`). tf: The task feature index. Returns: Tensor: The full input tensor for the multi-task model, including task indices. """ index_shape = X.shape[:-1] + torch.Size([1]) indexers = ( torch.full(index_shape, fill_value=i, device=X.device, dtype=X.dtype) for i in output_indices ) X_l, X_r = X[..., :tf], X[..., tf:] return torch.cat( [torch.cat([X_l, indexer, X_r], dim=-1) for indexer in indexers], dim=-2 ) def multioutput_to_batch_mode_transform( train_X: Tensor, train_Y: Tensor, num_outputs: int, train_Yvar: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor, Optional[Tensor]]: r"""Transforms training inputs for a multi-output model. Used for multi-output models that internally are represented by a batched single output model, where each output is modeled as an independent batch. Args: train_X: A `n x d` or `input_batch_shape x n x d` (batch mode) tensor of training features. train_Y: A `n x m` or `target_batch_shape x n x m` (batch mode) tensor of training observations. num_outputs: number of outputs train_Yvar: A `n x m` or `target_batch_shape x n x m` tensor of observed measurement noise. Returns: 3-element tuple containing - A `input_batch_shape x m x n x d` tensor of training features. - A `target_batch_shape x m x n` tensor of training observations. - A `target_batch_shape x m x n` tensor observed measurement noise. """ # make train_Y `batch_shape x m x n` train_Y = train_Y.transpose(-1, -2) # expand train_X to `batch_shape x m x n x d` train_X = train_X.unsqueeze(-3).expand( train_X.shape[:-2] + torch.Size([num_outputs]) + train_X.shape[-2:] ) if train_Yvar is not None: # make train_Yvar `batch_shape x m x n` train_Yvar = train_Yvar.transpose(-1, -2) return train_X, train_Y, train_Yvar def add_output_dim(X: Tensor, original_batch_shape: torch.Size) -> Tuple[Tensor, int]: r"""Insert the output dimension at the correct location. The trailing batch dimensions of X must match the original batch dimensions of the training inputs, but can also include extra batch dimensions. Args: X: A `(new_batch_shape) x (original_batch_shape) x n x d` tensor of features. original_batch_shape: the batch shape of the model's training inputs. Returns: 2-element tuple containing - A `(new_batch_shape) x (original_batch_shape) x m x n x d` tensor of features. - The index corresponding to the output dimension. 
""" X_batch_shape = X.shape[:-2] if len(X_batch_shape) > 0 and len(original_batch_shape) > 0: # check that X_batch_shape supports broadcasting or augments # original_batch_shape with extra batch dims try: torch.broadcast_shapes(X_batch_shape, original_batch_shape) except RuntimeError: raise RuntimeError( "The trailing batch dimensions of X must match the trailing " "batch dimensions of the training inputs." ) # insert `m` dimension X = X.unsqueeze(-3) output_dim_idx = max(len(original_batch_shape), len(X_batch_shape)) return X, output_dim_idx def check_no_nans(Z: Tensor) -> None: r"""Check that tensor does not contain NaN values. Raises an InputDataError if `Z` contains NaN values. Args: Z: The input tensor. """ if torch.any(torch.isnan(Z)).item(): raise InputDataError("Input data contains NaN values.") def check_min_max_scaling( X: Tensor, strict: bool = False, atol: float = 1e-2, raise_on_fail: bool = False, ignore_dims: Optional[List[int]] = None, ) -> None: r"""Check that tensor is normalized to the unit cube. Args: X: A `batch_shape x n x d` input tensor. Typically the training inputs of a model. strict: If True, require `X` to be scaled to the unit cube (rather than just to be contained within the unit cube). atol: The tolerance for the boundary check. Only used if `strict=True`. raise_on_fail: If True, raise an exception instead of a warning. ignore_dims: Subset of dimensions where the min-max scaling check is omitted. """ ignore_dims = ignore_dims or [] check_dims = list(set(range(X.shape[-1])) - set(ignore_dims)) if len(check_dims) == 0: return None with torch.no_grad(): X_check = X[..., check_dims] Xmin = torch.min(X_check, dim=-1).values Xmax = torch.max(X_check, dim=-1).values msg = None if strict and max(torch.abs(Xmin).max(), torch.abs(Xmax - 1).max()) > atol: msg = "scaled" if torch.any(Xmin < -atol) or torch.any(Xmax > 1 + atol): msg = "contained" if msg is not None: msg = ( f"Input data is not {msg} to the unit cube. " "Please consider min-max scaling the input data." ) if raise_on_fail: raise InputDataError(msg) warnings.warn(msg, InputDataWarning) def check_standardization( Y: Tensor, atol_mean: float = 1e-2, atol_std: float = 1e-2, raise_on_fail: bool = False, ) -> None: r"""Check that tensor is standardized (zero mean, unit variance). Args: Y: The input tensor of shape `batch_shape x n x m`. Typically the train targets of a model. Standardization is checked across the `n`-dimension. atol_mean: The tolerance for the mean check. atol_std: The tolerance for the std check. raise_on_fail: If True, raise an exception instead of a warning. """ with torch.no_grad(): Ymean, Ystd = torch.mean(Y, dim=-2), torch.std(Y, dim=-2) if torch.abs(Ymean).max() > atol_mean or torch.abs(Ystd - 1).max() > atol_std: msg = ( f"Input data is not standardized (mean = {Ymean}, std = {Ystd}). " "Please consider scaling the input to zero mean and unit variance." ) if raise_on_fail: raise InputDataError(msg) warnings.warn(msg, InputDataWarning) def validate_input_scaling( train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor] = None, raise_on_fail: bool = False, ignore_X_dims: Optional[List[int]] = None, ) -> None: r"""Helper function to validate input data to models. Args: train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of training features. train_Y: A `n x m` or `batch_shape x n x m` (batch mode) tensor of training observations. train_Yvar: A `batch_shape x n x m` or `batch_shape x n x m` (batch mode) tensor of observed measurement noise. 
raise_on_fail: If True, raise an error instead of emitting a warning (only for normalization/standardization checks, an error is always raised if NaN values are present). ignore_X_dims: For this subset of dimensions from `{1, ..., d}`, ignore the min-max scaling check. This function is typically called inside the constructor of standard BoTorch models. It validates the following: (i) none of the inputs contain NaN values (ii) the training data (`train_X`) is normalized to the unit cube for all dimensions except those in `ignore_X_dims`. (iii) the training targets (`train_Y`) are standardized (zero mean, unit var) No checks (other than the NaN check) are performed for observed variances (`train_Yvar`) at this point. """ if settings.validate_input_scaling.off(): return check_no_nans(train_X) check_no_nans(train_Y) if train_Yvar is not None: check_no_nans(train_Yvar) if torch.any(train_Yvar < 0): raise InputDataError("Input data contains negative variances.") check_min_max_scaling( X=train_X, raise_on_fail=raise_on_fail, ignore_dims=ignore_X_dims ) check_standardization(Y=train_Y, raise_on_fail=raise_on_fail) def mod_batch_shape(module: Module, names: List[str], b: int) -> None: r"""Recursive helper to modify gpytorch modules' batch shape attribute. Modifies the module in-place. Args: module: The module to be modified. names: The list of names to access the attribute. If the full name of the module is `"module.sub_module.leaf_module"`, this will be `["sub_module", "leaf_module"]`. b: The new size of the last element of the module's `batch_shape` attribute. """ if len(names) == 0: return m = getattr(module, names[0]) if len(names) == 1 and hasattr(m, "batch_shape") and len(m.batch_shape) > 0: m.batch_shape = m.batch_shape[:-1] + torch.Size([b] if b > 0 else []) else: mod_batch_shape(module=m, names=names[1:], b=b) @contextmanager def gpt_posterior_settings(): r"""Context manager for settings used for computing model posteriors.""" with ExitStack() as es: if gpt_settings.debug.is_default(): es.enter_context(gpt_settings.debug(False)) if gpt_settings.fast_pred_var.is_default(): es.enter_context(gpt_settings.fast_pred_var()) es.enter_context( gpt_settings.detach_test_caches(settings.propagate_grads.off()) ) yield def detect_duplicates( X: Tensor, rtol: float = 0, atol: float = 1e-8, ) -> Iterator[Tuple[int, int]]: """Returns an iterator over index pairs `(duplicate index, original index)` for all duplicate entries of `X`. Supporting 2-d Tensor only. Args: X: the datapoints tensor with potential duplicated entries rtol: relative tolerance atol: absolute tolerance """ if len(X.shape) != 2: raise ValueError("X must have 2 dimensions.") tols = atol if rtol: rval = X.abs().max(dim=-1, keepdim=True).values tols = tols + rtol * rval.max(rval.transpose(-1, -2)) n = X.shape[-2] dist = torch.full((n, n), float("inf"), device=X.device, dtype=X.dtype) dist[torch.triu_indices(n, n, offset=1).unbind()] = torch.nn.functional.pdist( X, p=float("inf") ) return ( (i, int(j)) # pyre-fixme[19]: Expected 1 positional argument. for diff, j, i in zip(*(dist - tols).min(dim=-2), range(n)) if diff < 0 ) def consolidate_duplicates( X: Tensor, Y: Tensor, rtol: float = 0.0, atol: float = 1e-8 ) -> Tuple[Tensor, Tensor, Tensor]: """Drop duplicated Xs and update the indices tensor Y accordingly. Supporting 2d Tensor only as in batch mode block design is not guaranteed. 
Args: X: the datapoints tensor Y: the index tensor to be updated (e.g., pairwise comparisons) rtol: relative tolerance atol: absolute tolerance Returns: consolidated_X: the consolidated X consolidated_Y: the consolidated Y (e.g., pairwise comparisons indices) new_indices: new index of each original item in X, a tensor of size X.shape[-2] """ if len(X.shape) != 2: raise ValueError("X must have 2 dimensions.") n = X.shape[-2] dup_map = dict(detect_duplicates(X=X, rtol=rtol, atol=atol)) # Handle edge cases conservatively # If a item is in both dup set and kept set, do not remove it common_set = set(dup_map.keys()).intersection(dup_map.values()) for k in list(dup_map.keys()): if k in common_set or dup_map[k] in common_set: del dup_map[k] if dup_map: dup_indices, kept_indices = zip(*dup_map.items()) unique_indices = sorted(set(range(n)) - set(dup_indices)) # After dropping the duplicates, # the kept ones' indices may also change by being shifted up new_idx_map = dict(zip(unique_indices, range(len(unique_indices)))) new_indices_for_dup = (new_idx_map[idx] for idx in kept_indices) new_idx_map.update(dict(zip(dup_indices, new_indices_for_dup))) consolidated_X = X[list(unique_indices), :] consolidated_Y = torch.tensor( [[new_idx_map[item.item()] for item in row] for row in Y.unbind()], dtype=torch.long, device=Y.device, ) new_indices = ( torch.arange(n, dtype=torch.long) .apply_(lambda x: new_idx_map[x]) .to(Y.device) ) return consolidated_X, consolidated_Y, new_indices else: return X, Y, torch.arange(n, device=Y.device, dtype=Y.dtype) class fantasize(_Flag): r"""A flag denoting whether we are currently in a `fantasize` context.""" _state: bool = False
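# Quick illustrative sketch (made-up values) of `consolidate_duplicates` above:
# rows 0 and 2 of X coincide, so the comparison indices in Y are remapped onto the
# de-duplicated X.
import torch

X_dup = torch.tensor([[0.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
Y_comp = torch.tensor([[2, 1]])  # "item 2 is preferred over item 1"
new_X, new_Y, new_indices = consolidate_duplicates(X=X_dup, Y=Y_comp)
# new_X == X_dup[:2], new_Y == [[0, 1]] (row 2 collapsed onto row 0), and
# new_indices == [0, 1, 0] maps each original row to its consolidated position.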
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.models.likelihoods.pairwise import ( PairwiseLogitLikelihood, PairwiseProbitLikelihood, ) __all__ = [ "PairwiseProbitLikelihood", "PairwiseLogitLikelihood", ]
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Pairwise likelihood for pairwise preference model (e.g., PairwiseGP). """ from __future__ import annotations import math from abc import ABC, abstractmethod from typing import Any, Tuple import torch from botorch.utils.probability.utils import ( log_ndtr, log_phi, standard_normal_log_hazard, ) from gpytorch.likelihoods import Likelihood from torch import Tensor from torch.distributions import Bernoulli class PairwiseLikelihood(Likelihood, ABC): """ Pairwise likelihood base class for pairwise preference GP (e.g., PairwiseGP). :meta private: """ def __init__(self, max_plate_nesting: int = 1): """ Initialized like a `gpytorch.likelihoods.Likelihood`. Args: max_plate_nesting: Defaults to 1. """ super().__init__(max_plate_nesting) def forward(self, utility: Tensor, D: Tensor, **kwargs: Any) -> Bernoulli: """Given the difference in (estimated) utility util_diff = f(v) - f(u), return a Bernoulli distribution object representing the likelihood of the user prefer v over u. Note that this is not used by the `PairwiseGP` model, """ return Bernoulli(probs=self.p(utility=utility, D=D)) @abstractmethod def p(self, utility: Tensor, D: Tensor) -> Tensor: """Given the difference in (estimated) utility util_diff = f(v) - f(u), return the probability of the user prefer v over u. Args: utility: A Tensor of shape `(batch_size) x n`, the utility at MAP point D: D is `(batch_size x) m x n` matrix with all elements being zero in last dimension except at two positions D[..., i] = 1 and D[..., j] = -1 respectively, representing item i is preferred over item j. log: if true, return log probability """ def log_p(self, utility: Tensor, D: Tensor) -> Tensor: """return the log of p""" return torch.log(self.p(utility=utility, D=D)) def negative_log_gradient_sum(self, utility: Tensor, D: Tensor) -> Tensor: """Calculate the sum of negative log gradient with respect to each item's latent utility values. Useful for models using laplace approximation. Args: utility: A Tensor of shape `(batch_size x) n`, the utility at MAP point D: D is `(batch_size x) m x n` matrix with all elements being zero in last dimension except at two positions D[..., i] = 1 and D[..., j] = -1 respectively, representing item i is preferred over item j. Returns: A `(batch_size x) n` Tensor representing the sum of negative log gradient values of the likelihood over all comparisons (i.e., the m dimension) with respect to each item. """ raise NotImplementedError def negative_log_hessian_sum(self, utility: Tensor, D: Tensor) -> Tensor: """Calculate the sum of negative log hessian with respect to each item's latent utility values. Useful for models using laplace approximation. Args: utility: A Tensor of shape `(batch_size) x n`, the utility at MAP point D: D is `(batch_size x) m x n` matrix with all elements being zero in last dimension except at two positions D[..., i] = 1 and D[..., j] = -1 respectively, representing item i is preferred over item j. Returns: A `(batch_size x) n x n` Tensor representing the sum of negative log hessian values of the likelihood over all comparisons (i.e., the m dimension) with respect to each item. 
""" raise NotImplementedError class PairwiseProbitLikelihood(PairwiseLikelihood): """Pairwise likelihood using probit function Given two items v and u with utilities f(v) and f(u), the probability that we prefer v over u with probability std_normal_cdf((f(v) - f(u))/sqrt(2)). Note that this formulation implicitly assume the noise term is fixed at 1. """ # Clamping z values for better numerical stability. See self._calc_z for detail # norm_cdf(z=3) ~= 0.999, top 0.1% percent _zlim = 3 def _calc_z(self, utility: Tensor, D: Tensor) -> Tensor: """Calculate the z score given estimated utility values and the comparison matrix D. """ scaled_util = (utility / math.sqrt(2)).unsqueeze(-1) z = D.to(scaled_util) @ scaled_util z = z.clamp(-self._zlim, self._zlim).squeeze(-1) return z def _calc_z_derived(self, z: Tensor) -> Tuple[Tensor, Tensor, Tensor]: """Calculate auxiliary statistics derived from z, including log pdf, log cdf, and the hazard function (pdf divided by cdf) Args: z: A Tensor of arbitrary shape. Returns: Tensors with standard normal logpdf(z), logcdf(z), and hazard function values evaluated at -z. """ return log_phi(z), log_ndtr(z), standard_normal_log_hazard(-z).exp() def p(self, utility: Tensor, D: Tensor, log: bool = False) -> Tensor: z = self._calc_z(utility=utility, D=D) std_norm = torch.distributions.normal.Normal( torch.zeros(1, dtype=z.dtype, device=z.device), torch.ones(1, dtype=z.dtype, device=z.device), ) return std_norm.cdf(z) def negative_log_gradient_sum(self, utility: Tensor, D: Tensor) -> Tensor: # Compute the sum over of grad. of negative Log-LH wrt utility f. # Original grad should be of dimension m x n, as in (6) from # [Chu2005preference]_. The sum over the m dimension of grad. of # negative log likelihood with respect to the utility z = self._calc_z(utility, D) _, _, h = self._calc_z_derived(z) h_factor = h / math.sqrt(2) grad = (h_factor.unsqueeze(-2) @ (-D)).squeeze(-2) return grad def negative_log_hessian_sum(self, utility: Tensor, D: Tensor) -> Tensor: # Original hess should be of dimension m x n x n, as in (7) from # [Chu2005preference]_ Sum over the first dimension and return a tensor of # shape n x n. # The sum over the m dimension of hessian of negative log likelihood # with respect to the utility DT = D.transpose(-1, -2) z = self._calc_z(utility, D) _, _, h = self._calc_z_derived(z) mul_factor = h * (h + z) / 2 mul_factor = mul_factor.unsqueeze(-2).expand(*DT.size()) # multiply the hessian value by preference signs # (+1 if preferred or -1 otherwise) and sum over the m dimension hess = DT * mul_factor @ D return hess class PairwiseLogitLikelihood(PairwiseLikelihood): """Pairwise likelihood using logistic (i.e., sigmoid) function Given two items v and u with utilities f(v) and f(u), the probability that we prefer v over u with probability sigmoid(f(v) - f(u)). Note that this formulation implicitly assume the beta term in logistic function is fixed at 1. """ # Clamping logit values for better numerical stability. 
# See self._calc_logit for detail logistic(8) ~= 0.9997, top 0.03% percent _logit_lim = 8 def _calc_logit(self, utility: Tensor, D: Tensor) -> Tensor: logit = D.to(utility) @ utility.unsqueeze(-1) logit = logit.clamp(-self._logit_lim, self._logit_lim).squeeze(-1) return logit def log_p(self, utility: Tensor, D: Tensor) -> Tensor: logit = self._calc_logit(utility=utility, D=D) return torch.nn.functional.logsigmoid(logit) def p(self, utility: Tensor, D: Tensor) -> Tensor: logit = self._calc_logit(utility=utility, D=D) return torch.sigmoid(logit) def negative_log_gradient_sum(self, utility: Tensor, D: Tensor) -> Tensor: indices_shape = utility.shape[:-1] + (-1,) winner_indices = (D == 1).nonzero(as_tuple=True)[-1].reshape(indices_shape) loser_indices = (D == -1).nonzero(as_tuple=True)[-1].reshape(indices_shape) ex = torch.exp(torch.gather(utility, -1, winner_indices)) ey = torch.exp(torch.gather(utility, -1, loser_indices)) unsigned_grad = ey / (ex + ey) grad = (unsigned_grad.unsqueeze(-2) @ (-D)).squeeze(-2) return grad def negative_log_hessian_sum(self, utility: Tensor, D: Tensor) -> Tensor: DT = D.transpose(-1, -2) # calculating f(v) - f(u) given u > v information in D neg_logit = -(D @ utility.unsqueeze(-1)).squeeze(-1) term = torch.sigmoid(neg_logit) mul_factor = term - (term) ** 2 mul_factor = mul_factor.unsqueeze(-2).expand(*DT.size()) # multiply the hessian value by preference signs # (+1 if preferred or -1 otherwise) and sum over the m dimension hess = DT * mul_factor @ D return hess
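# Minimal illustrative sketch of the likelihoods above (values are made up):
# a single comparison stating that item 0 is preferred over item 1.
import torch

utility = torch.tensor([1.0, 0.0])  # latent utilities f(x_0) and f(x_1)
D = torch.tensor([[1.0, -1.0]])  # +1 marks the winner, -1 marks the loser
p_probit = PairwiseProbitLikelihood().p(utility=utility, D=D)  # norm_cdf(1 / sqrt(2)) ~ 0.76
p_logit = PairwiseLogitLikelihood().p(utility=utility, D=D)  # sigmoid(1) ~ 0.73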
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Outcome transformations for automatically transforming and un-transforming model outputs. Outcome transformations are typically part of a Model and applied (i) within the model constructor to transform the train observations to the model space, and (ii) in the `Model.posterior` call to untransform the model posterior back to the original space. References .. [eriksson2021scalable] D. Eriksson, M. Poloczek. Scalable Constrained Bayesian Optimization. International Conference on Artificial Intelligence and Statistics. PMLR, 2021, http://proceedings.mlr.press/v130/eriksson21a.html """ from __future__ import annotations import warnings from abc import ABC, abstractmethod from collections import OrderedDict from typing import Any, List, Mapping, Optional, Tuple, Union import torch from botorch.models.transforms.utils import ( norm_to_lognorm_mean, norm_to_lognorm_variance, ) from botorch.posteriors import GPyTorchPosterior, Posterior, TransformedPosterior from botorch.utils.transforms import normalize_indices from linear_operator.operators import CholLinearOperator, DiagLinearOperator from torch import Tensor from torch.nn import Module, ModuleDict class OutcomeTransform(Module, ABC): r""" Abstract base class for outcome transforms. :meta private: """ @abstractmethod def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Transform the outcomes in a model's training targets Args: Y: A `batch_shape x n x m`-dim tensor of training targets. Yvar: A `batch_shape x n x m`-dim tensor of observation noises associated with the training targets (if applicable). Returns: A two-tuple with the transformed outcomes: - The transformed outcome observations. - The transformed observation noise (if applicable). """ pass # pragma: no cover def subset_output(self, idcs: List[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. This functionality is used to properly treat outcome transformations in the `subset_model` functionality. Args: idcs: The output indices to subset the transform to. Returns: The current outcome transform, subset to the specified output indices. """ raise NotImplementedError( f"{self.__class__.__name__} does not implement the " "`subset_output` method" ) def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Un-transform previously transformed outcomes Args: Y: A `batch_shape x n x m`-dim tensor of transfomred training targets. Yvar: A `batch_shape x n x m`-dim tensor of transformed observation noises associated with the training targets (if applicable). Returns: A two-tuple with the un-transformed outcomes: - The un-transformed outcome observations. - The un-transformed observation noise (if applicable). """ raise NotImplementedError( f"{self.__class__.__name__} does not implement the `untransform` method" ) @property def _is_linear(self) -> bool: """ True for transformations such as `Standardize`; these should be able to apply `untransform_posterior` to a GPyTorchPosterior and return a GPyTorchPosterior, because a multivariate normal distribution should remain multivariate normal after applying the transform. """ return False def untransform_posterior(self, posterior: Posterior) -> Posterior: r"""Un-transform a posterior. 
Posteriors with `_is_linear=True` should return a `GPyTorchPosterior` when `posterior` is a `GPyTorchPosterior`. Posteriors with `_is_linear=False` likely return a `TransformedPosterior` instead. Args: posterior: A posterior in the transformed space. Returns: The un-transformed posterior. """ raise NotImplementedError( f"{self.__class__.__name__} does not implement the " "`untransform_posterior` method" ) class ChainedOutcomeTransform(OutcomeTransform, ModuleDict): r"""An outcome transform representing the chaining of individual transforms""" def __init__(self, **transforms: OutcomeTransform) -> None: r"""Chaining of outcome transforms. Args: transforms: The transforms to chain. Internally, the names of the kwargs are used as the keys for accessing the individual transforms on the module. """ super().__init__(OrderedDict(transforms)) def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Transform the outcomes in a model's training targets Args: Y: A `batch_shape x n x m`-dim tensor of training targets. Yvar: A `batch_shape x n x m`-dim tensor of observation noises associated with the training targets (if applicable). Returns: A two-tuple with the transformed outcomes: - The transformed outcome observations. - The transformed observation noise (if applicable). """ for tf in self.values(): Y, Yvar = tf.forward(Y, Yvar) return Y, Yvar def subset_output(self, idcs: List[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: idcs: The output indices to subset the transform to. Returns: The current outcome transform, subset to the specified output indices. """ return self.__class__( **{name: tf.subset_output(idcs=idcs) for name, tf in self.items()} ) def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Un-transform previously transformed outcomes Args: Y: A `batch_shape x n x m`-dim tensor of transfomred training targets. Yvar: A `batch_shape x n x m`-dim tensor of transformed observation noises associated with the training targets (if applicable). Returns: A two-tuple with the un-transformed outcomes: - The un-transformed outcome observations. - The un-transformed observation noise (if applicable). """ for tf in reversed(self.values()): Y, Yvar = tf.untransform(Y, Yvar) return Y, Yvar @property def _is_linear(self) -> bool: """ A `ChainedOutcomeTransform` is linear only if all of the component transforms are linear. """ return all((octf._is_linear for octf in self.values())) def untransform_posterior(self, posterior: Posterior) -> Posterior: r"""Un-transform a posterior Args: posterior: A posterior in the transformed space. Returns: The un-transformed posterior. """ for tf in reversed(self.values()): posterior = tf.untransform_posterior(posterior) return posterior class Standardize(OutcomeTransform): r"""Standardize outcomes (zero mean, unit variance). This module is stateful: If in train mode, calling forward updates the module state (i.e. the mean/std normalizing constants). If in eval mode, calling forward simply applies the standardization using the current module state. """ def __init__( self, m: int, outputs: Optional[List[int]] = None, batch_shape: torch.Size = torch.Size(), # noqa: B008 min_stdv: float = 1e-8, ) -> None: r"""Standardize outcomes (zero mean, unit variance). Args: m: The output dimension. outputs: Which of the outputs to standardize. If omitted, all outputs will be standardized. batch_shape: The batch_shape of the training targets. 
min_stddv: The minimum standard deviation for which to perform standardization (if lower, only de-mean the data). """ super().__init__() self.register_buffer("means", torch.zeros(*batch_shape, 1, m)) self.register_buffer("stdvs", torch.ones(*batch_shape, 1, m)) self.register_buffer("_stdvs_sq", torch.ones(*batch_shape, 1, m)) self.register_buffer("_is_trained", torch.tensor(False)) self._outputs = normalize_indices(outputs, d=m) self._m = m self._batch_shape = batch_shape self._min_stdv = min_stdv def load_state_dict( self, state_dict: Mapping[str, Any], strict: bool = True ) -> None: r"""Custom logic for loading the state dict.""" if "_is_trained" not in state_dict: warnings.warn( "Key '_is_trained' not found in state_dict. Setting to True. " "In a future release, this will result in an error.", DeprecationWarning, ) state_dict = {**state_dict, "_is_trained": torch.tensor(True)} super().load_state_dict(state_dict, strict=strict) def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Standardize outcomes. If the module is in train mode, this updates the module state (i.e. the mean/std normalizing constants). If the module is in eval mode, simply applies the normalization using the module state. Args: Y: A `batch_shape x n x m`-dim tensor of training targets. Yvar: A `batch_shape x n x m`-dim tensor of observation noises associated with the training targets (if applicable). Returns: A two-tuple with the transformed outcomes: - The transformed outcome observations. - The transformed observation noise (if applicable). """ if self.training: if Y.shape[:-2] != self._batch_shape: raise RuntimeError( f"Expected Y.shape[:-2] to be {self._batch_shape}, matching " "the `batch_shape` argument to `Standardize`, but got " f"Y.shape[:-2]={Y.shape[:-2]}." ) if Y.size(-1) != self._m: raise RuntimeError( f"Wrong output dimension. Y.size(-1) is {Y.size(-1)}; expected " f"{self._m}." ) stdvs = Y.std(dim=-2, keepdim=True) stdvs = stdvs.where(stdvs >= self._min_stdv, torch.full_like(stdvs, 1.0)) means = Y.mean(dim=-2, keepdim=True) if self._outputs is not None: unused = [i for i in range(self._m) if i not in self._outputs] means[..., unused] = 0.0 stdvs[..., unused] = 1.0 self.means = means self.stdvs = stdvs self._stdvs_sq = stdvs.pow(2) self._is_trained = torch.tensor(True) Y_tf = (Y - self.means) / self.stdvs Yvar_tf = Yvar / self._stdvs_sq if Yvar is not None else None return Y_tf, Yvar_tf def subset_output(self, idcs: List[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: idcs: The output indices to subset the transform to. Returns: The current outcome transform, subset to the specified output indices. """ new_m = len(idcs) if new_m > self._m: raise RuntimeError( "Trying to subset a transform have more outputs than " " the original transform." ) nlzd_idcs = normalize_indices(idcs, d=self._m) new_outputs = None if self._outputs is not None: new_outputs = [i for i in self._outputs if i in nlzd_idcs] new_tf = self.__class__( m=new_m, outputs=new_outputs, batch_shape=self._batch_shape, min_stdv=self._min_stdv, ) new_tf.means = self.means[..., nlzd_idcs] new_tf.stdvs = self.stdvs[..., nlzd_idcs] new_tf._stdvs_sq = self._stdvs_sq[..., nlzd_idcs] new_tf._is_trained = self._is_trained if not self.training: new_tf.eval() return new_tf def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Un-standardize outcomes. Args: Y: A `batch_shape x n x m`-dim tensor of standardized targets. 
Yvar: A `batch_shape x n x m`-dim tensor of standardized observation noises associated with the targets (if applicable). Returns: A two-tuple with the un-standardized outcomes: - The un-standardized outcome observations. - The un-standardized observation noise (if applicable). """ if not self._is_trained: raise RuntimeError( "`Standardize` transforms must be called on outcome data " "(e.g. `transform(Y)`) before calling `untransform`, since " "means and standard deviations need to be computed." ) Y_utf = self.means + self.stdvs * Y Yvar_utf = self._stdvs_sq * Yvar if Yvar is not None else None return Y_utf, Yvar_utf @property def _is_linear(self) -> bool: return True def untransform_posterior( self, posterior: Posterior ) -> Union[GPyTorchPosterior, TransformedPosterior]: r"""Un-standardize the posterior. Args: posterior: A posterior in the standardized space. Returns: The un-standardized posterior. If the input posterior is a `GPyTorchPosterior`, return a `GPyTorchPosterior`. Otherwise, return a `TransformedPosterior`. """ if self._outputs is not None: raise NotImplementedError( "Standardize does not yet support output selection for " "untransform_posterior" ) if not self._is_trained: raise RuntimeError( "`Standardize` transforms must be called on outcome data " "(e.g. `transform(Y)`) before calling `untransform_posterior`, since " "means and standard deviations need to be computed." ) is_mtgp_posterior = False if type(posterior) is GPyTorchPosterior: is_mtgp_posterior = posterior._is_mt if not self._m == posterior._extended_shape()[-1] and not is_mtgp_posterior: raise RuntimeError( "Incompatible output dimensions encountered. Transform has output " f"dimension {self._m} and posterior has " f"{posterior._extended_shape()[-1]}." ) if type(posterior) is not GPyTorchPosterior: # fall back to TransformedPosterior # this applies to subclasses of GPyTorchPosterior like MultitaskGPPosterior return TransformedPosterior( posterior=posterior, sample_transform=lambda s: self.means + self.stdvs * s, mean_transform=lambda m, v: self.means + self.stdvs * m, variance_transform=lambda m, v: self._stdvs_sq * v, ) # GPyTorchPosterior (TODO: Should we Lazy-evaluate the mean here as well?) mvn = posterior.distribution offset = self.means scale_fac = self.stdvs if not posterior._is_mt: mean_tf = offset.squeeze(-1) + scale_fac.squeeze(-1) * mvn.mean scale_fac = scale_fac.squeeze(-1).expand_as(mean_tf) else: mean_tf = offset + scale_fac * mvn.mean reps = mean_tf.shape[-2:].numel() // scale_fac.size(-1) scale_fac = scale_fac.squeeze(-2) if mvn._interleaved: scale_fac = scale_fac.repeat(*[1 for _ in scale_fac.shape[:-1]], reps) else: scale_fac = torch.repeat_interleave(scale_fac, reps, dim=-1) if ( not mvn.islazy # TODO: Figure out attribute namming weirdness here or mvn._MultivariateNormal__unbroadcasted_scale_tril is not None ): # if already computed, we can save a lot of time using scale_tril covar_tf = CholLinearOperator(mvn.scale_tril * scale_fac.unsqueeze(-1)) else: lcv = mvn.lazy_covariance_matrix scale_fac = scale_fac.expand(lcv.shape[:-1]) scale_mat = DiagLinearOperator(scale_fac) covar_tf = scale_mat @ lcv @ scale_mat kwargs = {"interleaved": mvn._interleaved} if posterior._is_mt else {} mvn_tf = mvn.__class__(mean=mean_tf, covariance_matrix=covar_tf, **kwargs) return GPyTorchPosterior(mvn_tf) class Log(OutcomeTransform): r"""Log-transform outcomes. Useful if the targets are modeled using a (multivariate) log-Normal distribution. 
This means that we can use a standard GP model on the log-transformed outcomes and un-transform the model posterior of that GP. """ def __init__(self, outputs: Optional[List[int]] = None) -> None: r"""Log-transform outcomes. Args: outputs: Which of the outputs to log-transform. If omitted, all outputs will be standardized. """ super().__init__() self._outputs = outputs def subset_output(self, idcs: List[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: idcs: The output indices to subset the transform to. Returns: The current outcome transform, subset to the specified output indices. """ new_outputs = None if self._outputs is not None: if min(self._outputs + idcs) < 0: raise NotImplementedError( f"Negative indexing not supported for {self.__class__.__name__} " "when subsetting outputs and only transforming some outputs." ) new_outputs = [i for i in self._outputs if i in idcs] new_tf = self.__class__(outputs=new_outputs) if not self.training: new_tf.eval() return new_tf def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Log-transform outcomes. Args: Y: A `batch_shape x n x m`-dim tensor of training targets. Yvar: A `batch_shape x n x m`-dim tensor of observation noises associated with the training targets (if applicable). Returns: A two-tuple with the transformed outcomes: - The transformed outcome observations. - The transformed observation noise (if applicable). """ Y_tf = torch.log(Y) outputs = normalize_indices(self._outputs, d=Y.size(-1)) if outputs is not None: Y_tf = torch.stack( [ Y_tf[..., i] if i in outputs else Y[..., i] for i in range(Y.size(-1)) ], dim=-1, ) if Yvar is not None: # TODO: Delta method, possibly issue warning raise NotImplementedError( "Log does not yet support transforming observation noise" ) return Y_tf, Yvar def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Un-transform log-transformed outcomes Args: Y: A `batch_shape x n x m`-dim tensor of log-transfomred targets. Yvar: A `batch_shape x n x m`-dim tensor of log- transformed observation noises associated with the training targets (if applicable). Returns: A two-tuple with the un-transformed outcomes: - The exponentiated outcome observations. - The exponentiated observation noise (if applicable). """ Y_utf = torch.exp(Y) outputs = normalize_indices(self._outputs, d=Y.size(-1)) if outputs is not None: Y_utf = torch.stack( [ Y_utf[..., i] if i in outputs else Y[..., i] for i in range(Y.size(-1)) ], dim=-1, ) if Yvar is not None: # TODO: Delta method, possibly issue warning raise NotImplementedError( "Log does not yet support transforming observation noise" ) return Y_utf, Yvar def untransform_posterior(self, posterior: Posterior) -> TransformedPosterior: r"""Un-transform the log-transformed posterior. Args: posterior: A posterior in the log-transformed space. Returns: The un-transformed posterior. """ if self._outputs is not None: raise NotImplementedError( "Log does not yet support output selection for untransform_posterior" ) return TransformedPosterior( posterior=posterior, sample_transform=torch.exp, mean_transform=norm_to_lognorm_mean, variance_transform=norm_to_lognorm_variance, ) class Power(OutcomeTransform): r"""Power-transform outcomes. Useful if the targets are modeled using a (multivariate) power transform of a Normal distribution. This means that we can use a standard GP model on the power-transformed outcomes and un-transform the model posterior of that GP. 
""" def __init__(self, power: float, outputs: Optional[List[int]] = None) -> None: r"""Power-transform outcomes. Args: outputs: Which of the outputs to power-transform. If omitted, all outputs will be standardized. """ super().__init__() self._outputs = outputs self.power = power def subset_output(self, idcs: List[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: idcs: The output indices to subset the transform to. Returns: The current outcome transform, subset to the specified output indices. """ new_outputs = None if self._outputs is not None: if min(self._outputs + idcs) < 0: raise NotImplementedError( f"Negative indexing not supported for {self.__class__.__name__} " "when subsetting outputs and only transforming some outputs." ) new_outputs = [i for i in self._outputs if i in idcs] new_tf = self.__class__(power=self.power, outputs=new_outputs) if not self.training: new_tf.eval() return new_tf def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Power-transform outcomes. Args: Y: A `batch_shape x n x m`-dim tensor of training targets. Yvar: A `batch_shape x n x m`-dim tensor of observation noises associated with the training targets (if applicable). Returns: A two-tuple with the transformed outcomes: - The transformed outcome observations. - The transformed observation noise (if applicable). """ Y_tf = Y.pow(self.power) outputs = normalize_indices(self._outputs, d=Y.size(-1)) if outputs is not None: Y_tf = torch.stack( [ Y_tf[..., i] if i in outputs else Y[..., i] for i in range(Y.size(-1)) ], dim=-1, ) if Yvar is not None: # TODO: Delta method, possibly issue warning raise NotImplementedError( "Power does not yet support transforming observation noise" ) return Y_tf, Yvar def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Un-transform power-transformed outcomes Args: Y: A `batch_shape x n x m`-dim tensor of power-transfomred targets. Yvar: A `batch_shape x n x m`-dim tensor of power-transformed observation noises associated with the training targets (if applicable). Returns: A two-tuple with the un-transformed outcomes: - The un-power transformed outcome observations. - The un-power transformed observation noise (if applicable). """ Y_utf = Y.pow(1.0 / self.power) outputs = normalize_indices(self._outputs, d=Y.size(-1)) if outputs is not None: Y_utf = torch.stack( [ Y_utf[..., i] if i in outputs else Y[..., i] for i in range(Y.size(-1)) ], dim=-1, ) if Yvar is not None: # TODO: Delta method, possibly issue warning raise NotImplementedError( "Power does not yet support transforming observation noise" ) return Y_utf, Yvar def untransform_posterior(self, posterior: Posterior) -> TransformedPosterior: r"""Un-transform the power-transformed posterior. Args: posterior: A posterior in the power-transformed space. Returns: The un-transformed posterior. """ if self._outputs is not None: raise NotImplementedError( "Power does not yet support output selection for untransform_posterior" ) return TransformedPosterior( posterior=posterior, sample_transform=lambda x: x.pow(1.0 / self.power), ) class Bilog(OutcomeTransform): r"""Bilog-transform outcomes. The Bilog transform [eriksson2021scalable]_ is useful for modeling outcome constraints as it magnifies values near zero and flattens extreme values. """ def __init__(self, outputs: Optional[List[int]] = None) -> None: r"""Bilog-transform outcomes. Args: outputs: Which of the outputs to Bilog-transform. 
If omitted, all outputs will be transformed. """ super().__init__() self._outputs = outputs def subset_output(self, idcs: List[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: idcs: The output indices to subset the transform to. Returns: The current outcome transform, subset to the specified output indices. """ new_outputs = None if self._outputs is not None: if min(self._outputs + idcs) < 0: raise NotImplementedError( f"Negative indexing not supported for {self.__class__.__name__} " "when subsetting outputs and only transforming some outputs." ) new_outputs = [i for i in self._outputs if i in idcs] new_tf = self.__class__(outputs=new_outputs) if not self.training: new_tf.eval() return new_tf def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Bilog-transform outcomes. Args: Y: A `batch_shape x n x m`-dim tensor of training targets. Yvar: A `batch_shape x n x m`-dim tensor of observation noises associated with the training targets (if applicable). Returns: A two-tuple with the transformed outcomes: - The transformed outcome observations. - The transformed observation noise (if applicable). """ Y_tf = Y.sign() * (Y.abs() + 1.0).log() outputs = normalize_indices(self._outputs, d=Y.size(-1)) if outputs is not None: Y_tf = torch.stack( [ Y_tf[..., i] if i in outputs else Y[..., i] for i in range(Y.size(-1)) ], dim=-1, ) if Yvar is not None: raise NotImplementedError( "Bilog does not yet support transforming observation noise" ) return Y_tf, Yvar def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: r"""Un-transform bilog-transformed outcomes Args: Y: A `batch_shape x n x m`-dim tensor of bilog-transfomred targets. Yvar: A `batch_shape x n x m`-dim tensor of bilog-transformed observation noises associated with the training targets (if applicable). Returns: A two-tuple with the un-transformed outcomes: - The un-transformed outcome observations. - The un-transformed observation noise (if applicable). """ Y_utf = Y.sign() * (Y.abs().exp() - 1.0) outputs = normalize_indices(self._outputs, d=Y.size(-1)) if outputs is not None: Y_utf = torch.stack( [ Y_utf[..., i] if i in outputs else Y[..., i] for i in range(Y.size(-1)) ], dim=-1, ) if Yvar is not None: # TODO: Delta method, possibly issue warning raise NotImplementedError( "Bilog does not yet support transforming observation noise" ) return Y_utf, Yvar def untransform_posterior(self, posterior: Posterior) -> TransformedPosterior: r"""Un-transform the bilog-transformed posterior. Args: posterior: A posterior in the bilog-transformed space. Returns: The un-transformed posterior. """ if self._outputs is not None: raise NotImplementedError( "Bilog does not yet support output selection for untransform_posterior" ) return TransformedPosterior( posterior=posterior, sample_transform=lambda x: x.sign() * (x.abs().exp() - 1.0), )
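# Minimal illustrative sketch of the stateful `Standardize` transform above: the
# normalizing constants are recorded in train mode and reused by `untransform`.
import torch

Y_raw = torch.tensor([[1.0], [2.0], [3.0]])  # three observations of one outcome
octf = Standardize(m=1)
Y_std, _ = octf(Y_raw)  # train mode: records mean/std and standardizes
octf.eval()
Y_back, _ = octf.untransform(Y_std)  # recovers Y_raw up to floating point error
assert torch.allclose(Y_back, Y_raw)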
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.models.transforms.factory import get_rounding_input_transform from botorch.models.transforms.input import ( ChainedInputTransform, Normalize, Round, Warp, ) from botorch.models.transforms.outcome import ( Bilog, ChainedOutcomeTransform, Log, Power, Standardize, ) __all__ = [ "get_rounding_input_transform", "Bilog", "ChainedInputTransform", "ChainedOutcomeTransform", "Log", "Normalize", "Power", "Round", "Standardize", "Warp", ]
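# Hedged usage sketch of the exports above (illustrative values): chain a Log
# transform with Standardize so that targets are log-transformed and then brought
# to zero mean and unit variance.
import torch

chained = ChainedOutcomeTransform(log=Log(), standardize=Standardize(m=1))
Y_raw = torch.tensor([[1.0], [10.0], [100.0]])
Y_tf, _ = chained(Y_raw)  # applies Log, then Standardize (in train mode)
Y_back, _ = chained.untransform(Y_tf)  # inverts the chain in reverse order
assert torch.allclose(Y_back, Y_raw, atol=1e-3)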
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

from collections import OrderedDict
from typing import Dict, List, Optional

from botorch.models.transforms.input import (
    ChainedInputTransform,
    Normalize,
    OneHotToNumeric,
    Round,
)
from torch import Tensor


def get_rounding_input_transform(
    one_hot_bounds: Tensor,
    integer_indices: Optional[List[int]] = None,
    categorical_features: Optional[Dict[int, int]] = None,
    initialization: bool = False,
    return_numeric: bool = False,
    approximate: bool = False,
) -> ChainedInputTransform:
    """Get a rounding input transform.

    The rounding function will take inputs from the unit cube, unnormalize the
    integer parameters to the raw search space, round the inputs, and normalize
    them back to the unit cube. Categoricals are assumed to be one-hot encoded.
    Integers are currently assumed to be contiguous ranges (e.g. [1,2,3] and
    not [1,5,7]).

    TODO: support non-contiguous sets of integers by modifying the rounding
    function.

    Args:
        one_hot_bounds: The raw search space bounds where categoricals are
            encoded in one-hot representation and the integer parameters are
            not normalized.
        integer_indices: The indices of the integer parameters.
        categorical_features: A dictionary mapping indices to cardinalities
            for the categorical features.
        initialization: A boolean indicating whether this exact rounding
            function is used for initialization. For initialization, the bounds
            are expanded such that the end points of a range are selected with
            the same probability as an interior point, after rounding.
        return_numeric: A boolean indicating whether to return numeric or
            one-hot encoded categoricals. Returning a numeric representation
            is helpful if the downstream code (e.g. kernel) expects a numeric
            representation of the categoricals.
        approximate: A boolean indicating whether to use an approximate
            rounding function.

    Returns:
        The rounding function ChainedInputTransform.
    """
    has_integers = integer_indices is not None and len(integer_indices) > 0
    has_categoricals = (
        categorical_features is not None and len(categorical_features) > 0
    )
    if not (has_integers or has_categoricals):
        raise ValueError(
            "A rounding function is a no-op "
            "if there are no integer or categorical parameters."
) if initialization and has_integers: # this gives the extreme integer values (end points) # the same probability as the interior values of the range init_one_hot_bounds = one_hot_bounds.clone() init_one_hot_bounds[0, integer_indices] -= 0.4999 init_one_hot_bounds[1, integer_indices] += 0.4999 else: init_one_hot_bounds = one_hot_bounds tfs = OrderedDict() if has_integers: # unnormalize to integer space tfs["unnormalize_tf"] = Normalize( d=init_one_hot_bounds.shape[1], bounds=init_one_hot_bounds, indices=integer_indices, transform_on_train=False, transform_on_eval=True, transform_on_fantasize=True, reverse=True, ) # round tfs["round"] = Round( approximate=approximate, transform_on_train=False, transform_on_fantasize=True, integer_indices=integer_indices, categorical_features=categorical_features, ) if has_integers: # renormalize to unit cube tfs["normalize_tf"] = Normalize( d=one_hot_bounds.shape[1], bounds=one_hot_bounds, indices=integer_indices, transform_on_train=False, transform_on_eval=True, transform_on_fantasize=True, reverse=False, ) if return_numeric and has_categoricals: tfs["one_hot_to_numeric"] = OneHotToNumeric( # this is the dimension using one-hot encoded representation dim=one_hot_bounds.shape[-1], categorical_features=categorical_features, transform_on_train=True, transform_on_eval=True, transform_on_fantasize=True, ) tf = ChainedInputTransform(**tfs) tf.to(dtype=one_hot_bounds.dtype, device=one_hot_bounds.device) tf.eval() return tf
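# Hedged usage sketch (made-up bounds and candidates): one continuous parameter in
# [0, 1] and one integer parameter in {0, ..., 4}, both expressed in the unit cube.
import torch

raw_bounds = torch.tensor([[0.0, 0.0], [1.0, 4.0]])  # integer dimension left unnormalized
round_tf = get_rounding_input_transform(one_hot_bounds=raw_bounds, integer_indices=[1])
X_cand = torch.tensor([[0.3, 0.49], [0.3, 0.51]])  # candidates in the unit cube
X_rounded = round_tf(X_cand)  # second column snaps to the nearest integer, 2 / 4 = 0.5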
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from functools import wraps from typing import Tuple import torch from torch import Tensor def lognorm_to_norm(mu: Tensor, Cov: Tensor) -> Tuple[Tensor, Tensor]: """Compute mean and covariance of a MVN from those of the associated log-MVN If `Y` is log-normal with mean mu_ln and covariance Cov_ln, then `X ~ N(mu_n, Cov_n)` with Cov_n_{ij} = log(1 + Cov_ln_{ij} / (mu_ln_{i} * mu_n_{j})) mu_n_{i} = log(mu_ln_{i}) - 0.5 * log(1 + Cov_ln_{ii} / mu_ln_{i}**2) Args: mu: A `batch_shape x n` mean vector of the log-Normal distribution. Cov: A `batch_shape x n x n` covariance matrix of the log-Normal distribution. Returns: A two-tuple containing: - The `batch_shape x n` mean vector of the Normal distribution - The `batch_shape x n x n` covariance matrix of the Normal distribution """ Cov_n = torch.log(1 + Cov / (mu.unsqueeze(-1) * mu.unsqueeze(-2))) mu_n = torch.log(mu) - 0.5 * torch.diagonal(Cov_n, dim1=-1, dim2=-2) return mu_n, Cov_n def norm_to_lognorm(mu: Tensor, Cov: Tensor) -> Tuple[Tensor, Tensor]: """Compute mean and covariance of a log-MVN from its MVN sufficient statistics If `X ~ N(mu, Cov)` and `Y = exp(X)`, then `Y` is log-normal with mu_ln_{i} = exp(mu_{i} + 0.5 * Cov_{ii}) Cov_ln_{ij} = exp(mu_{i} + mu_{j} + 0.5 * (Cov_{ii} + Cov_{jj})) * (exp(Cov_{ij}) - 1) Args: mu: A `batch_shape x n` mean vector of the Normal distribution. Cov: A `batch_shape x n x n` covariance matrix of the Normal distribution. Returns: A two-tuple containing: - The `batch_shape x n` mean vector of the log-Normal distribution. - The `batch_shape x n x n` covariance matrix of the log-Normal distribution. """ diag = torch.diagonal(Cov, dim1=-1, dim2=-2) b = mu + 0.5 * diag mu_ln = torch.exp(b) Cov_ln = (torch.exp(Cov) - 1) * torch.exp(b.unsqueeze(-1) + b.unsqueeze(-2)) return mu_ln, Cov_ln def norm_to_lognorm_mean(mu: Tensor, var: Tensor) -> Tensor: """Compute mean of a log-MVN from its MVN marginals Args: mu: A `batch_shape x n` mean vector of the Normal distribution. var: A `batch_shape x n` variance vectorof the Normal distribution. Returns: The `batch_shape x n` mean vector of the log-Normal distribution. """ return torch.exp(mu + 0.5 * var) def norm_to_lognorm_variance(mu: Tensor, var: Tensor) -> Tensor: """Compute variance of a log-MVN from its MVN marginals Args: mu: A `batch_shape x n` mean vector of the Normal distribution. var: A `batch_shape x n` variance vectorof the Normal distribution. Returns: The `batch_shape x n` variance vector of the log-Normal distribution. """ b = mu + 0.5 * var return (torch.exp(var) - 1) * torch.exp(2 * b) def expand_and_copy_tensor(X: Tensor, batch_shape: torch.Size) -> Tensor: r"""Expand and copy X according to batch_shape. Args: X: A `input_batch_shape x n x d`-dim tensor of inputs. batch_shape: The new batch shape. Returns: A `new_batch_shape x n x d`-dim tensor of inputs, where `new_batch_shape` is `input_batch_shape` against `batch_shape`. """ try: batch_shape = torch.broadcast_shapes(X.shape[:-2], batch_shape) except RuntimeError: raise RuntimeError( f"Provided batch shape ({batch_shape}) and input batch shape " f"({X.shape[:-2]}) are not broadcastable." 
) expand_shape = batch_shape + X.shape[-2:] return X.expand(expand_shape).clone() def subset_transform(transform): r"""Decorator of an input transform function to separate out indexing logic.""" @wraps(transform) def f(self, X: Tensor) -> Tensor: if not hasattr(self, "indices") or self.indices is None: return transform(self, X) has_shape = hasattr(self, "batch_shape") Y = expand_and_copy_tensor(X, self.batch_shape) if has_shape else X.clone() Y[..., self.indices] = transform(self, X[..., self.indices]) return Y return f
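# Small illustrative check of the moment conversions above: map Normal moments to
# the implied log-Normal moments and back, recovering the original values.
import torch

mu = torch.tensor([0.0, 0.5])
Cov = torch.diag(torch.tensor([0.1, 0.2]))
mu_ln, Cov_ln = norm_to_lognorm(mu, Cov)
mu_back, Cov_back = lognorm_to_norm(mu_ln, Cov_ln)
assert torch.allclose(mu_back, mu, atol=1e-5) and torch.allclose(Cov_back, Cov, atol=1e-5)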
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Input Transformations. These classes implement a variety of transformations for input parameters including: learned input warping functions, rounding functions, and log transformations. The input transformation is typically part of a Model and applied within the model.forward() method. """ from __future__ import annotations from abc import ABC, abstractmethod from collections import OrderedDict from typing import Any, Callable, Dict, List, Optional, Union from warnings import warn import torch from botorch.exceptions.errors import BotorchTensorDimensionError from botorch.exceptions.warnings import UserInputWarning from botorch.models.transforms.utils import subset_transform from botorch.models.utils import fantasize from botorch.utils.rounding import approximate_round, OneHotArgmaxSTE, RoundSTE from gpytorch import Module as GPyTorchModule from gpytorch.constraints import GreaterThan from gpytorch.priors import Prior from torch import LongTensor, nn, Tensor from torch.distributions import Kumaraswamy from torch.nn import Module, ModuleDict from torch.nn.functional import one_hot class InputTransform(ABC): r"""Abstract base class for input transforms. Note: Input transforms must inherit from `torch.nn.Module`. This is deferred to the subclasses to avoid any potential conflict between `gpytorch.module.Module` and `torch.nn.Module` in `Warp`. Properties: is_one_to_many: A boolean denoting whether the transform produces multiple values for each input. transform_on_train: A boolean indicating whether to apply the transform in train() mode. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. :meta private: """ is_one_to_many: bool = False transform_on_eval: bool transform_on_train: bool transform_on_fantasize: bool def forward(self, X: Tensor) -> Tensor: r"""Transform the inputs to a model. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n' x d`-dim tensor of transformed inputs. """ if self.training: if self.transform_on_train: return self.transform(X) elif self.transform_on_eval: if fantasize.off() or self.transform_on_fantasize: return self.transform(X) return X @abstractmethod def transform(self, X: Tensor) -> Tensor: r"""Transform the inputs to a model. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of transformed inputs. """ pass # pragma: no cover def untransform(self, X: Tensor) -> Tensor: r"""Un-transform the inputs to a model. Args: X: A `batch_shape x n x d`-dim tensor of transformed inputs. Returns: A `batch_shape x n x d`-dim tensor of un-transformed inputs. """ raise NotImplementedError( f"{self.__class__.__name__} does not implement the `untransform` method." ) def equals(self, other: InputTransform) -> bool: r"""Check if another input transform is equivalent. Note: The reason that a custom equals method is defined rather than defining an __eq__ method is because defining an __eq__ method sets the __hash__ method to None. Hashing modules is currently used in pytorch. See https://github.com/pytorch/pytorch/issues/7733. Args: other: Another input transform. Returns: A boolean indicating if the other transform is equivalent. 
""" other_state_dict = other.state_dict() return ( type(self) is type(other) and (self.transform_on_train == other.transform_on_train) and (self.transform_on_eval == other.transform_on_eval) and (self.transform_on_fantasize == other.transform_on_fantasize) and all( torch.allclose(v, other_state_dict[k].to(v)) for k, v in self.state_dict().items() ) ) def preprocess_transform(self, X: Tensor) -> Tensor: r"""Apply transforms for preprocessing inputs. The main use cases for this method are 1) to preprocess training data before calling `set_train_data` and 2) preprocess `X_baseline` for noisy acquisition functions so that `X_baseline` is "preprocessed" with the same transformations as the cached training inputs. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of (transformed) inputs. """ if self.transform_on_train: # We need to disable learning of bounds / affine coefficients here. # See why: https://github.com/pytorch/botorch/issues/1078. if hasattr(self, "learn_coefficients"): learn_coefficients = self.learn_coefficients self.learn_coefficients = False result = self.transform(X) self.learn_coefficients = learn_coefficients return result else: return self.transform(X) return X class ChainedInputTransform(InputTransform, ModuleDict): r"""An input transform representing the chaining of individual transforms.""" def __init__(self, **transforms: InputTransform) -> None: r"""Chaining of input transforms. Args: transforms: The transforms to chain. Internally, the names of the kwargs are used as the keys for accessing the individual transforms on the module. Example: >>> tf1 = Normalize(d=2) >>> tf2 = Normalize(d=2) >>> tf = ChainedInputTransform(tf1=tf1, tf2=tf2) >>> list(tf.keys()) ['tf1', 'tf2'] >>> tf["tf1"] Normalize() """ super().__init__(OrderedDict(transforms)) self.transform_on_train = False self.transform_on_eval = False self.transform_on_fantasize = False for tf in transforms.values(): self.is_one_to_many |= tf.is_one_to_many self.transform_on_train |= tf.transform_on_train self.transform_on_eval |= tf.transform_on_eval self.transform_on_fantasize |= tf.transform_on_fantasize def transform(self, X: Tensor) -> Tensor: r"""Transform the inputs to a model. Individual transforms are applied in sequence. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of transformed inputs. """ for tf in self.values(): X = tf.forward(X) return X def untransform(self, X: Tensor) -> Tensor: r"""Un-transform the inputs to a model. Un-transforms of the individual transforms are applied in reverse sequence. Args: X: A `batch_shape x n x d`-dim tensor of transformed inputs. Returns: A `batch_shape x n x d`-dim tensor of un-transformed inputs. """ for tf in reversed(self.values()): X = tf.untransform(X) return X def equals(self, other: InputTransform) -> bool: r"""Check if another input transform is equivalent. Args: other: Another input transform. Returns: A boolean indicating if the other transform is equivalent. """ return super().equals(other=other) and all( t1.equals(t2) for t1, t2 in zip(self.values(), other.values()) ) def preprocess_transform(self, X: Tensor) -> Tensor: r"""Apply transforms for preprocessing inputs. The main use cases for this method are 1) to preprocess training data before calling `set_train_data` and 2) preprocess `X_baseline` for noisy acquisition functions so that `X_baseline` is "preprocessed" with the same transformations as the cached training inputs. 
Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of (transformed) inputs. """ for tf in self.values(): X = tf.preprocess_transform(X) return X class ReversibleInputTransform(InputTransform, ABC): r"""An abstract class for a reversible input transform. Properties: reverse: A boolean indicating if the functionality of transform and untransform methods should be swapped. :meta private: """ reverse: bool def transform(self, X: Tensor) -> Tensor: r"""Transform the inputs. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of transformed inputs. """ return self._untransform(X) if self.reverse else self._transform(X) def untransform(self, X: Tensor) -> Tensor: r"""Un-transform the inputs. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of un-transformed inputs. """ return self._transform(X) if self.reverse else self._untransform(X) @abstractmethod def _transform(self, X: Tensor) -> Tensor: r"""Forward transform the inputs. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of transformed inputs. """ pass # pragma: no cover @abstractmethod def _untransform(self, X: Tensor) -> Tensor: r"""Reverse transform the inputs. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of transformed inputs. """ pass # pragma: no cover def equals(self, other: InputTransform) -> bool: r"""Check if another input transform is equivalent. Args: other: Another input transform. Returns: A boolean indicating if the other transform is equivalent. """ return super().equals(other=other) and (self.reverse == other.reverse) class AffineInputTransform(ReversibleInputTransform, Module): def __init__( self, d: int, coefficient: Tensor, offset: Tensor, indices: Optional[Union[List[int], Tensor]] = None, batch_shape: torch.Size = torch.Size(), # noqa: B008 transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, reverse: bool = False, ) -> None: r"""Apply affine transformation to input: `output = (input - offset) / coefficient` Args: d: The dimension of the input space. coefficient: Tensor of linear coefficients, shape must to be broadcastable with `(batch_shape x n x d)`-dim input tensors. offset: Tensor of offset coefficients, shape must to be broadcastable with `(batch_shape x n x d)`-dim input tensors. indices: The indices of the inputs to transform. If omitted, take all dimensions of the inputs into account. Either a list of ints or a Tensor of type `torch.long`. batch_shape: The batch shape of the inputs (assuming input tensors of shape `batch_shape x n x d`). If provided, perform individual transformation per batch, otherwise uses a single transformation. transform_on_train: A boolean indicating whether to apply the transform in train() mode. Default: True. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: True. reverse: A boolean indicating whether the forward pass should untransform the inputs. 
""" super().__init__() if (indices is not None) and (len(indices) == 0): raise ValueError("`indices` list is empty!") if (indices is not None) and (len(indices) > 0): indices = torch.as_tensor( indices, dtype=torch.long, device=coefficient.device ) if len(indices) > d: raise ValueError("Can provide at most `d` indices!") if (indices > d - 1).any(): raise ValueError("Elements of `indices` have to be smaller than `d`!") if len(indices.unique()) != len(indices): raise ValueError("Elements of `indices` tensor must be unique!") self.register_buffer("indices", indices) torch.broadcast_shapes(coefficient.shape, offset.shape) self._d = d self.register_buffer("_coefficient", coefficient) self.register_buffer("_offset", offset) self.batch_shape = batch_shape self.transform_on_train = transform_on_train self.transform_on_eval = transform_on_eval self.transform_on_fantasize = transform_on_fantasize self.reverse = reverse @property def coefficient(self) -> Tensor: r"""The tensor of linear coefficients.""" coeff = self._coefficient return coeff if self.learn_coefficients and self.training else coeff.detach() @property def offset(self) -> Tensor: r"""The tensor of offset coefficients.""" offset = self._offset return offset if self.learn_coefficients and self.training else offset.detach() @property def learn_coefficients(self) -> bool: return getattr(self, "_learn_coefficients", False) @learn_coefficients.setter def learn_coefficients(self, value: bool) -> None: r"""A boolean denoting whether to learn the coefficients from inputs during model training. """ self._learn_coefficients = value @subset_transform def _transform(self, X: Tensor) -> Tensor: r"""Apply affine transformation to input. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of transformed inputs. """ if self.learn_coefficients and self.training: self._check_shape(X) self._update_coefficients(X) self._to(X) return (X - self.offset) / self.coefficient @subset_transform def _untransform(self, X: Tensor) -> Tensor: r"""Apply inverse of affine transformation. Args: X: A `batch_shape x n x d`-dim tensor of transformed inputs. Returns: A `batch_shape x n x d`-dim tensor of un-transformed inputs. """ self._to(X) return self.coefficient * X + self.offset def equals(self, other: InputTransform) -> bool: r"""Check if another input transform is equivalent. Args: other: Another input transform. Returns: A boolean indicating if the other transform is equivalent. """ if hasattr(self, "indices") != hasattr(other, "indices"): return False isequal = ( super().equals(other=other) and (self._d == other._d) and torch.allclose(self.coefficient, other.coefficient) and torch.allclose(self.offset, other.offset) and self.learn_coefficients == other.learn_coefficients ) if hasattr(self, "indices"): isequal = isequal and (self.indices == other.indices).all() return isequal def _check_shape(self, X: Tensor) -> None: """Checking input dimensions, included to increase code sharing among the derived classes Normalize and InputStandardize. """ if X.size(-1) != self.offset.size(-1): raise BotorchTensorDimensionError( f"Wrong input dimension. Received {X.size(-1)}, " f"expected {self.offset.size(-1)}." ) n = len(self.batch_shape) + 2 if X.ndim < n: raise ValueError( f"`X` must have at least {n} dimensions, {n - 2} batch and 2 innate" f" , but has {X.ndim}." 
) torch.broadcast_shapes(self.coefficient.shape, self.offset.shape, X.shape) def _to(self, X: Tensor) -> None: r"""Makes coefficient and offset have same device and dtype as X.""" self._coefficient = self.coefficient.to(X) self._offset = self.offset.to(X) def _update_coefficients(self, X: Tensor) -> None: r"""Updates affine coefficients. Implemented by subclasses, e.g. Normalize and InputStandardize. """ raise NotImplementedError( "Only subclasses of AffineInputTransform implement " "_update_coefficients, e.g. Normalize and InputStandardize." ) class Normalize(AffineInputTransform): r"""Normalize the inputs to the unit cube. If no explicit bounds are provided this module is stateful: If in train mode, calling `forward` updates the module state (i.e. the normalizing bounds). If in eval mode, calling `forward` simply applies the normalization using the current module state. """ def __init__( self, d: int, indices: Optional[Union[List[int], Tensor]] = None, bounds: Optional[Tensor] = None, batch_shape: torch.Size = torch.Size(), # noqa: B008 transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, reverse: bool = False, min_range: float = 1e-8, learn_bounds: Optional[bool] = None, ) -> None: r"""Normalize the inputs to the unit cube. Args: d: The dimension of the input space. indices: The indices of the inputs to normalize. If omitted, take all dimensions of the inputs into account. bounds: If provided, use these bounds to normalize the inputs. If omitted, learn the bounds in train mode. batch_shape: The batch shape of the inputs (assuming input tensors of shape `batch_shape x n x d`). If provided, perform individual normalization per batch, otherwise uses a single normalization. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: True. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: True. reverse: A boolean indicating whether the forward pass should untransform the inputs. min_range: Amount of noise to add to the range to ensure no division by zero errors. learn_bounds: Whether to learn the bounds in train mode. Defaults to False if bounds are provided, otherwise defaults to True. """ if learn_bounds is not None: self.learn_coefficients = learn_bounds else: self.learn_coefficients = bounds is None transform_dimension = d if indices is None else len(indices) if bounds is not None: if indices is not None and bounds.size(-1) == d: bounds = bounds[..., indices] if bounds.size(-1) != transform_dimension: raise BotorchTensorDimensionError( "Dimensions of provided `bounds` are incompatible with " f"transform_dimension = {transform_dimension}!" ) offset = bounds[..., 0:1, :] coefficient = bounds[..., 1:2, :] - offset if coefficient.ndim > 2: batch_shape = coefficient.shape[:-2] else: coefficient = torch.ones(*batch_shape, 1, transform_dimension) offset = torch.zeros(*batch_shape, 1, transform_dimension) if self.learn_coefficients is False: warn( "learn_bounds is False and no bounds were provided. 
The bounds " "will not be updated and the transform will be a no-op.", UserInputWarning, ) super().__init__( d=d, coefficient=coefficient, offset=offset, indices=indices, batch_shape=batch_shape, transform_on_train=transform_on_train, transform_on_eval=transform_on_eval, transform_on_fantasize=transform_on_fantasize, reverse=reverse, ) self.min_range = min_range @property def ranges(self): return self.coefficient @property def mins(self): return self.offset @property def bounds(self) -> Tensor: r"""The bounds used for normalizing the inputs.""" return torch.cat([self.offset, self.offset + self.coefficient], dim=-2) @property def learn_bounds(self) -> bool: return self.learn_coefficients def _update_coefficients(self, X) -> None: """Computes the normalization bounds and updates the affine coefficients, which determine the base class's behavior. """ # Aggregate mins and ranges over extra batch and marginal dims batch_ndim = min(len(self.batch_shape), X.ndim - 2) # batch rank of `X` reduce_dims = (*range(X.ndim - batch_ndim - 2), X.ndim - 2) self._offset = torch.amin(X, dim=reduce_dims).unsqueeze(-2) self._coefficient = torch.amax(X, dim=reduce_dims).unsqueeze(-2) - self.offset self._coefficient.clamp_(min=self.min_range) def get_init_args(self) -> Dict[str, Any]: r"""Get the arguments necessary to construct an exact copy of the transform.""" return { "d": self._d, "indices": getattr(self, "indices", None), "bounds": self.bounds, "batch_shape": self.batch_shape, "transform_on_train": self.transform_on_train, "transform_on_eval": self.transform_on_eval, "transform_on_fantasize": self.transform_on_fantasize, "reverse": self.reverse, "min_range": self.min_range, "learn_bounds": self.learn_bounds, } class InputStandardize(AffineInputTransform): r"""Standardize inputs (zero mean, unit variance). In train mode, calling `forward` updates the module state (i.e. the mean/std normalizing constants). If in eval mode, calling `forward` simply applies the standardization using the current module state. """ def __init__( self, d: int, indices: Optional[Union[List[int], Tensor]] = None, batch_shape: torch.Size = torch.Size(), # noqa: B008 transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, reverse: bool = False, min_std: float = 1e-8, ) -> None: r"""Standardize inputs (zero mean, unit variance). Args: d: The dimension of the input space. indices: The indices of the inputs to standardize. If omitted, take all dimensions of the inputs into account. batch_shape: The batch shape of the inputs (asssuming input tensors of shape `batch_shape x n x d`). If provided, perform individual normalization per batch, otherwise uses a single normalization. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: True transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True reverse: A boolean indicating whether the forward pass should untransform the inputs. min_std: Amount of noise to add to the standard deviation to ensure no division by zero errors. 
""" transform_dimension = d if indices is None else len(indices) super().__init__( d=d, coefficient=torch.ones(*batch_shape, 1, transform_dimension), offset=torch.zeros(*batch_shape, 1, transform_dimension), indices=indices, batch_shape=batch_shape, transform_on_train=transform_on_train, transform_on_eval=transform_on_eval, transform_on_fantasize=transform_on_fantasize, reverse=reverse, ) self.min_std = min_std self.learn_coefficients = True @property def stds(self): return self.coefficient @property def means(self): return self.offset def _update_coefficients(self, X: Tensor) -> None: """Computes the normalization bounds and updates the affine coefficients, which determine the base class's behavior. """ # Aggregate means and standard deviations over extra batch and marginal dims batch_ndim = min(len(self.batch_shape), X.ndim - 2) # batch rank of `X` reduce_dims = (*range(X.ndim - batch_ndim - 2), X.ndim - 2) coefficient, self._offset = ( values.unsqueeze(-2) for values in torch.std_mean(X, dim=reduce_dims, unbiased=True) ) self._coefficient = coefficient.clamp_(min=self.min_std) class Round(InputTransform, Module): r"""A discretization transformation for discrete inputs. If `approximate=False` (the default), uses PyTorch's `round`. If `approximate=True`, a differentiable approximate rounding function is used, with a temperature parameter of `tau`. This method is a piecewise approximation of a rounding function where each piece is a hyperbolic tangent function. For integers, this will typically be used in conjunction with normalization as follows: In eval() mode (i.e. after training), the inputs pass would typically be normalized to the unit cube (e.g. during candidate optimization). 1. These are unnormalized back to the raw input space. 2. The integers are rounded. 3. All values are normalized to the unit cube. In train() mode, the inputs can either (a) be normalized to the unit cube or (b) provided using their raw values. In the case of (a) transform_on_train should be set to True, so that the normalized inputs are unnormalized before rounding. In the case of (b) transform_on_train should be set to False, so that the raw inputs are rounded and then normalized to the unit cube. By default, the straight through estimators are used for the gradients as proposed in [Daulton2022bopr]_. This transformation supports differentiable approximate rounding (currently only for integers). The rounding function is approximated with a piece-wise function where each piece is a hyperbolic tangent function. For categorical parameters, the input must be one-hot encoded. Example: >>> bounds = torch.tensor([[0, 5], [0, 1], [0, 1]]).t() >>> integer_indices = [0] >>> categorical_features = {1: 2} >>> unnormalize_tf = Normalize( >>> d=d, >>> bounds=bounds, >>> transform_on_eval=True, >>> transform_on_train=True, >>> reverse=True, >>> ) >>> round_tf = Round(integer_indices, categorical_features) >>> normalize_tf = Normalize(d=d, bounds=bounds) >>> tf = ChainedInputTransform( >>> tf1=unnormalize_tf, tf2=round_tf, tf3=normalize_tf >>> ) """ def __init__( self, integer_indices: Union[List[int], LongTensor, None] = None, categorical_features: Optional[Dict[int, int]] = None, transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, approximate: bool = False, tau: float = 1e-3, **kwargs, ) -> None: r"""Initialize transform. Args: integer_indices: The indices of the integer inputs. 
categorical_features: A dictionary mapping the starting index of each categorical feature to its cardinality. This assumes that categoricals are one-hot encoded. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: True. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: True. approximate: A boolean indicating whether approximate or exact rounding should be used. Default: False. tau: The temperature parameter for approximate rounding. """ indices = kwargs.get("indices") if indices is not None: warn( "`indices` is marked for deprecation in favor of `integer_indices`.", DeprecationWarning, ) integer_indices = indices if approximate and categorical_features is not None: raise NotImplementedError super().__init__() self.transform_on_train = transform_on_train self.transform_on_eval = transform_on_eval self.transform_on_fantasize = transform_on_fantasize integer_indices = integer_indices if integer_indices is not None else [] self.register_buffer( "integer_indices", torch.as_tensor(integer_indices, dtype=torch.long) ) self.categorical_features = categorical_features or {} self.approximate = approximate self.tau = tau def transform(self, X: Tensor) -> Tensor: r"""Discretize the inputs. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of discretized inputs. """ X_rounded = X.clone() # round integers X_int = X_rounded[..., self.integer_indices] if self.approximate: X_int = approximate_round(X_int, tau=self.tau) else: X_int = RoundSTE.apply(X_int) X_rounded[..., self.integer_indices] = X_int # discrete categoricals to the category with the largest value # in the continuous relaxation of the one-hot encoding for start, card in self.categorical_features.items(): end = start + card X_rounded[..., start:end] = OneHotArgmaxSTE.apply(X[..., start:end]) return X_rounded def equals(self, other: InputTransform) -> bool: r"""Check if another input transform is equivalent. Args: other: Another input transform. Returns: A boolean indicating if the other transform is equivalent. """ return ( super().equals(other=other) and (self.integer_indices == other.integer_indices).all() and self.categorical_features == other.categorical_features and self.approximate == other.approximate and self.tau == other.tau ) def get_init_args(self) -> Dict[str, Any]: r"""Get the arguments necessary to construct an exact copy of the transform.""" return { "integer_indices": self.integer_indices, "categorical_features": self.categorical_features, "transform_on_train": self.transform_on_train, "transform_on_eval": self.transform_on_eval, "transform_on_fantasize": self.transform_on_fantasize, "approximate": self.approximate, "tau": self.tau, } class Log10(ReversibleInputTransform, Module): r"""A base-10 log transformation.""" def __init__( self, indices: List[int], transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, reverse: bool = False, ) -> None: r"""Initialize transform. Args: indices: The indices of the inputs to log transform. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: True. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True. 
transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: True. reverse: A boolean indicating whether the forward pass should untransform the inputs. """ super().__init__() self.register_buffer("indices", torch.tensor(indices, dtype=torch.long)) self.transform_on_train = transform_on_train self.transform_on_eval = transform_on_eval self.transform_on_fantasize = transform_on_fantasize self.reverse = reverse @subset_transform def _transform(self, X: Tensor) -> Tensor: r"""Log transform the inputs. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d`-dim tensor of transformed inputs. """ return X.log10() @subset_transform def _untransform(self, X: Tensor) -> Tensor: r"""Reverse the log transformation. Args: X: A `batch_shape x n x d`-dim tensor of normalized inputs. Returns: A `batch_shape x n x d`-dim tensor of un-normalized inputs. """ return 10.0**X class Warp(ReversibleInputTransform, GPyTorchModule): r"""A transform that uses learned input warping functions. Each specified input dimension is warped using the CDF of a Kumaraswamy distribution. Typically, MAP estimates of the parameters of the Kumaraswamy distribution, for each input dimension, are learned jointly with the GP hyperparameters. TODO: implement support using independent warping functions for each output in batched multi-output and multi-task models. For now, ModelListGPs should be used to learn independent warping functions for each output. """ # TODO: make minimum value dtype-dependent _min_concentration_level = 1e-4 def __init__( self, indices: List[int], transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, reverse: bool = False, eps: float = 1e-7, concentration1_prior: Optional[Prior] = None, concentration0_prior: Optional[Prior] = None, batch_shape: Optional[torch.Size] = None, ) -> None: r"""Initialize transform. Args: indices: The indices of the inputs to warp. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: True. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: True. reverse: A boolean indicating whether the forward pass should untransform the inputs. eps: A small value used to clip values to be in the interval (0, 1). concentration1_prior: A prior distribution on the concentration1 parameter of the Kumaraswamy distribution. concentration0_prior: A prior distribution on the concentration0 parameter of the Kumaraswamy distribution. batch_shape: The batch shape. """ super().__init__() self.register_buffer("indices", torch.tensor(indices, dtype=torch.long)) self.transform_on_train = transform_on_train self.transform_on_eval = transform_on_eval self.transform_on_fantasize = transform_on_fantasize self.reverse = reverse self.batch_shape = batch_shape or torch.Size([]) self._X_min = eps self._X_range = 1 - 2 * eps if len(self.batch_shape) > 0: # Note: this follows the gpytorch shape convention for lengthscales # There is ongoing discussion about the extra `1`. 
# TODO: update to follow new gpytorch convention resulting from # https://github.com/cornellius-gp/gpytorch/issues/1317 batch_shape = self.batch_shape + torch.Size([1]) else: batch_shape = self.batch_shape for i in (0, 1): p_name = f"concentration{i}" self.register_parameter( p_name, nn.Parameter(torch.full(batch_shape + self.indices.shape, 1.0)), ) if concentration0_prior is not None: self.register_prior( "concentration0_prior", concentration0_prior, lambda m: m.concentration0, lambda m, v: m._set_concentration(i=0, value=v), ) if concentration1_prior is not None: self.register_prior( "concentration1_prior", concentration1_prior, lambda m: m.concentration1, lambda m, v: m._set_concentration(i=1, value=v), ) for i in (0, 1): p_name = f"concentration{i}" constraint = GreaterThan( self._min_concentration_level, transform=None, # set the initial value to be the identity transformation initial_value=1.0, ) self.register_constraint(param_name=p_name, constraint=constraint) def _set_concentration(self, i: int, value: Union[float, Tensor]) -> None: if not torch.is_tensor(value): value = torch.as_tensor(value).to(self.concentration0) self.initialize(**{f"concentration{i}": value}) @subset_transform def _transform(self, X: Tensor) -> Tensor: r"""Warp the inputs through the Kumaraswamy CDF. Args: X: A `input_batch_shape x (batch_shape) x n x d`-dim tensor of inputs. batch_shape here can either be self.batch_shape or 1's such that it is broadcastable with self.batch_shape if self.batch_shape is set. Returns: A `input_batch_shape x (batch_shape) x n x d`-dim tensor of transformed inputs. """ # normalize to [eps, 1-eps], IDEA: could use Normalize and ChainedTransform. return self._k.cdf( torch.clamp( X * self._X_range + self._X_min, self._X_min, 1.0 - self._X_min, ) ) @subset_transform def _untransform(self, X: Tensor) -> Tensor: r"""Warp the inputs through the Kumaraswamy inverse CDF. Args: X: A `input_batch_shape x batch_shape x n x d`-dim tensor of inputs. Returns: A `input_batch_shape x batch_shape x n x d`-dim tensor of transformed inputs. """ if len(self.batch_shape) > 0: if self.batch_shape != X.shape[-2 - len(self.batch_shape) : -2]: raise BotorchTensorDimensionError( "The right most batch dims of X must match self.batch_shape: " f"({self.batch_shape})." ) # unnormalize from [eps, 1-eps] to [0,1] return ((self._k.icdf(X) - self._X_min) / self._X_range).clamp(0.0, 1.0) @property def _k(self) -> Kumaraswamy: """Returns a Kumaraswamy distribution with the concentration parameters.""" return Kumaraswamy( concentration1=self.concentration1, concentration0=self.concentration0, ) class AppendFeatures(InputTransform, Module): r"""A transform that appends the input with a given set of features either provided beforehand or generated on the fly via a callable. As an example, the predefined set of features can be used with `RiskMeasureMCObjective` to optimize risk measures as described in [Cakmak2020risk]_. A tutorial notebook implementing the rhoKG acqusition function introduced in [Cakmak2020risk]_ can be found at https://botorch.org/tutorials/risk_averse_bo_with_environmental_variables. 
The steps for using this to obtain samples of a risk measure are as follows: - Train a model on `(x, w)` inputs and the corresponding observations; - Pass in an instance of `AppendFeatures` with the `feature_set` denoting the samples of `W` as the `input_transform` to the trained model; - Call `posterior(...).rsample(...)` on the model with `x` inputs only to get the joint posterior samples over `(x, w)`s, where the `w`s come from the `feature_set`; - Pass these posterior samples through the `RiskMeasureMCObjective` of choice to get the samples of the risk measure. Note: The samples of the risk measure obtained this way are in general biased since the `feature_set` does not fully represent the distribution of the environmental variable. Possible examples for using a callable include statistical models that are built on PyTorch, built-in mathematical operations such as torch.sum, or custom scripted functions. By this, this input transform allows for advanced feature engineering and transfer learning models within the optimization loop. Example: >>> # We consider 1D `x` and 1D `w`, with `W` having a >>> # uniform distribution over [0, 1] >>> model = SingleTaskGP( ... train_X=torch.rand(10, 2), ... train_Y=torch.randn(10, 1), ... input_transform=AppendFeatures(feature_set=torch.rand(10, 1)) ... ) >>> mll = ExactMarginalLogLikelihood(model.likelihood, model) >>> fit_gpytorch_mll(mll) >>> test_x = torch.rand(3, 1) >>> # `posterior_samples` is a `10 x 30 x 1`-dim tensor >>> posterior_samples = model.posterior(test_x).rsamples(torch.size([10])) >>> risk_measure = VaR(alpha=0.8, n_w=10) >>> # `risk_measure_samples` is a `10 x 3`-dim tensor of samples of the >>> # risk measure VaR >>> risk_measure_samples = risk_measure(posterior_samples) """ is_one_to_many: bool = True def __init__( self, feature_set: Optional[Tensor] = None, f: Optional[Callable[[Tensor], Tensor]] = None, indices: Optional[List[int]] = None, fkwargs: Optional[Dict[str, Any]] = None, skip_expand: bool = False, transform_on_train: bool = False, transform_on_eval: bool = True, transform_on_fantasize: bool = False, ) -> None: r"""Append `feature_set` to each input or generate a set of features to append on the fly via a callable. Args: feature_set: An `n_f x d_f`-dim tensor denoting the features to be appended to the inputs. Default: None. f: A callable mapping a `batch_shape x q x d`-dim input tensor `X` to a `batch_shape x q x n_f x d_f`-dimensional output tensor. Default: None. indices: List of indices denoting the indices of the features to be passed into f. Per default all features are passed to `f`. Default: None. fkwargs: Dictionary of keyword arguments passed to the callable `f`. Default: None. skip_expand: A boolean indicating whether to expand the input tensor before appending features. This is intended for use with an `InputPerturbation`. If `True`, the input tensor will be expected to be of shape `batch_shape x (q * n_f) x d`. Not implemented in combination with a callable. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: False. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: False. """ super().__init__() if (feature_set is None) and (f is None): raise ValueError( "Either a `feature_set` or a callable `f` has to be provided." 
) if (feature_set is not None) and (f is not None): raise ValueError( "Only one can be used: either `feature_set` or callable `f`." ) if feature_set is not None: if feature_set.dim() != 2: raise ValueError("`feature_set` must be an `n_f x d_f`-dim tensor!") self.register_buffer("feature_set", feature_set) self._f = None if f is not None: if skip_expand: raise ValueError( "`skip_expand` option is not supported in case of using a callable" ) if (indices is not None) and (len(indices) == 0): raise ValueError("`indices` list is empty!") if indices is not None: indices = torch.tensor(indices, dtype=torch.long) if len(indices.unique()) != len(indices): raise ValueError("Elements of `indices` tensor must be unique!") self.indices = indices else: self.indices = slice(None) self._f = f self.fkwargs = fkwargs or {} self.skip_expand = skip_expand self.transform_on_train = transform_on_train self.transform_on_eval = transform_on_eval self.transform_on_fantasize = transform_on_fantasize def transform(self, X: Tensor) -> Tensor: r"""Transform the inputs by appending `feature_set` to each input or by generating a set of features to be appended on the fly via a callable. For each `1 x d`-dim element in the input tensor, this will produce an `n_f x (d + d_f)`-dim tensor with `feature_set` appended as the last `d_f` dimensions. For a generic `batch_shape x q x d`-dim `X`, this translates to a `batch_shape x (q * n_f) x (d + d_f)`-dim output, where the values corresponding to `X[..., i, :]` are found in `output[..., i * n_f: (i + 1) * n_f, :]`. Note: Adding the `feature_set` on the `q-batch` dimension is necessary to avoid introducing additional bias by evaluating the inputs on independent GP sample paths. Args: X: A `batch_shape x q x d`-dim tensor of inputs. If `self.skip_expand` is `True`, then `X` should be of shape `batch_shape x (q * n_f) x d`, typically obtained by passing a `batch_shape x q x d` shape input through an `InputPerturbation` with `n_f` perturbation values. Returns: A `batch_shape x (q * n_f) x (d + d_f)`-dim tensor of appended inputs. """ if self._f is not None: expanded_features = self._f(X[..., self.indices], **self.fkwargs) n_f = expanded_features.shape[-2] else: n_f = self.feature_set.shape[-2] if self.skip_expand: expanded_X = X.view(*X.shape[:-2], -1, n_f, X.shape[-1]) else: expanded_X = X.unsqueeze(dim=-2).expand(*X.shape[:-1], n_f, -1) if self._f is None: expanded_features = self.feature_set.expand(*expanded_X.shape[:-1], -1) appended_X = torch.cat([expanded_X, expanded_features], dim=-1) return appended_X.view(*X.shape[:-2], -1, appended_X.shape[-1]) class FilterFeatures(InputTransform, Module): r"""A transform that filters the input with a given set of features indices. As an example, this can be used in a multiobjective optimization with `ModelListGP` in which the specific models only share subsets of features (feature selection). A reason could be that it is known that specific features do not have any impact on a specific objective but they need to be included in the model for another one. """ def __init__( self, feature_indices: Tensor, transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, ) -> None: r"""Filter features from a model. Args: feature_set: An one-dim tensor denoting the indices of the features to be kept and fed to the model. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: True. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. 
Default: True. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: True. """ super().__init__() if feature_indices.dim() != 1: raise ValueError("`feature_indices` must be a one-dimensional tensor!") if feature_indices.dtype != torch.int64: raise ValueError("`feature_indices` tensor must be int64/long!") if (feature_indices < 0).any(): raise ValueError( "Elements of `feature_indices` have to be larger/equal to zero!" ) if len(feature_indices.unique()) != len(feature_indices): raise ValueError("Elements of `feature_indices` tensor must be unique!") self.transform_on_train = transform_on_train self.transform_on_eval = transform_on_eval self.transform_on_fantasize = transform_on_fantasize self.register_buffer("feature_indices", feature_indices) def transform(self, X: Tensor) -> Tensor: r"""Transform the inputs by keeping only the in `feature_indices` specified feature indices and filtering out the others. Args: X: A `batch_shape x q x d`-dim tensor of inputs. Returns: A `batch_shape x q x e`-dim tensor of filtered inputs, where `e` is the length of `feature_indices`. """ return X[..., self.feature_indices] def equals(self, other: InputTransform) -> bool: r"""Check if another input transform is equivalent. Args: other: Another input transform Returns: A boolean indicating if the other transform is equivalent. """ if len(self.feature_indices) != len(other.feature_indices): return False return super().equals(other=other) class InputPerturbation(InputTransform, Module): r"""A transform that adds the set of perturbations to the given input. Similar to `AppendFeatures`, this can be used with `RiskMeasureMCObjective` to optimize risk measures. See `AppendFeatures` for additional discussion on optimizing risk measures. A tutorial notebook using this with `qNoisyExpectedImprovement` can be found at https://botorch.org/tutorials/risk_averse_bo_with_input_perturbations. """ is_one_to_many: bool = True def __init__( self, perturbation_set: Union[Tensor, Callable[[Tensor], Tensor]], bounds: Optional[Tensor] = None, indices: Optional[List[int]] = None, multiplicative: bool = False, transform_on_train: bool = False, transform_on_eval: bool = True, transform_on_fantasize: bool = False, ) -> None: r"""Add `perturbation_set` to each input. Args: perturbation_set: An `n_p x d`-dim tensor denoting the perturbations to be added to the inputs. Alternatively, this can be a callable that returns `batch x n_p x d`-dim tensor of perturbations for input of shape `batch x d`. This is useful for heteroscedastic perturbations. bounds: A `2 x d`-dim tensor of lower and upper bounds for each column of the input. If given, the perturbed inputs will be clamped to these bounds. indices: A list of indices specifying a subset of inputs on which to apply the transform. Note that `len(indices)` should be equal to the second dimension of `perturbation_set` and `bounds`. The dimensionality of the input `X.shape[-1]` can be larger if we only transform a subset. multiplicative: A boolean indicating whether the input perturbations are additive or multiplicative. If True, inputs will be multiplied with the perturbations. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: False. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: False. 
""" super().__init__() if isinstance(perturbation_set, Tensor): if perturbation_set.dim() != 2: raise ValueError("`perturbation_set` must be an `n_p x d`-dim tensor!") self.register_buffer("perturbation_set", perturbation_set) else: self.perturbation_set = perturbation_set if bounds is not None: if ( isinstance(perturbation_set, Tensor) and bounds.shape[-1] != perturbation_set.shape[-1] ): raise ValueError( "`bounds` must have the same number of columns (last dimension) as " f"the `perturbation_set`! Got {bounds.shape[-1]} and " f"{perturbation_set.shape[-1]}." ) self.register_buffer("bounds", bounds) else: self.bounds = None self.register_buffer("_perturbations", None) self.indices = indices self.multiplicative = multiplicative self.transform_on_train = transform_on_train self.transform_on_eval = transform_on_eval self.transform_on_fantasize = transform_on_fantasize def transform(self, X: Tensor) -> Tensor: r"""Transform the inputs by adding `perturbation_set` to each input. For each `1 x d`-dim element in the input tensor, this will produce an `n_p x d`-dim tensor with the `perturbation_set` added to the input. For a generic `batch_shape x q x d`-dim `X`, this translates to a `batch_shape x (q * n_p) x d`-dim output, where the values corresponding to `X[..., i, :]` are found in `output[..., i * n_w: (i + 1) * n_w, :]`. Note: Adding the `perturbation_set` on the `q-batch` dimension is necessary to avoid introducing additional bias by evaluating the inputs on independent GP sample paths. Args: X: A `batch_shape x q x d`-dim tensor of inputs. Returns: A `batch_shape x (q * n_p) x d`-dim tensor of perturbed inputs. """ # NOTE: If we had access to n_p without evaluating _perturbations when the # perturbation_set is a function, we could move this into `_transform`. # Further, we could remove the two `transpose` calls below if one were # willing to accept a different ordering of the transformed output. self._perturbations = self._expanded_perturbations(X) # make space for n_p dimension, switch n_p with n after transform, and flatten. return self._transform(X.unsqueeze(-3)).transpose(-3, -2).flatten(-3, -2) @subset_transform def _transform(self, X: Tensor): p = self._perturbations Y = X * p if self.multiplicative else X + p if self.bounds is not None: return torch.maximum(torch.minimum(Y, self.bounds[1]), self.bounds[0]) return Y @property def batch_shape(self): """Returns a shape tuple such that `subset_transform` pre-allocates a (b x n_p x n x d) - dim tensor, where `b` is the batch shape of the input `X` of the transform and `n_p` is the number of perturbations. NOTE: this function is dependent on calling `_expanded_perturbations(X)` because `n_p` is inaccessible otherwise if `perturbation_set` is a function. """ return self._perturbations.shape[:-2] def _expanded_perturbations(self, X: Tensor) -> Tensor: p = self.perturbation_set if isinstance(p, Tensor): p = p.expand(X.shape[-2], *p.shape) # p is batch_shape x n x n_p x d else: p = p(X) if self.indices is None else p(X[..., self.indices]) return p.transpose(-3, -2) # p is batch_shape x n_p x n x d class OneHotToNumeric(InputTransform, Module): r"""Transform categorical parameters from a one-hot to a numeric representation. This assumes that the categoricals are the trailing dimensions. """ def __init__( self, dim: int, categorical_features: Optional[Dict[int, int]] = None, transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, ) -> None: r"""Initialize. 
Args: dim: The dimension of the one-hot-encoded input. categorical_features: A dictionary mapping the starting index of each categorical feature to its cardinality. This assumes that categoricals are one-hot encoded. transform_on_train: A boolean indicating whether to apply the transforms in train() mode. Default: False. transform_on_eval: A boolean indicating whether to apply the transform in eval() mode. Default: True. transform_on_fantasize: A boolean indicating whether to apply the transform when called from within a `fantasize` call. Default: False. Returns: A `batch_shape x n x d'`-dim tensor of where the one-hot encoded categoricals are transformed to integer representation. """ super().__init__() self.transform_on_train = transform_on_train self.transform_on_eval = transform_on_eval self.transform_on_fantasize = transform_on_fantasize categorical_features = categorical_features or {} # sort by starting index self.categorical_features = OrderedDict( sorted(categorical_features.items(), key=lambda x: x[0]) ) if len(self.categorical_features) > 0: self.categorical_start_idx = min(self.categorical_features.keys()) # check that the trailing dimensions are categoricals end = self.categorical_start_idx err_msg = ( f"{self.__class__.__name__} requires that the categorical " "parameters are the rightmost elements." ) for start, card in self.categorical_features.items(): # the end of one one-hot representation should be followed # by the start of the next if end != start: raise ValueError(err_msg) # This assumes that the categoricals are the trailing # dimensions end = start + card if end != dim: # check end raise ValueError(err_msg) # the numeric representation dimension is the total number of parameters # (continuous, integer, and categorical) self.numeric_dim = self.categorical_start_idx + len(categorical_features) def transform(self, X: Tensor) -> Tensor: r"""Transform the categorical inputs into integer representation. Args: X: A `batch_shape x n x d`-dim tensor of inputs. Returns: A `batch_shape x n x d'`-dim tensor of where the one-hot encoded categoricals are transformed to integer representation. """ if len(self.categorical_features) > 0: X_numeric = X[..., : self.numeric_dim].clone() idx = self.categorical_start_idx for start, card in self.categorical_features.items(): X_numeric[..., idx] = X[..., start : start + card].argmax(dim=-1) idx += 1 return X_numeric return X def untransform(self, X: Tensor) -> Tensor: r"""Transform the categoricals from integer representation to one-hot. Args: X: A `batch_shape x n x d'`-dim tensor of transformed inputs, where the categoricals are represented as integers. Returns: A `batch_shape x n x d`-dim tensor of inputs, where the categoricals have been transformed to one-hot representation. """ if len(self.categorical_features) > 0: self.numeric_dim one_hot_categoricals = [ # note that self.categorical_features is sorted by the starting index # in one-hot representation one_hot( X[..., idx - len(self.categorical_features)].long(), num_classes=cardinality, ) for idx, cardinality in enumerate(self.categorical_features.values()) ] X = torch.cat( [ X[..., : self.categorical_start_idx], *one_hot_categoricals, ], dim=-1, ) return X def equals(self, other: InputTransform) -> bool: r"""Check if another input transform is equivalent. Args: other: Another input transform. Returns: A boolean indicating if the other transform is equivalent. 
""" return ( type(self) is type(other) and (self.transform_on_train == other.transform_on_train) and (self.transform_on_eval == other.transform_on_eval) and (self.transform_on_fantasize == other.transform_on_fantasize) and self.categorical_features == other.categorical_features )
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

r"""
Botorch Warnings.
"""


class BotorchWarning(Warning):
    r"""Base botorch warning."""

    pass


class BadInitialCandidatesWarning(BotorchWarning):
    r"""Warning issued if the set of initial candidates for optimization is bad."""

    pass


class InputDataWarning(BotorchWarning):
    r"""Warning raised when input data does not comply with conventions."""

    pass


class CostAwareWarning(BotorchWarning):
    r"""Warning raised in the context of cost-aware acquisition strategies."""

    pass


class OptimizationWarning(BotorchWarning):
    r"""Optimization-related warnings."""

    pass


class SamplingWarning(BotorchWarning):
    r"""Sampling-related warnings."""

    pass


class BotorchTensorDimensionWarning(BotorchWarning):
    r"""Warning raised when a tensor possibly violates a botorch convention."""

    pass


class UserInputWarning(BotorchWarning):
    r"""Warning raised when a potential issue is detected with user provided inputs."""

    pass


def _get_single_precision_warning(dtype_str: str) -> str:
    msg = (
        f"The model inputs are of type {dtype_str}. It is strongly recommended "
        "to use double precision in BoTorch, as this improves both "
        "precision and stability and can help avoid numerical errors. "
        "See https://github.com/pytorch/botorch/discussions/1444"
    )
    return msg
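# -----------------------------------------------------------------------------
# Hedged usage sketch (an addition, not part of the module above): because all
# of these classes subclass the built-in `Warning`, they can be filtered or
# escalated with the standard `warnings` machinery. Pairing the single
# precision message with `InputDataWarning` below is an assumed usage for
# illustration only.
import warnings

# Silence BoTorch warnings in general, but escalate input-data issues to errors.
warnings.simplefilter("ignore", category=BotorchWarning)
warnings.simplefilter("error", category=InputDataWarning)

try:
    # Surfacing the single-precision message defined above.
    warnings.warn(_get_single_precision_warning("torch.float32"), InputDataWarning)
except InputDataWarning as e:
    print(f"Rejected single-precision inputs: {e}")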
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from botorch.exceptions.errors import (
    BotorchError,
    BotorchTensorDimensionError,
    CandidateGenerationError,
    InputDataError,
    ModelFittingError,
    OptimizationTimeoutError,
    UnsupportedError,
)
from botorch.exceptions.warnings import (
    BadInitialCandidatesWarning,
    BotorchTensorDimensionWarning,
    BotorchWarning,
    CostAwareWarning,
    InputDataWarning,
    OptimizationWarning,
    SamplingWarning,
)


__all__ = [
    "BadInitialCandidatesWarning",
    "BotorchError",
    "BotorchTensorDimensionError",
    "BotorchTensorDimensionWarning",
    "BotorchWarning",
    "CostAwareWarning",
    "InputDataWarning",
    "InputDataError",
    "CandidateGenerationError",
    "ModelFittingError",
    "OptimizationTimeoutError",
    "OptimizationWarning",
    "SamplingWarning",
    "UnsupportedError",
]
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

r"""
Botorch Errors.
"""

from typing import Any

import numpy as np


class BotorchError(Exception):
    r"""Base botorch exception."""

    pass


class CandidateGenerationError(BotorchError):
    r"""Exception raised during candidate generation."""

    pass


class DeprecationError(BotorchError):
    r"""Exception raised due to deprecations."""

    pass


class InputDataError(BotorchError):
    r"""Exception raised when input data does not comply with conventions."""

    pass


class UnsupportedError(BotorchError):
    r"""Currently unsupported feature."""

    pass


class BotorchTensorDimensionError(BotorchError):
    r"""Exception raised when a tensor violates a botorch convention."""

    pass


class ModelFittingError(Exception):
    r"""Exception raised when attempts to fit a model terminate unsuccessfully."""

    pass


class OptimizationTimeoutError(BotorchError):
    r"""Exception raised when optimization times out."""

    def __init__(
        self, /, *args: Any, current_x: np.ndarray, runtime: float, **kwargs: Any
    ) -> None:
        r"""
        Args:
            *args: Standard args to `BotorchError`.
            current_x: A numpy array representing the current iterate.
            runtime: The total runtime in seconds after which the optimization
                timed out.
            **kwargs: Standard kwargs to `BotorchError`.
        """
        super().__init__(*args, **kwargs)
        self.current_x = current_x
        self.runtime = runtime
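# -----------------------------------------------------------------------------
# Hedged usage sketch (an addition, not part of the module above):
# `OptimizationTimeoutError` carries the last iterate and the elapsed runtime,
# so callers can recover a best-effort solution when an optimization loop is
# cut short. The `run_opt` function below is a hypothetical stand-in for any
# routine that may raise this error.
import numpy as np


def run_opt() -> np.ndarray:
    # Placeholder: pretend the optimizer hit its time budget immediately.
    raise OptimizationTimeoutError(
        "Optimization timed out.", current_x=np.zeros(3), runtime=10.0
    )


try:
    x_opt = run_opt()
except OptimizationTimeoutError as e:
    # Fall back to the last iterate reached before the timeout.
    x_opt = e.current_x
    print(f"Timed out after {e.runtime:.1f}s; using the last iterate.")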
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Tuple import torch from botorch.exceptions.errors import BotorchTensorDimensionError from botorch.posteriors.gpytorch import GPyTorchPosterior from gpytorch.distributions import MultivariateNormal from linear_operator.operators import LinearOperator from torch import Tensor class HigherOrderGPPosterior(GPyTorchPosterior): r""" Posterior class for a Higher order Gaussian process model [Zhe2019hogp]_. Extends the standard GPyTorch posterior class by overwriting the rsample method. The posterior variance is handled internally by the HigherOrderGP model. HOGP is a tensorized GP model so the posterior covariance grows to be extremely large, but is highly structured, which means that we can exploit Kronecker identities to sample from the posterior using Matheron's rule as described in [Doucet2010sampl]_. In general, this posterior should ONLY be used for HOGP models that have highly structured covariances. It should also only be used internally when called from the HigherOrderGP.posterior(...) method. At this time, the posterior does not support gradients with respect to the training data. """ def __init__( self, distribution: MultivariateNormal, joint_covariance_matrix: LinearOperator, train_train_covar: LinearOperator, test_train_covar: LinearOperator, train_targets: Tensor, output_shape: torch.Size, num_outputs: int, ) -> None: r"""A Posterior for HigherOrderGP models. Args: distribution: Posterior multivariate normal distribution. joint_covariance_matrix: Joint test train covariance matrix over the entire tensor. train_train_covar: Covariance matrix of train points in the data space. test_train_covar: Covariance matrix of test x train points in the data space. train_targets: Training responses vectorized. output_shape: Shape output training responses. num_outputs: Batch shaping of model. """ super().__init__(distribution=distribution) self.joint_covariance_matrix = joint_covariance_matrix self.train_train_covar = train_train_covar self.test_train_covar = test_train_covar self.train_targets = train_targets self.output_shape = output_shape self._is_mt = True self.num_outputs = num_outputs @property def base_sample_shape(self): r"""The shape of a base sample used for constructing posterior samples. Overwrites the standard `base_sample_shape` call to inform samplers that `n + 2 n_train` samples need to be drawn rather than n samples. """ joint_covar = self.joint_covariance_matrix batch_shape = joint_covar.shape[:-2] sampling_shape = torch.Size( [joint_covar.shape[-2] + self.train_train_covar.shape[-2]] ) return batch_shape + sampling_shape @property def batch_range(self) -> Tuple[int, int]: r"""The t-batch range. This is used in samplers to identify the t-batch component of the `base_sample_shape`. The base samples are expanded over the t-batches to provide consistency in the acquisition values, i.e., to ensure that a candidate produces same value regardless of its position on the t-batch. """ return (0, -1) def _extended_shape( self, sample_shape: torch.Size = torch.Size() # noqa: B008 ) -> torch.Size: r"""Returns the shape of the samples produced by the posterior with the given `sample_shape`. 
""" return sample_shape + self.output_shape def _prepare_base_samples( self, sample_shape: torch.Size, base_samples: Tensor = None ) -> Tensor: covariance_matrix = self.joint_covariance_matrix joint_size = covariance_matrix.shape[-1] batch_shape = covariance_matrix.batch_shape if base_samples is not None: if base_samples.shape[: len(sample_shape)] != sample_shape: raise RuntimeError("sample_shape disagrees with shape of base_samples.") appended_shape = joint_size + self.train_train_covar.shape[-1] if appended_shape != base_samples.shape[-1]: # get base_samples to the correct shape by expanding as sample shape, # batch shape, then rest of dimensions. We have to add first the sample # shape, then the batch shape of the model, and then finally the shape # of the test data points squeezed into a single dimension, accessed # from the test_train_covar. base_sample_shapes = ( sample_shape + batch_shape + self.test_train_covar.shape[-2:-1] ) if base_samples.nelement() == base_sample_shapes.numel(): base_samples = base_samples.reshape(base_sample_shapes) new_base_samples = torch.randn( *sample_shape, *batch_shape, appended_shape - base_samples.shape[-1], device=base_samples.device, dtype=base_samples.dtype, ) base_samples = torch.cat((base_samples, new_base_samples), dim=-1) else: raise BotorchTensorDimensionError( "The base samples are not compatible with base sample shape. " f"Received base samples of shape {base_samples.shape}, " f"expected {base_sample_shapes}." ) if base_samples is None: # TODO: Allow qMC sampling base_samples = torch.randn( *sample_shape, *batch_shape, joint_size, device=covariance_matrix.device, dtype=covariance_matrix.dtype, ) noise_base_samples = torch.randn( *sample_shape, *batch_shape, self.train_train_covar.shape[-1], device=covariance_matrix.device, dtype=covariance_matrix.dtype, ) else: # finally split up the base samples noise_base_samples = base_samples[..., joint_size:] base_samples = base_samples[..., :joint_size] perm_list = [*range(1, base_samples.ndim), 0] return base_samples.permute(*perm_list), noise_base_samples.permute(*perm_list) def rsample_from_base_samples( self, sample_shape: torch.Size, base_samples: Optional[Tensor], ) -> Tensor: r"""Sample from the posterior (with gradients) using base samples. As the posterior covariance is difficult to draw from in this model, we implement Matheron's rule as described in [Doucet2010sampl]-. This may not work entirely correctly for deterministic base samples unless base samples are provided that are of shape `n + 2 * n_train` because the sampling method draws `2 * n_train` samples as well as the standard `n`. samples. Args: sample_shape: A `torch.Size` object specifying the sample shape. To draw `n` samples, set to `torch.Size([n])`. To draw `b` batches of `n` samples each, set to `torch.Size([b, n])`. base_samples: An (optional) Tensor of `N(0, I)` base samples of appropriate dimension, typically obtained from a `Sampler`. This is used for deterministic optimization. Returns: Samples from the posterior, a tensor of shape `self._extended_shape(sample_shape=sample_shape)`. 
""" base_samples, noise_base_samples = self._prepare_base_samples( sample_shape, base_samples ) # base samples now have trailing sample dimension covariance_matrix = self.joint_covariance_matrix covar_root = covariance_matrix.root_decomposition().root samples = covar_root.matmul(base_samples[..., : covar_root.shape[-1], :]) # now pluck out Y_x and X_x noiseless_train_marginal_samples = samples[ ..., : self.train_train_covar.shape[-1], : ] test_marginal_samples = samples[..., self.train_train_covar.shape[-1] :, :] # we need to add noise to the train_joint_samples # THIS ASSUMES CONSTANT NOISE # The following assumes test_train_covar is a SumLinearOperator. TODO: Improve noise_std = self.train_train_covar.linear_ops[1]._diag[..., 0] ** 0.5 # TODO: cleanup the reshaping here # expands the noise to allow broadcasting against the noise base samples # reshape_as or view_as don't work here because we need to expand to # broadcast against `samples x batch_shape x output_shape` while noise_std # is `batch_shape x 1`. if self.num_outputs > 1 or noise_std.ndim > 1: ntms_dims = [ i == noise_std.shape[0] for i in noiseless_train_marginal_samples.shape ] for matched in ntms_dims: if not matched: noise_std = noise_std.unsqueeze(-1) # we need to add noise into the noiseless samples noise_marginal_samples = noise_std * noise_base_samples train_marginal_samples = ( noiseless_train_marginal_samples + noise_marginal_samples ) # compute y - Y_x train_rhs = self.train_targets - train_marginal_samples # K_{train, train}^{-1} (y - Y_x) # internally, this solve is done using Kronecker algebra and is fast. kinv_rhs = self.train_train_covar.solve(train_rhs) # multiply by cross-covariance test_updated_samples = self.test_train_covar.matmul(kinv_rhs) # add samples test_cond_samples = test_marginal_samples + test_updated_samples test_cond_samples = test_cond_samples.permute( test_cond_samples.ndim - 1, *range(0, test_cond_samples.ndim - 1) ) # reshape samples to be the actual size of the train targets return test_cond_samples.reshape(*sample_shape, *self.output_shape) def rsample( self, sample_shape: Optional[torch.Size] = None, ) -> Tensor: r"""Sample from the posterior (with gradients). Args: sample_shape: A `torch.Size` object specifying the sample shape. To draw `n` samples, set to `torch.Size([n])`. To draw `b` batches of `n` samples each, set to `torch.Size([b, n])`. Returns: Samples from the posterior, a tensor of shape `self._extended_shape(sample_shape=sample_shape)`. """ if sample_shape is None: sample_shape = torch.Size([1]) return self.rsample_from_base_samples( sample_shape=sample_shape, base_samples=None )
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Deterministic (degenerate) posteriors. Used in conjunction with deterministic models. """ from __future__ import annotations from typing import Optional from warnings import warn import torch from botorch.posteriors.posterior import Posterior from torch import Tensor class DeterministicPosterior(Posterior): r"""Deterministic posterior. [DEPRECATED] Use `EnsemblePosterior` instead. """ def __init__(self, values: Tensor) -> None: r""" Args: values: Values of the samples produced by this posterior. """ warn( "`DeterministicPosterior` is marked for deprecation, consider using " "`EnsemblePosterior`.", DeprecationWarning, ) self.values = values @property def device(self) -> torch.device: r"""The torch device of the posterior.""" return self.values.device @property def dtype(self) -> torch.dtype: r"""The torch dtype of the posterior.""" return self.values.dtype def _extended_shape( self, sample_shape: torch.Size = torch.Size() # noqa: B008 ) -> torch.Size: r"""Returns the shape of the samples produced by the posterior with the given `sample_shape`. """ return sample_shape + self.values.shape @property def mean(self) -> Tensor: r"""The mean of the posterior as a `(b) x n x m`-dim Tensor.""" return self.values @property def variance(self) -> Tensor: r"""The variance of the posterior as a `(b) x n x m`-dim Tensor. As this is a deterministic posterior, this is a tensor of zeros. """ return torch.zeros_like(self.values) def rsample( self, sample_shape: Optional[torch.Size] = None, ) -> Tensor: r"""Sample from the posterior (with gradients). For the deterministic posterior, this just returns the values expanded to the requested shape. Args: sample_shape: A `torch.Size` object specifying the sample shape. To draw `n` samples, set to `torch.Size([n])`. To draw `b` batches of `n` samples each, set to `torch.Size([b, n])`. Returns: Samples from the posterior, a tensor of shape `self._extended_shape(sample_shape=sample_shape)`. """ if sample_shape is None: sample_shape = torch.Size([1]) return self.values.expand(self._extended_shape(sample_shape))
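# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the source above): the deterministic
# posterior just broadcasts its stored values, so every "sample" is an
# identical copy and the variance is exactly zero. Assumes botorch is
# installed; the toy values are made up. (A DeprecationWarning is expected.)
import torch
from botorch.posteriors.deterministic import DeterministicPosterior

values = torch.tensor([[1.0], [2.0], [3.0]])  # n x m = 3 x 1
post = DeterministicPosterior(values=values)
samples = post.rsample(sample_shape=torch.Size([4]))  # 4 x 3 x 1
assert samples.shape == torch.Size([4, 3, 1])
assert torch.equal(samples[0], values) and torch.equal(samples[-1], values)
assert torch.all(post.variance == 0)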
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Posterior module to be used with GPyTorch models. """ from __future__ import annotations import warnings from contextlib import ExitStack from typing import Optional, Tuple, TYPE_CHECKING, Union import torch from botorch.exceptions.errors import BotorchTensorDimensionError from botorch.posteriors.base_samples import _reshape_base_samples_non_interleaved from botorch.posteriors.torch import TorchPosterior from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal from linear_operator import settings as linop_settings from linear_operator.operators import ( BlockDiagLinearOperator, DenseLinearOperator, LinearOperator, SumLinearOperator, ) from torch import Tensor from torch.distributions import Normal if TYPE_CHECKING: from botorch.posteriors.posterior_list import PosteriorList # pragma: no cover class GPyTorchPosterior(TorchPosterior): r"""A posterior based on GPyTorch's multi-variate Normal distributions.""" distribution: MultivariateNormal def __init__( self, distribution: Optional[MultivariateNormal] = None, mvn: Optional[MultivariateNormal] = None, ) -> None: r"""A posterior based on GPyTorch's multi-variate Normal distributions. Args: distribution: A GPyTorch MultivariateNormal (single-output case) or MultitaskMultivariateNormal (multi-output case). mvn: Deprecated. """ if mvn is not None: if distribution is not None: raise RuntimeError( "Got both a `distribution` and an `mvn` argument. " "Use the `distribution` only." ) warnings.warn( "The `mvn` argument of `GPyTorchPosterior`s has been renamed to " "`distribution` and will be removed in a future version.", DeprecationWarning, ) distribution = mvn if distribution is None: raise RuntimeError("GPyTorchPosterior must have a distribution specified.") super().__init__(distribution=distribution) self._is_mt = isinstance(distribution, MultitaskMultivariateNormal) @property def mvn(self) -> MultivariateNormal: r"""Expose the distribution as a backwards-compatible attribute.""" return self.distribution @property def base_sample_shape(self) -> torch.Size: r"""The shape of a base sample used for constructing posterior samples.""" return self.distribution.batch_shape + self.distribution.base_sample_shape @property def batch_range(self) -> Tuple[int, int]: r"""The t-batch range. This is used in samplers to identify the t-batch component of the `base_sample_shape`. The base samples are expanded over the t-batches to provide consistency in the acquisition values, i.e., to ensure that a candidate produces same value regardless of its position on the t-batch. """ if self._is_mt: return (0, -2) else: return (0, -1) def _extended_shape( self, sample_shape: torch.Size = torch.Size() # noqa: B008 ) -> torch.Size: r"""Returns the shape of the samples produced by the posterior with the given `sample_shape`. """ base_shape = self.distribution.batch_shape + self.distribution.event_shape if not self._is_mt: base_shape += torch.Size([1]) return sample_shape + base_shape def rsample_from_base_samples( self, sample_shape: torch.Size, base_samples: Tensor, ) -> Tensor: r"""Sample from the posterior (with gradients) using base samples. This is intended to be used with a sampler that produces the corresponding base samples, and enables acquisition optimization via Sample Average Approximation. 
Args: sample_shape: A `torch.Size` object specifying the sample shape. To draw `n` samples, set to `torch.Size([n])`. To draw `b` batches of `n` samples each, set to `torch.Size([b, n])`. base_samples: A Tensor of `N(0, I)` base samples of shape `sample_shape x base_sample_shape`, typically obtained from a `Sampler`. This is used for deterministic optimization. Returns: Samples from the posterior, a tensor of shape `self._extended_shape(sample_shape=sample_shape)`. """ if base_samples.shape[: len(sample_shape)] != sample_shape: raise RuntimeError("`sample_shape` disagrees with shape of `base_samples`.") if self._is_mt: base_samples = _reshape_base_samples_non_interleaved( mvn=self.distribution, base_samples=base_samples, sample_shape=sample_shape, ) with ExitStack() as es: if linop_settings._fast_covar_root_decomposition.is_default(): es.enter_context(linop_settings._fast_covar_root_decomposition(False)) samples = self.distribution.rsample( sample_shape=sample_shape, base_samples=base_samples ) if not self._is_mt: samples = samples.unsqueeze(-1) return samples def rsample( self, sample_shape: Optional[torch.Size] = None, base_samples: Optional[Tensor] = None, ) -> Tensor: r"""Sample from the posterior (with gradients). Args: sample_shape: A `torch.Size` object specifying the sample shape. To draw `n` samples, set to `torch.Size([n])`. To draw `b` batches of `n` samples each, set to `torch.Size([b, n])`. base_samples: An (optional) Tensor of `N(0, I)` base samples of appropriate dimension, typically obtained from a `Sampler`. This is used for deterministic optimization. Returns: Samples from the posterior, a tensor of shape `self._extended_shape(sample_shape=sample_shape)`. """ if sample_shape is None: sample_shape = torch.Size([1]) if base_samples is not None: warnings.warn( "Use of `base_samples` with `rsample` is deprecated. Use " "`rsample_from_base_samples` instead.", DeprecationWarning, ) if base_samples.shape[: len(sample_shape)] != sample_shape: raise RuntimeError( "`sample_shape` disagrees with shape of `base_samples`." ) # get base_samples to the correct shape base_samples = base_samples.expand(self._extended_shape(sample_shape)) if not self._is_mt: # Remove output dimension in single output case. 
base_samples = base_samples.squeeze(-1) return self.rsample_from_base_samples( sample_shape=sample_shape, base_samples=base_samples ) with ExitStack() as es: if linop_settings._fast_covar_root_decomposition.is_default(): es.enter_context(linop_settings._fast_covar_root_decomposition(False)) samples = self.distribution.rsample( sample_shape=sample_shape, base_samples=base_samples ) # make sure there always is an output dimension if not self._is_mt: samples = samples.unsqueeze(-1) return samples @property def mean(self) -> Tensor: r"""The posterior mean.""" mean = self.distribution.mean if not self._is_mt: mean = mean.unsqueeze(-1) return mean @property def variance(self) -> Tensor: r"""The posterior variance.""" variance = self.distribution.variance if not self._is_mt: variance = variance.unsqueeze(-1) return variance def quantile(self, value: Tensor) -> Tensor: r"""Compute the quantiles of the marginal distributions.""" if value.numel() > 1: return torch.stack([self.quantile(v) for v in value], dim=0) marginal = Normal(loc=self.mean, scale=self.variance.sqrt()) return marginal.icdf(value) def density(self, value: Tensor) -> Tensor: r"""The probability density of the marginal distributions.""" if value.numel() > 1: return torch.stack([self.density(v) for v in value], dim=0) marginal = Normal(loc=self.mean, scale=self.variance.sqrt()) return marginal.log_prob(value).exp() def _validate_scalarize_inputs(weights: Tensor, m: int) -> None: if weights.ndim > 1: raise BotorchTensorDimensionError("`weights` must be one-dimensional") if m != weights.size(0): raise RuntimeError( f"Output shape not equal to that of weights. Output shape is {m} and " f"weights are {weights.shape}" ) def scalarize_posterior_gpytorch( posterior: GPyTorchPosterior, weights: Tensor, offset: float = 0.0, ) -> Tuple[Tensor, Union[Tensor, LinearOperator]]: r"""Helper function for `scalarize_posterior`, producing a mean and variance. This mean and variance are consumed by `scalarize_posterior` to produce a `GPyTorchPosterior`. Args: posterior: The posterior over `m` outcomes to be scalarized. Supports `t`-batching. weights: A tensor of weights of size `m`. offset: The offset of the affine transformation. Returns: The transformed (single-output) posterior. If the input posterior has mean `mu` and covariance matrix `Sigma`, this posterior has mean `weights^T * mu` and variance `weights^T Sigma w`. 
Example: Example for a model with two outcomes: >>> X = torch.rand(1, 2) >>> posterior = model.posterior(X) >>> weights = torch.tensor([0.5, 0.25]) >>> mean, cov = scalarize_posterior_gpytorch(posterior, weights=weights) >>> mvn = MultivariateNormal(mean, cov) >>> new_posterior = GPyTorchPosterior """ mean = posterior.mean q, m = mean.shape[-2:] _validate_scalarize_inputs(weights, m) batch_shape = mean.shape[:-2] mvn = posterior.distribution cov = mvn.lazy_covariance_matrix if mvn.islazy else mvn.covariance_matrix if m == 1: # just scaling, no scalarization necessary new_mean = offset + (weights[0] * mean).view(*batch_shape, q) new_cov = weights[0] ** 2 * cov return new_mean, new_cov new_mean = offset + (mean @ weights).view(*batch_shape, q) if q == 1: new_cov = weights.unsqueeze(-2) @ (cov @ weights.unsqueeze(-1)) else: # we need to handle potentially different representations of the multi-task mvn if mvn._interleaved: w_cov = weights.repeat(q).unsqueeze(0) sum_shape = batch_shape + torch.Size([q, m, q, m]) sum_dims = (-1, -2) else: # special-case the independent setting if isinstance(cov, BlockDiagLinearOperator): new_cov = SumLinearOperator( *[ cov.base_linear_op[..., i, :, :] * weights[i].pow(2) for i in range(cov.base_linear_op.size(-3)) ] ) return new_mean, new_cov w_cov = torch.repeat_interleave(weights, q).unsqueeze(0) sum_shape = batch_shape + torch.Size([m, q, m, q]) sum_dims = (-2, -3) cov_scaled = w_cov * cov * w_cov.transpose(-1, -2) # TODO: Do not instantiate full covariance for LinearOperators # (ideally we simplify this in GPyTorch: # https://github.com/cornellius-gp/gpytorch/issues/1055) if isinstance(cov_scaled, LinearOperator): cov_scaled = cov_scaled.to_dense() new_cov = cov_scaled.view(sum_shape).sum(dim=sum_dims[0]).sum(dim=sum_dims[1]) new_cov = DenseLinearOperator(new_cov) return new_mean, new_cov def scalarize_posterior( posterior: Union[GPyTorchPosterior, PosteriorList], weights: Tensor, offset: float = 0.0, ) -> GPyTorchPosterior: r"""Affine transformation of a multi-output posterior. Args: posterior: The posterior over `m` outcomes to be scalarized. Supports `t`-batching. Can be either a `GPyTorchPosterior`, or a `PosteriorList` that contains GPyTorchPosteriors all with q=1. weights: A tensor of weights of size `m`. offset: The offset of the affine transformation. Returns: The transformed (single-output) posterior. If the input posterior has mean `mu` and covariance matrix `Sigma`, this posterior has mean `weights^T * mu` and variance `weights^T Sigma w`. Example: Example for a model with two outcomes: >>> X = torch.rand(1, 2) >>> posterior = model.posterior(X) >>> weights = torch.tensor([0.5, 0.25]) >>> new_posterior = scalarize_posterior(posterior, weights=weights) """ # GPyTorchPosterior case if hasattr(posterior, "distribution"): mean, cov = scalarize_posterior_gpytorch(posterior, weights, offset) mvn = MultivariateNormal(mean, cov) return GPyTorchPosterior(mvn) # PosteriorList case if not hasattr(posterior, "posteriors"): raise NotImplementedError( "scalarize_posterior only works with a posterior that has an attribute " "`distribution`, such as a GPyTorchPosterior, or a posterior that contains " "sub-posteriors in an attribute `posteriors`, as in a PosteriorList." ) mean = posterior.mean q, m = mean.shape[-2:] _validate_scalarize_inputs(weights, m) batch_shape = mean.shape[:-2] if q != 1: raise NotImplementedError( "scalarize_posterior only works with a PosteriorList if each sub-posterior " "has q=1." 
) means = [post.mean for post in posterior.posteriors] if {mean.shape[-1] for mean in means} != {1}: raise NotImplementedError( "scalarize_posterior only works with a PosteriorList if each sub-posterior " "has one outcome." ) new_mean = offset + (mean @ weights).view(*batch_shape, q) new_cov = (posterior.variance @ (weights**2))[:, None] mvn = MultivariateNormal(new_mean, new_cov) return GPyTorchPosterior(mvn)
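# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the source above): scalarizing a toy
# two-outcome posterior and checking the affine identities
#   E[w^T f + c] = w^T mu + c   and   Var[w^T f] = w^T Sigma w.
# Assumes botorch and gpytorch are installed; the mean, covariance, weights,
# and offset below are made-up toy quantities.
import torch
from botorch.posteriors.gpytorch import GPyTorchPosterior, scalarize_posterior
from gpytorch.distributions import MultitaskMultivariateNormal

mean = torch.tensor([[0.5, 1.0]])  # q x m = 1 x 2
covar = torch.tensor([[0.04, 0.01], [0.01, 0.09]])  # (q * m) x (q * m) = 2 x 2
posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, covar))

weights = torch.tensor([0.5, 0.25])
new_post = scalarize_posterior(posterior, weights=weights, offset=0.1)

expected_mean = 0.1 + weights @ mean.squeeze(0)
expected_var = weights @ covar @ weights
assert torch.allclose(new_post.mean.squeeze(), expected_mean)
assert torch.allclose(new_post.variance.squeeze(), expected_var)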
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

from typing import Callable, Optional, Tuple

import torch
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch.distributions import MultivariateNormal
from torch import Tensor

MCMC_DIM = -3  # Location of the MCMC batch dimension
TOL = 1e-6  # Bisection tolerance


def batched_bisect(
    f: Callable, target: float, bounds: Tensor, tol: float = TOL, max_steps: int = 32
):
    r"""Batched bisection with a fixed number of steps.

    Args:
        f: Target function that takes a `(b1 x ... x bk)`-dim tensor and returns a
            `(b1 x ... x bk)`-dim tensor.
        target: Scalar target value of type float.
        bounds: Lower and upper bounds, of size `2 x b1 x ... x bk`.
        tol: We terminate if all elements are within `tol` of the `target`.
        max_steps: Maximum number of bisection steps.

    Returns:
        Tensor X of size `b1 x ... x bk` such that `f(X) = target`.
    """
    # Make sure target is actually contained in the interval
    f1, f2 = f(bounds[0]), f(bounds[1])
    if not ((f1 <= target) & (target <= f2)).all():
        raise ValueError(
            "The target is not contained in the interval specified by the bounds"
        )
    bounds = bounds.clone()  # Will be modified in-place
    center = bounds.mean(dim=0)
    f_center = f(center)
    for _ in range(max_steps):
        go_left = f_center > target
        bounds[1, go_left] = center[go_left]
        bounds[0, ~go_left] = center[~go_left]
        center = bounds.mean(dim=0)
        f_center = f(center)
        # Check convergence
        if (f_center - target).abs().max() <= tol:
            return center
    return center


def _quantile(posterior: FullyBayesianPosterior, value: Tensor) -> Tensor:
    r"""Compute the posterior quantiles for the mixture of models."""
    if value.numel() > 1:
        return torch.stack(
            [_quantile(posterior=posterior, value=v) for v in value], dim=0
        )
    if value <= 0 or value >= 1:
        raise ValueError("value is expected to be in the range (0, 1).")
    dist = torch.distributions.Normal(
        loc=posterior.mean, scale=posterior.variance.sqrt()
    )
    if posterior.mean.shape[MCMC_DIM] == 1:
        # Analytical solution
        return dist.icdf(value).squeeze(MCMC_DIM)
    icdf_val = dist.icdf(value)
    low = icdf_val.min(dim=MCMC_DIM).values - TOL
    high = icdf_val.max(dim=MCMC_DIM).values + TOL
    bounds = torch.cat((low.unsqueeze(0), high.unsqueeze(0)), dim=0)
    return batched_bisect(
        f=lambda x: dist.cdf(x.unsqueeze(MCMC_DIM)).mean(dim=MCMC_DIM),
        target=value.item(),
        bounds=bounds,
    )


class FullyBayesianPosterior(GPyTorchPosterior):
    r"""A posterior for a fully Bayesian model.

    The MCMC batch dimension that corresponds to the models in the mixture is
    located at `MCMC_DIM` (defined at the top of this file). Note that while
    each MCMC sample corresponds to a Gaussian posterior, the fully Bayesian
    posterior is rather a mixture of Gaussian distributions.
    """

    def __init__(self, distribution: MultivariateNormal) -> None:
        r"""A posterior for a fully Bayesian model.
Args: distribution: A GPyTorch MultivariateNormal (single-output case) """ super().__init__(distribution=distribution) self._mean = ( distribution.mean if self._is_mt else distribution.mean.unsqueeze(-1) ) self._variance = ( distribution.variance if self._is_mt else distribution.variance.unsqueeze(-1) ) self._mixture_mean: Optional[Tensor] = None self._mixture_variance: Optional[Tensor] = None @property def mixture_mean(self) -> Tensor: r"""The posterior mean for the mixture of models.""" if self._mixture_mean is None: self._mixture_mean = self._mean.mean(dim=MCMC_DIM) return self._mixture_mean @property def mixture_variance(self) -> Tensor: r"""The posterior variance for the mixture of models.""" if self._mixture_variance is None: num_mcmc_samples = self.mean.shape[MCMC_DIM] t1 = self._variance.sum(dim=MCMC_DIM) / num_mcmc_samples t2 = self._mean.pow(2).sum(dim=MCMC_DIM) / num_mcmc_samples t3 = -(self._mean.sum(dim=MCMC_DIM) / num_mcmc_samples).pow(2) self._mixture_variance = t1 + t2 + t3 return self._mixture_variance def quantile(self, value: Tensor) -> Tensor: r"""Compute the posterior quantiles for the mixture of models.""" return _quantile(posterior=self, value=value) @property def batch_range(self) -> Tuple[int, int]: r"""The t-batch range. This is used in samplers to identify the t-batch component of the `base_sample_shape`. The base samples are expanded over the t-batches to provide consistency in the acquisition values, i.e., to ensure that a candidate produces same value regardless of its position on the t-batch. """ if self._is_mt: return (0, -2) else: return (0, -1)
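# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the source above): the `mixture_variance`
# property above applies the law of total variance across the MCMC-indexed
# Gaussians,
#   Var_mix = mean_i(var_i) + mean_i(mu_i^2) - (mean_i(mu_i))^2.
# The per-sample means and variances below are made-up toy numbers; the
# Monte Carlo check simply samples from the corresponding mixture.
import torch

torch.manual_seed(0)
mus = torch.tensor([0.2, 0.5, 0.8])  # per-MCMC-sample posterior means
vars_ = torch.tensor([0.1, 0.2, 0.1])  # per-MCMC-sample posterior variances

mixture_mean = mus.mean()
mixture_var = vars_.mean() + (mus**2).mean() - mus.mean() ** 2

# The same quantity, obtained by Monte Carlo over the mixture.
idx = torch.randint(0, 3, (200_000,))
draws = mus[idx] + vars_[idx].sqrt() * torch.randn(200_000)
assert torch.isclose(draws.var(), mixture_var, atol=1e-2)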
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.posteriors.deterministic import DeterministicPosterior from botorch.posteriors.fully_bayesian import FullyBayesianPosterior from botorch.posteriors.gpytorch import GPyTorchPosterior from botorch.posteriors.higher_order import HigherOrderGPPosterior from botorch.posteriors.multitask import MultitaskGPPosterior from botorch.posteriors.posterior import Posterior from botorch.posteriors.posterior_list import PosteriorList from botorch.posteriors.torch import TorchPosterior from botorch.posteriors.transformed import TransformedPosterior __all__ = [ "DeterministicPosterior", "FullyBayesianPosterior", "GPyTorchPosterior", "HigherOrderGPPosterior", "MultitaskGPPosterior", "Posterior", "PosteriorList", "TorchPosterior", "TransformedPosterior", ]
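# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the source above): the classes re-exported
# here live under the `botorch.posteriors` namespace; a toy GPyTorchPosterior
# can be built directly from a gpytorch distribution. Assumes botorch and
# gpytorch are installed; the numbers are made up.
import torch
from botorch.posteriors import GPyTorchPosterior
from gpytorch.distributions import MultivariateNormal

mvn = MultivariateNormal(torch.zeros(3), torch.eye(3))
posterior = GPyTorchPosterior(mvn)
samples = posterior.rsample(torch.Size([8]))  # sample x n x m = 8 x 3 x 1
assert samples.shape == torch.Size([8, 3, 1])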
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

r"""
Abstract base module for all botorch posteriors.
"""

from __future__ import annotations

from functools import cached_property
from typing import Any, List, Optional

import torch
from botorch.posteriors.fully_bayesian import FullyBayesianPosterior, MCMC_DIM
from botorch.posteriors.posterior import Posterior
from torch import Tensor


class PosteriorList(Posterior):
    r"""A Posterior represented by a list of independent Posteriors.

    When at least one of the posteriors is a `FullyBayesianPosterior`, the other
    posteriors are expanded to match the size of the `FullyBayesianPosterior`.
    """

    def __init__(self, *posteriors: Posterior) -> None:
        r"""A Posterior represented by a list of independent Posteriors.

        Args:
            *posteriors: A variable number of single-outcome posteriors.

        Example:
            >>> p_1 = model_1.posterior(test_X)
            >>> p_2 = model_2.posterior(test_X)
            >>> p_12 = PosteriorList(p_1, p_2)

        Note: This is typically produced automatically in `ModelList`; it should
        generally not be necessary for the end user to invoke it manually.
        """
        self.posteriors = list(posteriors)

    @cached_property
    def _is_fully_bayesian(self) -> bool:
        r"""Check if any of the posteriors is a `FullyBayesianPosterior`."""
        return any(isinstance(p, FullyBayesianPosterior) for p in self.posteriors)

    def _get_mcmc_batch_dimension(self) -> int:
        """Return the number of MCMC samples in the corresponding batch dimension."""
        mcmc_samples = [
            p.mean.shape[MCMC_DIM]
            for p in self.posteriors
            if isinstance(p, FullyBayesianPosterior)
        ]
        if len(set(mcmc_samples)) > 1:
            raise NotImplementedError(
                "All MCMC batch dimensions must have the same size, got shapes: "
                f"{mcmc_samples}."
            )
        return mcmc_samples[0]

    @staticmethod
    def _reshape_tensor(X: Tensor, mcmc_samples: int) -> Tensor:
        """Reshape a tensor without an MCMC batch dimension to match the shape."""
        X = X.unsqueeze(MCMC_DIM)
        return X.expand(*X.shape[:MCMC_DIM], mcmc_samples, *X.shape[MCMC_DIM + 1 :])

    def _reshape_and_cat(self, tensors: List[Tensor]):
        r"""Reshape, if needed, and concatenate (across dim=-1) a list of tensors."""
        if self._is_fully_bayesian:
            mcmc_samples = self._get_mcmc_batch_dimension()
            return torch.cat(
                [
                    x
                    if isinstance(p, FullyBayesianPosterior)
                    else self._reshape_tensor(x, mcmc_samples=mcmc_samples)
                    for x, p in zip(tensors, self.posteriors)
                ],
                dim=-1,
            )
        else:
            return torch.cat(tensors, dim=-1)

    @property
    def device(self) -> torch.device:
        r"""The torch device of the posterior."""
        devices = {p.device for p in self.posteriors}
        if len(devices) > 1:
            raise NotImplementedError(  # pragma: no cover
                "Multi-device posteriors are currently not supported. "
                f"The devices of the constituent posteriors are: {devices}."
            )
        return next(iter(devices))

    @property
    def dtype(self) -> torch.dtype:
        r"""The torch dtype of the posterior."""
        dtypes = {p.dtype for p in self.posteriors}
        if len(dtypes) > 1:
            raise NotImplementedError(
                "Multi-dtype posteriors are currently not supported. "
                f"The dtypes of the constituent posteriors are: {dtypes}."
            )
        return next(iter(dtypes))

    def _extended_shape(
        self, sample_shape: torch.Size = torch.Size()  # noqa: B008
    ) -> torch.Size:
        r"""Returns the shape of the samples produced by the posterior with
        the given `sample_shape`.

        If there's at least one `FullyBayesianPosterior`, the MCMC dimension
        is included in the `_extended_shape`.
""" if self._is_fully_bayesian: mcmc_shape = torch.Size([self._get_mcmc_batch_dimension()]) extend_dim = MCMC_DIM + 1 # The dimension to inject MCMC shape. extended_shapes = [] for p in self.posteriors: es = p._extended_shape(sample_shape=sample_shape) if self._is_fully_bayesian and not isinstance(p, FullyBayesianPosterior): # Extend the shapes of non-fully Bayesian ones to match. extended_shapes.append(es[:extend_dim] + mcmc_shape + es[extend_dim:]) else: extended_shapes.append(es) batch_shapes = [es[:-1] for es in extended_shapes] if len(set(batch_shapes)) > 1: raise NotImplementedError( "`PosteriorList` is only supported if the constituent posteriors " f"all have the same `batch_shape`. Batch shapes: {batch_shapes}." ) # Last dimension is the output dimension (concatenation dimension). return batch_shapes[0] + torch.Size([sum(es[-1] for es in extended_shapes)]) @property def mean(self) -> Tensor: r"""The mean of the posterior as a `(b) x n x m`-dim Tensor. This is only supported if all posteriors provide a mean. """ return self._reshape_and_cat(tensors=[p.mean for p in self.posteriors]) @property def variance(self) -> Tensor: r"""The variance of the posterior as a `(b) x n x m`-dim Tensor. This is only supported if all posteriors provide a variance. """ return self._reshape_and_cat(tensors=[p.variance for p in self.posteriors]) def rsample( self, sample_shape: Optional[torch.Size] = None, ) -> Tensor: r"""Sample from the posterior (with gradients). Args: sample_shape: A `torch.Size` object specifying the sample shape. To draw `n` samples, set to `torch.Size([n])`. To draw `b` batches of `n` samples each, set to `torch.Size([b, n])`. base_samples: An (optional) Tensor of `N(0, I)` base samples of appropriate dimension, typically obtained from a `Sampler`. This is used for deterministic optimization. Deprecated. Returns: Samples from the posterior, a tensor of shape `self._extended_shape(sample_shape=sample_shape)`. """ samples = [p.rsample(sample_shape=sample_shape) for p in self.posteriors] return self._reshape_and_cat(tensors=samples) def __getattr__(self, name: str) -> Any: r"""A catch-all for attributes not defined on the posterior level. Raises an attribute error. """ raise AttributeError( f"`PosteriorList` does not define the attribute {name}. " "Consider accessing the attributes of the individual posteriors instead." )
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

import torch
from gpytorch.distributions.multitask_multivariate_normal import (
    MultitaskMultivariateNormal,
)
from torch import Tensor


def _reshape_base_samples_non_interleaved(
    mvn: MultitaskMultivariateNormal, base_samples: Tensor, sample_shape: torch.Size
) -> Tensor:
    r"""Reshape base samples to account for non-interleaved MT-MVNs.

    This method is important for making sure that the `n`th base sample only
    affects the posterior sample for the `p`th point if `p >= n`. Without this
    reshaping, for `m >= 2`, the posterior samples for all `n` points would be
    affected.

    Args:
        mvn: A MultitaskMultivariateNormal distribution.
        base_samples: A `sample_shape x batch_shape x n x m`-dim tensor of
            base_samples.
        sample_shape: The sample shape.

    Returns:
        A `sample_shape x batch_shape x n x m`-dim tensor of base_samples
        suitable for a non-interleaved-multi-task or single-task covariance
        matrix.
    """
    if not mvn._interleaved:
        new_shape = sample_shape + mvn._output_shape[:-2] + mvn._output_shape[:-3:-1]
        base_samples = (
            base_samples.transpose(-1, -2)
            .view(new_shape)
            .reshape(sample_shape + mvn.loc.shape)
            .view(base_samples.shape)
        )
    return base_samples
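# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the source above): for a non-interleaved
# multitask MVN the joint event is ordered task-major (all points of task 1,
# then task 2, ...), whereas base samples arrive point-major as `n x m`, so
# the helper above rearranges them accordingly. Assumes botorch and gpytorch
# are installed; the toy sizes below are made up.
import torch
from botorch.posteriors.base_samples import _reshape_base_samples_non_interleaved
from gpytorch.distributions import MultitaskMultivariateNormal

n, m = 4, 2
mvn = MultitaskMultivariateNormal(
    torch.zeros(n, m), torch.eye(n * m), interleaved=False
)

sample_shape = torch.Size([3])
base = torch.randn(*sample_shape, n, m)
reshaped = _reshape_base_samples_non_interleaved(
    mvn=mvn, base_samples=base, sample_shape=sample_shape
)
assert reshaped.shape == base.shape  # same shape, entries reordered per task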