import os
import threading
from datetime import datetime

import torch
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.nn as nn
from torch import optim

import torchvision

batch_size = 20
image_w = 64
image_h = 64
num_classes = 30
batch_update_size = 5
num_batches = 6


def timed_log(text):
    print(f"{datetime.now().strftime('%H:%M:%S')} {text}")


class BatchUpdateParameterServer(object):

    def __init__(self, batch_update_size=batch_update_size):
        self.model = torchvision.models.resnet50(num_classes=num_classes)
        self.lock = threading.Lock()
        self.future_model = torch.futures.Future()
        self.batch_update_size = batch_update_size
        self.curr_update_size = 0
        self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
        for p in self.model.parameters():
            p.grad = torch.zeros_like(p)

    def get_model(self):
        return self.model

    @staticmethod
    @rpc.functions.async_execution
    def update_and_fetch_model(ps_rref, grads):
        self = ps_rref.local_value()
        timed_log(f"PS got {self.curr_update_size}/{batch_update_size} updates")
        for p, g in zip(self.model.parameters(), grads):
            p.grad += g
        with self.lock:
            self.curr_update_size += 1
            fut = self.future_model

            if self.curr_update_size >= self.batch_update_size:
                for p in self.model.parameters():
                    p.grad /= self.batch_update_size
                self.curr_update_size = 0
                self.optimizer.step()
                self.optimizer.zero_grad(set_to_none=False)
                fut.set_result(self.model)
                timed_log("PS updated model")
                self.future_model = torch.futures.Future()

        return fut


class Trainer(object):

    def __init__(self, ps_rref):
        self.ps_rref = ps_rref
        self.loss_fn = nn.MSELoss()
        self.one_hot_indices = torch.LongTensor(batch_size) \
                                    .random_(0, num_classes) \
                                    .view(batch_size, 1)

    def get_next_batch(self):
        for _ in range(num_batches):
            inputs = torch.randn(batch_size, 3, image_w, image_h)
            labels = torch.zeros(batch_size, num_classes) \
                          .scatter_(1, self.one_hot_indices, 1)
            yield inputs.cuda(), labels.cuda()

    def train(self):
        name = rpc.get_worker_info().name
        m = self.ps_rref.rpc_sync().get_model().cuda()
        for inputs, labels in self.get_next_batch():
            timed_log(f"{name} processing one batch")
            self.loss_fn(m(inputs), labels).backward()
            timed_log(f"{name} reporting grads")
            m = rpc.rpc_sync(
                self.ps_rref.owner(),
                BatchUpdateParameterServer.update_and_fetch_model,
                args=(self.ps_rref, [p.grad for p in m.cpu().parameters()]),
            ).cuda()
            timed_log(f"{name} got updated model")


def run_trainer(ps_rref):
    trainer = Trainer(ps_rref)
    trainer.train()


def run_ps(trainers):
    timed_log("Start training")
    ps_rref = rpc.RRef(BatchUpdateParameterServer())
    futs = []
    for trainer in trainers:
        futs.append(
            rpc.rpc_async(trainer, run_trainer, args=(ps_rref,))
        )

    torch.futures.wait_all(futs)
    timed_log("Finish training")


def run(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '29500'
    options = rpc.TensorPipeRpcBackendOptions(
        num_worker_threads=16,
        rpc_timeout=0  # infinite timeout
    )
    if rank != 0:
        rpc.init_rpc(
            f"trainer{rank}",
            rank=rank,
            world_size=world_size,
            rpc_backend_options=options
        )
        # trainer passively waiting for ps to kick off training iterations
    else:
        rpc.init_rpc(
            "ps",
            rank=rank,
            world_size=world_size,
            rpc_backend_options=options
        )
        run_ps([f"trainer{r}" for r in range(1, world_size)])

    # block until all rpcs finish
    rpc.shutdown()


if __name__ == "__main__":
    world_size = batch_update_size + 1
    mp.spawn(run, args=(world_size, ), nprocs=world_size, join=True)

# ---------------------------------------------------------------------------

import argparse
import os
from threading import Lock

import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.distributed.optim import DistributedOptimizer
from torchvision import datasets, transforms


# --------- MNIST Network to train, from pytorch/examples -----
class Net(nn.Module):
    def __init__(self, num_gpus=0):
        super(Net, self).__init__()
        print(f"Using {num_gpus} GPUs to train")
        self.num_gpus = num_gpus
        device = torch.device(
            "cuda:0" if torch.cuda.is_available() and self.num_gpus > 0 else "cpu")
        print(f"Putting first 2 convs on {str(device)}")
        # Put conv layers on the first cuda device
        self.conv1 = nn.Conv2d(1, 32, 3, 1).to(device)
        self.conv2 = nn.Conv2d(32, 64, 3, 1).to(device)
        # Put rest of the network on the 2nd cuda device, if there is one
        if "cuda" in str(device) and num_gpus > 1:
            device = torch.device("cuda:1")

        print(f"Putting rest of layers on {str(device)}")
        self.dropout1 = nn.Dropout2d(0.25).to(device)
        self.dropout2 = nn.Dropout2d(0.5).to(device)
        self.fc1 = nn.Linear(9216, 128).to(device)
        self.fc2 = nn.Linear(128, 10).to(device)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)

        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        # Move tensor to next device if necessary
        next_device = next(self.fc1.parameters()).device
        x = x.to(next_device)

        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output


# --------- Helper Methods --------------------

# On the local node, call a method with first arg as the value held by the
# RRef. Other args are passed in as arguments to the function called.
# Useful for calling instance methods.
def call_method(method, rref, *args, **kwargs):
    return method(rref.local_value(), *args, **kwargs)


# Given an RRef, return the result of calling the passed in method on the value
# held by the RRef. This call is done on the remote node that owns
# the RRef. args and kwargs are passed into the method.
# Example: If the value held by the RRef is of type Foo, then
# remote_method(Foo.bar, rref, arg1, arg2) is equivalent to calling
# <foo_instance>.bar(arg1, arg2) on the remote node and getting the result
# back.
def remote_method(method, rref, *args, **kwargs):
    args = [method, rref] + list(args)
    return rpc.rpc_sync(rref.owner(), call_method, args=args, kwargs=kwargs)


# --------- Parameter Server --------------------
class ParameterServer(nn.Module):
    def __init__(self, num_gpus=0):
        super().__init__()
        model = Net(num_gpus=num_gpus)
        self.model = model
        self.input_device = torch.device(
            "cuda:0" if torch.cuda.is_available() and num_gpus > 0 else "cpu")

    def forward(self, inp):
        inp = inp.to(self.input_device)
        out = self.model(inp)
        # This output is forwarded over RPC, which as of 1.5.0 only accepts CPU tensors.
        # Tensors must be moved in and out of GPU memory due to this.
        out = out.to("cpu")
        return out

    # Use dist autograd to retrieve gradients accumulated for this model.
    # Primarily used for verification.
    def get_dist_gradients(self, cid):
        grads = dist_autograd.get_gradients(cid)
        # This output is forwarded over RPC, which as of 1.5.0 only accepts CPU tensors.
        # Tensors must be moved in and out of GPU memory due to this.
        cpu_grads = {}
        for k, v in grads.items():
            k_cpu, v_cpu = k.to("cpu"), v.to("cpu")
            cpu_grads[k_cpu] = v_cpu
        return cpu_grads

    # Wrap local parameters in a RRef. Needed for building the
    # DistributedOptimizer which optimizes parameters remotely.
    def get_param_rrefs(self):
        param_rrefs = [rpc.RRef(param) for param in self.model.parameters()]
        return param_rrefs


param_server = None
global_lock = Lock()


def get_parameter_server(num_gpus=0):
    global param_server
    # Ensure that we get only one handle to the ParameterServer.
    with global_lock:
        if not param_server:
            # construct it once
            param_server = ParameterServer(num_gpus=num_gpus)
        return param_server


def run_parameter_server(rank, world_size):
    # The parameter server just acts as a host for the model and responds to
    # requests from trainers, hence it does not need to run a loop.
    # rpc.shutdown() will wait for all workers to complete by default, which
    # in this case means that the parameter server will wait for all trainers
    # to complete, and then exit.
    print("PS master initializing RPC")
    rpc.init_rpc(name="parameter_server", rank=rank, world_size=world_size)
    print("RPC initialized! Running parameter server...")
    rpc.shutdown()
    print("RPC shutdown on parameter server.")


# --------- Trainers --------------------

# nn.Module corresponding to the network trained by this trainer. The
# forward() method simply invokes the network on the given parameter
# server.
class TrainerNet(nn.Module):
    def __init__(self, num_gpus=0):
        super().__init__()
        self.num_gpus = num_gpus
        self.param_server_rref = rpc.remote(
            "parameter_server", get_parameter_server, args=(num_gpus,))

    def get_global_param_rrefs(self):
        remote_params = remote_method(
            ParameterServer.get_param_rrefs,
            self.param_server_rref)
        return remote_params

    def forward(self, x):
        model_output = remote_method(
            ParameterServer.forward, self.param_server_rref, x)
        return model_output


def run_training_loop(rank, num_gpus, train_loader, test_loader):
    # Runs the typical neural network forward + backward + optimizer step, but
    # in a distributed fashion.
    net = TrainerNet(num_gpus=num_gpus)
    # Build DistributedOptimizer.
    param_rrefs = net.get_global_param_rrefs()
    opt = DistributedOptimizer(optim.SGD, param_rrefs, lr=0.03)

    for i, (data, target) in enumerate(train_loader):
        with dist_autograd.context() as cid:
            model_output = net(data)
            target = target.to(model_output.device)
            loss = F.nll_loss(model_output, target)
            if i % 5 == 0:
                print(f"Rank {rank} training batch {i} loss {loss.item()}")
            dist_autograd.backward(cid, [loss])
            # Ensure that dist autograd ran successfully and gradients were
            # returned.
            assert remote_method(
                ParameterServer.get_dist_gradients,
                net.param_server_rref,
                cid) != {}
            opt.step(cid)

    print("Training complete!")
    print("Getting accuracy....")
    get_accuracy(test_loader, net)


def get_accuracy(test_loader, model):
    model.eval()
    correct_sum = 0
    # Use GPU to evaluate if possible
    device = torch.device(
        "cuda:0" if model.num_gpus > 0 and torch.cuda.is_available() else "cpu")
    with torch.no_grad():
        for i, (data, target) in enumerate(test_loader):
            out = model(data)
            pred = out.argmax(dim=1, keepdim=True)
            pred, target = pred.to(device), target.to(device)
            correct = pred.eq(target.view_as(pred)).sum().item()
            correct_sum += correct

    print(f"Accuracy {correct_sum / len(test_loader.dataset)}")


# Main loop for trainers.
def run_worker(rank, world_size, num_gpus, train_loader, test_loader):
    print(f"Worker rank {rank} initializing RPC")
    rpc.init_rpc(
        name=f"trainer_{rank}",
        rank=rank,
        world_size=world_size)

    print(f"Worker {rank} done initializing RPC")

    run_training_loop(rank, num_gpus, train_loader, test_loader)
    rpc.shutdown()


# --------- Launcher --------------------
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Parameter-Server RPC based training")
    parser.add_argument(
        "--world_size",
        type=int,
        default=4,
        help="""Total number of participating processes. Should be the sum of
        master node and all training nodes.""")
    parser.add_argument(
        "--rank",
        type=int,
        default=None,
        help="Global rank of this process. Pass in 0 for master.")
    parser.add_argument(
        "--num_gpus",
        type=int,
        default=0,
        help="""Number of GPUs to use for training, currently supports between
        0 and 2 GPUs. Note that this argument will be passed to the parameter
        servers.""")
    parser.add_argument(
        "--master_addr",
        type=str,
        default="localhost",
        help="""Address of master, will default to localhost if not provided.
        Master must be able to accept network traffic on the address + port.""")
    parser.add_argument(
        "--master_port",
        type=str,
        default="29500",
        help="""Port that master is listening on, will default to 29500 if not
        provided. Master must be able to accept network traffic on the host and
        port.""")

    args = parser.parse_args()
    assert args.rank is not None, "must provide rank argument."
    assert args.num_gpus <= 2, f"Only 0-2 GPUs currently supported (got {args.num_gpus})."
    os.environ['MASTER_ADDR'] = args.master_addr
    os.environ['MASTER_PORT'] = args.master_port

    processes = []
    world_size = args.world_size
    # Note that Linux uses "fork" by default, which may cause deadlock.
    # Besides, cuda doesn't support "fork" and Windows only supports "spawn"
    mp.set_start_method("spawn")
    if args.rank == 0:
        p = mp.Process(target=run_parameter_server, args=(0, world_size))
        p.start()
        processes.append(p)
    else:
        # Get data to train on
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=32, shuffle=True)

        # start training worker on this node
        p = mp.Process(
            target=run_worker,
            args=(
                args.rank, world_size, args.num_gpus,
                train_loader, test_loader))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()

# ---------------------------------------------------------------------------

import argparse
import os
from itertools import count

import gymnasium as gym
import numpy as np
import torch
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributed.rpc import RRef, rpc_sync, rpc_async, remote
from torch.distributions import Categorical

TOTAL_EPISODE_STEP = 5000
AGENT_NAME = "agent"
OBSERVER_NAME = "observer{}"

parser = argparse.ArgumentParser(description='PyTorch RPC RL example')
parser.add_argument('--world-size', type=int, default=2, metavar='W',
                    help='world size for RPC, rank 0 is the agent, others are observers')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                    help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
                    help='random seed (default: 543)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='interval between training status logs (default: 10)')
args = parser.parse_args()

torch.manual_seed(args.seed)


def _call_method(method, rref, *args, **kwargs):
    r"""
    a helper function to call a method on the given RRef
    """
    return method(rref.local_value(), *args, **kwargs)


def _remote_method(method, rref, *args, **kwargs):
    r"""
    a helper function to run method on the owner of rref and fetch back the
    result using RPC
    """
    args = [method, rref] + list(args)
    return rpc_sync(rref.owner(), _call_method, args=args, kwargs=kwargs)


class Policy(nn.Module):
    r"""
    Borrowing the ``Policy`` class from the Reinforcement Learning example.
    Copying the code to make these two examples independent.
    See https://github.com/pytorch/examples/tree/main/reinforcement_learning
    """
    def __init__(self):
        super(Policy, self).__init__()
        self.affine1 = nn.Linear(4, 128)
        self.dropout = nn.Dropout(p=0.6)
        self.affine2 = nn.Linear(128, 2)

        self.saved_log_probs = []
        self.rewards = []

    def forward(self, x):
        x = self.affine1(x)
        x = self.dropout(x)
        x = F.relu(x)
        action_scores = self.affine2(x)
        return F.softmax(action_scores, dim=1)


class Observer:
    r"""
    An observer has exclusive access to its own environment. Each observer
    captures the state from its environment, and sends the state to the agent
    to select an action. Then, the observer applies the action to its
    environment and reports the reward to the agent.

    It is true that CartPole-v1 is a relatively inexpensive environment, and
    it might be an overkill to use RPC to connect observers and trainers in
    this specific use case. However, the main goal of this tutorial is to show
    how to build an application using the RPC API. Developers can extend the
    similar idea to other applications with much more expensive environments.
    """
    def __init__(self):
        self.id = rpc.get_worker_info().id
        self.env = gym.make('CartPole-v1')
        self.env.reset(seed=args.seed)

    def run_episode(self, agent_rref, n_steps):
        r"""
        Run one episode of n_steps.

        Args:
            agent_rref (RRef): an RRef referencing the agent object.
            n_steps (int): number of steps in this episode
        """
        state, ep_reward = self.env.reset()[0], 0
        for step in range(n_steps):
            # send the state to the agent to get an action
            action = _remote_method(Agent.select_action, agent_rref, self.id, state)

            # apply the action to the environment, and get the reward
            state, reward, terminated, truncated, _ = self.env.step(action)

            # report the reward to the agent for training purpose
            _remote_method(Agent.report_reward, agent_rref, self.id, reward)

            if terminated or truncated:
                break


class Agent:
    def __init__(self, world_size):
        self.ob_rrefs = []
        self.agent_rref = RRef(self)
        self.rewards = {}
        self.saved_log_probs = {}
        self.policy = Policy()
        self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
        self.eps = np.finfo(np.float32).eps.item()
        self.running_reward = 0
        self.reward_threshold = gym.make('CartPole-v1').spec.reward_threshold
        for ob_rank in range(1, world_size):
            ob_info = rpc.get_worker_info(OBSERVER_NAME.format(ob_rank))
            self.ob_rrefs.append(remote(ob_info, Observer))
            self.rewards[ob_info.id] = []
            self.saved_log_probs[ob_info.id] = []

    def select_action(self, ob_id, state):
        r"""
        This function is mostly borrowed from the Reinforcement Learning
        example. See
        https://github.com/pytorch/examples/tree/main/reinforcement_learning
        The main difference is that instead of keeping all probs in one list,
        the agent keeps probs in a dictionary, one key per observer.

        NB: no need to enforce thread-safety here as GIL will serialize
        executions.
        """
        state = torch.from_numpy(state).float().unsqueeze(0)
        probs = self.policy(state)
        m = Categorical(probs)
        action = m.sample()
        self.saved_log_probs[ob_id].append(m.log_prob(action))
        return action.item()

    def report_reward(self, ob_id, reward):
        r"""
        Observers call this function to report rewards.
        """
        self.rewards[ob_id].append(reward)

    def run_episode(self, n_steps=0):
        r"""
        Run one episode. The agent will tell each observer to run n_steps.
        """
        futs = []
        for ob_rref in self.ob_rrefs:
            # make async RPC to kick off an episode on all observers
            futs.append(
                rpc_async(
                    ob_rref.owner(),
                    _call_method,
                    args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps)
                )
            )

        # wait until all observers have finished this episode
        for fut in futs:
            fut.wait()

    def finish_episode(self):
        r"""
        This function is mostly borrowed from the Reinforcement Learning
        example. See
        https://github.com/pytorch/examples/tree/main/reinforcement_learning
        The main difference is that it joins all probs and rewards from
        different observers into one list, and uses the minimum observer
        reward as the reward of the current episode.
        """

        # joins probs and rewards from different observers into lists
        R, probs, rewards = 0, [], []
        for ob_id in self.rewards:
            probs.extend(self.saved_log_probs[ob_id])
            rewards.extend(self.rewards[ob_id])

        # use the minimum observer reward to calculate the running reward
        min_reward = min([sum(self.rewards[ob_id]) for ob_id in self.rewards])
        self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward

        # clear saved probs and rewards
        for ob_id in self.rewards:
            self.rewards[ob_id] = []
            self.saved_log_probs[ob_id] = []

        policy_loss, returns = [], []
        for r in rewards[::-1]:
            R = r + args.gamma * R
            returns.insert(0, R)
        returns = torch.tensor(returns)
        returns = (returns - returns.mean()) / (returns.std() + self.eps)
        for log_prob, R in zip(probs, returns):
            policy_loss.append(-log_prob * R)
        self.optimizer.zero_grad()
        policy_loss = torch.cat(policy_loss).sum()
        policy_loss.backward()
        self.optimizer.step()
        return min_reward


def run_worker(rank, world_size):
    r"""
    This is the entry point for all processes. The rank 0 is the agent. All
    other ranks are observers.
    """
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '29500'
    if rank == 0:
        # rank0 is the agent
        rpc.init_rpc(AGENT_NAME, rank=rank, world_size=world_size)

        agent = Agent(world_size)
        for i_episode in count(1):
            n_steps = int(TOTAL_EPISODE_STEP / (args.world_size - 1))
            agent.run_episode(n_steps=n_steps)
            last_reward = agent.finish_episode()

            if i_episode % args.log_interval == 0:
                print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format(
                    i_episode, last_reward, agent.running_reward))
            if agent.running_reward > agent.reward_threshold:
                print("Solved! Running reward is now {}!".format(agent.running_reward))
                break
    else:
        # other ranks are the observer
        rpc.init_rpc(OBSERVER_NAME.format(rank), rank=rank, world_size=world_size)
        # observers passively waiting for instructions from agents

    # block until all rpcs finish
    rpc.shutdown()


def main():
    mp.spawn(
        run_worker,
        args=(args.world_size, ),
        nprocs=args.world_size,
        join=True
    )


if __name__ == '__main__':
    main()

# ---------------------------------------------------------------------------

import os

import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer

import rnn


def _run_trainer():
    r"""
    The trainer creates a distributed RNNModel and a DistributedOptimizer.
    Then, it performs training using random input data.
    """
    batch = 5
    ntoken = 7
    ninp = 2

    nhid = 3
    nindices = 6
    nlayers = 4
    hidden = (
        torch.randn(nlayers, nindices, nhid),
        torch.randn(nlayers, nindices, nhid)
    )

    model = rnn.RNNModel('ps', ntoken, ninp, nhid, nlayers)

    # setup distributed optimizer
    opt = DistributedOptimizer(
        optim.SGD,
        model.parameter_rrefs(),
        lr=0.05,
    )

    criterion = torch.nn.CrossEntropyLoss()

    def get_next_batch():
        for _ in range(5):
            data = torch.LongTensor(batch, nindices) % ntoken
            target = torch.LongTensor(batch, ntoken) % nindices
            yield data, target

    # train for 10 iterations
    for epoch in range(10):
        for data, target in get_next_batch():
            # create distributed autograd context
            with dist_autograd.context() as context_id:
                hidden[0].detach_()
                hidden[1].detach_()
                output, hidden = model(data, hidden)
                loss = criterion(output, target)
                # run distributed backward pass
                dist_autograd.backward(context_id, [loss])
                # run distributed optimizer
                opt.step(context_id)
                # not necessary to zero grads as each iteration creates a different
                # distributed autograd context which hosts different grads
        print("Training epoch {}".format(epoch))


def run_worker(rank, world_size):
    r"""
    A wrapper function that initializes RPC, calls the function, and shuts
    down RPC.
    """
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '29500'
    if rank == 1:
        rpc.init_rpc("trainer", rank=rank, world_size=world_size)
        _run_trainer()
    else:
        rpc.init_rpc("ps", rank=rank, world_size=world_size)
        # parameter server does nothing
        pass

    # block until all rpcs finish
    rpc.shutdown()


if __name__ == "__main__":
    world_size = 2
    mp.spawn(run_worker, args=(world_size, ), nprocs=world_size, join=True)

# ---------------------------------------------------------------------------
# rnn.py: the `rnn` module imported by the trainer above

import torch
import torch.nn as nn
import torch.distributed.rpc as rpc
from torch.distributed.rpc import RRef


def _call_method(method, rref, *args, **kwargs):
    r"""
    a helper function to call a method on the given RRef
    """
    return method(rref.local_value(), *args, **kwargs)


def _remote_method(method, rref, *args, **kwargs):
    r"""
    a helper function to run method on the owner of rref and fetch back the
    result using RPC
    """
    return rpc.rpc_sync(
        rref.owner(),
        _call_method,
        args=[method, rref] + list(args),
        kwargs=kwargs
    )


def _parameter_rrefs(module):
    r"""
    Create one RRef for each parameter in the given local module, and return a
    list of RRefs.
    """
    param_rrefs = []
    for param in module.parameters():
        param_rrefs.append(RRef(param))
    return param_rrefs


class EmbeddingTable(nn.Module):
    r"""
    Encoding layers of the RNNModel
    """
    def __init__(self, ntoken, ninp, dropout):
        super(EmbeddingTable, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        if torch.cuda.is_available():
            self.encoder = self.encoder.cuda()
        nn.init.uniform_(self.encoder.weight, -0.1, 0.1)

    def forward(self, input):
        if torch.cuda.is_available():
            input = input.cuda()
        return self.drop(self.encoder(input)).cpu()


class Decoder(nn.Module):
    r"""
    Decoding layers of the RNNModel
    """
    def __init__(self, ntoken, nhid, dropout):
        super(Decoder, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.decoder = nn.Linear(nhid, ntoken)
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -0.1, 0.1)

    def forward(self, output):
        return self.decoder(self.drop(output))


class RNNModel(nn.Module):
    r"""
    A distributed RNN model which puts embedding table and decoder parameters
    on a remote parameter server, and locally holds parameters for the LSTM
    module. The structure of the RNN model is borrowed from the word language
    model example. See
    https://github.com/pytorch/examples/blob/main/word_language_model/model.py
    """
    def __init__(self, ps, ntoken, ninp, nhid, nlayers, dropout=0.5):
        super(RNNModel, self).__init__()

        # setup embedding table remotely
        self.emb_table_rref = rpc.remote(ps, EmbeddingTable, args=(ntoken, ninp, dropout))
        # setup LSTM locally
        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)
        # setup decoder remotely
        self.decoder_rref = rpc.remote(ps, Decoder, args=(ntoken, nhid, dropout))

    def forward(self, input, hidden):
        # pass input to the remote embedding table and fetch emb tensor back
        emb = _remote_method(EmbeddingTable.forward, self.emb_table_rref, input)
        output, hidden = self.rnn(emb, hidden)
        # pass output to the remote decoder and get the decoded output back
        decoded = _remote_method(Decoder.forward, self.decoder_rref, output)
        return decoded, hidden

    def parameter_rrefs(self):
        remote_params = []
        # get RRefs of embedding table
        remote_params.extend(_remote_method(_parameter_rrefs, self.emb_table_rref))
        # create RRefs for local parameters
        remote_params.extend(_parameter_rrefs(self.rnn))
        # get RRefs of decoder
        remote_params.extend(_remote_method(_parameter_rrefs, self.decoder_rref))
        return remote_params

# ---------------------------------------------------------------------------

import torch
from torch.fx import symbolic_trace, replace_pattern


'''
How to Use the FX Subgraph Rewriter

For easy subgraph rewriting, FX exposes the utility function:

    replace_pattern(gm : GraphModule, pattern : Callable, replacement : Callable) -> None

`replace_pattern` matches all possible non-overlapping sets of operators and
their data dependencies (`pattern`) in the Graph of a GraphModule (`gm`), then
replaces each of these matched subgraphs with another subgraph (`replacement`).

The docstring for `replace_pattern` (located in `subgraph_rewriter.py`) gives
an in-depth explanation as to how `pattern` and `replacement` should be
specified, what happens during pattern matching, and other important technical
details. This tutorial, therefore, is only meant to give an overview as to the
FX Subgraph Rewriter's basic functionality. Let's go rewrite a Graph!
'''

# Sample module
class M(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, w1, w2):
        val1 = torch.neg(w1)
        m1 = torch.cat([val1, w2]).sum()
        val2 = torch.neg(w1)
        m2 = torch.cat([val2, w2]).sum()
        return x + torch.max(m1) + torch.max(m2)

# Symbolically trace an instance of `M`
traced = symbolic_trace(M())

# Define the pattern. The FX Subgraph Rewriter will match all
# non-overlapping instances of the pattern in the larger graph.
# Note that pattern matching is done based on data dependencies,
# not Node names. Even though we're operating on Nodes named `a1` and
# `a2` instead of `w1` and `w2`, the pattern is still a valid match
# for the two instances of `torch.cat([val, w2]).sum()` above. Only
# operations that contribute to the single output value of the pattern
# are considered.
def pattern(a1, a2):
    val1 = torch.neg(a1)
    return torch.cat([val1, a2]).sum()

# Define the replacement (same rules as the pattern)
def replacement(w1, w2):
    return torch.stack([w1, w2])

# Replace `pattern` with `replacement` in `traced`
replace_pattern(traced, pattern, replacement)

# After calling `replace_pattern`, the generated code is:
'''
def forward(self, x, w1, w2):
    stack = torch.stack([w1, w2])
    max_1 = torch.max(stack);  stack = None
    add = x + max_1;  x = max_1 = None
    stack_1 = torch.stack([w1, w2]);  w1 = w2 = None
    max_2 = torch.max(stack_1);  stack_1 = None
    add_1 = add + max_2;  add = max_2 = None
    return add_1
'''
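
# A quick sanity check (added here; not part of the original example). It
# recomputes what the rewritten graph should produce, per the generated code
# above, and compares against the module's output. The input shapes are
# arbitrary assumptions; any 1-D tensors of equal length would do.
x, w1, w2 = torch.randn(3), torch.randn(3), torch.randn(3)
expected = x + torch.max(torch.stack([w1, w2])) + torch.max(torch.stack([w1, w2]))
torch.testing.assert_close(traced(x, w1, w2), expected)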
# ---------------------------------------------------------------------------

import torch
from torch.fx import symbolic_trace, Tracer, Graph, GraphModule, Node
from typing import Any, Callable, Dict, Optional, Tuple, Union


"""
How to Create and Use Custom Tracers

`Tracer`--the class that implements the symbolic tracing functionality of
`torch.fx.symbolic_trace`--can be subclassed to override various behaviors of
the tracing process. In this tutorial, we'll demonstrate how to customize the
symbolic tracing process using some handwritten Tracers. Each example will
show that, by simply overriding a few methods in the `Tracer` class, you can
alter the Graph produced by symbolic tracing. For a complete description of
the methods that can be changed, refer to the docstrings of the methods in the
Tracer class. Information can be found at:
https://pytorch.org/docs/master/fx.html#torch.fx.Tracer

If you want a real-world example of a custom tracer, check out FX's AST
Rewriter in `rewriter.py`. `RewritingTracer` inherits from Tracer but
overrides the `trace` function so that we can rewrite all calls to `assert` to
the more FX-friendly `torch.assert`.

Note that a call to `symbolic_trace(m)` is equivalent to
`GraphModule(m, Tracer().trace(m))`. (`Tracer` is the default implementation
of Tracer as defined in `symbolic_trace.py`.)
"""

"""
Custom Tracer #1: Trace Through All `torch.nn.ReLU` Submodules

During symbolic tracing, some submodules are traced through and their
constituent ops are recorded; other submodules appear as an atomic
"call_module" Node in the IR. A module in this latter category is called a
"leaf module". By default, all modules in the PyTorch standard library
(`torch.nn`) are leaf modules. We can change this by creating a custom Tracer
and overriding `is_leaf_module`. In this case, we'll keep the default behavior
for all `torch.nn` Modules except for `ReLU`.
"""

class M1(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        return self.relu(x)

default_traced: GraphModule = symbolic_trace(M1())
"""
Tracing with the default tracer and calling `print_tabular` produces:

    opcode       name    target    args       kwargs
    -----------  ------  --------  ---------  --------
    placeholder  x       x         ()         {}
    call_module  relu_1  relu      (x,)       {}
    output       output  output    (relu_1,)  {}
"""
default_traced.graph.print_tabular()

class LowerReluTracer(Tracer):
    def is_leaf_module(self, m : torch.nn.Module, qualname : str):
        if isinstance(m, torch.nn.ReLU):
            return False
        return super().is_leaf_module(m, qualname)

"""
Tracing with our custom tracer and calling `print_tabular` produces:

    opcode         name    target                             args       kwargs
    -------------  ------  ---------------------------------  ---------  ------------------
    placeholder    x       x                                  ()         {}
    call_function  relu_1  <function relu at 0x7f66f7170b80>  (x,)       {'inplace': False}
    output         output  output                             (relu_1,)  {}
"""
lower_relu_tracer = LowerReluTracer()
custom_traced_graph: Graph = lower_relu_tracer.trace(M1())
custom_traced_graph.print_tabular()


"""
Custom Tracer #2: Add an Extra Attribute to Each Node

Here, we'll override `create_node` so that we can add a new attribute to each
Node during its creation
"""

class M2(torch.nn.Module):
    def forward(self, a, b):
        return a + b

class TaggingTracer(Tracer):
    def create_node(self, kind : str, target : Union[str, Callable],
                    args : Tuple[Any], kwargs : Dict[str, Any],
                    name : Optional[str] = None,
                    type_expr : Optional[Any] = None) -> Node:
        n = super().create_node(kind, target, args, kwargs, name)
        n.tag = "foo"
        return n

custom_traced_graph: Graph = TaggingTracer().trace(M2())

def assert_all_nodes_have_tags(g: Graph) -> bool:
    for n in g.nodes:
        if not hasattr(n, "tag") or not n.tag == "foo":
            return False
    return True

# Prints "True"
print(assert_all_nodes_have_tags(custom_traced_graph))

# ---------------------------------------------------------------------------

import torch
from torch.fx import symbolic_trace
import operator

"""
How to Replace One Op With Another

1. Iterate through all Nodes in your GraphModule's Graph.
2. Determine if the current Node should be replaced. (Suggested: match
   on the Node's ``target`` attribute).
3. Create a replacement Node and add it to the Graph.
4. Use the FX built-in ``replace_all_uses_with`` to replace all uses of
   the current Node with the replacement.
5. Delete the old Node from the graph.
6. Call ``recompile`` on the GraphModule. This updates the generated
   Python code to reflect the new Graph state.

Currently, FX does not provide any way to guarantee that replaced operators
are syntactically valid. It's up to the user to confirm that any new operators
will work with the existing operands.

The following code demonstrates an example of replacing any instance of
addition with a bitwise AND.

To examine how the Graph evolves during op replacement, add the statement
`print(traced.graph)` after the line you want to inspect. Alternatively, call
`traced.graph.print_tabular()` to see the IR in a tabular format.
"""

# Sample module
class M(torch.nn.Module):
    def forward(self, x, y):
        return x + y, torch.add(x, y), x.add(y)

# Symbolically trace an instance of the module
traced = symbolic_trace(M())

# As demonstrated in the above example, there are several different ways
# to denote addition. The possible cases are:
#     1. `x + y` - A `call_function` Node with target `operator.add`.
#        We can match for equality on that `operator.add` directly.
#     2. `torch.add(x, y)` - A `call_function` Node with target
#        `torch.add`. Similarly, we can match this function directly.
#     3. `x.add(y)` - The Tensor method call, whose target we can match
#        as a string.
patterns = set([operator.add, torch.add, "add"])

# Go through all the nodes in the Graph
for n in traced.graph.nodes:
    # If the target matches one of the patterns
    if any(n.target == pattern for pattern in patterns):
        # Set the insert point, add the new node, and replace all uses
        # of `n` with the new node
        with traced.graph.inserting_after(n):
            new_node = traced.graph.call_function(torch.bitwise_and, n.args, n.kwargs)
            n.replace_all_uses_with(new_node)
        # Remove the old node from the graph
        traced.graph.erase_node(n)

# Don't forget to recompile!
traced.recompile()
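
# A quick sanity check (added here; not part of the original example).
# `torch.bitwise_and` only accepts integer or boolean tensors, so we feed the
# rewritten module integer inputs; the values below are arbitrary.
a, b = torch.tensor([12, 7, 5]), torch.tensor([10, 3, 4])
for out in traced(a, b):
    # All three former additions should now compute the bitwise AND
    assert torch.equal(out, a & b)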
# ---------------------------------------------------------------------------

from enum import Enum, auto
from typing import Optional

import torch
from torch.fx import GraphModule, Node, Proxy, symbolic_trace

'''
Wrap Graph Output Dynamically

The following code demonstrates how to change an existing Graph based on
parameters specified at runtime. We'll let the user specify an activation
function from a predefined Enum list, then we'll symbolically trace it. Next,
we'll create a Proxy from the last operation in the Graph. We'll call our
traced activation function with this Proxy and insert the ``output`` Node from
that call into our Graph. (This final step will automatically inline the
entire traced function.)
'''

# Sample module
class M(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        y = torch.cat([x, y])
        return y

# Symbolically trace an instance of `M`
traced = symbolic_trace(M())

# Selected activation functions
class ActivationFunction(Enum):
    RELU = auto()
    LEAKY_RELU = auto()
    PRELU = auto()

# Map activation function names to their implementation
activation_functions = {
    ActivationFunction.RELU: torch.nn.ReLU(),
    ActivationFunction.LEAKY_RELU: torch.nn.LeakyReLU(),
    ActivationFunction.PRELU: torch.nn.PReLU(),
}

def wrap_in_activation_function(m: GraphModule, fn: ActivationFunction) -> GraphModule:
    # Get output node
    output_node: Optional[Node] = None
    for n in reversed(m.graph.nodes):
        if n.op == "output":
            output_node = n
            break
    assert output_node

    # Get the actual output (the "input" of the output node). This is
    # the Node we want to wrap in a user-specified activation function
    assert len(output_node.all_input_nodes) == 1
    wrap_node = output_node.all_input_nodes[0]

    # Wrap the actual output in a Proxy
    wrap_proxy = Proxy(wrap_node)

    # Get the implementation of the specified activation function and
    # symbolically trace it
    fn_impl = activation_functions[fn]
    fn_impl_traced = symbolic_trace(fn_impl)

    # Call the specified activation function using the Proxy wrapper for
    # `wrap_node`. The result of this call is another Proxy, which we
    # can hook into our existing Graph.
    with m.graph.inserting_after(wrap_node):
        fn_impl_output_node = fn_impl_traced(wrap_proxy)
        new_args = (fn_impl_output_node.node,)
        output_node.args = new_args

    m.recompile()


# Example call
x, y = torch.randn(5, 3), torch.randn(5, 3)
orig_output = traced(x, y)

wrap_in_activation_function(traced, ActivationFunction.LEAKY_RELU)
new_output = traced(x, y)

torch.testing.assert_close(new_output, torch.nn.LeakyReLU()(orig_output))

""" This file demonstrates using a custom FX Tracer to override the behavior of `torch.autograd.profiler.record_function` and make profiler ranges appear in FX-traced code. This is done with Python dynamic patching magic, allowing us to explicitly emit calls to `torch.ops.profiler._record_function_enter/_record_function_exit`. Please note that before https://github.com/pytorch/pytorch/pull/65180 lands, these ranges may be elimineated by `Graph.eliminate_dead_code` """ import torch import torch.fx # Setup: a module with `record_function` class Foo(torch.nn.Module): def forward(self, x): with torch.profiler.record_function('foo'): return torch.relu(x) f = Foo() x = torch.randn(5, 3, 2) with torch.autograd.profiler.profile() as prof: f(x) print(prof) # "foo" range is correctly recorded with normal execution """ ------------------- ------------ ------------ ------------ ------------ ------------ ------------ Name Self CPU % Self CPU CPU total % CPU total CPU time avg # of Calls ------------------- ------------ ------------ ------------ ------------ ------------ ------------ aten::zeros 6.10% 10.298us 10.04% 16.943us 16.943us 1 aten::empty 2.88% 4.857us 2.88% 4.857us 4.857us 1 aten::zero_ 1.06% 1.788us 1.06% 1.788us 1.788us 1 foo 21.28% 35.925us 89.96% 151.888us 151.888us 1 aten::empty 11.59% 19.572us 11.59% 19.572us 19.572us 1 aten::relu 23.81% 40.203us 57.09% 96.391us 96.391us 1 aten::clamp_min 3.87% 6.539us 33.28% 56.188us 56.188us 1 aten::empty 1.09% 1.847us 1.09% 1.847us 1.847us 1 aten::clamp_min 28.31% 47.802us 28.31% 47.802us 47.802us 1 ------------------- ------------ ------------ ------------ ------------ ------------ ------------ Self CPU time total: 168.831us """ traced = torch.fx.symbolic_trace(f) with torch.autograd.profiler.profile() as prof: traced(x) print(prof) # "foo" range is not recorded with FX tracing """ ------------------- ------------ ------------ ------------ ------------ ------------ ------------ Name Self CPU % Self CPU CPU total % CPU total CPU time avg # of Calls ------------------- ------------ ------------ ------------ ------------ ------------ ------------ aten::relu 23.50% 10.618us 100.00% 45.186us 45.186us 1 aten::clamp_min 18.05% 8.154us 76.50% 34.568us 34.568us 1 aten::empty 11.77% 5.317us 11.77% 5.317us 5.317us 1 aten::clamp_min 46.69% 21.097us 46.69% 21.097us 21.097us 1 ------------------- ------------ ------------ ------------ ------------ ------------ ------------ Self CPU time total: 45.186us """ class ProfilerTracer(torch.fx.Tracer): def trace(self, root, concrete_args=None): orig_record_function_enter = torch.autograd.profiler.record_function.__enter__ orig_record_function_exit = torch.autograd.profiler.record_function.__exit__ def fake_profiler_enter(_self): nonlocal self handle_proxy = self.create_proxy( kind='call_function', target=torch.ops.profiler._record_function_enter, args=(_self.name,), kwargs={}) assert getattr(_self, '_fx_profiler_ctx', None) is None setattr(_self, '_fx_profiler_ctx', handle_proxy) return handle_proxy def fake_profiler_exit(_self, exc_type, exc_value, traceback): assert hasattr(_self, '_fx_profiler_ctx') handle_proxy = _self._fx_profiler_ctx torch.ops.profiler._record_function_exit(handle_proxy) setattr(_self, '_fx_profiler_ctx', None) torch.autograd.profiler.record_function.__enter__ = fake_profiler_enter torch.autograd.profiler.record_function.__exit__ = fake_profiler_exit try: return super().trace(root, concrete_args) finally: torch.autograd.profiler.record_function.__enter__ = orig_record_function_enter 
torch.autograd.profiler.record_function.__exit__ = orig_record_function_exit pt = ProfilerTracer() graph_with_profiler = pt.trace(f) traced_with_profiler = torch.fx.GraphModule(pt.root, graph_with_profiler) with torch.autograd.profiler.profile() as prof: traced_with_profiler(x) print(prof) # "foo" range is recorded with special tracer behavior """ ------------------- ------------ ------------ ------------ ------------ ------------ ------------ Name Self CPU % Self CPU CPU total % CPU total CPU time avg # of Calls ------------------- ------------ ------------ ------------ ------------ ------------ ------------ foo 19.76% 39.928us 100.00% 202.055us 202.055us 1 aten::empty 3.93% 7.950us 3.93% 7.950us 7.950us 1 aten::relu 33.79% 68.282us 76.30% 154.177us 154.177us 1 aten::clamp_min 27.32% 55.198us 42.51% 85.895us 85.895us 1 aten::empty 1.28% 2.585us 1.28% 2.585us 2.585us 1 aten::clamp_min 13.91% 28.112us 13.91% 28.112us 28.112us 1 ------------------- ------------ ------------ ------------ ------------ ------------ ------------ Self CPU time total: 202.055us """
""" Recording Module Hierarchy With a Custom Tracer In this example, we are going to define a custom `fx.Tracer` instance that-- for each recorded operation--also notes down the qualified name of the module from which that operation originated. The _qualified name_ is the path to the Module from the root module. More information about this concept can be found in the documentation for `Module.get_submodule`: https://github.com/pytorch/pytorch/blob/9f2aea7b88f69fc74ad90b1418663802f80c1863/torch/nn/modules/module.py#L385 """ import torch import torch.fx from typing import Any, Callable, Dict, Optional, Tuple class ModulePathTracer(torch.fx.Tracer): """ ModulePathTracer is an FX tracer that--for each operation--also records the qualified name of the Module from which the operation originated. """ # The current qualified name of the Module being traced. The top-level # module is signified by empty string. This is updated when entering # call_module and restored when exiting call_module current_module_qualified_name : str = '' # A map from FX Node to the qualname of the Module from which it # originated. This is recorded by `create_proxy` when recording an # operation node_to_originating_module : Dict[torch.fx.Node, str] = {} def call_module(self, m: torch.nn.Module, forward: Callable[..., Any], args : Tuple[Any, ...], kwargs : Dict[str, Any]) -> Any: """ Override of Tracer.call_module (see https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.call_module). This override: 1) Stores away the qualified name of the caller for restoration later 2) Installs the qualified name of the caller in `current_module_qualified_name` for retrieval by `create_proxy` 3) Delegates into the normal Tracer.call_module method 4) Restores the caller's qualified name into current_module_qualified_name """ old_qualname = self.current_module_qualified_name try: self.current_module_qualified_name = self.path_of_module(m) return super().call_module(m, forward, args, kwargs) finally: self.current_module_qualified_name = old_qualname def create_proxy(self, kind: str, target: torch.fx.node.Target, args: Tuple[Any, ...], kwargs: Dict[str, Any], name: Optional[str] = None, type_expr: Optional[Any] = None): """ Override of `Tracer.create_proxy`. 
This override intercepts the recording of every operation and stores away the current traced module's qualified name in `node_to_originating_module` """ proxy = super().create_proxy(kind, target, args, kwargs, name, type_expr) self.node_to_originating_module[proxy.node] = self.current_module_qualified_name return proxy # Testing: let's see how this works on a torchvision ResNet18 model import torchvision.models as models # Model under test rn18 = models.resnet18() # Instantiate our ModulePathTracer and use that to trace our ResNet18 tracer = ModulePathTracer() traced_rn18 = tracer.trace(rn18) # Print (node, module qualified name) for every node in the Graph for node in traced_rn18.nodes: module_qualname = tracer.node_to_originating_module.get(node) print('Node', node, 'is from module', module_qualname) """ Node x is from module Node conv1 is from module conv1 Node bn1 is from module bn1 Node relu is from module relu Node maxpool is from module maxpool Node layer1_0_conv1 is from module layer1.0.conv1 Node layer1_0_bn1 is from module layer1.0.bn1 Node layer1_0_relu is from module layer1.0.relu Node layer1_0_conv2 is from module layer1.0.conv2 Node layer1_0_bn2 is from module layer1.0.bn2 Node add is from module layer1.0 Node layer1_0_relu_1 is from module layer1.0.relu Node layer1_1_conv1 is from module layer1.1.conv1 Node layer1_1_bn1 is from module layer1.1.bn1 Node layer1_1_relu is from module layer1.1.relu Node layer1_1_conv2 is from module layer1.1.conv2 Node layer1_1_bn2 is from module layer1.1.bn2 Node add_1 is from module layer1.1 Node layer1_1_relu_1 is from module layer1.1.relu Node layer2_0_conv1 is from module layer2.0.conv1 Node layer2_0_bn1 is from module layer2.0.bn1 Node layer2_0_relu is from module layer2.0.relu Node layer2_0_conv2 is from module layer2.0.conv2 Node layer2_0_bn2 is from module layer2.0.bn2 Node layer2_0_downsample_0 is from module layer2.0.downsample.0 Node layer2_0_downsample_1 is from module layer2.0.downsample.1 Node add_2 is from module layer2.0 Node layer2_0_relu_1 is from module layer2.0.relu Node layer2_1_conv1 is from module layer2.1.conv1 Node layer2_1_bn1 is from module layer2.1.bn1 Node layer2_1_relu is from module layer2.1.relu Node layer2_1_conv2 is from module layer2.1.conv2 Node layer2_1_bn2 is from module layer2.1.bn2 Node add_3 is from module layer2.1 Node layer2_1_relu_1 is from module layer2.1.relu Node layer3_0_conv1 is from module layer3.0.conv1 Node layer3_0_bn1 is from module layer3.0.bn1 Node layer3_0_relu is from module layer3.0.relu Node layer3_0_conv2 is from module layer3.0.conv2 Node layer3_0_bn2 is from module layer3.0.bn2 Node layer3_0_downsample_0 is from module layer3.0.downsample.0 Node layer3_0_downsample_1 is from module layer3.0.downsample.1 Node add_4 is from module layer3.0 Node layer3_0_relu_1 is from module layer3.0.relu Node layer3_1_conv1 is from module layer3.1.conv1 Node layer3_1_bn1 is from module layer3.1.bn1 Node layer3_1_relu is from module layer3.1.relu Node layer3_1_conv2 is from module layer3.1.conv2 Node layer3_1_bn2 is from module layer3.1.bn2 Node add_5 is from module layer3.1 Node layer3_1_relu_1 is from module layer3.1.relu Node layer4_0_conv1 is from module layer4.0.conv1 Node layer4_0_bn1 is from module layer4.0.bn1 Node layer4_0_relu is from module layer4.0.relu Node layer4_0_conv2 is from module layer4.0.conv2 Node layer4_0_bn2 is from module layer4.0.bn2 Node layer4_0_downsample_0 is from module layer4.0.downsample.0 Node layer4_0_downsample_1 is from module layer4.0.downsample.1 Node add_6 is 
from module layer4.0 Node layer4_0_relu_1 is from module layer4.0.relu Node layer4_1_conv1 is from module layer4.1.conv1 Node layer4_1_bn1 is from module layer4.1.bn1 Node layer4_1_relu is from module layer4.1.relu Node layer4_1_conv2 is from module layer4.1.conv2 Node layer4_1_bn2 is from module layer4.1.bn2 Node add_7 is from module layer4.1 Node layer4_1_relu_1 is from module layer4.1.relu Node avgpool is from module avgpool Node flatten is from module Node fc is from module fc Node output is from module None """
# ---------------------------------------------------------------------------

import torch
import torch.fx

"""
In this example we are going to define a library of "composite" operations.
Composite operations are those that are defined as callable functions that are
composed of several other operations in their implementation.

Composite operations allow you to choose at what level of abstraction you want
to interpret/manipulate the code. We show that we can provide a function to
inline these functions as well as use a custom Tracer to automatically inline
such functions.

Composite operations can be useful for exposing higher-level context to a
backend/transform while still maintaining the ability to examine things at a
more fine-grained level.
"""


def sigmoid_lowp(x : torch.Tensor):
    x = x.float()
    x = x.sigmoid()
    return x.half()

# wrap() indicates that the passed-in function should always
# be recorded as a call_function node rather than being traced
# through. Later, we will see how we can:
# a. Inline the implementation of such a function and
# b. Define a tracer that automatically traces through such a function
torch.fx.wrap(sigmoid_lowp)

def add_lowp(a : torch.Tensor, b : torch.Tensor):
    a, b = a.float(), b.float()
    c = a + b
    return c.half()

torch.fx.wrap(add_lowp)


# Let's see what happens when we symbolically trace through some code
# that uses these functions
class Foo(torch.nn.Module):
    def forward(self, x, y):
        x = sigmoid_lowp(x)
        y = sigmoid_lowp(y)
        return add_lowp(x, y)


traced = torch.fx.symbolic_trace(Foo())
print(traced.code)
"""
def forward(self, x, y):
    sigmoid_lowp = __main___sigmoid_lowp(x);  x = None
    sigmoid_lowp_1 = __main___sigmoid_lowp(y);  y = None
    add_lowp = __main___add_lowp(sigmoid_lowp, sigmoid_lowp_1);  sigmoid_lowp = sigmoid_lowp_1 = None
    return add_lowp
"""

# Notice that the calls to `sigmoid_lowp` and `add_lowp`
# appear literally in the trace; they are not traced through


# ***** Inlining calls *****
# Now let's define a function that allows for inlining these calls
# during graph manipulation.

def inline_lowp_func(n : torch.fx.Node):
    # If we find a call to a function in our "lowp" module, inline it
    if n.op == 'call_function' and n.target.__module__ == inline_lowp_func.__module__:
        # We want to insert the operations comprising the implementation of the
        # function before the function itself. Then, we can swap the output value
        # of the function call with the output value for its implementation nodes
        tracer = torch.fx.proxy.GraphAppendingTracer(n.graph)
        with n.graph.inserting_before(n):
            # We can inline code by using `fx.Proxy` instances.
            # map_arg traverses all aggregate types and applies the given function
            # to Node instances in the data structure. In this case, we are applying
            # the fx.Proxy constructor.
            proxy_args = torch.fx.node.map_arg(n.args, lambda x: torch.fx.Proxy(x, tracer))
            proxy_kwargs = torch.fx.node.map_arg(n.kwargs, lambda x: torch.fx.Proxy(x, tracer))
            # Call the function itself with proxy arguments. This will emit
            # nodes in the graph corresponding to the operations in the
            # implementation of the function
            output_proxy = n.target(*proxy_args, **proxy_kwargs)
            # Now replace the original node's uses with the output node of
            # the implementation.
            n.replace_all_uses_with(output_proxy.node)
            # Delete the old node
            n.graph.erase_node(n)

for node in traced.graph.nodes:
    if node.op == 'call_function' and node.target is sigmoid_lowp:
        inline_lowp_func(node)

# Don't forget to recompile after graph manipulation
traced.recompile()

print(traced.code)
"""
def forward(self, x, y):
    float_1 = x.float();  x = None
    sigmoid = float_1.sigmoid();  float_1 = None
    half = sigmoid.half();  sigmoid = None
    float_2 = y.float();  y = None
    sigmoid_1 = float_2.sigmoid();  float_2 = None
    half_1 = sigmoid_1.half();  sigmoid_1 = None
    add_lowp = __main___add_lowp(half, half_1);  half = half_1 = None
    return add_lowp
"""

# At this point, the implementation of `sigmoid_lowp` has been substituted
# in for all of the calls to that function.

# ***** Inlining calls during tracing *****
# Now we are going to define a custom tracer that can selectively inline
# calls to certain composite operations on-the-fly.

# New instance of our module
f = Foo()

class InliningTracer(torch.fx.Tracer):
    FNS_TO_INLINE = [add_lowp]

    def create_node(self, kind, target, args, kwargs, name=None, type_expr=None):
        if kind == 'call_function' and target in self.FNS_TO_INLINE:
            tracer = torch.fx.proxy.GraphAppendingTracer(self.graph)
            # Trace through the implementation of the function rather than
            # create a node
            proxy_args = torch.fx.node.map_arg(args, lambda x: torch.fx.Proxy(x, tracer))
            proxy_kwargs = torch.fx.node.map_arg(kwargs, lambda x: torch.fx.Proxy(x, tracer))
            return target(*proxy_args, **proxy_kwargs).node
        else:
            return super().create_node(kind, target, args, kwargs, name, type_expr)


tracer = InliningTracer()
graph = tracer.trace(f)
module = torch.fx.GraphModule(f, graph)
print(module.code)
"""
def forward(self, x, y):
    sigmoid_lowp = __main___sigmoid_lowp(x);  x = None
    sigmoid_lowp_1 = __main___sigmoid_lowp(y);  y = None
    float_1 = sigmoid_lowp.float();  sigmoid_lowp = None
    float_2 = sigmoid_lowp_1.float();  sigmoid_lowp_1 = None
    add = float_1 + float_2;  float_1 = float_2 = None
    half = add.half();  add = None
    return half
"""

# As you can see, the implementation for `add_lowp` has been
# inlined in the course of tracing with our InliningTracer.
# Such functionality can be used to, for example, implement
# a backend that wants to see the lowered form of some operations
# but the high-level form of another.

# ***** Future direction *****
#
# We may define an API, such as `Tracer.is_leaf_function`, that
# Tracer implementers can use to more easily specify the inlining
# behavior implemented in InliningTracer. Such a method would return
# True by default, but a Tracer can override it and return `False` for
# functions the Tracer wants to be traced through.

# ---------------------------------------------------------------------------

import torch
import torch.fx as fx

# An inverse mapping is one that takes a function f(x) and returns a function g
# such that f(g(x)) == x. For example, since log(exp(x)) == x, exp and log are
# inverses.

invert_mapping = {}

def add_inverse(a, b):
    invert_mapping[a] = b
    invert_mapping[b] = a

inverses = [
    (torch.sin, torch.arcsin),
    (torch.cos, torch.arccos),
    (torch.tan, torch.arctan),
    (torch.exp, torch.log),
]
for a, b in inverses:
    add_inverse(a, b)

# The general strategy is that we walk the graph backwards, transforming each
# node into its inverse. To do so, we swap the outputs and inputs of the
# functions, and then we look up its inverse in `invert_mapping`. Note that
# this transform assumes that all operations take in only one input and return
# one output.
def invert(model: torch.nn.Module) -> torch.nn.Module:
    fx_model = fx.symbolic_trace(model)
    new_graph = fx.Graph()  # As we're building up a new graph
    env = {}
    for node in reversed(fx_model.graph.nodes):
        if node.op == 'call_function':
            # This creates a node in the new graph with the inverse function,
            # and passes `env[node.name]` (i.e. the previous output node) as
            # input.
            new_node = new_graph.call_function(invert_mapping[node.target], (env[node.name],))
            env[node.args[0].name] = new_node
        elif node.op == 'output':
            # We turn the output into an input placeholder
            new_node = new_graph.placeholder(node.name)
            env[node.args[0].name] = new_node
        elif node.op == 'placeholder':
            # We turn the input placeholder into an output
            new_graph.output(env[node.name])
        else:
            raise RuntimeError("Not implemented")

    new_graph.lint()
    return fx.GraphModule(fx_model, new_graph)


def f(x):
    return torch.exp(torch.tan(x))

res = invert(f)
print(res.code)
"""
def forward(self, output):
    log_1 = torch.log(output);  output = None
    arctan_1 = torch.arctan(log_1);  log_1 = None
    return arctan_1
"""

print(f(res((torch.arange(5) + 1))))
# [1., 2., 3., 4., 5.]

# ---------------------------------------------------------------------------

import torch
from torch.fx import Proxy, Graph, GraphModule


'''
How to Create a Graph Using Proxy Objects Instead of Tracing

It's possible to directly create a Proxy object around a raw Node. This can be
used to create a Graph independently of symbolic tracing.

The following code demonstrates how to use Proxy with a raw Node to append
operations to a fresh Graph. We'll create two parameters (``x`` and ``y``),
perform some operations on those parameters, then add everything we created to
the new Graph. We'll then wrap that Graph in a GraphModule. Doing so creates a
runnable instance of ``nn.Module`` where previously-created operations are
represented in the Module's ``forward`` function.

By the end of the tutorial, we'll have added the following method to an empty
``nn.Module`` class.

.. code-block:: python

    def forward(self, x, y):
        cat_1 = torch.cat([x, y]);  x = y = None
        tanh_1 = torch.tanh(cat_1);  cat_1 = None
        neg_1 = torch.neg(tanh_1);  tanh_1 = None
        return neg_1
'''

# Create a graph independently of symbolic tracing
graph = Graph()
tracer = torch.fx.proxy.GraphAppendingTracer(graph)

# Create raw Nodes
raw1 = graph.placeholder('x')
raw2 = graph.placeholder('y')

# Initialize Proxies using the raw Nodes and the graph's appending tracer
y = Proxy(raw1, tracer)
z = Proxy(raw2, tracer)
# y = Proxy(raw1)
# z = Proxy(raw2)

# Create other operations using the Proxies `y` and `z`
a = torch.cat([y, z])
b = torch.tanh(a)
c = torch.neg(b)
# Because the Proxies above were created with the graph's own appending
# tracer, n-ary operators such as `torch.add` below work without multiple
# tracers being created at run time, which leads to errors. To see the
# failure mode for yourself, construct `y` and `z` with the commented-out
# `Proxy(raw1)` / `Proxy(raw2)` forms above instead.
z = torch.add(b, c)

# Create a new output Node and add it to the Graph. By doing this, the
# Graph will contain all the Nodes we just created (since they're all
# linked to the output Node)
graph.output(c.node)

# Wrap our created Graph in a GraphModule to get a final, runnable
# `nn.Module` instance
mod = GraphModule(torch.nn.Module(), graph)
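
# A short usage sketch (added here; not part of the original example): run the
# hand-built GraphModule and compare it against the eager computation it
# encodes. The input shapes are arbitrary assumptions.
x_in, y_in = torch.randn(2, 3), torch.randn(2, 3)
expected = torch.neg(torch.tanh(torch.cat([x_in, y_in])))
torch.testing.assert_close(mod(x_in, y_in), expected)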
import torch
from torch.fx import Proxy, symbolic_trace
from torch.fx.node import map_arg

'''
How to Inline a Function Into an Existing Graph

One reason you might want to inline a function is to get around FX's
default tracing behavior. For example, unless you've defined a custom
Tracer, the out-of-the-box implementation of ``symbolic_trace`` causes
references to ``torch.nn`` module instances to appear as
``call_module`` calls rather than being traced through. Let's say this
behavior is almost what you need; the only problem is that there's a
single module call that you want to replace with an inlined trace of
the function. Creating a custom Tracer would be too heavyweight.
Instead, you can accomplish this using Proxies.

The following code demonstrates how to trace a module and inline it
into an existing Graph using Proxy. We'll trace our Graph, then iterate
through its Nodes until we find the right place to swap out the
``call_module`` Node with an inlined trace. At that point, we'll create
Proxies from the Node's args and kwargs. Finally, we'll call the
function we want to replace with those Proxies--which will, in essence,
"trace" that function. Finally, we'll insert the result of that call
into our Graph. (This last step will automatically inline the
function.)
'''

# Sample module
class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        return self.relu(x) + 1.0

# Symbolically trace an instance of `M`. After tracing, `self.relu` is
# represented as a `call_module` Node. The full operation in the
# generated `forward` function's code will appear as `self.relu(x)`
m = symbolic_trace(M())

# Insert nodes from the ReLU graph in place of the original call to
# `self.relu`
# create a graph-appending tracer pointing to the original graph
tracer = torch.fx.proxy.GraphAppendingTracer(m.graph)
for node in m.graph.nodes:
    # Find the `call_module` Node in `m` that corresponds to `self.relu`.
    # This is the Node we want to swap out for an inlined version of the
    # same call
    if (node.op, node.target) == ("call_module", "relu"):
        with m.graph.inserting_before(node):
            # Create a Proxy from each Node in the current Node's
            # args/kwargs
            proxy_args = map_arg(node.args, lambda n: Proxy(n, tracer))
            proxy_kwargs = map_arg(node.kwargs, lambda n: Proxy(n, tracer))
            # Call `m.relu` with the newly-created Proxy arguments.
            # `m.relu` is the generic version of the function; by
            # calling it with Proxies created from Nodes in `m`, we're
            # emitting Nodes that reference existing values in the IR.
            # The result of this call is another Proxy, which we can
            # hook into our existing Graph to complete the function
            # inlining.
            proxy_output = m.relu(*proxy_args, **proxy_kwargs)
            # Replace the relu `call_module` node with the inlined
            # version of the function
            node.replace_all_uses_with(proxy_output.node)
            # Make sure that the old relu Node is erased
            m.graph.erase_node(node)

# Regenerate `forward()` from the mutated graph so the inlined version
# is what actually runs
m.recompile()
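The same replace/erase machinery also covers plain function-to-function swaps; a self-contained sketch (a toy transform, not part of the tutorial above) that rewrites torch.add into torch.mul:

import torch
from torch.fx import symbolic_trace

def fn(x):
    return torch.add(x, x)

gm = symbolic_trace(fn)
for node in gm.graph.nodes:
    if node.op == 'call_function' and node.target is torch.add:
        # Emit the replacement right after the original node
        with gm.graph.inserting_after(node):
            new_node = gm.graph.call_function(torch.mul, node.args, node.kwargs)
        node.replace_all_uses_with(new_node)
        gm.graph.erase_node(node)
gm.recompile()

assert torch.equal(gm(torch.tensor([1.0, 2.0])), torch.tensor([1.0, 4.0]))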
import numbers
from typing import Optional

import torch
import torch.fx
import operator

# Does this path not exist? Check that you've done the following:
# 1) Read README.md and follow the instructions to build libinterpreter.
# 2) If this file still does not exist after you've followed those instructions,
#    check if it is under a different extension (e.g. `dylib` on mac or `dll` on
#    windows).
torch.classes.load_library('build/libinterpreter.so')

# This is what a lowering pass should look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
# This will ensure that this lowering transformation still fits into the
# PyTorch programming model and enables features like composing with other
# transformations and TorchScript compilation.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
    # ===== Stage 1: Symbolic trace the module =====
    mod = torch.fx.symbolic_trace(orig_mod)

    # ===== Stage 2: Lower GraphModule representation to the C++
    #       interpreter's instruction format =====
    instructions = []
    constant_idx = 0
    constants = {}
    fn_input_names = []

    target_to_name = {
        operator.add : "add",
        operator.mul : "mul"
    }

    output_node : Optional[torch.fx.Node] = None
    # For each instruction, create a triple
    # (instruction_name : str, inputs : List[str], output : str)
    # to feed into the C++ interpreter
    for n in mod.graph.nodes:
        target, args, out_name = n.target, n.args, n.name
        assert len(n.kwargs) == 0, "kwargs currently not supported"

        if n.op == 'placeholder':
            # Placeholders specify function argument names. Save these
            # for later when we generate the wrapper GraphModule
            fn_input_names.append(target)
        elif n.op == 'call_function':
            assert target in target_to_name, f"Unsupported call target {target}"
            arg_names = []
            for arg in args:
                if not isinstance(arg, torch.fx.Node):
                    # Pull out constants. These constants will later be
                    # fed to the interpreter C++ object via add_constant()
                    arg_name = f'constant_{constant_idx}'
                    constants[arg_name] = torch.Tensor(
                        [arg] if isinstance(arg, numbers.Number) else arg)
                    arg_names.append(arg_name)
                    constant_idx += 1
                else:
                    arg_names.append(arg.name)
            instructions.append((target_to_name[target], arg_names, out_name))
        elif n.op == 'output':
            if output_node is not None:
                raise RuntimeError('Multiple output nodes!')
            output_node = n
        else:
            raise RuntimeError('Unsupported opcode ' + n.op)

    interpreter = torch.classes.NativeInterpretation.ElementwiseInterpreter()
    # Load constants
    for k, v in constants.items():
        interpreter.add_constant(k, v)
    # Specify names for positional input arguments
    interpreter.set_input_names(fn_input_names)
    # Load instructions
    interpreter.set_instructions(instructions)
    # Specify name for single output
    assert isinstance(output_node.args[0], torch.fx.Node)
    interpreter.set_output_name(output_node.args[0].name)

    # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
    class WrapperModule(torch.nn.Module):
        def __init__(self, interpreter):
            super().__init__()
            self.interpreter = interpreter

    wrapper = WrapperModule(interpreter)

    # Create a forward() function that is compatible with TorchScript compilation.
    # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
    # 3) Returns the specified return value
    graph = torch.fx.Graph()

    # Add placeholders for fn inputs
    placeholder_nodes = []
    for name in fn_input_names:
        placeholder_nodes.append(graph.create_node('placeholder', name))

    # Get the interpreter object
    interpreter_node = graph.create_node('get_attr', 'interpreter')

    # Add a node to call the interpreter instance
    output_node = graph.create_node(
        op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))

    # Register output
    graph.output(output_node)

    graph.lint()

    # Return the final GraphModule
    return torch.fx.GraphModule(wrapper, graph)


class MyElementwiseModule(torch.nn.Module):
    def forward(self, x, y):
        return x * y + y

mem = MyElementwiseModule()
lowered = lower_to_elementwise_interpreter(mem)
print(lowered.code)

# The lowered module can also be compiled into TorchScript
scripted = torch.jit.script(lowered)
print(scripted.graph)

# Stress test correctness
for _ in range(50):
    x, y = torch.randn(10, 20, 30), torch.randn(10, 20, 30)
    torch.testing.assert_allclose(lowered(x, y), mem(x, y))
    torch.testing.assert_allclose(scripted(x, y), mem(x, y))
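When the C++ interpreter is not built, torch.fx.Interpreter gives the same node-by-node dispatch idea in pure Python, which can be handy for debugging a lowering pass. A minimal sketch with a hypothetical logging subclass:

import torch
import torch.fx

class LoggingInterpreter(torch.fx.Interpreter):
    # Print every call_function node as it is dispatched
    def call_function(self, target, args, kwargs):
        result = super().call_function(target, args, kwargs)
        print(f"ran {target}, result shape {tuple(result.shape)}")
        return result

def f(x, y):
    return x * y + y

gm = torch.fx.symbolic_trace(f)
out = LoggingInterpreter(gm).run(torch.randn(2, 3), torch.randn(2, 3))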
import torch import torch.nn as nn import torch.nn.init as init class Net(nn.Module): def __init__(self, upscale_factor): super(Net, self).__init__() self.relu = nn.ReLU() self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2)) self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)) self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)) self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1)) self.pixel_shuffle = nn.PixelShuffle(upscale_factor) self._initialize_weights() def forward(self, x): x = self.relu(self.conv1(x)) x = self.relu(self.conv2(x)) x = self.relu(self.conv3(x)) x = self.pixel_shuffle(self.conv4(x)) return x def _initialize_weights(self): init.orthogonal_(self.conv1.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv2.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv3.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv4.weight)
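The last convolution emits upscale_factor ** 2 channels precisely so PixelShuffle can trade channels for spatial resolution; a shape check assuming a factor of 3:

import torch
import torch.nn as nn

upscale_factor = 3                                 # assumed for illustration
ps = nn.PixelShuffle(upscale_factor)
x = torch.randn(1, upscale_factor ** 2, 32, 32)    # (N, C * r^2, H, W) with C = 1
y = ps(x)
assert y.shape == (1, 1, 96, 96)                   # (N, C, H * r, W * r)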
import torch.utils.data as data from os import listdir from os.path import join from PIL import Image def is_image_file(filename): return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg"]) def load_img(filepath): img = Image.open(filepath).convert('YCbCr') y, _, _ = img.split() return y class DatasetFromFolder(data.Dataset): def __init__(self, image_dir, input_transform=None, target_transform=None): super(DatasetFromFolder, self).__init__() self.image_filenames = [join(image_dir, x) for x in listdir(image_dir) if is_image_file(x)] self.input_transform = input_transform self.target_transform = target_transform def __getitem__(self, index): input = load_img(self.image_filenames[index]) target = input.copy() if self.input_transform: input = self.input_transform(input) if self.target_transform: target = self.target_transform(target) return input, target def __len__(self): return len(self.image_filenames)
from __future__ import print_function import argparse from math import log10 import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import DataLoader from model import Net from data import get_training_set, get_test_set # Training settings parser = argparse.ArgumentParser(description='PyTorch Super Res Example') parser.add_argument('--upscale_factor', type=int, required=True, help="super resolution upscale factor") parser.add_argument('--batchSize', type=int, default=64, help='training batch size') parser.add_argument('--testBatchSize', type=int, default=10, help='testing batch size') parser.add_argument('--nEpochs', type=int, default=2, help='number of epochs to train for') parser.add_argument('--lr', type=float, default=0.01, help='Learning Rate. Default=0.01') parser.add_argument('--cuda', action='store_true', help='use cuda?') parser.add_argument('--mps', action='store_true', default=False, help='enables macOS GPU training') parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use') parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123') opt = parser.parse_args() print(opt) if opt.cuda and not torch.cuda.is_available(): raise Exception("No GPU found, please run without --cuda") if not opt.mps and torch.backends.mps.is_available(): raise Exception("Found mps device, please run with --mps to enable macOS GPU") torch.manual_seed(opt.seed) use_mps = opt.mps and torch.backends.mps.is_available() if opt.cuda: device = torch.device("cuda") elif use_mps: device = torch.device("mps") else: device = torch.device("cpu") print('===> Loading datasets') train_set = get_training_set(opt.upscale_factor) test_set = get_test_set(opt.upscale_factor) training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True) testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False) print('===> Building model') model = Net(upscale_factor=opt.upscale_factor).to(device) criterion = nn.MSELoss() optimizer = optim.Adam(model.parameters(), lr=opt.lr) def train(epoch): epoch_loss = 0 for iteration, batch in enumerate(training_data_loader, 1): input, target = batch[0].to(device), batch[1].to(device) optimizer.zero_grad() loss = criterion(model(input), target) epoch_loss += loss.item() loss.backward() optimizer.step() print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, iteration, len(training_data_loader), loss.item())) print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader))) def test(): avg_psnr = 0 with torch.no_grad(): for batch in testing_data_loader: input, target = batch[0].to(device), batch[1].to(device) prediction = model(input) mse = criterion(prediction, target) psnr = 10 * log10(1 / mse.item()) avg_psnr += psnr print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr / len(testing_data_loader))) def checkpoint(epoch): model_out_path = "model_epoch_{}.pth".format(epoch) torch.save(model, model_out_path) print("Checkpoint saved to {}".format(model_out_path)) for epoch in range(1, opt.nEpochs + 1): train(epoch) test() checkpoint(epoch)
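The PSNR computed in test() assumes intensities normalized to [0, 1], so the peak value is 1 and the formula reduces to 10 * log10(1 / mse); the arithmetic on a sample MSE:

from math import log10

mse = 0.01                       # mean squared error on [0, 1] images
psnr = 10 * log10(1 / mse)       # 10 * log10(100) = 20 dB
assert abs(psnr - 20.0) < 1e-9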
from os.path import exists, join, basename from os import makedirs, remove from six.moves import urllib import tarfile from torchvision.transforms import Compose, CenterCrop, ToTensor, Resize from dataset import DatasetFromFolder def download_bsd300(dest="dataset"): output_image_dir = join(dest, "BSDS300/images") if not exists(output_image_dir): makedirs(dest) url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz" print("downloading url ", url) data = urllib.request.urlopen(url) file_path = join(dest, basename(url)) with open(file_path, 'wb') as f: f.write(data.read()) print("Extracting data") with tarfile.open(file_path) as tar: for item in tar: tar.extract(item, dest) remove(file_path) return output_image_dir def calculate_valid_crop_size(crop_size, upscale_factor): return crop_size - (crop_size % upscale_factor) def input_transform(crop_size, upscale_factor): return Compose([ CenterCrop(crop_size), Resize(crop_size // upscale_factor), ToTensor(), ]) def target_transform(crop_size): return Compose([ CenterCrop(crop_size), ToTensor(), ]) def get_training_set(upscale_factor): root_dir = download_bsd300() train_dir = join(root_dir, "train") crop_size = calculate_valid_crop_size(256, upscale_factor) return DatasetFromFolder(train_dir, input_transform=input_transform(crop_size, upscale_factor), target_transform=target_transform(crop_size)) def get_test_set(upscale_factor): root_dir = download_bsd300() test_dir = join(root_dir, "test") crop_size = calculate_valid_crop_size(256, upscale_factor) return DatasetFromFolder(test_dir, input_transform=input_transform(crop_size, upscale_factor), target_transform=target_transform(crop_size))
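calculate_valid_crop_size trims the crop so the upscale factor divides it evenly, keeping the Resize(crop_size // upscale_factor) input exactly aligned with the target; reproduced here with two worked cases:

def calculate_valid_crop_size(crop_size, upscale_factor):
    return crop_size - (crop_size % upscale_factor)

assert calculate_valid_crop_size(256, 2) == 256   # already divisible
assert calculate_valid_crop_size(256, 3) == 255   # 255 = 3 * 85, so the resize is exact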
from __future__ import print_function import argparse import torch from PIL import Image from torchvision.transforms import ToTensor import numpy as np # Training settings parser = argparse.ArgumentParser(description='PyTorch Super Res Example') parser.add_argument('--input_image', type=str, required=True, help='input image to use') parser.add_argument('--model', type=str, required=True, help='model file to use') parser.add_argument('--output_filename', type=str, help='where to save the output image') parser.add_argument('--cuda', action='store_true', help='use cuda') opt = parser.parse_args() print(opt) img = Image.open(opt.input_image).convert('YCbCr') y, cb, cr = img.split() model = torch.load(opt.model) img_to_tensor = ToTensor() input = img_to_tensor(y).view(1, -1, y.size[1], y.size[0]) if opt.cuda: model = model.cuda() input = input.cuda() out = model(input) out = out.cpu() out_img_y = out[0].detach().numpy() out_img_y *= 255.0 out_img_y = out_img_y.clip(0, 255) out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L') out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC) out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC) out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB') out_img.save(opt.output_filename) print('output image saved to ', opt.output_filename)
from __future__ import print_function import argparse import os import random import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.optim as optim import torch.utils.data import torchvision.datasets as dset import torchvision.transforms as transforms import torchvision.utils as vutils parser = argparse.ArgumentParser() parser.add_argument('--dataset', required=True, help='cifar10 | lsun | mnist |imagenet | folder | lfw | fake') parser.add_argument('--dataroot', required=False, help='path to dataset') parser.add_argument('--workers', type=int, help='number of data loading workers', default=2) parser.add_argument('--batchSize', type=int, default=64, help='input batch size') parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network') parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector') parser.add_argument('--ngf', type=int, default=64) parser.add_argument('--ndf', type=int, default=64) parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for') parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002') parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5') parser.add_argument('--cuda', action='store_true', default=False, help='enables cuda') parser.add_argument('--dry-run', action='store_true', help='check a single training cycle works') parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use') parser.add_argument('--netG', default='', help="path to netG (to continue training)") parser.add_argument('--netD', default='', help="path to netD (to continue training)") parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints') parser.add_argument('--manualSeed', type=int, help='manual seed') parser.add_argument('--classes', default='bedroom', help='comma separated list of classes for the lsun data set') parser.add_argument('--mps', action='store_true', default=False, help='enables macOS GPU training') opt = parser.parse_args() print(opt) try: os.makedirs(opt.outf) except OSError: pass if opt.manualSeed is None: opt.manualSeed = random.randint(1, 10000) print("Random Seed: ", opt.manualSeed) random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) cudnn.benchmark = True if torch.cuda.is_available() and not opt.cuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") if torch.backends.mps.is_available() and not opt.mps: print("WARNING: You have mps device, to enable macOS GPU run with --mps") if opt.dataroot is None and str(opt.dataset).lower() != 'fake': raise ValueError("`dataroot` parameter is required for dataset \"%s\"" % opt.dataset) if opt.dataset in ['imagenet', 'folder', 'lfw']: # folder dataset dataset = dset.ImageFolder(root=opt.dataroot, transform=transforms.Compose([ transforms.Resize(opt.imageSize), transforms.CenterCrop(opt.imageSize), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])) nc=3 elif opt.dataset == 'lsun': classes = [ c + '_train' for c in opt.classes.split(',')] dataset = dset.LSUN(root=opt.dataroot, classes=classes, transform=transforms.Compose([ transforms.Resize(opt.imageSize), transforms.CenterCrop(opt.imageSize), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])) nc=3 elif opt.dataset == 'cifar10': dataset = dset.CIFAR10(root=opt.dataroot, download=True, 
transform=transforms.Compose([ transforms.Resize(opt.imageSize), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])) nc=3 elif opt.dataset == 'mnist': dataset = dset.MNIST(root=opt.dataroot, download=True, transform=transforms.Compose([ transforms.Resize(opt.imageSize), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)), ])) nc=1 elif opt.dataset == 'fake': dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize), transform=transforms.ToTensor()) nc=3 assert dataset dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers)) use_mps = opt.mps and torch.backends.mps.is_available() if opt.cuda: device = torch.device("cuda:0") elif use_mps: device = torch.device("mps") else: device = torch.device("cpu") ngpu = int(opt.ngpu) nz = int(opt.nz) ngf = int(opt.ngf) ndf = int(opt.ndf) # custom weights initialization called on netG and netD def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: torch.nn.init.normal_(m.weight, 0.0, 0.02) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) class Generator(nn.Module): def __init__(self, ngpu): super(Generator, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 8), nn.ReLU(True), # state size. (ngf*8) x 4 x 4 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True), # state size. (ngf*4) x 8 x 8 nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True), # state size. (ngf*2) x 16 x 16 nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), # state size. (ngf) x 32 x 32 nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False), nn.Tanh() # state size. (nc) x 64 x 64 ) def forward(self, input): if input.is_cuda and self.ngpu > 1: output = nn.parallel.data_parallel(self.main, input, range(self.ngpu)) else: output = self.main(input) return output netG = Generator(ngpu).to(device) netG.apply(weights_init) if opt.netG != '': netG.load_state_dict(torch.load(opt.netG)) print(netG) class Discriminator(nn.Module): def __init__(self, ngpu): super(Discriminator, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is (nc) x 64 x 64 nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf) x 32 x 32 nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*2) x 16 x 16 nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*4) x 8 x 8 nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 8), nn.LeakyReLU(0.2, inplace=True), # state size. 
            # (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, input):
        if input.is_cuda and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)

        return output.view(-1, 1).squeeze(1)


netD = Discriminator(ngpu).to(device)
netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)

criterion = nn.BCELoss()

fixed_noise = torch.randn(opt.batchSize, nz, 1, 1, device=device)
real_label = 1
fake_label = 0

# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

if opt.dry_run:
    opt.niter = 1

for epoch in range(opt.niter):
    for i, data in enumerate(dataloader, 0):
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real
        netD.zero_grad()
        real_cpu = data[0].to(device)
        batch_size = real_cpu.size(0)
        label = torch.full((batch_size,), real_label,
                           dtype=real_cpu.dtype, device=device)

        output = netD(real_cpu)
        errD_real = criterion(output, label)
        errD_real.backward()
        D_x = output.mean().item()

        # train with fake
        noise = torch.randn(batch_size, nz, 1, 1, device=device)
        fake = netG(noise)
        label.fill_(fake_label)
        output = netD(fake.detach())
        errD_fake = criterion(output, label)
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        errD = errD_real + errD_fake
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.fill_(real_label)  # fake labels are real for generator cost
        output = netD(fake)
        errG = criterion(output, label)
        errG.backward()
        D_G_z2 = output.mean().item()
        optimizerG.step()

        print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
              % (epoch, opt.niter, i, len(dataloader),
                 errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
        if i % 100 == 0:
            vutils.save_image(real_cpu,
                              '%s/real_samples.png' % opt.outf,
                              normalize=True)
            fake = netG(fixed_noise)
            vutils.save_image(fake.detach(),
                              '%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch),
                              normalize=True)

        if opt.dry_run:
            break
    # do checkpointing
    torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
    torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
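The "# state size" annotations in the generator follow the transposed-convolution rule out = (in - 1) * stride - 2 * padding + kernel_size; a standalone check of the first two stages with the default channel counts:

import torch
import torch.nn as nn

nz, ngf = 100, 64
z = torch.randn(1, nz, 1, 1)
up1 = nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False)
up2 = nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False)

h = up1(z)
assert h.shape == (1, ngf * 8, 4, 4)   # (1 - 1) * 1 - 0 + 4 = 4
h = up2(h)
assert h.shape == (1, ngf * 4, 8, 8)   # (4 - 1) * 2 - 2 + 4 = 8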
import os
from argparse import ArgumentParser

def makedirs(name):
    """helper function for python 2 and 3 to call os.makedirs()
       avoiding an error if the directory to be created already exists"""

    import os, errno

    try:
        os.makedirs(name)
    except OSError as ex:
        if ex.errno == errno.EEXIST and os.path.isdir(name):
            # ignore existing directory
            pass
        else:
            # a different error happened
            raise


def get_args():
    parser = ArgumentParser(description='PyTorch/torchtext SNLI example')
    parser.add_argument('--epochs', type=int, default=50,
                        help='the number of total epochs to run.')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='batch size. (default: 128)')
    parser.add_argument('--d_embed', type=int, default=100,
                        help='the size of each embedding vector.')
    parser.add_argument('--d_proj', type=int, default=300,
                        help='the size of each projection layer.')
    parser.add_argument('--d_hidden', type=int, default=300,
                        help='the number of features in the hidden state.')
    parser.add_argument('--n_layers', type=int, default=1,
                        help='the number of recurrent layers. (default: 1)')
    parser.add_argument('--log_every', type=int, default=50,
                        help='iteration period to output log.')
    parser.add_argument('--lr', type=float, default=.001,
                        help='initial learning rate.')
    parser.add_argument('--dev_every', type=int, default=1000,
                        help='log period of validation results.')
    parser.add_argument('--save_every', type=int, default=1000,
                        help='model checkpoint period.')
    parser.add_argument('--dp_ratio', type=float, default=0.2,
                        help='probability of an element to be zeroed.')
    parser.add_argument('--no-bidirectional', action='store_false', dest='birnn',
                        help='disable bidirectional LSTM.')
    parser.add_argument('--preserve-case', action='store_false', dest='lower',
                        help='case-sensitivity.')
    parser.add_argument('--no-projection', action='store_false', dest='projection',
                        help='disable projection layer.')
    parser.add_argument('--train_embed', action='store_false', dest='fix_emb',
                        help='enable embedding word training.')
    parser.add_argument('--gpu', type=int, default=0,
                        help='gpu id to use. (default: 0)')
    parser.add_argument('--save_path', type=str, default='results',
                        help='save path of results.')
    parser.add_argument('--vector_cache', type=str,
                        default=os.path.join(os.getcwd(), '.vector_cache/input_vectors.pt'),
                        help='name of vector cache directory, which saved input word-vectors.')
    parser.add_argument('--word_vectors', type=str, default='glove.6B.100d',
                        help='one of or a list containing instantiations of the GloVe, CharNGram, or Vectors classes. '
                             'Alternatively, one of or a list of available pretrained vectors: '
                             'charngram.100d fasttext.en.300d fasttext.simple.300d '
                             'glove.42B.300d glove.840B.300d glove.twitter.27B.25d '
                             'glove.twitter.27B.50d glove.twitter.27B.100d glove.twitter.27B.200d '
                             'glove.6B.50d glove.6B.100d glove.6B.200d glove.6B.300d')
    parser.add_argument('--resume_snapshot', type=str, default='',
                        help='model snapshot to resume.')
    parser.add_argument('--dry-run', action='store_true',
                        help='run only a few iterations')
    args = parser.parse_args()
    return args
import torch import torch.nn as nn class Bottle(nn.Module): def forward(self, input): if len(input.size()) <= 2: return super(Bottle, self).forward(input) size = input.size()[:2] out = super(Bottle, self).forward(input.view(size[0]*size[1], -1)) return out.view(size[0], size[1], -1) class Linear(Bottle, nn.Linear): pass class Encoder(nn.Module): def __init__(self, config): super(Encoder, self).__init__() self.config = config input_size = config.d_proj if config.projection else config.d_embed dropout = 0 if config.n_layers == 1 else config.dp_ratio self.rnn = nn.LSTM(input_size=input_size, hidden_size=config.d_hidden, num_layers=config.n_layers, dropout=dropout, bidirectional=config.birnn) def forward(self, inputs): batch_size = inputs.size()[1] state_shape = self.config.n_cells, batch_size, self.config.d_hidden h0 = c0 = inputs.new_zeros(state_shape) outputs, (ht, ct) = self.rnn(inputs, (h0, c0)) return ht[-1] if not self.config.birnn else ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1) class SNLIClassifier(nn.Module): def __init__(self, config): super(SNLIClassifier, self).__init__() self.config = config self.embed = nn.Embedding(config.n_embed, config.d_embed) self.projection = Linear(config.d_embed, config.d_proj) self.encoder = Encoder(config) self.dropout = nn.Dropout(p=config.dp_ratio) self.relu = nn.ReLU() seq_in_size = 2*config.d_hidden if self.config.birnn: seq_in_size *= 2 lin_config = [seq_in_size]*2 self.out = nn.Sequential( Linear(*lin_config), self.relu, self.dropout, Linear(*lin_config), self.relu, self.dropout, Linear(*lin_config), self.relu, self.dropout, Linear(seq_in_size, config.d_out)) def forward(self, batch): prem_embed = self.embed(batch.premise) hypo_embed = self.embed(batch.hypothesis) if self.config.fix_emb: prem_embed = prem_embed.detach() hypo_embed = hypo_embed.detach() if self.config.projection: prem_embed = self.relu(self.projection(prem_embed)) hypo_embed = self.relu(self.projection(hypo_embed)) premise = self.encoder(prem_embed) hypothesis = self.encoder(hypo_embed) scores = self.out(torch.cat([premise, hypothesis], 1)) return scores
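Bottle folds the (seq_len, batch) leading dimensions into one so the Linear sees a 2-D input, then unfolds the result. Since nn.Linear also broadcasts over leading dimensions, the two paths should agree, as this sketch with assumed sizes illustrates:

import torch
import torch.nn as nn

lin = nn.Linear(16, 8)
x = torch.randn(7, 5, 16)              # (seq_len, batch, features), illustrative sizes

size = x.size()[:2]
bottled = lin(x.view(size[0] * size[1], -1)).view(size[0], size[1], -1)
direct = lin(x)                        # nn.Linear broadcasts over leading dims

assert torch.allclose(bottled, direct, atol=1e-6)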
import os import time import glob import torch import torch.optim as O import torch.nn as nn from torchtext.legacy import data from torchtext.legacy import datasets from model import SNLIClassifier from util import get_args, makedirs args = get_args() if torch.cuda.is_available(): torch.cuda.set_device(args.gpu) device = torch.device('cuda:{}'.format(args.gpu)) elif torch.backends.mps.is_available(): device = torch.device('mps') else: device = torch.device('cpu') inputs = data.Field(lower=args.lower, tokenize='spacy') answers = data.Field(sequential=False) train, dev, test = datasets.SNLI.splits(inputs, answers) inputs.build_vocab(train, dev, test) if args.word_vectors: if os.path.isfile(args.vector_cache): inputs.vocab.vectors = torch.load(args.vector_cache) else: inputs.vocab.load_vectors(args.word_vectors) makedirs(os.path.dirname(args.vector_cache)) torch.save(inputs.vocab.vectors, args.vector_cache) answers.build_vocab(train) train_iter, dev_iter, test_iter = data.BucketIterator.splits( (train, dev, test), batch_size=args.batch_size, device=device) config = args config.n_embed = len(inputs.vocab) config.d_out = len(answers.vocab) config.n_cells = config.n_layers # double the number of cells for bidirectional networks if config.birnn: config.n_cells *= 2 if args.resume_snapshot: model = torch.load(args.resume_snapshot, map_location=device) else: model = SNLIClassifier(config) if args.word_vectors: model.embed.weight.data.copy_(inputs.vocab.vectors) model.to(device) criterion = nn.CrossEntropyLoss() opt = O.Adam(model.parameters(), lr=args.lr) iterations = 0 start = time.time() best_dev_acc = -1 header = ' Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss Accuracy Dev/Accuracy' dev_log_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:8.6f},{:12.4f},{:12.4f}'.split(',')) log_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{},{:12.4f},{}'.split(',')) makedirs(args.save_path) print(header) for epoch in range(args.epochs): train_iter.init_epoch() n_correct, n_total = 0, 0 for batch_idx, batch in enumerate(train_iter): # switch model to training mode, clear gradient accumulators model.train(); opt.zero_grad() iterations += 1 # forward pass answer = model(batch) # calculate accuracy of predictions in the current batch n_correct += (torch.max(answer, 1)[1].view(batch.label.size()) == batch.label).sum().item() n_total += batch.batch_size train_acc = 100. * n_correct/n_total # calculate loss of the network output with respect to training labels loss = criterion(answer, batch.label) # backpropagate and update optimizer learning rate loss.backward(); opt.step() # checkpoint model periodically if iterations % args.save_every == 0: snapshot_prefix = os.path.join(args.save_path, 'snapshot') snapshot_path = snapshot_prefix + '_acc_{:.4f}_loss_{:.6f}_iter_{}_model.pt'.format(train_acc, loss.item(), iterations) torch.save(model, snapshot_path) for f in glob.glob(snapshot_prefix + '*'): if f != snapshot_path: os.remove(f) # evaluate performance on validation set periodically if iterations % args.dev_every == 0: # switch model to evaluation mode model.eval(); dev_iter.init_epoch() # calculate accuracy on validation set n_dev_correct, dev_loss = 0, 0 with torch.no_grad(): for dev_batch_idx, dev_batch in enumerate(dev_iter): answer = model(dev_batch) n_dev_correct += (torch.max(answer, 1)[1].view(dev_batch.label.size()) == dev_batch.label).sum().item() dev_loss = criterion(answer, dev_batch.label) dev_acc = 100. 
            dev_acc = 100. * n_dev_correct / len(dev)

            print(dev_log_template.format(time.time()-start,
                epoch, iterations, 1+batch_idx, len(train_iter),
                100. * (1+batch_idx) / len(train_iter), loss.item(), dev_loss.item(), train_acc, dev_acc))

            # update best validation set accuracy
            if dev_acc > best_dev_acc:

                # found a model with better validation set accuracy

                best_dev_acc = dev_acc
                snapshot_prefix = os.path.join(args.save_path, 'best_snapshot')
                snapshot_path = snapshot_prefix + '_devacc_{}_devloss_{}__iter_{}_model.pt'.format(dev_acc, dev_loss.item(), iterations)

                # save model, delete previous 'best_snapshot' files
                torch.save(model, snapshot_path)
                for f in glob.glob(snapshot_prefix + '*'):
                    if f != snapshot_path:
                        os.remove(f)

        elif iterations % args.log_every == 0:

            # print progress message
            print(log_template.format(time.time()-start,
                epoch, iterations, 1+batch_idx, len(train_iter),
                100. * (1+batch_idx) / len(train_iter), loss.item(), ' '*8, n_correct/n_total*100, ' '*12))
        if args.dry_run:
            break
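The accuracy bookkeeping in the loops above compares the argmax of the logits to the labels; in miniature:

import torch

answer = torch.tensor([[2.0, 0.5, 0.1],
                       [0.1, 0.2, 3.0]])    # logits for two examples
label = torch.tensor([0, 1])

n_correct = (torch.max(answer, 1)[1].view(label.size()) == label).sum().item()
assert n_correct == 1    # argmax rows are [0, 2], so only the first matches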
import argparse import gym import numpy as np from itertools import count from collections import namedtuple import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.distributions import Categorical # Cart Pole parser = argparse.ArgumentParser(description='PyTorch actor-critic example') parser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor (default: 0.99)') parser.add_argument('--seed', type=int, default=543, metavar='N', help='random seed (default: 543)') parser.add_argument('--render', action='store_true', help='render the environment') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='interval between training status logs (default: 10)') args = parser.parse_args() env = gym.make('CartPole-v1') env.reset(seed=args.seed) torch.manual_seed(args.seed) SavedAction = namedtuple('SavedAction', ['log_prob', 'value']) class Policy(nn.Module): """ implements both actor and critic in one model """ def __init__(self): super(Policy, self).__init__() self.affine1 = nn.Linear(4, 128) # actor's layer self.action_head = nn.Linear(128, 2) # critic's layer self.value_head = nn.Linear(128, 1) # action & reward buffer self.saved_actions = [] self.rewards = [] def forward(self, x): """ forward of both actor and critic """ x = F.relu(self.affine1(x)) # actor: choses action to take from state s_t # by returning probability of each action action_prob = F.softmax(self.action_head(x), dim=-1) # critic: evaluates being in the state s_t state_values = self.value_head(x) # return values for both actor and critic as a tuple of 2 values: # 1. a list with the probability of each action over the action space # 2. the value from state s_t return action_prob, state_values model = Policy() optimizer = optim.Adam(model.parameters(), lr=3e-2) eps = np.finfo(np.float32).eps.item() def select_action(state): state = torch.from_numpy(state).float() probs, state_value = model(state) # create a categorical distribution over the list of probabilities of actions m = Categorical(probs) # and sample an action using the distribution action = m.sample() # save to action buffer model.saved_actions.append(SavedAction(m.log_prob(action), state_value)) # the action to take (left or right) return action.item() def finish_episode(): """ Training code. Calculates actor and critic loss and performs backprop. 
""" R = 0 saved_actions = model.saved_actions policy_losses = [] # list to save actor (policy) loss value_losses = [] # list to save critic (value) loss returns = [] # list to save the true values # calculate the true value using rewards returned from the environment for r in model.rewards[::-1]: # calculate the discounted value R = r + args.gamma * R returns.insert(0, R) returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) for (log_prob, value), R in zip(saved_actions, returns): advantage = R - value.item() # calculate actor (policy) loss policy_losses.append(-log_prob * advantage) # calculate critic (value) loss using L1 smooth loss value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]))) # reset gradients optimizer.zero_grad() # sum up all the values of policy_losses and value_losses loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum() # perform backprop loss.backward() optimizer.step() # reset rewards and action buffer del model.rewards[:] del model.saved_actions[:] def main(): running_reward = 10 # run infinitely many episodes for i_episode in count(1): # reset environment and episode reward state, _ = env.reset() ep_reward = 0 # for each episode, only run 9999 steps so that we don't # infinite loop while learning for t in range(1, 10000): # select action from policy action = select_action(state) # take the action state, reward, done, _, _ = env.step(action) if args.render: env.render() model.rewards.append(reward) ep_reward += reward if done: break # update cumulative reward running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward # perform backprop finish_episode() # log results if i_episode % args.log_interval == 0: print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format( i_episode, ep_reward, running_reward)) # check if we have "solved" the cart pole problem if running_reward > env.spec.reward_threshold: print("Solved! Running reward is now {} and " "the last episode runs to {} time steps!".format(running_reward, t)) break if __name__ == '__main__': main()
import argparse import gym import numpy as np from itertools import count from collections import deque import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.distributions import Categorical parser = argparse.ArgumentParser(description='PyTorch REINFORCE example') parser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor (default: 0.99)') parser.add_argument('--seed', type=int, default=543, metavar='N', help='random seed (default: 543)') parser.add_argument('--render', action='store_true', help='render the environment') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='interval between training status logs (default: 10)') args = parser.parse_args() env = gym.make('CartPole-v1') env.reset(seed=args.seed) torch.manual_seed(args.seed) class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() self.affine1 = nn.Linear(4, 128) self.dropout = nn.Dropout(p=0.6) self.affine2 = nn.Linear(128, 2) self.saved_log_probs = [] self.rewards = [] def forward(self, x): x = self.affine1(x) x = self.dropout(x) x = F.relu(x) action_scores = self.affine2(x) return F.softmax(action_scores, dim=1) policy = Policy() optimizer = optim.Adam(policy.parameters(), lr=1e-2) eps = np.finfo(np.float32).eps.item() def select_action(state): state = torch.from_numpy(state).float().unsqueeze(0) probs = policy(state) m = Categorical(probs) action = m.sample() policy.saved_log_probs.append(m.log_prob(action)) return action.item() def finish_episode(): R = 0 policy_loss = [] returns = deque() for r in policy.rewards[::-1]: R = r + args.gamma * R returns.appendleft(R) returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) for log_prob, R in zip(policy.saved_log_probs, returns): policy_loss.append(-log_prob * R) optimizer.zero_grad() policy_loss = torch.cat(policy_loss).sum() policy_loss.backward() optimizer.step() del policy.rewards[:] del policy.saved_log_probs[:] def main(): running_reward = 10 for i_episode in count(1): state, _ = env.reset() ep_reward = 0 for t in range(1, 10000): # Don't infinite loop while learning action = select_action(state) state, reward, done, _, _ = env.step(action) if args.render: env.render() policy.rewards.append(reward) ep_reward += reward if done: break running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward finish_episode() if i_episode % args.log_interval == 0: print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format( i_episode, ep_reward, running_reward)) if running_reward > env.spec.reward_threshold: print("Solved! Running reward is now {} and " "the last episode runs to {} time steps!".format(running_reward, t)) break if __name__ == '__main__': main()
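select_action draws from a Categorical over the policy's softmax output and stores the log-probability for the REINFORCE update; the distribution API in isolation:

import torch
from torch.distributions import Categorical

probs = torch.tensor([0.3, 0.7])
m = Categorical(probs)
action = m.sample()                    # tensor(0) or tensor(1)
log_prob = m.log_prob(action)          # log(0.3) or log(0.7)
assert torch.isclose(log_prob.exp(), probs[action])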
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys import pytorch_sphinx_theme current_dir = os.path.dirname(__file__) target_dir = os.path.abspath(os.path.join(current_dir, "../..")) sys.path.insert(0, target_dir) print(target_dir) # -- Project information ----------------------------------------------------- project = "PyTorchExamples" copyright = "2022, Meta" author = "Meta" # The full version, including alpha/beta/rc tags release = "1.11" # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ["sphinx.ext.napoleon", "sphinx.ext.autodoc", 'sphinx_panels'] panels_add_bootstrap_css = False # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'alabaster' html_theme = "pytorch_sphinx_theme" html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] panels_add_fontawesome_latex = True html_theme_options = { 'pytorch_project': 'examples', 'collapse_navigation': False, 'display_version': True, 'logo_only': False, 'analytics_id': 'UA-117752657-2', }
from __future__ import print_function import argparse import torch import torch.nn as nn import torch.optim as optim import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt class Sequence(nn.Module): def __init__(self): super(Sequence, self).__init__() self.lstm1 = nn.LSTMCell(1, 51) self.lstm2 = nn.LSTMCell(51, 51) self.linear = nn.Linear(51, 1) def forward(self, input, future = 0): outputs = [] h_t = torch.zeros(input.size(0), 51, dtype=torch.double) c_t = torch.zeros(input.size(0), 51, dtype=torch.double) h_t2 = torch.zeros(input.size(0), 51, dtype=torch.double) c_t2 = torch.zeros(input.size(0), 51, dtype=torch.double) for input_t in input.split(1, dim=1): h_t, c_t = self.lstm1(input_t, (h_t, c_t)) h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2)) output = self.linear(h_t2) outputs += [output] for i in range(future):# if we should predict the future h_t, c_t = self.lstm1(output, (h_t, c_t)) h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2)) output = self.linear(h_t2) outputs += [output] outputs = torch.cat(outputs, dim=1) return outputs if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--steps', type=int, default=15, help='steps to run') opt = parser.parse_args() # set random seed to 0 np.random.seed(0) torch.manual_seed(0) # load data and make training set data = torch.load('traindata.pt') input = torch.from_numpy(data[3:, :-1]) target = torch.from_numpy(data[3:, 1:]) test_input = torch.from_numpy(data[:3, :-1]) test_target = torch.from_numpy(data[:3, 1:]) # build the model seq = Sequence() seq.double() criterion = nn.MSELoss() # use LBFGS as optimizer since we can load the whole data to train optimizer = optim.LBFGS(seq.parameters(), lr=0.8) #begin to train for i in range(opt.steps): print('STEP: ', i) def closure(): optimizer.zero_grad() out = seq(input) loss = criterion(out, target) print('loss:', loss.item()) loss.backward() return loss optimizer.step(closure) # begin to predict, no need to track gradient here with torch.no_grad(): future = 1000 pred = seq(test_input, future=future) loss = criterion(pred[:, :-future], test_target) print('test loss:', loss.item()) y = pred.detach().numpy() # draw the result plt.figure(figsize=(30,10)) plt.title('Predict future values for time sequences\n(Dashlines are predicted values)', fontsize=30) plt.xlabel('x', fontsize=20) plt.ylabel('y', fontsize=20) plt.xticks(fontsize=20) plt.yticks(fontsize=20) def draw(yi, color): plt.plot(np.arange(input.size(1)), yi[:input.size(1)], color, linewidth = 2.0) plt.plot(np.arange(input.size(1), input.size(1) + future), yi[input.size(1):], color + ':', linewidth = 2.0) draw(y[0], 'r') draw(y[1], 'g') draw(y[2], 'b') plt.savefig('predict%d.pdf'%i) plt.close()
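Sequence feeds one scalar per time step by splitting the (batch, time) input along dim=1 and threading the (h, c) state through an LSTMCell; a minimal stepping sketch with illustrative sizes:

import torch
import torch.nn as nn

batch, T, hidden = 2, 5, 8             # assumed sizes for illustration
cell = nn.LSTMCell(1, hidden)          # one scalar feature per step
x = torch.randn(batch, T)

h = torch.zeros(batch, hidden)
c = torch.zeros(batch, hidden)
for x_t in x.split(1, dim=1):          # x_t has shape (batch, 1)
    h, c = cell(x_t, (h, c))
assert h.shape == (batch, hidden)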
import numpy as np import torch np.random.seed(2) T = 20 L = 1000 N = 100 x = np.empty((N, L), 'int64') x[:] = np.array(range(L)) + np.random.randint(-4 * T, 4 * T, N).reshape(N, 1) data = np.sin(x / 1.0 / T).astype('float64') torch.save(data, open('traindata.pt', 'wb'))
import os
import zipfile

# PyTorch 1.1 moved _download_url_to_file from torch.utils.model_zoo to
# torch.hub, and PyTorch 1.0 had a different two-argument variant of
# _download_url_to_file.
# TODO: once support for PyTorch 1.0 and older is dropped, the
# torch.utils.model_zoo fallback can be removed.
# Ref. PyTorch #18758
# https://github.com/pytorch/pytorch/pull/18758/commits
try:
    from torch.utils.model_zoo import _download_url_to_file
except ImportError:
    try:
        from torch.hub import download_url_to_file as _download_url_to_file
    except ImportError:
        from torch.hub import _download_url_to_file


def unzip(source_filename, dest_dir):
    with zipfile.ZipFile(source_filename) as zf:
        zf.extractall(path=dest_dir)


if __name__ == '__main__':
    _download_url_to_file('https://www.dropbox.com/s/lrvwfehqdcxoza8/saved_models.zip?dl=1',
                          'saved_models.zip', None, True)
    unzip('saved_models.zip', '.')
import torch class TransformerNet(torch.nn.Module): def __init__(self): super(TransformerNet, self).__init__() # Initial convolution layers self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2) self.in3 = torch.nn.InstanceNorm2d(128, affine=True) # Residual layers self.res1 = ResidualBlock(128) self.res2 = ResidualBlock(128) self.res3 = ResidualBlock(128) self.res4 = ResidualBlock(128) self.res5 = ResidualBlock(128) # Upsampling Layers self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2) self.in4 = torch.nn.InstanceNorm2d(64, affine=True) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) # Non-linearities self.relu = torch.nn.ReLU() def forward(self, X): y = self.relu(self.in1(self.conv1(X))) y = self.relu(self.in2(self.conv2(y))) y = self.relu(self.in3(self.conv3(y))) y = self.res1(y) y = self.res2(y) y = self.res3(y) y = self.res4(y) y = self.res5(y) y = self.relu(self.in4(self.deconv1(y))) y = self.relu(self.in5(self.deconv2(y))) y = self.deconv3(y) return y class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) out = self.reflection_pad(x_in) out = self.conv2d(out) return out
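ConvLayer pairs ReflectionPad2d(kernel_size // 2) with a stride-1 convolution so spatial size is preserved without zero-padding border artifacts; a quick shape check:

import torch
import torch.nn as nn

kernel_size = 9
pad = nn.ReflectionPad2d(kernel_size // 2)
conv = nn.Conv2d(3, 32, kernel_size, stride=1)

x = torch.randn(1, 3, 64, 64)
y = conv(pad(x))
assert y.shape == (1, 32, 64, 64)      # height and width unchanged at stride 1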
from collections import namedtuple import torch from torchvision import models class Vgg16(torch.nn.Module): def __init__(self, requires_grad=False): super(Vgg16, self).__init__() vgg_pretrained_features = models.vgg16(pretrained=True).features self.slice1 = torch.nn.Sequential() self.slice2 = torch.nn.Sequential() self.slice3 = torch.nn.Sequential() self.slice4 = torch.nn.Sequential() for x in range(4): self.slice1.add_module(str(x), vgg_pretrained_features[x]) for x in range(4, 9): self.slice2.add_module(str(x), vgg_pretrained_features[x]) for x in range(9, 16): self.slice3.add_module(str(x), vgg_pretrained_features[x]) for x in range(16, 23): self.slice4.add_module(str(x), vgg_pretrained_features[x]) if not requires_grad: for param in self.parameters(): param.requires_grad = False def forward(self, X): h = self.slice1(X) h_relu1_2 = h h = self.slice2(h) h_relu2_2 = h h = self.slice3(h) h_relu3_3 = h h = self.slice4(h) h_relu4_3 = h vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3']) out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3) return out
import argparse import os import sys import time import re import numpy as np import torch from torch.optim import Adam from torch.utils.data import DataLoader from torchvision import datasets from torchvision import transforms import torch.onnx import utils from transformer_net import TransformerNet from vgg import Vgg16 def check_paths(args): try: if not os.path.exists(args.save_model_dir): os.makedirs(args.save_model_dir) if args.checkpoint_model_dir is not None and not (os.path.exists(args.checkpoint_model_dir)): os.makedirs(args.checkpoint_model_dir) except OSError as e: print(e) sys.exit(1) def train(args): if args.cuda: device = torch.device("cuda") elif args.mps: device = torch.device("mps") else: device = torch.device("cpu") np.random.seed(args.seed) torch.manual_seed(args.seed) transform = transforms.Compose([ transforms.Resize(args.image_size), transforms.CenterCrop(args.image_size), transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255)) ]) train_dataset = datasets.ImageFolder(args.dataset, transform) train_loader = DataLoader(train_dataset, batch_size=args.batch_size) transformer = TransformerNet().to(device) optimizer = Adam(transformer.parameters(), args.lr) mse_loss = torch.nn.MSELoss() vgg = Vgg16(requires_grad=False).to(device) style_transform = transforms.Compose([ transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255)) ]) style = utils.load_image(args.style_image, size=args.style_size) style = style_transform(style) style = style.repeat(args.batch_size, 1, 1, 1).to(device) features_style = vgg(utils.normalize_batch(style)) gram_style = [utils.gram_matrix(y) for y in features_style] for e in range(args.epochs): transformer.train() agg_content_loss = 0. agg_style_loss = 0. count = 0 for batch_id, (x, _) in enumerate(train_loader): n_batch = len(x) count += n_batch optimizer.zero_grad() x = x.to(device) y = transformer(x) y = utils.normalize_batch(y) x = utils.normalize_batch(x) features_y = vgg(y) features_x = vgg(x) content_loss = args.content_weight * mse_loss(features_y.relu2_2, features_x.relu2_2) style_loss = 0. 
for ft_y, gm_s in zip(features_y, gram_style): gm_y = utils.gram_matrix(ft_y) style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :]) style_loss *= args.style_weight total_loss = content_loss + style_loss total_loss.backward() optimizer.step() agg_content_loss += content_loss.item() agg_style_loss += style_loss.item() if (batch_id + 1) % args.log_interval == 0: mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format( time.ctime(), e + 1, count, len(train_dataset), agg_content_loss / (batch_id + 1), agg_style_loss / (batch_id + 1), (agg_content_loss + agg_style_loss) / (batch_id + 1) ) print(mesg) if args.checkpoint_model_dir is not None and (batch_id + 1) % args.checkpoint_interval == 0: transformer.eval().cpu() ckpt_model_filename = "ckpt_epoch_" + str(e) + "_batch_id_" + str(batch_id + 1) + ".pth" ckpt_model_path = os.path.join(args.checkpoint_model_dir, ckpt_model_filename) torch.save(transformer.state_dict(), ckpt_model_path) transformer.to(device).train() # save model transformer.eval().cpu() save_model_filename = "epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str( args.content_weight) + "_" + str(args.style_weight) + ".model" save_model_path = os.path.join(args.save_model_dir, save_model_filename) torch.save(transformer.state_dict(), save_model_path) print("\nDone, trained model saved at", save_model_path) def stylize(args): device = torch.device("cuda" if args.cuda else "cpu") content_image = utils.load_image(args.content_image, scale=args.content_scale) content_transform = transforms.Compose([ transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255)) ]) content_image = content_transform(content_image) content_image = content_image.unsqueeze(0).to(device) if args.model.endswith(".onnx"): output = stylize_onnx(content_image, args) else: with torch.no_grad(): style_model = TransformerNet() state_dict = torch.load(args.model) # remove saved deprecated running_* keys in InstanceNorm from the checkpoint for k in list(state_dict.keys()): if re.search(r'in\d+\.running_(mean|var)$', k): del state_dict[k] style_model.load_state_dict(state_dict) style_model.to(device) style_model.eval() if args.export_onnx: assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx" output = torch.onnx._export( style_model, content_image, args.export_onnx, opset_version=11, ).cpu() else: output = style_model(content_image).cpu() utils.save_image(args.output_image, output[0]) def stylize_onnx(content_image, args): """ Read ONNX model and run it using onnxruntime """ assert not args.export_onnx import onnxruntime ort_session = onnxruntime.InferenceSession(args.model) def to_numpy(tensor): return ( tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy() ) ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(content_image)} ort_outs = ort_session.run(None, ort_inputs) img_out_y = ort_outs[0] return torch.from_numpy(img_out_y) def main(): main_arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style") subparsers = main_arg_parser.add_subparsers(title="subcommands", dest="subcommand") train_arg_parser = subparsers.add_parser("train", help="parser for training arguments") train_arg_parser.add_argument("--epochs", type=int, default=2, help="number of training epochs, default is 2") train_arg_parser.add_argument("--batch-size", type=int, default=4, help="batch size for training, default is 4") train_arg_parser.add_argument("--dataset", type=str, required=True, help="path to training 
dataset, the path should point to a folder "
                                       "containing another folder with all the training images")
    train_arg_parser.add_argument("--style-image", type=str, default="images/style-images/mosaic.jpg",
                                  help="path to style-image")
    train_arg_parser.add_argument("--save-model-dir", type=str, required=True,
                                  help="path to folder where trained model will be saved.")
    train_arg_parser.add_argument("--checkpoint-model-dir", type=str, default=None,
                                  help="path to folder where checkpoints of trained models will be saved")
    train_arg_parser.add_argument("--image-size", type=int, default=256,
                                  help="size of training images, default is 256 X 256")
    train_arg_parser.add_argument("--style-size", type=int, default=None,
                                  help="size of style-image, default is the original size of style image")
    train_arg_parser.add_argument("--cuda", type=int, required=True,
                                  help="set it to 1 for running on GPU, 0 for CPU")
    train_arg_parser.add_argument("--seed", type=int, default=42,
                                  help="random seed for training")
    train_arg_parser.add_argument("--content-weight", type=float, default=1e5,
                                  help="weight for content-loss, default is 1e5")
    train_arg_parser.add_argument("--style-weight", type=float, default=1e10,
                                  help="weight for style-loss, default is 1e10")
    train_arg_parser.add_argument("--lr", type=float, default=1e-3,
                                  help="learning rate, default is 1e-3")
    train_arg_parser.add_argument("--log-interval", type=int, default=500,
                                  help="number of images after which the training loss is logged, default is 500")
    train_arg_parser.add_argument("--checkpoint-interval", type=int, default=2000,
                                  help="number of batches after which a checkpoint of the trained model will be created")

    eval_arg_parser = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments")
    eval_arg_parser.add_argument("--content-image", type=str, required=True,
                                 help="path to content image you want to stylize")
    eval_arg_parser.add_argument("--content-scale", type=float, default=None,
                                 help="factor for scaling down the content image")
    eval_arg_parser.add_argument("--output-image", type=str, required=True,
                                 help="path for saving the output image")
    eval_arg_parser.add_argument("--model", type=str, required=True,
                                 help="saved model to be used for stylizing the image. If the file ends in .pth the "
                                      "PyTorch path is used; if it ends in .onnx the model is run with onnxruntime")
    eval_arg_parser.add_argument("--cuda", type=int, default=0,
                                 help="set it to 1 for running on GPU, 0 for CPU")
    eval_arg_parser.add_argument("--export_onnx", type=str,
                                 help="export ONNX model to a given file")
    eval_arg_parser.add_argument('--mps', action='store_true', default=False,
                                 help='enable macOS GPU evaluation')

    args = main_arg_parser.parse_args()

    if args.subcommand is None:
        print("ERROR: specify either train or eval")
        sys.exit(1)
    if args.cuda and not torch.cuda.is_available():
        print("ERROR: cuda is not available, try running on CPU")
        sys.exit(1)
    # Only the eval subparser defines --mps, so guard the lookup to avoid an
    # AttributeError when the train subcommand is used.
    if not getattr(args, "mps", False) and torch.backends.mps.is_available():
        print("WARNING: mps is available, run with --mps to enable macOS GPU")

    if args.subcommand == "train":
        check_paths(args)
        train(args)
    else:
        stylize(args)


if __name__ == "__main__":
    main()
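# Hedged usage sketch (an addition, not part of the original script): example
# invocations of the CLI defined above. Every path below is a placeholder to
# adjust for your local layout, not a file shipped with this example.
#
#   python neural_style.py train --dataset </path/to/train-folder> \
#       --style-image images/style-images/mosaic.jpg \
#       --save-model-dir </path/to/save-folder> --cuda 1
#
#   python neural_style.py eval --content-image </path/to/content.jpg> \
#       --model </path/to/saved.model> --output-image out.jpg --cuda 0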
import torch
from PIL import Image


def load_image(filename, size=None, scale=None):
    img = Image.open(filename).convert('RGB')
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # equivalent resampling filter and is available in older Pillow as well.
    if size is not None:
        img = img.resize((size, size), Image.LANCZOS)
    elif scale is not None:
        img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.LANCZOS)
    return img


def save_image(filename, data):
    img = data.clone().clamp(0, 255).numpy()
    img = img.transpose(1, 2, 0).astype("uint8")
    img = Image.fromarray(img)
    img.save(filename)


def gram_matrix(y):
    (b, ch, h, w) = y.size()
    features = y.view(b, ch, w * h)
    features_t = features.transpose(1, 2)
    gram = features.bmm(features_t) / (ch * h * w)
    return gram


def normalize_batch(batch):
    # normalize using imagenet mean and std
    mean = batch.new_tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
    std = batch.new_tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
    batch = batch.div_(255.0)
    return (batch - mean) / std
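# A minimal demo (an addition, not part of the original utils): gram_matrix
# returns a (B, C, C) tensor that is symmetric across the channel dimensions,
# and normalize_batch expects inputs in [0, 255] and modifies its argument in
# place (note the div_), hence the clone below.
if __name__ == "__main__":
    y = torch.rand(2, 3, 8, 8) * 255  # stand-in for a feature map / image batch
    gram = gram_matrix(y)
    print(gram.shape)                                  # torch.Size([2, 3, 3])
    print(torch.allclose(gram, gram.transpose(1, 2)))  # True: Gram matrices are symmetric
    print(normalize_batch(y.clone()).mean())           # per-channel ImageNet normalization applied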
from __future__ import division from __future__ import print_function import argparse import gzip import os import sys import urllib try: from urllib.error import URLError from urllib.request import urlretrieve except ImportError: from urllib2 import URLError from urllib import urlretrieve RESOURCES = [ 'train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz', ] def report_download_progress(chunk_number, chunk_size, file_size): if file_size != -1: percent = min(1, (chunk_number * chunk_size) / file_size) bar = '#' * int(64 * percent) sys.stdout.write('\r0% |{:<64}| {}%'.format(bar, int(percent * 100))) def download(destination_path, url, quiet): if os.path.exists(destination_path): if not quiet: print('{} already exists, skipping ...'.format(destination_path)) else: print('Downloading {} ...'.format(url)) try: hook = None if quiet else report_download_progress urlretrieve(url, destination_path, reporthook=hook) except URLError: raise RuntimeError('Error downloading resource!') finally: if not quiet: # Just a newline. print() def unzip(zipped_path, quiet): unzipped_path = os.path.splitext(zipped_path)[0] if os.path.exists(unzipped_path): if not quiet: print('{} already exists, skipping ... '.format(unzipped_path)) return with gzip.open(zipped_path, 'rb') as zipped_file: with open(unzipped_path, 'wb') as unzipped_file: unzipped_file.write(zipped_file.read()) if not quiet: print('Unzipped {} ...'.format(zipped_path)) def main(): parser = argparse.ArgumentParser( description='Download the MNIST dataset from the internet') parser.add_argument( '-d', '--destination', default='.', help='Destination directory') parser.add_argument( '-q', '--quiet', action='store_true', help="Don't report about progress") options = parser.parse_args() if not os.path.exists(options.destination): os.makedirs(options.destination) try: for resource in RESOURCES: path = os.path.join(options.destination, resource) url = 'http://yann.lecun.com/exdb/mnist/{}'.format(resource) download(path, url, options.quiet) unzip(path, options.quiet) except KeyboardInterrupt: print('Interrupted') if __name__ == '__main__': main()
from __future__ import print_function from __future__ import unicode_literals import argparse import matplotlib.pyplot as plt import torch parser = argparse.ArgumentParser() parser.add_argument("-i", "--sample-file", required=True) parser.add_argument("-o", "--out-file", default="out.png") parser.add_argument("-d", "--dimension", type=int, default=3) options = parser.parse_args() module = torch.jit.load(options.sample_file) images = list(module.parameters())[0] for index in range(options.dimension * options.dimension): image = images[index].detach().cpu().reshape(28, 28).mul(255).to(torch.uint8) array = image.numpy() axis = plt.subplot(options.dimension, options.dimension, 1 + index) plt.imshow(array, cmap="gray") axis.get_xaxis().set_visible(False) axis.get_yaxis().set_visible(False) plt.savefig(options.out_file) print("Saved ", options.out_file)
""" This python script converts the network into Script Module """ import torch from torchvision import models # Download and load the pre-trained model model = models.resnet18(pretrained=True) # Set upgrading the gradients to False for param in model.parameters(): param.requires_grad = False # Save the model except the final FC Layer resnet18 = torch.nn.Sequential(*list(model.children())[:-1]) example_input = torch.rand(1, 3, 224, 224) script_module = torch.jit.trace(resnet18, example_input) script_module.save('resnet18_without_last_layer.pt')
import argparse import os import random import shutil import time import warnings from enum import Enum import torch import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.multiprocessing as mp import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.datasets as datasets import torchvision.models as models import torchvision.transforms as transforms from torch.optim.lr_scheduler import StepLR from torch.utils.data import Subset model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name])) parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') parser.add_argument('data', metavar='DIR', nargs='?', default='imagenet', help='path to dataset (default: imagenet)') parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)') parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256), this is the total ' 'batch size of all GPUs on the current node when ' 'using Data Parallel or Distributed Data Parallel') parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate', dest='lr') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)', dest='weight_decay') parser.add_argument('-p', '--print-freq', default=10, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') parser.add_argument('--world-size', default=-1, type=int, help='number of nodes for distributed training') parser.add_argument('--rank', default=-1, type=int, help='node rank for distributed training') parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str, help='url used to set up distributed training') parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') parser.add_argument('--multiprocessing-distributed', action='store_true', help='Use multi-processing distributed training to launch ' 'N processes per node, which has N GPUs. 
This is the ' 'fastest way to use PyTorch for either single node or ' 'multi node data parallel training') parser.add_argument('--dummy', action='store_true', help="use fake data to benchmark") best_acc1 = 0 def main(): args = parser.parse_args() if args.seed is not None: random.seed(args.seed) torch.manual_seed(args.seed) cudnn.deterministic = True cudnn.benchmark = False warnings.warn('You have chosen to seed training. ' 'This will turn on the CUDNN deterministic setting, ' 'which can slow down your training considerably! ' 'You may see unexpected behavior when restarting ' 'from checkpoints.') if args.gpu is not None: warnings.warn('You have chosen a specific GPU. This will completely ' 'disable data parallelism.') if args.dist_url == "env://" and args.world_size == -1: args.world_size = int(os.environ["WORLD_SIZE"]) args.distributed = args.world_size > 1 or args.multiprocessing_distributed if torch.cuda.is_available(): ngpus_per_node = torch.cuda.device_count() else: ngpus_per_node = 1 if args.multiprocessing_distributed: # Since we have ngpus_per_node processes per node, the total world_size # needs to be adjusted accordingly args.world_size = ngpus_per_node * args.world_size # Use torch.multiprocessing.spawn to launch distributed processes: the # main_worker process function mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) else: # Simply call main_worker function main_worker(args.gpu, ngpus_per_node, args) def main_worker(gpu, ngpus_per_node, args): global best_acc1 args.gpu = gpu if args.gpu is not None: print("Use GPU: {} for training".format(args.gpu)) if args.distributed: if args.dist_url == "env://" and args.rank == -1: args.rank = int(os.environ["RANK"]) if args.multiprocessing_distributed: # For multiprocessing distributed training, rank needs to be the # global rank among all the processes args.rank = args.rank * ngpus_per_node + gpu dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) # create model if args.pretrained: print("=> using pre-trained model '{}'".format(args.arch)) model = models.__dict__[args.arch](pretrained=True) else: print("=> creating model '{}'".format(args.arch)) model = models.__dict__[args.arch]() if not torch.cuda.is_available() and not torch.backends.mps.is_available(): print('using CPU, this will be slow') elif args.distributed: # For multiprocessing distributed, DistributedDataParallel constructor # should always set the single device scope, otherwise, # DistributedDataParallel will use all available devices. if torch.cuda.is_available(): if args.gpu is not None: torch.cuda.set_device(args.gpu) model.cuda(args.gpu) # When using a single GPU per process and per # DistributedDataParallel, we need to divide the batch size # ourselves based on the total number of GPUs of the current node. 
args.batch_size = int(args.batch_size / ngpus_per_node) args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) else: model.cuda() # DistributedDataParallel will divide and allocate batch_size to all # available GPUs if device_ids are not set model = torch.nn.parallel.DistributedDataParallel(model) elif args.gpu is not None and torch.cuda.is_available(): torch.cuda.set_device(args.gpu) model = model.cuda(args.gpu) elif torch.backends.mps.is_available(): device = torch.device("mps") model = model.to(device) else: # DataParallel will divide and allocate batch_size to all available GPUs if args.arch.startswith('alexnet') or args.arch.startswith('vgg'): model.features = torch.nn.DataParallel(model.features) model.cuda() else: model = torch.nn.DataParallel(model).cuda() if torch.cuda.is_available(): if args.gpu: device = torch.device('cuda:{}'.format(args.gpu)) else: device = torch.device("cuda") elif torch.backends.mps.is_available(): device = torch.device("mps") else: device = torch.device("cpu") # define loss function (criterion), optimizer, and learning rate scheduler criterion = nn.CrossEntropyLoss().to(device) optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" scheduler = StepLR(optimizer, step_size=30, gamma=0.1) # optionally resume from a checkpoint if args.resume: if os.path.isfile(args.resume): print("=> loading checkpoint '{}'".format(args.resume)) if args.gpu is None: checkpoint = torch.load(args.resume) elif torch.cuda.is_available(): # Map model to be loaded to specified single gpu. loc = 'cuda:{}'.format(args.gpu) checkpoint = torch.load(args.resume, map_location=loc) args.start_epoch = checkpoint['epoch'] best_acc1 = checkpoint['best_acc1'] if args.gpu is not None: # best_acc1 may be from a checkpoint from a different GPU best_acc1 = best_acc1.to(args.gpu) model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) print("=> loaded checkpoint '{}' (epoch {})" .format(args.resume, checkpoint['epoch'])) else: print("=> no checkpoint found at '{}'".format(args.resume)) # Data loading code if args.dummy: print("=> Dummy data is used!") train_dataset = datasets.FakeData(1281167, (3, 224, 224), 1000, transforms.ToTensor()) val_dataset = datasets.FakeData(50000, (3, 224, 224), 1000, transforms.ToTensor()) else: traindir = os.path.join(args.data, 'train') valdir = os.path.join(args.data, 'val') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_dataset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ])) val_dataset = datasets.ImageFolder( valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=True) else: train_sampler = None val_sampler = None train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler) val_loader = 
torch.utils.data.DataLoader( val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=val_sampler) if args.evaluate: validate(val_loader, model, criterion, args) return for epoch in range(args.start_epoch, args.epochs): if args.distributed: train_sampler.set_epoch(epoch) # train for one epoch train(train_loader, model, criterion, optimizer, epoch, device, args) # evaluate on validation set acc1 = validate(val_loader, model, criterion, args) scheduler.step() # remember best acc@1 and save checkpoint is_best = acc1 > best_acc1 best_acc1 = max(acc1, best_acc1) if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0): save_checkpoint({ 'epoch': epoch + 1, 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer' : optimizer.state_dict(), 'scheduler' : scheduler.state_dict() }, is_best) def train(train_loader, model, criterion, optimizer, epoch, device, args): batch_time = AverageMeter('Time', ':6.3f') data_time = AverageMeter('Data', ':6.3f') losses = AverageMeter('Loss', ':.4e') top1 = AverageMeter('Acc@1', ':6.2f') top5 = AverageMeter('Acc@5', ':6.2f') progress = ProgressMeter( len(train_loader), [batch_time, data_time, losses, top1, top5], prefix="Epoch: [{}]".format(epoch)) # switch to train mode model.train() end = time.time() for i, (images, target) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) # move data to the same device as model images = images.to(device, non_blocking=True) target = target.to(device, non_blocking=True) # compute output output = model(images) loss = criterion(output, target) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) losses.update(loss.item(), images.size(0)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], images.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: progress.display(i + 1) def validate(val_loader, model, criterion, args): def run_validate(loader, base_progress=0): with torch.no_grad(): end = time.time() for i, (images, target) in enumerate(loader): i = base_progress + i if args.gpu is not None and torch.cuda.is_available(): images = images.cuda(args.gpu, non_blocking=True) if torch.backends.mps.is_available(): images = images.to('mps') target = target.to('mps') if torch.cuda.is_available(): target = target.cuda(args.gpu, non_blocking=True) # compute output output = model(images) loss = criterion(output, target) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) losses.update(loss.item(), images.size(0)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], images.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: progress.display(i + 1) batch_time = AverageMeter('Time', ':6.3f', Summary.NONE) losses = AverageMeter('Loss', ':.4e', Summary.NONE) top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE) top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE) progress = ProgressMeter( len(val_loader) + (args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset))), [batch_time, losses, top1, top5], prefix='Test: ') # switch to evaluate mode model.eval() run_validate(val_loader) if args.distributed: top1.all_reduce() top5.all_reduce() if 
args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset)): aux_val_dataset = Subset(val_loader.dataset, range(len(val_loader.sampler) * args.world_size, len(val_loader.dataset))) aux_val_loader = torch.utils.data.DataLoader( aux_val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) run_validate(aux_val_loader, len(val_loader)) progress.display_summary() return top1.avg def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): torch.save(state, filename) if is_best: shutil.copyfile(filename, 'model_best.pth.tar') class Summary(Enum): NONE = 0 AVERAGE = 1 SUM = 2 COUNT = 3 class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE): self.name = name self.fmt = fmt self.summary_type = summary_type self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def all_reduce(self): if torch.cuda.is_available(): device = torch.device("cuda") elif torch.backends.mps.is_available(): device = torch.device("mps") else: device = torch.device("cpu") total = torch.tensor([self.sum, self.count], dtype=torch.float32, device=device) dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False) self.sum, self.count = total.tolist() self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def summary(self): fmtstr = '' if self.summary_type is Summary.NONE: fmtstr = '' elif self.summary_type is Summary.AVERAGE: fmtstr = '{name} {avg:.3f}' elif self.summary_type is Summary.SUM: fmtstr = '{name} {sum:.3f}' elif self.summary_type is Summary.COUNT: fmtstr = '{name} {count:.3f}' else: raise ValueError('invalid summary type %r' % self.summary_type) return fmtstr.format(**self.__dict__) class ProgressMeter(object): def __init__(self, num_batches, meters, prefix=""): self.batch_fmtstr = self._get_batch_fmtstr(num_batches) self.meters = meters self.prefix = prefix def display(self, batch): entries = [self.prefix + self.batch_fmtstr.format(batch)] entries += [str(meter) for meter in self.meters] print('\t'.join(entries)) def display_summary(self): entries = [" *"] entries += [meter.summary() for meter in self.meters] print(' '.join(entries)) def _get_batch_fmtstr(self, num_batches): num_digits = len(str(num_batches // 1)) fmt = '{:' + str(num_digits) + 'd}' return '[' + fmt + '/' + fmt.format(num_batches) + ']' def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__ == '__main__': main()
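# Hedged illustration (an addition, not part of the original script): a tiny
# self-check of the accuracy() helper above. Defined as a function and never
# called, so it does not interfere with main().
def _accuracy_demo():
    output = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7], [0.6, 0.4]])
    target = torch.tensor([1, 0, 0, 0])
    top1, top2 = accuracy(output, target, topk=(1, 2))
    print(top1)  # tensor([75.])  - 3 of the 4 top-1 predictions match the target
    print(top2)  # tensor([100.]) - with only 2 classes, top-2 always matches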
# This code is based on the implementation of Mohammad Pezeshki available at
# https://github.com/mohammadpz/pytorch_forward_forward and licensed under the MIT License.
# Modifications/Improvements to the original code have been made by Vivek V Patel.

import argparse
import torch
import torch.nn as nn
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize, Lambda
from torch.utils.data import DataLoader
from torch.optim import Adam


def get_y_neg(y):
    y_neg = y.clone()
    for idx, y_samp in enumerate(y):
        allowed_indices = list(range(10))
        allowed_indices.remove(y_samp.item())
        y_neg[idx] = torch.tensor(allowed_indices)[
            torch.randint(len(allowed_indices), size=(1,))
        ].item()
    return y_neg.to(device)


def overlay_y_on_x(x, y, classes=10):
    x_ = x.clone()
    x_[:, :classes] *= 0.0
    x_[range(x.shape[0]), y] = x.max()
    return x_


class Net(torch.nn.Module):
    def __init__(self, dims):
        super().__init__()
        # use nn.ModuleList (rather than a plain Python list) so the layers are
        # registered and state_dict() actually contains their parameters
        self.layers = nn.ModuleList()
        for d in range(len(dims) - 1):
            self.layers.append(Layer(dims[d], dims[d + 1]).to(device))

    def predict(self, x):
        goodness_per_label = []
        for label in range(10):
            h = overlay_y_on_x(x, label)
            goodness = []
            for layer in self.layers:
                h = layer(h)
                goodness = goodness + [h.pow(2).mean(1)]
            goodness_per_label += [sum(goodness).unsqueeze(1)]
        goodness_per_label = torch.cat(goodness_per_label, 1)
        return goodness_per_label.argmax(1)

    def train(self, x_pos, x_neg):
        # deliberately shadows nn.Module.train: layers are trained greedily, one at a time
        h_pos, h_neg = x_pos, x_neg
        for i, layer in enumerate(self.layers):
            print("training layer: ", i)
            h_pos, h_neg = layer.train(h_pos, h_neg)


class Layer(nn.Linear):
    def __init__(self, in_features, out_features, bias=True, device=None, dtype=None):
        super().__init__(in_features, out_features, bias, device, dtype)
        self.relu = torch.nn.ReLU()
        self.opt = Adam(self.parameters(), lr=args.lr)
        self.threshold = args.threshold
        self.num_epochs = args.epochs

    def forward(self, x):
        x_direction = x / (x.norm(2, 1, keepdim=True) + 1e-4)
        return self.relu(torch.mm(x_direction, self.weight.T) + self.bias.unsqueeze(0))

    def train(self, x_pos, x_neg):
        for i in range(self.num_epochs):
            g_pos = self.forward(x_pos).pow(2).mean(1)
            g_neg = self.forward(x_neg).pow(2).mean(1)
            loss = torch.log(
                1
                + torch.exp(
                    torch.cat([-g_pos + self.threshold, g_neg - self.threshold])
                )
            ).mean()
            self.opt.zero_grad()
            loss.backward()
            self.opt.step()
            if i % args.log_interval == 0:
                print("Loss: ", loss.item())
        return self.forward(x_pos).detach(), self.forward(x_neg).detach()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--epochs",
        type=int,
        default=1000,
        metavar="N",
        help="number of epochs to train (default: 1000)",
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.03,
        metavar="LR",
        help="learning rate (default: 0.03)",
    )
    parser.add_argument(
        "--no_cuda", action="store_true", default=False, help="disables CUDA training"
    )
    parser.add_argument(
        "--no_mps", action="store_true", default=False, help="disables MPS training"
    )
    parser.add_argument(
        "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
    )
    parser.add_argument(
        "--save_model",
        action="store_true",
        default=False,
        help="For saving the current Model",
    )
    parser.add_argument(
        "--train_size", type=int, default=50000, help="size of training set"
    )
    parser.add_argument(
        "--threshold", type=float, default=2, help="threshold for training"
    )
    parser.add_argument("--test_size", type=int, default=10000, help="size of test set")
parser.add_argument( "--log-interval", type=int, default=10, metavar="N", help="how many batches to wait before logging training status", ) args = parser.parse_args() use_cuda = not args.no_cuda and torch.cuda.is_available() use_mps = not args.no_mps and torch.backends.mps.is_available() if use_cuda: device = torch.device("cuda") elif use_mps: device = torch.device("mps") else: device = torch.device("cpu") train_kwargs = {"batch_size": args.train_size} test_kwargs = {"batch_size": args.test_size} if use_cuda: cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True} train_kwargs.update(cuda_kwargs) test_kwargs.update(cuda_kwargs) transform = Compose( [ ToTensor(), Normalize((0.1307,), (0.3081,)), Lambda(lambda x: torch.flatten(x)), ] ) train_loader = DataLoader( MNIST("./data/", train=True, download=True, transform=transform), **train_kwargs ) test_loader = DataLoader( MNIST("./data/", train=False, download=True, transform=transform), **test_kwargs ) net = Net([784, 500, 500]) x, y = next(iter(train_loader)) x, y = x.to(device), y.to(device) x_pos = overlay_y_on_x(x, y) y_neg = get_y_neg(y) x_neg = overlay_y_on_x(x, y_neg) net.train(x_pos, x_neg) print("train error:", 1.0 - net.predict(x).eq(y).float().mean().item()) x_te, y_te = next(iter(test_loader)) x_te, y_te = x_te.to(device), y_te.to(device) if args.save_model: torch.save(net.state_dict(), "mnist_ff.pt") print("test error:", 1.0 - net.predict(x_te).eq(y_te).float().mean().item())
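# Hedged illustration (an addition): what overlay_y_on_x does on a toy batch.
# The first `classes` entries of each flattened image are zeroed and the entry
# at the label index is set to the global max of x. Never called by the script.
def _overlay_demo():
    x = torch.arange(20.0).reshape(2, 10)
    y = torch.tensor([1, 4])
    x_ = overlay_y_on_x(x, y, classes=5)
    print(x_[0, :5])  # tensor([ 0., 19.,  0.,  0.,  0.])
    print(x_[1, :5])  # tensor([ 0.,  0.,  0.,  0., 19.])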
from __future__ import print_function
import argparse, random, copy

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision import transforms as T
from torch.optim.lr_scheduler import StepLR


class SiameseNetwork(nn.Module):
    """
    Siamese network for image similarity estimation.
    The network is composed of two identical networks, one for each input.
    The output of each network is concatenated and passed to a linear layer.
    The output of the linear layer is then passed through a sigmoid function.
    `"FaceNet" <https://arxiv.org/pdf/1503.03832.pdf>`_ is a variant of the Siamese network.
    This implementation varies from FaceNet as we use the `ResNet-18` model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ as our feature extractor.
    In addition, we aren't using `TripletLoss` as the MNIST dataset is simple, so `BCELoss` can do the trick.
    """
    def __init__(self):
        super(SiameseNetwork, self).__init__()
        # get resnet model
        self.resnet = torchvision.models.resnet18(weights=None)

        # over-write the first conv layer to be able to read MNIST images
        # as resnet18 reads (3,x,x) where 3 is RGB channels
        # whereas MNIST has (1,x,x) where 1 is a gray-scale channel
        self.resnet.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        self.fc_in_features = self.resnet.fc.in_features

        # remove the last layer of resnet18 (the linear layer that comes after the avgpool layer)
        self.resnet = torch.nn.Sequential(*(list(self.resnet.children())[:-1]))

        # add linear layers to compare between the features of the two images
        self.fc = nn.Sequential(
            nn.Linear(self.fc_in_features * 2, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 1),
        )

        self.sigmoid = nn.Sigmoid()

        # initialize the weights
        self.resnet.apply(self.init_weights)
        self.fc.apply(self.init_weights)

    def init_weights(self, m):
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)

    def forward_once(self, x):
        output = self.resnet(x)
        output = output.view(output.size()[0], -1)
        return output

    def forward(self, input1, input2):
        # get two images' features
        output1 = self.forward_once(input1)
        output2 = self.forward_once(input2)

        # concatenate both images' features
        output = torch.cat((output1, output2), 1)

        # pass the concatenation to the linear layers
        output = self.fc(output)

        # pass the output of the linear layers to the sigmoid layer
        output = self.sigmoid(output)

        return output


class APP_MATCHER(Dataset):
    def __init__(self, root, train, download=False):
        super(APP_MATCHER, self).__init__()

        # get MNIST dataset
        self.dataset = datasets.MNIST(root, train=train, download=download)

        # as `self.dataset.data`'s shape is (Nx28x28), where N is the number of
        # examples in MNIST dataset, a single example has the dimensions of
        # (28x28) for (WxH), where W and H are the width and the height of the image.
        # However, every example should have (CxWxH) dimensions where C is the number
        # of channels to be passed to the network. As MNIST contains gray-scale images,
        # we add an additional dimension to correspond to the number of channels.
        self.data = self.dataset.data.unsqueeze(1).clone()

        self.group_examples()

    def group_examples(self):
        """
        To ease the accessibility of data based on the class, we will use `group_examples` to group
        examples based on class.

        Every key in `grouped_examples` corresponds to a class in MNIST dataset.
        For every key in `grouped_examples`, every value will correspond to all of the indices for the
        MNIST dataset examples that belong to that key.
        """

        # get the targets from MNIST dataset
        np_arr = np.array(self.dataset.targets.clone())

        # group examples based on class
        self.grouped_examples = {}
        for i in range(0, 10):
            self.grouped_examples[i] = np.where((np_arr == i))[0]

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        """
        For every example, we will select two images. There are two cases,
        positive and negative examples. For positive examples, we will have two
        images from the same class. For negative examples, we will have two images
        from different classes.

        Given an index, if the index is even, we will pick the second image from the same class,
        but it won't be the same image we chose for the first image. This is used to ensure the positive
        example isn't trivial as the network would easily distinguish the similarity between same images. However,
        if the network were given two different images from the same class, the network will need to learn
        the similarity between two different images representing the same class. If the index is odd, we will
        pick the second image from a different class than the first image.
        """

        # pick some random class for the first image
        selected_class = random.randint(0, 9)

        # pick a random index for the first image in the grouped indices based on the label
        # of the class
        random_index_1 = random.randint(0, self.grouped_examples[selected_class].shape[0] - 1)

        # pick the index to get the first image
        index_1 = self.grouped_examples[selected_class][random_index_1]

        # get the first image
        image_1 = self.data[index_1].clone().float()

        # same class
        if index % 2 == 0:
            # pick a random index for the second image
            random_index_2 = random.randint(0, self.grouped_examples[selected_class].shape[0] - 1)

            # ensure that the index of the second image isn't the same as the first image
            while random_index_2 == random_index_1:
                random_index_2 = random.randint(0, self.grouped_examples[selected_class].shape[0] - 1)

            # pick the index to get the second image
            index_2 = self.grouped_examples[selected_class][random_index_2]

            # get the second image
            image_2 = self.data[index_2].clone().float()

            # set the label for this example to be positive (1)
            target = torch.tensor(1, dtype=torch.float)

        # different class
        else:
            # pick a random class
            other_selected_class = random.randint(0, 9)

            # ensure that the class of the second image isn't the same as the first image
            while other_selected_class == selected_class:
                other_selected_class = random.randint(0, 9)

            # pick a random index for the second image in the grouped indices based on the label
            # of the class
            random_index_2 = random.randint(0, self.grouped_examples[other_selected_class].shape[0] - 1)

            # pick the index to get the second image
            index_2 = self.grouped_examples[other_selected_class][random_index_2]

            # get the second image
            image_2 = self.data[index_2].clone().float()

            # set the label for this example to be negative (0)
            target = torch.tensor(0, dtype=torch.float)

        return image_1, image_2, target


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()

    # we aren't using `TripletLoss` as the MNIST dataset is simple, so `BCELoss` can do the trick.
    criterion = nn.BCELoss()

    for batch_idx, (images_1, images_2, targets) in enumerate(train_loader):
        images_1, images_2, targets = images_1.to(device), images_2.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(images_1, images_2).squeeze()
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(images_1), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            if args.dry_run:
                break


def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0

    # we aren't using `TripletLoss` as the MNIST dataset is simple, so `BCELoss` can do the trick.
    # reduction='sum' accumulates the per-sample loss, so dividing by the dataset
    # size below yields the true average loss.
    criterion = nn.BCELoss(reduction='sum')

    with torch.no_grad():
        for (images_1, images_2, targets) in test_loader:
            images_1, images_2, targets = images_1.to(device), images_2.to(device), targets.to(device)
            outputs = model(images_1, images_2).squeeze()
            test_loss += criterion(outputs, targets).item()  # sum up batch loss
            pred = torch.where(outputs > 0.5, 1, 0)  # classify as similar when the score exceeds 0.5
            correct += pred.eq(targets.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    # for the 1st epoch, the average loss is 0.0001 and the accuracy 97-98%
    # using default settings. After completing the 10th epoch, the average
    # loss is 0.0000 and the accuracy 99.5-100% using default settings.
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch Siamese network Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--no-mps', action='store_true', default=False,
                        help='disables macOS GPU training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    use_mps = not args.no_mps and torch.backends.mps.is_available()

    torch.manual_seed(args.seed)

    if use_cuda:
        device = torch.device("cuda")
    elif use_mps:
        device = torch.device("mps")
    else:
        device = torch.device("cpu")

    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    train_dataset = APP_MATCHER('../data', train=True,
download=True) test_dataset = APP_MATCHER('../data', train=False) train_loader = torch.utils.data.DataLoader(train_dataset,**train_kwargs) test_loader = torch.utils.data.DataLoader(test_dataset, **test_kwargs) model = SiameseNetwork().to(device) optimizer = optim.Adadelta(model.parameters(), lr=args.lr) scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma) for epoch in range(1, args.epochs + 1): train(args, model, device, train_loader, optimizer, epoch) test(model, device, test_loader) scheduler.step() if args.save_model: torch.save(model.state_dict(), "siamese_network.pt") if __name__ == '__main__': main()
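# Hedged smoke-test sketch (an addition): a shape check for SiameseNetwork on
# random 1-channel 28x28 inputs, using untrained weights. Defined as a function
# and never called, so importing this file stays side-effect free.
def _siamese_shape_check():
    net = SiameseNetwork()
    a, b = torch.rand(4, 1, 28, 28), torch.rand(4, 1, 28, 28)
    with torch.no_grad():
        scores = net(a, b)
    print(scores.shape)  # torch.Size([4, 1]); each score lies in (0, 1) after the sigmoid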
import os import torch import torch.optim as optim import torch.nn.functional as F def train(rank, args, model, device, dataset, dataloader_kwargs): torch.manual_seed(args.seed + rank) train_loader = torch.utils.data.DataLoader(dataset, **dataloader_kwargs) optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) for epoch in range(1, args.epochs + 1): train_epoch(epoch, args, model, device, train_loader, optimizer) def test(args, model, device, dataset, dataloader_kwargs): torch.manual_seed(args.seed) test_loader = torch.utils.data.DataLoader(dataset, **dataloader_kwargs) test_epoch(model, device, test_loader) def train_epoch(epoch, args, model, device, data_loader, optimizer): model.train() pid = os.getpid() for batch_idx, (data, target) in enumerate(data_loader): optimizer.zero_grad() output = model(data.to(device)) loss = F.nll_loss(output, target.to(device)) loss.backward() optimizer.step() if batch_idx % args.log_interval == 0: print('{}\tTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( pid, epoch, batch_idx * len(data), len(data_loader.dataset), 100. * batch_idx / len(data_loader), loss.item())) if args.dry_run: break def test_epoch(model, device, data_loader): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in data_loader: output = model(data.to(device)) test_loss += F.nll_loss(output, target.to(device), reduction='sum').item() # sum up batch loss pred = output.max(1)[1] # get the index of the max log-probability correct += pred.eq(target.to(device)).sum().item() test_loss /= len(data_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(data_loader.dataset), 100. * correct / len(data_loader.dataset)))
from __future__ import print_function import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.multiprocessing as mp from torch.utils.data.sampler import Sampler from torchvision import datasets, transforms from train import train, test # Training settings parser = argparse.ArgumentParser(description='PyTorch MNIST Example') parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)') parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', help='input batch size for testing (default: 1000)') parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)') parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.01)') parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)') parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') parser.add_argument('--num-processes', type=int, default=2, metavar='N', help='how many training processes to use (default: 2)') parser.add_argument('--cuda', action='store_true', default=False, help='enables CUDA training') parser.add_argument('--mps', action='store_true', default=False, help='enables macOS GPU training') parser.add_argument('--save_model', action='store_true', default=False, help='save the trained model to state_dict') parser.add_argument('--dry-run', action='store_true', default=False, help='quickly check a single pass') class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=1) if __name__ == '__main__': args = parser.parse_args() use_cuda = args.cuda and torch.cuda.is_available() use_mps = args.mps and torch.backends.mps.is_available() if use_cuda: device = torch.device("cuda") elif use_mps: device = torch.device("mps") else: device = torch.device("cpu") transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]) dataset1 = datasets.MNIST('../data', train=True, download=True, transform=transform) dataset2 = datasets.MNIST('../data', train=False, transform=transform) kwargs = {'batch_size': args.batch_size, 'shuffle': True} if use_cuda: kwargs.update({'num_workers': 1, 'pin_memory': True, }) torch.manual_seed(args.seed) mp.set_start_method('spawn', force=True) model = Net().to(device) model.share_memory() # gradients are allocated lazily, so they are not shared here processes = [] for rank in range(args.num_processes): p = mp.Process(target=train, args=(rank, args, model, device, dataset1, kwargs)) # We first train the model across `num_processes` processes p.start() processes.append(p) for p in processes: p.join() if args.save_model: torch.save(model.state_dict(), "MNIST_hogwild.pt") # Once training is complete, we can test the model test(args, model, device, dataset2, kwargs)
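# Hedged aside (an addition): the Hogwild pattern above relies on
# model.share_memory(), which moves parameter storage into shared memory so
# every spawned process updates the same underlying weights. Never called here.
def _shared_memory_demo():
    m = Net()
    m.share_memory()
    print(all(p.is_shared() for p in m.parameters()))  # True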
#!/usr/bin/env python from __future__ import print_function from itertools import count import torch import torch.nn.functional as F POLY_DEGREE = 4 W_target = torch.randn(POLY_DEGREE, 1) * 5 b_target = torch.randn(1) * 5 def make_features(x): """Builds features i.e. a matrix with columns [x, x^2, x^3, x^4].""" x = x.unsqueeze(1) return torch.cat([x ** i for i in range(1, POLY_DEGREE+1)], 1) def f(x): """Approximated function.""" return x.mm(W_target) + b_target.item() def poly_desc(W, b): """Creates a string description of a polynomial.""" result = 'y = ' for i, w in enumerate(W): result += '{:+.2f} x^{} '.format(w, i + 1) result += '{:+.2f}'.format(b[0]) return result def get_batch(batch_size=32): """Builds a batch i.e. (x, f(x)) pair.""" random = torch.randn(batch_size) x = make_features(random) y = f(x) return x, y # Define model fc = torch.nn.Linear(W_target.size(0), 1) for batch_idx in count(1): # Get data batch_x, batch_y = get_batch() # Reset gradients fc.zero_grad() # Forward pass output = F.smooth_l1_loss(fc(batch_x), batch_y) loss = output.item() # Backward pass output.backward() # Apply gradients for param in fc.parameters(): param.data.add_(-0.1 * param.grad) # Stop criterion if loss < 1e-3: break print('Loss: {:.6f} after {} batches'.format(loss, batch_idx)) print('==> Learned function:\t' + poly_desc(fc.weight.view(-1), fc.bias)) print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))
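# Hedged cross-check (an addition): for this model, which is linear in the
# polynomial features, the SGD loop above should approach the ordinary
# least-squares solution, which torch.linalg.lstsq computes directly.
batch_x, batch_y = get_batch(batch_size=256)
X = torch.cat([batch_x, torch.ones(batch_x.size(0), 1)], 1)  # append a bias column
solution = torch.linalg.lstsq(X, batch_y).solution           # shape (POLY_DEGREE + 1, 1)
print('==> Least-squares check:\t' + poly_desc(solution[:-1].view(-1), solution[-1]))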
import os import time import requests import tarfile import numpy as np import argparse import torch from torch import nn import torch.nn.functional as F from torch.optim import Adam class GraphConv(nn.Module): """ Graph Convolutional Layer described in "Semi-Supervised Classification with Graph Convolutional Networks". Given an input feature representation for each node in a graph, the Graph Convolutional Layer aims to aggregate information from the node's neighborhood to update its own representation. This is achieved by applying a graph convolutional operation that combines the features of a node with the features of its neighboring nodes. Mathematically, the Graph Convolutional Layer can be described as follows: H' = f(D^(-1/2) * A * D^(-1/2) * H * W) where: H: Input feature matrix with shape (N, F_in), where N is the number of nodes and F_in is the number of input features per node. A: Adjacency matrix of the graph with shape (N, N), representing the relationships between nodes. W: Learnable weight matrix with shape (F_in, F_out), where F_out is the number of output features per node. """ def __init__(self, input_dim, output_dim, use_bias=False): super(GraphConv, self).__init__() # Initialize the weight matrix W (in this case called `kernel`) self.kernel = nn.Parameter(torch.Tensor(input_dim, output_dim)) nn.init.xavier_normal_(self.kernel) # Initialize the weights using Xavier initialization # Initialize the bias (if use_bias is True) self.bias = None if use_bias: self.bias = nn.Parameter(torch.Tensor(output_dim)) nn.init.zeros_(self.bias) # Initialize the bias to zeros def forward(self, input_tensor, adj_mat): """ Performs a graph convolution operation. Args: input_tensor (torch.Tensor): Input tensor representing node features. adj_mat (torch.Tensor): Adjacency matrix representing graph structure. Returns: torch.Tensor: Output tensor after the graph convolution operation. """ support = torch.mm(input_tensor, self.kernel) # Matrix multiplication between input and weight matrix output = torch.spmm(adj_mat, support) # Sparse matrix multiplication between adjacency matrix and support # Add the bias (if bias is not None) if self.bias is not None: output = output + self.bias return output class GCN(nn.Module): """ Graph Convolutional Network (GCN) as described in the paper `"Semi-Supervised Classification with Graph Convolutional Networks" <https://arxiv.org/pdf/1609.02907.pdf>`. The Graph Convolutional Network is a deep learning architecture designed for semi-supervised node classification tasks on graph-structured data. It leverages the graph structure to learn node representations by propagating information through the graph using graph convolutional layers. The original implementation consists of two stacked graph convolutional layers. The ReLU activation function is applied to the hidden representations, and the Softmax activation function is applied to the output representations. """ def __init__(self, input_dim, hidden_dim, output_dim, use_bias=True, dropout_p=0.1): super(GCN, self).__init__() # Define the Graph Convolution layers self.gc1 = GraphConv(input_dim, hidden_dim, use_bias=use_bias) self.gc2 = GraphConv(hidden_dim, output_dim, use_bias=use_bias) # Define the dropout layer self.dropout = nn.Dropout(dropout_p) def forward(self, input_tensor, adj_mat): """ Performs forward pass of the Graph Convolutional Network (GCN). 
        Args:
            input_tensor (torch.Tensor): Input node feature matrix with shape (N, input_dim), where N is the number
                of nodes and input_dim is the number of input features per node.
            adj_mat (torch.Tensor): Adjacency matrix of the graph with shape (N, N), representing the relationships
                between nodes.

        Returns:
            torch.Tensor: Output tensor with shape (N, output_dim), representing the predicted class probabilities
                for each node.
        """

        # Perform the first graph convolutional layer
        x = self.gc1(input_tensor, adj_mat)
        x = F.relu(x)  # Apply ReLU activation function
        x = self.dropout(x)  # Apply dropout regularization

        # Perform the second graph convolutional layer
        x = self.gc2(x, adj_mat)

        # Apply log-softmax activation function for classification
        return F.log_softmax(x, dim=1)


def load_cora(path='./cora', device='cpu'):
    """
    The graph convolutional operation requires normalizing the adjacency matrix:
    D^(-1/2) * A * D^(-1/2). This step scales the adjacency matrix such that the features of
    neighboring nodes are weighted appropriately during aggregation. The steps involved in the
    renormalization trick are as follows:
        - Compute the degree matrix.
        - Compute the inverse square root of the degree matrix.
        - Multiply the inverse square root of the degree matrix with the adjacency matrix.
    """

    # Set the paths to the data files
    content_path = os.path.join(path, 'cora.content')
    cites_path = os.path.join(path, 'cora.cites')

    # Load data from files
    content_tensor = np.genfromtxt(content_path, dtype=np.dtype(str))
    cites_tensor = np.genfromtxt(cites_path, dtype=np.int32)

    # Process features
    features = torch.FloatTensor(content_tensor[:, 1:-1].astype(np.int32))  # Extract feature values
    scale_vector = torch.sum(features, dim=1)  # Compute sum of features for each node
    scale_vector = 1 / scale_vector  # Compute reciprocal of the sums
    scale_vector[scale_vector == float('inf')] = 0  # Handle division by zero cases
    scale_vector = torch.diag(scale_vector).to_sparse()  # Convert the scale vector to a sparse diagonal matrix
    features = scale_vector @ features  # Scale the features using the scale vector

    # Process labels
    classes, labels = np.unique(content_tensor[:, -1], return_inverse=True)  # Extract unique classes and map labels to indices
    labels = torch.LongTensor(labels)  # Convert labels to a tensor

    # Process adjacency matrix
    idx = content_tensor[:, 0].astype(np.int32)  # Extract node indices
    idx_map = {id: pos for pos, id in enumerate(idx)}  # Create a dictionary to map indices to positions

    # Map node indices to positions in the adjacency matrix
    edges = np.array(
        list(map(lambda edge: [idx_map[edge[0]], idx_map[edge[1]]],
                 cites_tensor)), dtype=np.int32)

    V = len(idx)  # Number of nodes
    E = edges.shape[0]  # Number of edges
    adj_mat = torch.sparse_coo_tensor(edges.T, torch.ones(E), (V, V), dtype=torch.int64)  # Create the initial adjacency matrix as a sparse tensor
    adj_mat = torch.eye(V) + adj_mat  # Add self-loops to the adjacency matrix

    degree_mat = torch.sum(adj_mat, dim=1)  # Compute the sum of each row in the adjacency matrix (degree matrix)
    degree_mat = torch.sqrt(1 / degree_mat)  # Compute the reciprocal square root of the degrees
    degree_mat[degree_mat == float('inf')] = 0  # Handle division by zero cases
    degree_mat = torch.diag(degree_mat).to_sparse()  # Convert the degree matrix to a sparse diagonal matrix

    adj_mat = degree_mat @ adj_mat @ degree_mat  # Apply the renormalization trick

    return features.to_sparse().to(device), labels.to(device), adj_mat.to_sparse().to(device)


def train_iter(epoch, model, optimizer, criterion, input, target, mask_train,
               mask_val, print_every=10):
    start_t = time.time()
    model.train()
    optimizer.zero_grad()

    # Forward pass
    output = model(*input)
    loss = criterion(output[mask_train], target[mask_train])  # Compute the loss using the training mask

    loss.backward()
    optimizer.step()

    # Evaluate the model performance on training and validation sets
    loss_train, acc_train = test(model, criterion, input, target, mask_train)
    loss_val, acc_val = test(model, criterion, input, target, mask_val)

    if epoch % print_every == 0:
        # Print the training progress at specified intervals
        print(f'Epoch: {epoch:04d} ({(time.time() - start_t):.4f}s) loss_train: {loss_train:.4f} acc_train: {acc_train:.4f} loss_val: {loss_val:.4f} acc_val: {acc_val:.4f}')


def test(model, criterion, input, target, mask):
    model.eval()
    with torch.no_grad():
        output = model(*input)
        output, target = output[mask], target[mask]

        loss = criterion(output, target)
        acc = (output.argmax(dim=1) == target).float().sum() / len(target)

    return loss.item(), acc.item()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='PyTorch Graph Convolutional Network')
    parser.add_argument('--epochs', type=int, default=200,
                        help='number of epochs to train (default: 200)')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--l2', type=float, default=5e-4,
                        help='weight decay (default: 5e-4)')
    parser.add_argument('--dropout-p', type=float, default=0.5,
                        help='dropout probability (default: 0.5)')
    parser.add_argument('--hidden-dim', type=int, default=16,
                        help='dimension of the hidden representation (default: 16)')
    parser.add_argument('--val-every', type=int, default=20,
                        help='epochs between printed training/validation evaluations (default: 20)')
    parser.add_argument('--include-bias', action='store_true', default=False,
                        help='use bias term in convolutions (default: False)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--no-mps', action='store_true', default=False,
                        help='disables macOS GPU training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=42, metavar='S',
                        help='random seed (default: 42)')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    use_mps = not args.no_mps and torch.backends.mps.is_available()

    torch.manual_seed(args.seed)
    if use_cuda:
        device = torch.device('cuda')
    elif use_mps:
        device = torch.device('mps')
    else:
        device = torch.device('cpu')
    print(f'Using {device} device')

    cora_url = 'https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz'
    print('Downloading dataset...')
    with requests.get(cora_url, stream=True) as tgz_file:
        with tarfile.open(fileobj=tgz_file.raw, mode='r:gz') as tgz_object:
            tgz_object.extractall()

    print('Loading dataset...')
    features, labels, adj_mat = load_cora(device=device)
    idx = torch.randperm(len(labels)).to(device)
    idx_test, idx_val, idx_train = idx[:1000], idx[1000:1500], idx[1500:]

    gcn = GCN(features.shape[1], args.hidden_dim, labels.max().item() + 1,
              args.include_bias, args.dropout_p).to(device)
    optimizer = Adam(gcn.parameters(), lr=args.lr, weight_decay=args.l2)
    criterion = nn.NLLLoss()

    for epoch in range(args.epochs):
        train_iter(epoch + 1, gcn, optimizer, criterion, (features, adj_mat), labels, idx_train, idx_val, args.val_every)
        if args.dry_run:
            break

    loss_test, acc_test = test(gcn, criterion, (features,
adj_mat), labels, idx_test) print(f'Test set results: loss {loss_test:.4f} accuracy {acc_test:.4f}')
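# --- Added illustration (not part of the example above) ---
# A minimal sketch of the renormalization trick that load_cora applies,
# D^(-1/2) * (A + I) * D^(-1/2), shown on an assumed toy 3-node path graph
# so the scaling is easy to check by hand.
import torch

A = torch.tensor([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])      # toy adjacency matrix of a 3-node path graph
A_hat = A + torch.eye(3)              # add self-loops
deg = A_hat.sum(dim=1)                # degrees: [2., 3., 2.]
D_inv_sqrt = torch.diag(deg.rsqrt())  # D^(-1/2)
A_norm = D_inv_sqrt @ A_hat @ D_inv_sqrt
# Entry (i, j) equals A_hat[i, j] / sqrt(deg[i] * deg[j]), so high-degree
# nodes contribute proportionally less during neighborhood aggregation.
print(A_norm)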
from __future__ import print_function import argparse import torch import torch.utils.data from torch import nn, optim from torch.nn import functional as F from torchvision import datasets, transforms from torchvision.utils import save_image parser = argparse.ArgumentParser(description='VAE MNIST Example') parser.add_argument('--batch-size', type=int, default=128, metavar='N', help='input batch size for training (default: 128)') parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--no-mps', action='store_true', default=False, help='disables macOS GPU training') parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() use_mps = not args.no_mps and torch.backends.mps.is_available() torch.manual_seed(args.seed) if args.cuda: device = torch.device("cuda") elif use_mps: device = torch.device("mps") else: device = torch.device("cpu") kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {} train_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.ToTensor()), batch_size=args.batch_size, shuffle=False, **kwargs) class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.fc1 = nn.Linear(784, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, x): mu, logvar = self.encode(x.view(-1, 784)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar model = VAE().to(device) optimizer = optim.Adam(model.parameters(), lr=1e-3) # Reconstruction + KL divergence losses summed over all elements and batch def loss_function(recon_x, x, mu, logvar): BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum') # see Appendix B from VAE paper: # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 # https://arxiv.org/abs/1312.6114 # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return BCE + KLD def train(epoch): model.train() train_loss = 0 for batch_idx, (data, _) in enumerate(train_loader): data = data.to(device) optimizer.zero_grad() recon_batch, mu, logvar = model(data) loss = loss_function(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item() optimizer.step() if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), loss.item() / len(data))) print('====> Epoch: {} Average loss: {:.4f}'.format( epoch, train_loss / len(train_loader.dataset))) def test(epoch): model.eval() test_loss = 0 with torch.no_grad(): for i, (data, _) in enumerate(test_loader): data = data.to(device) recon_batch, mu, logvar = model(data) test_loss += loss_function(recon_batch, data, mu, logvar).item() if i == 0: n = min(data.size(0), 8) comparison = torch.cat([data[:n], recon_batch.view(args.batch_size, 1, 28, 28)[:n]]) save_image(comparison.cpu(), 'results/reconstruction_' + str(epoch) + '.png', nrow=n) test_loss /= len(test_loader.dataset) print('====> Test set loss: {:.4f}'.format(test_loss)) if __name__ == "__main__": for epoch in range(1, args.epochs + 1): train(epoch) test(epoch) with torch.no_grad(): sample = torch.randn(64, 20).to(device) sample = model.decode(sample).cpu() save_image(sample.view(64, 1, 28, 28), 'results/sample_' + str(epoch) + '.png')
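# --- Added illustration (not part of the example above) ---
# Sanity-check the closed-form KL term used in loss_function,
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), against a Monte Carlo
# estimate of KL(q(z|x) || N(0, I)); the mu/logvar values are assumed toys.
import torch

torch.manual_seed(0)
mu = torch.tensor([0.5, -1.0])
logvar = torch.tensor([0.2, -0.3])
std = torch.exp(0.5 * logvar)

# Closed form, exactly as in loss_function above
kld_closed = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())

# Monte Carlo: E_q[log q(z) - log p(z)] with z = mu + std * eps (reparameterization)
eps = torch.randn(100000, 2)
z = mu + std * eps
q = torch.distributions.Normal(mu, std)
p = torch.distributions.Normal(torch.zeros(2), torch.ones(2))
kld_mc = (q.log_prob(z) - p.log_prob(z)).sum(dim=1).mean()

print(f"closed form: {kld_closed:.4f}  monte carlo: {kld_mc:.4f}")  # should agree closely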
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import sys from setuptools import find_packages, setup # Minimum required python version REQUIRED_MAJOR = 3 REQUIRED_MINOR = 9 # Requirements for testing, formatting, and tutorials TEST_REQUIRES = ["pytest", "pytest-cov"] FMT_REQUIRES = ["flake8", "ufmt", "flake8-docstrings"] TUTORIALS_REQUIRES = [ "ax-platform", "cma", "jupyter", "kaleido", "matplotlib", "memory_profiler", "papermill", "pykeops", "torchvision", ] # Check for python version if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR): error = ( "Your version of python ({major}.{minor}) is too old. You need " "python >= {required_major}.{required_minor}." ).format( major=sys.version_info.major, minor=sys.version_info.minor, required_minor=REQUIRED_MINOR, required_major=REQUIRED_MAJOR, ) sys.exit(error) # Assign root dir location for later use root_dir = os.path.dirname(__file__) def read_deps_from_file(filename): """Read in requirements file and return items as list of strings""" with open(os.path.join(root_dir, filename), "r") as fh: return [line.strip() for line in fh.readlines() if not line.startswith("#")] # Read in the requirements from the requirements.txt file install_requires = read_deps_from_file("requirements.txt") # Allow non-pinned (usually dev) versions of gpytorch and linear_operator if os.environ.get("ALLOW_LATEST_GPYTORCH_LINOP"): # Allows more recent previously installed versions. If there is no # previously installed version, installs the latest release. install_requires = [ dep.replace("==", ">=") if "gpytorch" in dep or "linear_operator" in dep else dep for dep in install_requires ] # Read in pinned versions of the formatting tools FMT_REQUIRES += read_deps_from_file("requirements-fmt.txt") # Dev is test + formatting + docs generation DEV_REQUIRES = TEST_REQUIRES + FMT_REQUIRES + ["sphinx<=7.1.2"] # read in README.md as the long description with open(os.path.join(root_dir, "README.md"), "r") as fh: long_description = fh.read() setup( name="botorch", description="Bayesian Optimization in PyTorch", author="Meta Platforms, Inc.", license="MIT", url="https://botorch.org", project_urls={ "Documentation": "https://botorch.org", "Source": "https://github.com/pytorch/botorch", "conda": "https://anaconda.org/pytorch/botorch", }, keywords=["Bayesian optimization", "PyTorch"], classifiers=[ "Development Status :: 4 - Beta", "Programming Language :: Python :: 3 :: Only", "License :: OSI Approved :: MIT License", "Topic :: Scientific/Engineering", "Intended Audience :: Science/Research", "Intended Audience :: Developers", ], long_description=long_description, long_description_content_type="text/markdown", python_requires=">=3.9", packages=find_packages(exclude=["test", "test.*"]), install_requires=install_requires, extras_require={ "dev": DEV_REQUIRES, "test": TEST_REQUIRES, "tutorials": TUTORIALS_REQUIRES, }, )
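# --- Added illustration (hypothetical input; not part of setup.py above) ---
# How the ALLOW_LATEST_GPYTORCH_LINOP branch rewrites pinned gpytorch /
# linear_operator requirements from exact pins to minimum versions while
# leaving every other dependency untouched. Version strings are made up.
deps = ["gpytorch==1.11", "linear_operator==0.5.1", "scipy"]
relaxed = [
    dep.replace("==", ">=")
    if "gpytorch" in dep or "linear_operator" in dep
    else dep
    for dep in deps
]
print(relaxed)  # ['gpytorch>=1.11', 'linear_operator>=0.5.1', 'scipy']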
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch from botorch import settings from botorch.logging import LOG_LEVEL_DEFAULT, logger, shape_to_str from botorch.utils.testing import BotorchTestCase class TestLogging(BotorchTestCase): def test_logger(self): # Verify log statements are properly captured # assertLogs() captures all log calls, ignoring the severity level with self.assertLogs(logger="botorch", level="INFO") as logs_cm: logger.info("Hello World!") logger.error("Goodbye Universe!") self.assertEqual( logs_cm.output, ["INFO:botorch:Hello World!", "ERROR:botorch:Goodbye Universe!"], ) def test_settings_log_level(self): # Verify the default level is applied self.assertEqual(logger.level, LOG_LEVEL_DEFAULT) # Next, verify the level is overwritten within the context manager with settings.log_level(logging.INFO): self.assertEqual(logger.level, logging.INFO) # Finally, verify the original level is set again self.assertEqual(logger.level, LOG_LEVEL_DEFAULT) def test_shape_to_str(self): self.assertEqual("``", shape_to_str(torch.Size([]))) self.assertEqual("`1`", shape_to_str(torch.Size([1]))) self.assertEqual("`1 x 2`", shape_to_str(torch.Size([1, 2]))) self.assertEqual("`1 x 2 x 3`", shape_to_str(torch.Size([1, 2, 3])))
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import warnings import torch from botorch.cross_validation import batch_cross_validation, gen_loo_cv_folds from botorch.exceptions.warnings import OptimizationWarning from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP from botorch.utils.testing import _get_random_data, BotorchTestCase from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood class TestFitBatchCrossValidation(BotorchTestCase): def test_single_task_batch_cv(self): n = 10 for batch_shape, m, dtype in itertools.product( (torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double) ): tkwargs = {"device": self.device, "dtype": dtype} train_X, train_Y = _get_random_data( batch_shape=batch_shape, m=m, n=n, **tkwargs ) if m == 1: train_Y = train_Y.squeeze(-1) train_Yvar = torch.full_like(train_Y, 0.01) noiseless_cv_folds = gen_loo_cv_folds(train_X=train_X, train_Y=train_Y) # check shapes expected_shape_train_X = batch_shape + torch.Size( [n, n - 1, train_X.shape[-1]] ) expected_shape_test_X = batch_shape + torch.Size([n, 1, train_X.shape[-1]]) self.assertEqual(noiseless_cv_folds.train_X.shape, expected_shape_train_X) self.assertEqual(noiseless_cv_folds.test_X.shape, expected_shape_test_X) expected_shape_train_Y = batch_shape + torch.Size([n, n - 1, m]) expected_shape_test_Y = batch_shape + torch.Size([n, 1, m]) self.assertEqual(noiseless_cv_folds.train_Y.shape, expected_shape_train_Y) self.assertEqual(noiseless_cv_folds.test_Y.shape, expected_shape_test_Y) self.assertIsNone(noiseless_cv_folds.train_Yvar) self.assertIsNone(noiseless_cv_folds.test_Yvar) # Test SingleTaskGP with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=OptimizationWarning) cv_results = batch_cross_validation( model_cls=SingleTaskGP, mll_cls=ExactMarginalLogLikelihood, cv_folds=noiseless_cv_folds, fit_args={"optimizer_kwargs": {"options": {"maxiter": 1}}}, ) expected_shape = batch_shape + torch.Size([n, 1, m]) self.assertEqual(cv_results.posterior.mean.shape, expected_shape) self.assertEqual(cv_results.observed_Y.shape, expected_shape) # Test FixedNoiseGP noisy_cv_folds = gen_loo_cv_folds( train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar ) # check shapes self.assertEqual(noisy_cv_folds.train_X.shape, expected_shape_train_X) self.assertEqual(noisy_cv_folds.test_X.shape, expected_shape_test_X) self.assertEqual(noisy_cv_folds.train_Y.shape, expected_shape_train_Y) self.assertEqual(noisy_cv_folds.test_Y.shape, expected_shape_test_Y) self.assertEqual(noisy_cv_folds.train_Yvar.shape, expected_shape_train_Y) self.assertEqual(noisy_cv_folds.test_Yvar.shape, expected_shape_test_Y) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=OptimizationWarning) cv_results = batch_cross_validation( model_cls=FixedNoiseGP, mll_cls=ExactMarginalLogLikelihood, cv_folds=noisy_cv_folds, fit_args={"optimizer_kwargs": {"options": {"maxiter": 1}}}, ) self.assertEqual(cv_results.posterior.mean.shape, expected_shape) self.assertEqual(cv_results.observed_Y.shape, expected_shape)
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from contextlib import ExitStack, nullcontext from itertools import filterfalse, product from typing import Callable, Iterable, Optional from unittest.mock import MagicMock, patch from warnings import catch_warnings, warn, WarningMessage import torch from botorch import fit from botorch.exceptions.errors import ModelFittingError, UnsupportedError from botorch.exceptions.warnings import OptimizationWarning from botorch.models import SingleTaskGP, SingleTaskVariationalGP from botorch.models.transforms.input import Normalize from botorch.models.transforms.outcome import Standardize from botorch.optim.closures import get_loss_closure_with_grads from botorch.optim.fit import fit_gpytorch_mll_scipy, fit_gpytorch_mll_torch from botorch.optim.utils import get_data_loader from botorch.settings import debug from botorch.utils.context_managers import ( module_rollback_ctx, requires_grad_ctx, TensorCheckpoint, ) from botorch.utils.testing import BotorchTestCase from gpytorch.kernels import MaternKernel from gpytorch.mlls import ExactMarginalLogLikelihood, VariationalELBO from linear_operator.utils.errors import NotPSDError MAX_ITER_MSG = "TOTAL NO. of ITERATIONS REACHED LIMIT" class MockOptimizer: def __init__( self, randomize_requires_grad: bool = True, warnings: Iterable[WarningMessage] = (), exception: Optional[BaseException] = None, ): r"""Class used to mock `optimizer` argument to `fit_gpytorch_mll.""" self.randomize_requires_grad = randomize_requires_grad self.warnings = warnings self.exception = exception self.call_count = 0 def __call__(self, mll, closure: Optional[Callable] = None): self.call_count += 1 for w in self.warnings: warn(str(w.message), w.category) if self.randomize_requires_grad: with torch.no_grad(): for param in mll.parameters(): if param.requires_grad: param[...] 
= torch.rand_like(param) if self.exception is not None: raise self.exception return mll, None class TestFitAPI(BotorchTestCase): r"""Unit tests for general fitting API""" def setUp(self) -> None: super().setUp() with torch.random.fork_rng(): torch.manual_seed(0) train_X = torch.linspace(0, 1, 10).unsqueeze(-1) train_F = torch.sin(2 * math.pi * train_X) train_Y = train_F + 0.1 * torch.randn_like(train_F) model = SingleTaskGP( train_X=train_X, train_Y=train_Y, input_transform=Normalize(d=1), outcome_transform=Standardize(m=1), ) self.mll = ExactMarginalLogLikelihood(model.likelihood, model) def test_fit_gpytorch_mll(self): # Test that `optimizer` is only passed when non-None with patch.object(fit, "FitGPyTorchMLL") as mock_dispatcher: fit.fit_gpytorch_mll(self.mll, optimizer=None) mock_dispatcher.assert_called_once_with( self.mll, type(self.mll.likelihood), type(self.mll.model), closure=None, closure_kwargs=None, optimizer_kwargs=None, ) fit.fit_gpytorch_mll(self.mll, optimizer="foo") mock_dispatcher.assert_called_with( self.mll, type(self.mll.likelihood), type(self.mll.model), closure=None, closure_kwargs=None, optimizer="foo", optimizer_kwargs=None, ) def test_fit_gpytorch_model(self): r"""Test support for legacy API""" # Test `option` argument options = {"foo": 0} with catch_warnings(), patch.object( fit, "fit_gpytorch_mll", new=lambda mll, optimizer_kwargs=None, **kwargs: optimizer_kwargs, ): self.assertEqual( {"options": options, "bar": 1}, fit.fit_gpytorch_model( self.mll, options=options, optimizer_kwargs={"bar": 1}, ), ) # Test `max_retries` argument with catch_warnings(), patch.object( fit, "fit_gpytorch_mll", new=lambda mll, max_attempts=None, **kwargs: max_attempts, ): self.assertEqual(100, fit.fit_gpytorch_model(self.mll, max_retries=100)) # Test `exclude` argument self.assertTrue(self.mll.model.mean_module.constant.requires_grad) with catch_warnings(), patch.object( fit, "fit_gpytorch_mll", new=lambda mll, **kwargs: mll.model.mean_module.constant.requires_grad, ): self.assertFalse( fit.fit_gpytorch_model( self.mll, options=options, exclude=["model.mean_module.constant"], ) ) self.assertTrue(self.mll.model.mean_module.constant.requires_grad) # Test collisions with catch_warnings(record=True) as ws, self.assertRaises(SyntaxError): fit.fit_gpytorch_model( self.mll, options=options, optimizer_kwargs={"options": {"bar": 1}}, ) self.assertTrue(any("marked for deprecation" in str(w.message) for w in ws)) # Test that ModelFittingErrors are rethrown as warnings def mock_fit_gpytorch_mll(*args, **kwargs): raise ModelFittingError("foo") with catch_warnings(record=True) as ws, patch.object( fit, "fit_gpytorch_mll", new=mock_fit_gpytorch_mll ): fit.fit_gpytorch_model(self.mll) self.assertTrue(any("foo" in str(w.message) for w in ws)) class TestFitFallback(BotorchTestCase): def setUp(self) -> None: super().setUp() with torch.random.fork_rng(): torch.manual_seed(0) train_X = torch.linspace(0, 1, 10).unsqueeze(-1) train_F = torch.sin(2 * math.pi * train_X) self.mlls = {} self.checkpoints = {} for model_type, output_dim in product([SingleTaskGP], [1, 2]): train_Y = train_F.repeat(1, output_dim) train_Y = train_Y + 0.1 * torch.randn_like(train_Y) model = model_type( train_X=train_X, train_Y=train_Y, input_transform=Normalize(d=1), outcome_transform=Standardize(m=output_dim), **( {} if model_type is SingleTaskGP else {"train_Yvar": torch.full_like(train_Y, 0.1)} ), ) self.assertIsInstance(model.covar_module.base_kernel, MaternKernel) model.covar_module.base_kernel.nu = 2.5 mll =
ExactMarginalLogLikelihood(model.likelihood, model) for dtype in (torch.float32, torch.float64): key = model_type, output_dim self.mlls[key] = mll.to(dtype=dtype) self.checkpoints[key] = { k: TensorCheckpoint( values=v.detach().clone(), device=v.device, dtype=v.dtype ) for k, v in mll.state_dict().items() } def test_main(self): for case, mll in self.mlls.items(): self._test_main(mll, self.checkpoints[case]) def test_warnings(self): for case, mll in self.mlls.items(): self._test_warnings(mll, self.checkpoints[case]) def test_exceptions(self): for case, mll in self.mlls.items(): self._test_exceptions(mll, self.checkpoints[case]) def _test_main(self, mll, ckpt): r"""Main test for `_fit_fallback`.""" optimizer = MockOptimizer() optimizer.warnings = [ WarningMessage("test_runtime_warning", RuntimeWarning, __file__, 0), ] for should_fail in (True, False): optimizer.call_count = 0 with catch_warnings(), requires_grad_ctx( module=mll, assignments={"model.mean_module.constant": False} ), module_rollback_ctx(mll, checkpoint=ckpt): try: fit._fit_fallback( mll, None, None, max_attempts=2, optimizer=optimizer, warning_handler=lambda w: not should_fail, ) except ModelFittingError: failed = True else: failed = False # Test control flow self.assertEqual(failed, should_fail) self.assertEqual(optimizer.call_count, 2 if should_fail else 1) # Test terminal state self.assertEqual(failed, mll.training) for key, vals in mll.state_dict().items(): if failed: self.assertTrue(vals.equal(ckpt[key].values)) else: try: param = mll.get_parameter(key) self.assertNotEqual( param.equal(ckpt[key].values), param.requires_grad ) except AttributeError: pass # Test `closure_kwargs` with self.subTest("closure_kwargs"): mock_closure = MagicMock(side_effect=StopIteration("foo")) with self.assertRaisesRegex(StopIteration, "foo"): fit._fit_fallback( mll, None, None, closure=mock_closure, closure_kwargs={"ab": "cd"} ) mock_closure.assert_called_once_with(ab="cd") def _test_warnings(self, mll, ckpt): r"""Test warning handling for `_fit_fallback`.""" optimizer = MockOptimizer(randomize_requires_grad=False) optimizer.warnings = [ WarningMessage("test_runtime_warning", RuntimeWarning, __file__, 0), WarningMessage(MAX_ITER_MSG, OptimizationWarning, __file__, 0), WarningMessage( "Optimization timed out after X", OptimizationWarning, __file__, 0 ), ] warning_handlers = { "default": fit.DEFAULT_WARNING_HANDLER, "none": lambda w: False, "all": lambda w: True, } for case, warning_handler in warning_handlers.items(): with ExitStack() as es: logs = es.enter_context( self.assertLogs(level="DEBUG") if case == "default" else nullcontext() ) ws = es.enter_context(catch_warnings(record=True)) es.enter_context(debug(True)) try: fit._fit_fallback( mll, None, None, max_attempts=2, optimizer=optimizer, warning_handler=warning_handler, ) except ModelFittingError: failed = True else: failed = False # Test that warnings were resolved in the expected fashion self.assertEqual(failed, case == "none") with catch_warnings(record=True) as rethrown: unresolved = list(filterfalse(warning_handler, optimizer.warnings)) self.assertEqual(failed, len(unresolved) > 0) self.assertEqual( {str(w.message) for w in ws}, {str(w.message) for w in rethrown + unresolved}, ) if logs: # test that default filter logs certain warnings self.assertTrue(any(MAX_ITER_MSG in log for log in logs.output)) # Test default of retrying upon encountering an uncaught OptimizationWarning optimizer.warnings.append( WarningMessage("test_optim_warning", OptimizationWarning, __file__, 0) ) with 
self.assertRaises(ModelFittingError), catch_warnings(): fit._fit_fallback( mll, None, None, max_attempts=1, optimizer=optimizer, ) def _test_exceptions(self, mll, ckpt): r"""Test exception handling for `_fit_fallback`.""" optimizer = MockOptimizer(exception=NotPSDError("not_psd")) with catch_warnings(): # Test behavior when encountering a caught exception with self.assertLogs(level="DEBUG") as logs, self.assertRaises( ModelFittingError ): fit._fit_fallback( mll, None, None, max_attempts=1, optimizer=optimizer, ) self.assertTrue(any("not_psd" in log for log in logs.output)) self.assertTrue( # test state rollback all(v.equal(ckpt[k].values) for k, v in mll.state_dict().items()) ) # Test behavior when encountering an uncaught exception with self.assertRaisesRegex(NotPSDError, "not_psd"): fit._fit_fallback( mll, None, None, max_attempts=1, optimizer=optimizer, caught_exception_types=(), ) self.assertTrue( # test state rollback all(v.equal(ckpt[k].values) for k, v in mll.state_dict().items()) ) class TestFitFallbackApproximate(BotorchTestCase): def setUp(self) -> None: super().setUp() with torch.random.fork_rng(): torch.manual_seed(0) train_X = torch.linspace(0, 1, 10).unsqueeze(-1) train_F = torch.sin(2 * math.pi * train_X) train_Y = train_F + 0.1 * torch.randn_like(train_F) model = SingleTaskVariationalGP( train_X=train_X, train_Y=train_Y, input_transform=Normalize(d=1), outcome_transform=Standardize(m=1), ) self.mll = mll = VariationalELBO(model.likelihood, model.model, num_data=10) self.data_loader = get_data_loader(mll.model, batch_size=1) self.closure = get_loss_closure_with_grads( mll=mll, parameters={n: p for n, p in mll.named_parameters() if p.requires_grad}, data_loader=self.data_loader, ) def test_main(self): # Test parameter updates with module_rollback_ctx(self.mll) as ckpt: fit._fit_fallback_approximate( self.mll, None, None, closure=self.closure, optimizer_kwargs={"step_limit": 3}, ) for name, param in self.mll.named_parameters(): self.assertFalse(param.equal(ckpt[name].values)) # Test dispatching pattern kwargs = {"full_batch_limit": float("inf")} with patch.object(fit, "_fit_fallback") as mock_fallback: fit._fit_fallback_approximate(self.mll, None, None, full_batch_limit=1) mock_fallback.assert_called_once_with( self.mll, None, None, closure=None, optimizer=fit_gpytorch_mll_torch, ) with patch.object(fit, "_fit_fallback") as mock_fallback: fit._fit_fallback_approximate(self.mll, None, None, **kwargs) mock_fallback.assert_called_once_with( self.mll, None, None, closure=None, optimizer=fit_gpytorch_mll_scipy, ) with patch.object(fit, "_fit_fallback") as mock_fallback: fit._fit_fallback_approximate( self.mll, None, None, closure=self.closure, **kwargs ) mock_fallback.assert_called_once_with( self.mll, None, None, closure=self.closure, optimizer=fit_gpytorch_mll_torch, ) with patch.object(fit, "_fit_fallback") as mock_fallback, patch.object( fit, "get_loss_closure_with_grads" ) as mock_get_closure: mock_get_closure.return_value = "foo" fit._fit_fallback_approximate( self.mll, None, None, data_loader=self.data_loader, **kwargs, ) params = {n: p for n, p in self.mll.named_parameters() if p.requires_grad} mock_get_closure.assert_called_once_with( mll=self.mll, data_loader=self.data_loader, parameters=params, ) mock_fallback.assert_called_once_with( self.mll, None, None, closure="foo", optimizer=fit_gpytorch_mll_torch, ) # Test exception handling with self.assertRaisesRegex( UnsupportedError, "Only one of `data_loader` or `closure` may be passed."
): fit._fit_fallback_approximate( self.mll, None, None, closure=self.closure, data_loader=self.data_loader, )
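# --- Added illustration (assumed toy data; not part of the test suite above) ---
# The `optimizer` hook these tests exercise: fit_gpytorch_mll accepts a callable
# invoked as optimizer(mll, closure=closure, **optimizer_kwargs) that optimizes
# the MLL in place, which is what MockOptimizer imitates. This sketch simply
# wraps the default scipy-based optimizer with logging.
import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models import SingleTaskGP
from botorch.optim.fit import fit_gpytorch_mll_scipy
from gpytorch.mlls import ExactMarginalLogLikelihood

def logging_optimizer(mll, closure=None, **kwargs):
    # Delegate to the stock optimizer, printing around the call.
    print("starting fit")
    result = fit_gpytorch_mll_scipy(mll, closure=closure, **kwargs)
    print(f"finished fit: {result}")
    return result

train_X = torch.rand(10, 1, dtype=torch.float64)
train_Y = torch.sin(6 * train_X)
model = SingleTaskGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll, optimizer=logging_optimizer)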
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import warnings import torch from botorch.acquisition import ExpectedImprovement, qExpectedImprovement from botorch.exceptions.warnings import OptimizationWarning from botorch.fit import fit_gpytorch_mll from botorch.models import FixedNoiseGP, SingleTaskGP from botorch.optim import optimize_acqf from botorch.utils.testing import BotorchTestCase from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood EPS = 1e-8 NOISE = [ [0.127], [-0.113], [-0.345], [-0.034], [-0.069], [-0.272], [0.013], [0.056], [0.087], [-0.081], ] class TestEndToEnd(BotorchTestCase): def _setUp(self, double=False): dtype = torch.double if double else torch.float train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).view(-1, 1) train_y = torch.sin(train_x * (2 * math.pi)) train_yvar = torch.tensor(0.1**2, device=self.device, dtype=dtype) noise = torch.tensor(NOISE, device=self.device, dtype=dtype) self.train_x = train_x self.train_y = train_y + noise self.train_yvar = train_yvar self.bounds = torch.tensor([[0.0], [1.0]], device=self.device, dtype=dtype) model_st = SingleTaskGP(self.train_x, self.train_y) self.model_st = model_st.to(device=self.device, dtype=dtype) self.mll_st = ExactMarginalLogLikelihood( self.model_st.likelihood, self.model_st ) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=OptimizationWarning) self.mll_st = fit_gpytorch_mll( self.mll_st, optimizer_kwargs={"options": {"maxiter": 5}}, max_attempts=1, ) model_fn = FixedNoiseGP( self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y) ) self.model_fn = model_fn.to(device=self.device, dtype=dtype) self.mll_fn = ExactMarginalLogLikelihood( self.model_fn.likelihood, self.model_fn ) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=OptimizationWarning) self.mll_fn = fit_gpytorch_mll( self.mll_fn, optimizer_kwargs={"options": {"maxiter": 5}}, max_attempts=1, ) def test_qEI(self): for double in (True, False): self._setUp(double=double) qEI = qExpectedImprovement(self.model_st, best_f=0.0) candidates, _ = optimize_acqf( acq_function=qEI, bounds=self.bounds, q=3, num_restarts=10, raw_samples=20, options={"maxiter": 5}, ) self.assertTrue(torch.all(-EPS <= candidates)) self.assertTrue(torch.all(candidates <= 1 + EPS)) qEI = qExpectedImprovement(self.model_fn, best_f=0.0) candidates, _ = optimize_acqf( acq_function=qEI, bounds=self.bounds, q=3, num_restarts=10, raw_samples=20, options={"maxiter": 5}, ) self.assertTrue(torch.all(-EPS <= candidates)) self.assertTrue(torch.all(candidates <= 1 + EPS)) candidates_batch_limit, _ = optimize_acqf( acq_function=qEI, bounds=self.bounds, q=3, num_restarts=10, raw_samples=20, options={"maxiter": 5, "batch_limit": 5}, ) self.assertTrue(torch.all(-EPS <= candidates_batch_limit)) self.assertTrue(torch.all(candidates_batch_limit <= 1 + EPS)) def test_EI(self): for double in (True, False): self._setUp(double=double) EI = ExpectedImprovement(self.model_st, best_f=0.0) candidates, _ = optimize_acqf( acq_function=EI, bounds=self.bounds, q=1, num_restarts=10, raw_samples=20, options={"maxiter": 5}, ) self.assertTrue(-EPS <= candidates <= 1 + EPS) EI = ExpectedImprovement(self.model_fn, best_f=0.0) candidates, _ = optimize_acqf( acq_function=EI, bounds=self.bounds, q=1, num_restarts=10, raw_samples=20, options={"maxiter": 
5}, ) self.assertTrue(-EPS <= candidates <= 1 + EPS)
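# --- Added illustration (toy 1-D problem; not taken from the test above) ---
# The end-to-end pattern these tests check: fit a GP to observed data, build an
# ExpectedImprovement acquisition function, and optimize it within the bounds.
import math
import torch
from botorch.acquisition import ExpectedImprovement
from botorch.fit import fit_gpytorch_mll
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf
from gpytorch.mlls import ExactMarginalLogLikelihood

train_X = torch.linspace(0, 1, 10, dtype=torch.float64).unsqueeze(-1)
train_Y = torch.sin(2 * math.pi * train_X)
model = SingleTaskGP(train_X, train_Y)
fit_gpytorch_mll(ExactMarginalLogLikelihood(model.likelihood, model))
EI = ExpectedImprovement(model, best_f=train_Y.max())
candidate, acq_value = optimize_acqf(
    acq_function=EI,
    bounds=torch.tensor([[0.0], [1.0]], dtype=torch.float64),
    q=1,
    num_restarts=5,
    raw_samples=20,
)
print(candidate, acq_value)  # candidate stays inside [0, 1], as asserted above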
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r""" Monolithic CUDA tests. This implements a single monolithic test for all CUDA functionality. The main reason for doing this is that if individual tests are run in separate processes, the overhead of initializing the GPU can vastly outweigh the speedup from parallelization, and, in addition, this can lead to the GPU running out of memory. """ import unittest from itertools import chain from pathlib import Path from typing import Union import torch from botorch.utils.testing import BotorchTestCase @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") class TestBotorchCUDA(unittest.TestCase): def test_cuda(self): test_dir = Path(__file__).parent.resolve() tests = unittest.TestLoader().discover(test_dir) self.assertTrue(run_cuda_tests(tests)) def run_cuda_tests(tests: Union[unittest.TestCase, unittest.TestSuite]) -> bool: """Function for running all tests on cuda (except TestBotorchCUDA itself)""" if isinstance(tests, BotorchTestCase): tests.device = torch.device("cuda") test_result = tests.run() if test_result is None: # some test runners may return None on skipped tests return True passed = test_result.wasSuccessful() if not passed: # print test name print(f"test: {tests}") for error in chain(test_result.errors, test_result.failures): # print traceback print(f"error: {error[1]}") return passed elif isinstance(tests, unittest.TestSuite): return all(run_cuda_tests(tests_) for tests_ in tests) elif ( isinstance(tests, unittest.TestCase) and tests.id() == "test_cuda.TestBotorchCUDA.test_cuda" ): # ignore TestBotorchCUDA return True elif isinstance(tests, unittest.loader._FailedTest): # test failed to load, often import error print(f"test: {tests}") print(f"exception: {tests._exception}") return False else: raise ValueError(f"Unexpected type for test: {tests}")
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings import gpytorch.settings as gp_settings import linear_operator.settings as linop_settings from botorch import settings from botorch.exceptions import BotorchWarning from botorch.utils.testing import BotorchTestCase class TestSettings(BotorchTestCase): def test_flags(self): for flag in (settings.debug, settings.propagate_grads): self.assertFalse(flag.on()) self.assertTrue(flag.off()) with flag(True): self.assertTrue(flag.on()) self.assertFalse(flag.off()) self.assertFalse(flag.on()) self.assertTrue(flag.off()) def test_debug(self): # Turn on debug. settings.debug._set_state(True) # Check that debug warnings are suppressed when it is turned off. with settings.debug(False): with warnings.catch_warnings(record=True) as ws: if settings.debug.on(): warnings.warn("test", BotorchWarning) self.assertEqual(len(ws), 0) # Check that warnings are not suppressed outside of context manager. with warnings.catch_warnings(record=True) as ws: if settings.debug.on(): warnings.warn("test", BotorchWarning) self.assertEqual(len(ws), 1) # Turn off debug. settings.debug._set_state(False) # Check that warnings are not suppressed within debug. with settings.debug(True): with warnings.catch_warnings(record=True) as ws: if settings.debug.on(): warnings.warn("test", BotorchWarning) self.assertEqual(len(ws), 1) # Check that warnings are suppressed outside of context manager. with warnings.catch_warnings(record=True) as ws: if settings.debug.on(): warnings.warn("test", BotorchWarning) self.assertEqual(len(ws), 0) class TestDefaultGPyTorchLinOpSettings(BotorchTestCase): def test_default_gpytorch_linop_settings(self): self.assertTrue(linop_settings._fast_covar_root_decomposition.off()) self.assertTrue(linop_settings._fast_log_prob.off()) self.assertTrue(linop_settings._fast_solves.off()) self.assertEqual(linop_settings.cholesky_max_tries.value(), 6) self.assertEqual(linop_settings.max_cholesky_size.value(), 4096) self.assertEqual(gp_settings.max_eager_kernel_size.value(), 4096)
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from botorch.test_functions.multi_fidelity import ( AugmentedBranin, AugmentedHartmann, AugmentedRosenbrock, ) from botorch.utils.testing import ( BaseTestProblemTestCaseMixIn, BotorchTestCase, SyntheticTestFunctionTestCaseMixin, ) class TestAugmentedBranin( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ AugmentedBranin(), AugmentedBranin(negate=True), AugmentedBranin(noise_std=0.1), ] class TestAugmentedHartmann( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ AugmentedHartmann(), AugmentedHartmann(negate=True), AugmentedHartmann(noise_std=0.1), ] class TestAugmentedRosenbrock( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ AugmentedRosenbrock(), AugmentedRosenbrock(negate=True), AugmentedRosenbrock(noise_std=0.1), AugmentedRosenbrock(dim=4), AugmentedRosenbrock(dim=4, negate=True), AugmentedRosenbrock(dim=4, noise_std=0.1), ] def test_min_dimension(self): with self.assertRaises(ValueError): AugmentedRosenbrock(dim=2)
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.test_functions.multi_objective_multi_fidelity import ( MOMFBraninCurrin, MOMFPark, ) from botorch.utils.testing import ( BaseTestProblemTestCaseMixIn, BotorchTestCase, MultiObjectiveTestProblemTestCaseMixin, ) class TestMOMFBraninCurrin( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): functions = [MOMFBraninCurrin()] bounds = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]] def test_init(self): for f in self.functions: self.assertEqual(f.num_objectives, 2) self.assertEqual(f.dim, 3) self.assertTrue( torch.equal(f.bounds, torch.tensor(self.bounds).to(f.bounds)) ) class TestMOMFPark( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): functions = [MOMFPark()] bounds = [[0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0]] def test_init(self): for f in self.functions: self.assertEqual(f.num_objectives, 2) self.assertEqual(f.dim, 5) self.assertTrue( torch.equal(f.bounds, torch.tensor(self.bounds).to(f.bounds)) )
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.exceptions.errors import InputDataError from botorch.test_functions.synthetic import ( Ackley, Beale, Branin, Bukin, Cosine8, DixonPrice, DropWave, EggHolder, Griewank, Hartmann, HolderTable, Levy, Michalewicz, Powell, PressureVessel, Rastrigin, Rosenbrock, Shekel, SixHumpCamel, SpeedReducer, StyblinskiTang, SyntheticTestFunction, TensionCompressionString, ThreeHumpCamel, WeldedBeamSO, ) from botorch.utils.testing import ( BaseTestProblemTestCaseMixIn, BotorchTestCase, ConstrainedTestProblemTestCaseMixin, SyntheticTestFunctionTestCaseMixin, ) from torch import Tensor class DummySyntheticTestFunction(SyntheticTestFunction): dim = 2 _bounds = [(-1, 1), (-1, 1)] _optimal_value = 0 def evaluate_true(self, X: Tensor) -> Tensor: return -X.pow(2).sum(dim=-1) class DummySyntheticTestFunctionWithOptimizers(DummySyntheticTestFunction): _optimizers = [(0, 0)] class TestCustomBounds(BotorchTestCase): functions_with_custom_bounds = [ # Function name and the default dimension. (Ackley, 2), (Beale, 2), (Branin, 2), (Bukin, 2), (Cosine8, 8), (DropWave, 2), (DixonPrice, 2), (EggHolder, 2), (Griewank, 2), (Hartmann, 6), (HolderTable, 2), (Levy, 2), (Michalewicz, 2), (Powell, 4), (Rastrigin, 2), (Rosenbrock, 2), (Shekel, 4), (SixHumpCamel, 2), (StyblinskiTang, 2), (ThreeHumpCamel, 2), ] def test_custom_bounds(self): with self.assertRaisesRegex( InputDataError, "Expected the bounds to match the dimensionality of the domain. ", ): DummySyntheticTestFunctionWithOptimizers(bounds=[(0, 0)]) with self.assertRaisesRegex( ValueError, "No global optimum found within custom bounds" ): DummySyntheticTestFunctionWithOptimizers(bounds=[(1, 2), (3, 4)]) dummy = DummySyntheticTestFunctionWithOptimizers(bounds=[(-2, 2), (-3, 3)]) self.assertEqual(dummy._bounds[0], (-2, 2)) self.assertEqual(dummy._bounds[1], (-3, 3)) self.assertAllClose( dummy.bounds, torch.tensor([[-2, -3], [2, 3]], dtype=torch.double) ) # Test each function with custom bounds. 
for func_class, dim in self.functions_with_custom_bounds: bounds = [(-1e5, 1e5) for _ in range(dim)] bounds_tensor = torch.tensor(bounds, dtype=torch.double).T func = func_class(bounds=bounds) self.assertEqual(func._bounds, bounds) self.assertAllClose(func.bounds, bounds_tensor) class TestAckley( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [Ackley(), Ackley(negate=True), Ackley(noise_std=0.1), Ackley(dim=3)] class TestBeale( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [Beale(), Beale(negate=True), Beale(noise_std=0.1)] class TestBranin( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [Branin(), Branin(negate=True), Branin(noise_std=0.1)] class TestBukin( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [Bukin(), Bukin(negate=True), Bukin(noise_std=0.1)] class TestCosine8( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [Cosine8(), Cosine8(negate=True), Cosine8(noise_std=0.1)] class TestDropWave( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [DropWave(), DropWave(negate=True), DropWave(noise_std=0.1)] class TestDixonPrice( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ DixonPrice(), DixonPrice(negate=True), DixonPrice(noise_std=0.1), DixonPrice(dim=3), ] class TestEggHolder( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [EggHolder(), EggHolder(negate=True), EggHolder(noise_std=0.1)] class TestGriewank( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ Griewank(), Griewank(negate=True), Griewank(noise_std=0.1), Griewank(dim=4), ] class TestHartmann( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ Hartmann(), Hartmann(negate=True), Hartmann(noise_std=0.1), Hartmann(dim=3), Hartmann(dim=3, negate=True), Hartmann(dim=3, noise_std=0.1), Hartmann(dim=4), Hartmann(dim=4, negate=True), Hartmann(dim=4, noise_std=0.1), ] def test_dimension(self): with self.assertRaises(ValueError): Hartmann(dim=2) class TestHolderTable( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [HolderTable(), HolderTable(negate=True), HolderTable(noise_std=0.1)] class TestLevy( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ Levy(), Levy(negate=True), Levy(noise_std=0.1), Levy(dim=3), Levy(dim=3, negate=True), Levy(dim=3, noise_std=0.1), ] class TestMichalewicz( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ Michalewicz(), Michalewicz(negate=True), Michalewicz(noise_std=0.1), Michalewicz(dim=5), Michalewicz(dim=5, negate=True), Michalewicz(dim=5, noise_std=0.1), Michalewicz(dim=10), Michalewicz(dim=10, negate=True), Michalewicz(dim=10, noise_std=0.1), ] class TestPowell( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [Powell(), Powell(negate=True), Powell(noise_std=0.1)] class TestRastrigin( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ Rastrigin(), Rastrigin(negate=True), Rastrigin(noise_std=0.1), Rastrigin(dim=3), Rastrigin(dim=3, negate=True), Rastrigin(dim=3, noise_std=0.1), ] 
class TestRosenbrock( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ Rosenbrock(), Rosenbrock(negate=True), Rosenbrock(noise_std=0.1), Rosenbrock(dim=3), Rosenbrock(dim=3, negate=True), Rosenbrock(dim=3, noise_std=0.1), ] class TestShekel( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [Shekel(), Shekel(negate=True), Shekel(noise_std=0.1)] class TestSixHumpCamel( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [SixHumpCamel(), SixHumpCamel(negate=True), SixHumpCamel(noise_std=0.1)] class TestStyblinskiTang( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ StyblinskiTang(), StyblinskiTang(negate=True), StyblinskiTang(noise_std=0.1), StyblinskiTang(dim=3), StyblinskiTang(dim=3, negate=True), StyblinskiTang(dim=3, noise_std=0.1), ] class TestThreeHumpCamel( BotorchTestCase, BaseTestProblemTestCaseMixIn, SyntheticTestFunctionTestCaseMixin ): functions = [ ThreeHumpCamel(), ThreeHumpCamel(negate=True), ThreeHumpCamel(noise_std=0.1), ] # ------------------ Constrained synthetic test problems ------------------ # class TestPressureVessel( BotorchTestCase, BaseTestProblemTestCaseMixIn, ConstrainedTestProblemTestCaseMixin, ): functions = [PressureVessel()] class TestSpeedReducer( BotorchTestCase, BaseTestProblemTestCaseMixIn, ConstrainedTestProblemTestCaseMixin, ): functions = [SpeedReducer()] class TestTensionCompressionString( BotorchTestCase, BaseTestProblemTestCaseMixIn, ConstrainedTestProblemTestCaseMixin, ): functions = [TensionCompressionString()] class TestWeldedBeamSO( BotorchTestCase, BaseTestProblemTestCaseMixIn, ConstrainedTestProblemTestCaseMixin, ): functions = [WeldedBeamSO()]
#! /usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import List import torch from botorch.exceptions.errors import UnsupportedError from botorch.test_functions.base import BaseTestProblem from botorch.test_functions.multi_objective import ( BNH, BraninCurrin, C2DTLZ2, CarSideImpact, CONSTR, ConstrainedBraninCurrin, DH1, DH2, DH3, DH4, DiscBrake, DTLZ1, DTLZ2, DTLZ3, DTLZ4, DTLZ5, DTLZ7, GMM, MultiObjectiveTestProblem, MW7, OSY, Penicillin, SRN, ToyRobust, VehicleSafety, WeldedBeam, ZDT1, ZDT2, ZDT3, ) from botorch.utils.testing import ( BaseTestProblemTestCaseMixIn, BotorchTestCase, ConstrainedTestProblemTestCaseMixin, MultiObjectiveTestProblemTestCaseMixin, ) class DummyMOProblem(MultiObjectiveTestProblem): _ref_point = [0.0, 0.0] _num_objectives = 2 _bounds = [(0.0, 1.0)] * 2 dim = 2 def evaluate_true(self, X): f_X = X + 2 return -f_X if self.negate else f_X class TestBaseTestMultiObjectiveProblem(BotorchTestCase): def test_base_mo_problem(self): for negate in (True, False): for noise_std in (None, 1.0): f = DummyMOProblem(noise_std=noise_std, negate=negate) self.assertEqual(f.noise_std, noise_std) self.assertEqual(f.negate, negate) for dtype in (torch.float, torch.double): f.to(dtype=dtype, device=self.device) X = torch.rand(3, 2, dtype=dtype, device=self.device) f_X = f.evaluate_true(X) expected_f_X = -(X + 2) if negate else X + 2 self.assertTrue(torch.equal(f_X, expected_f_X)) with self.assertRaises(NotImplementedError): f.gen_pareto_front(1) class TestBraninCurrin( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [BraninCurrin()] def test_init(self): for f in self.functions: self.assertEqual(f.num_objectives, 2) self.assertEqual(f.dim, 2) class TestDH( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): def setUp(self, suppress_input_warnings: bool = True) -> None: super().setUp(suppress_input_warnings=suppress_input_warnings) self.dims = [2, 3, 4, 5] self.bounds = [ [[0.0, -1], [1, 1]], [[0.0, -1, -1], [1, 1, 1]], [[0.0, 0, -1, -1], [1, 1, 1, 1]], [[0.0, -0.15, -1, -1, -1], [1, 1, 1, 1, 1]], ] self.expected = [ [[0.0, 1.0], [1.0, 1.0 / 1.2 + 1.0]], [[0.0, 1.0], [1.0, 2.0 / 1.2 + 20.0]], [[0.0, 1.88731], [1.0, 1.9990726 * 100]], [[0.0, 1.88731], [1.0, 150.0]], ] @property def functions(self) -> List[BaseTestProblem]: return [DH1(dim=2), DH2(dim=3), DH3(dim=4), DH4(dim=5)] def test_init(self): for i, f in enumerate(self.functions): with self.assertRaises(ValueError): f.__class__(dim=1) self.assertEqual(f.num_objectives, 2) self.assertEqual(f.dim, self.dims[i]) self.assertTrue( torch.equal( f.bounds, torch.tensor( self.bounds[i], dtype=f.bounds.dtype, device=f.bounds.device ), ) ) def test_function_values(self): for i, f in enumerate(self.functions): test_X = torch.zeros(2, self.dims[i], device=self.device) test_X[1] = 1.0 actual = f(test_X) expected = torch.tensor(self.expected[i], device=self.device) self.assertAllClose(actual, expected) class TestDTLZ( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [ DTLZ1(dim=5, num_objectives=2), DTLZ2(dim=5, num_objectives=2), DTLZ3(dim=5, num_objectives=2), DTLZ4(dim=5, num_objectives=2), DTLZ5(dim=5, num_objectives=2), 
DTLZ7(dim=5, num_objectives=2), ] def test_init(self): for f in self.functions: with self.assertRaises(ValueError): f.__class__(dim=1, num_objectives=2) self.assertEqual(f.num_objectives, 2) self.assertEqual(f.dim, 5) self.assertEqual(f.k, 4) def test_gen_pareto_front(self): for dtype in (torch.float, torch.double): for f in self.functions: for negate in (True, False): f.negate = negate f = f.to(dtype=dtype, device=self.device) if isinstance(f, (DTLZ5, DTLZ7)): with self.assertRaises(NotImplementedError): f.gen_pareto_front(n=1) else: pareto_f = f.gen_pareto_front(n=10) if negate: pareto_f *= -1 self.assertEqual(pareto_f.dtype, dtype) self.assertEqual(pareto_f.device.type, self.device.type) self.assertTrue((pareto_f > 0).all()) if isinstance(f, DTLZ1): # assert is the hyperplane sum_i (f(x_i)) = 0.5 self.assertTrue( torch.allclose( pareto_f.sum(dim=-1), torch.full( pareto_f.shape[0:1], 0.5, dtype=dtype, device=self.device, ), ) ) elif isinstance(f, (DTLZ2, DTLZ3, DTLZ4)): # assert the points lie on the surface # of the unit hypersphere self.assertTrue( torch.allclose( pareto_f.pow(2).sum(dim=-1), torch.ones( pareto_f.shape[0], dtype=dtype, device=self.device, ), ) ) class TestGMM( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [GMM(num_objectives=4)] def test_init(self): f = self.functions[0] with self.assertRaises(UnsupportedError): f.__class__(num_objectives=5) self.assertEqual(f.num_objectives, 4) self.assertEqual(f.dim, 2) def test_result(self): x = torch.tensor( [ [[0.0342, 0.8055], [0.7844, 0.4831]], [[0.5236, 0.3158], [0.0992, 0.9873]], [[0.4693, 0.5792], [0.5357, 0.9451]], ], device=self.device, ) expected_f_x = -torch.tensor( [ [ [3.6357e-03, 5.9030e-03, 5.8958e-03, 1.0309e-04], [1.6304e-02, 3.1430e-04, 4.7323e-04, 2.0691e-04], ], [ [1.2251e-01, 3.2309e-02, 3.7199e-02, 5.4211e-03], [1.9378e-04, 1.5290e-03, 3.5051e-04, 3.6924e-07], ], [ [3.5550e-01, 5.9409e-02, 1.7352e-01, 8.5574e-02], [3.2686e-02, 9.7298e-02, 7.2311e-02, 1.5613e-03], ], ], device=self.device, ) f = self.functions[0] f.to(device=self.device) for dtype in (torch.float, torch.double): f.to(dtype=dtype) f_x = f(x.to(dtype=dtype)) self.assertTrue( torch.allclose(f_x, expected_f_x.to(dtype=dtype), rtol=1e-4, atol=1e-4) ) class TestMW7( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [MW7(dim=3)] def test_init(self): for f in self.functions: with self.assertRaises(ValueError): f.__class__(dim=1) self.assertEqual(f.num_objectives, 2) self.assertEqual(f.dim, 3) class TestZDT( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [ ZDT1(dim=3, num_objectives=2), ZDT2(dim=3, num_objectives=2), ZDT3(dim=3, num_objectives=2), ] def test_init(self): for f in self.functions: with self.assertRaises(NotImplementedError): f.__class__(dim=3, num_objectives=3) with self.assertRaises(NotImplementedError): f.__class__(dim=3, num_objectives=1) with self.assertRaises(ValueError): f.__class__(dim=1, num_objectives=2) self.assertEqual(f.num_objectives, 2) self.assertEqual(f.dim, 3) def test_gen_pareto_front(self): for dtype in (torch.float, torch.double): for f in self.functions: for negate in (True, False): f.negate = negate f = f.to(dtype=dtype, device=self.device) pareto_f = 
f.gen_pareto_front(n=11) if negate: pareto_f *= -1 self.assertEqual(pareto_f.dtype, dtype) self.assertEqual(pareto_f.device.type, self.device.type) if isinstance(f, ZDT1): self.assertTrue( torch.equal(pareto_f[:, 1], 1 - pareto_f[:, 0].sqrt()) ) elif isinstance(f, ZDT2): self.assertTrue( torch.equal(pareto_f[:, 1], 1 - pareto_f[:, 0].pow(2)) ) elif isinstance(f, ZDT3): f_0 = pareto_f[:, 0] f_1 = pareto_f[:, 1] # check f_0 is in the expected discontinuous part of the pareto # front self.assertTrue( ( (f_0[:3] >= f._parts[0][0]) & (f_0[:3] <= f._parts[0][1]) ).all() ) for i in range(0, 4): f_0_i = f_0[3 + 2 * i : 3 + 2 * (i + 1)] comparison = f_0_i > torch.tensor( f._parts[i + 1], dtype=dtype, device=self.device ) self.assertTrue((comparison[..., 0]).all()) self.assertTrue((~comparison[..., 1]).all()) self.assertTrue( ((comparison[..., 0]) & (~comparison[..., 1])).all() ) # check f_1 self.assertTrue( torch.equal( f_1, 1 - f_0.sqrt() - f_0 * torch.sin(10 * math.pi * f_0), ) ) # ------------------ Unconstrained Multi-objective test problems ------------------ # class TestCarSideImpact( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [CarSideImpact()] class TestPenicillin( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [Penicillin()] class TestToyRobust( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [ToyRobust()] class TestVehicleSafety( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [VehicleSafety()] # ------------------ Constrained Multi-objective test problems ------------------ # class TestBNH( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [BNH()] class TestSRN( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [SRN()] class TestCONSTR( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [CONSTR()] class TestConstrainedBraninCurrin( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [ConstrainedBraninCurrin()] class TestC2DTLZ2( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [C2DTLZ2(dim=3, num_objectives=2)] def test_batch_exception(self): f = C2DTLZ2(dim=3, num_objectives=2) with self.assertRaises(NotImplementedError): f.evaluate_slack_true(torch.empty(1, 1, 3)) class TestDiscBrake( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [DiscBrake()] class TestWeldedBeam( BotorchTestCase, BaseTestProblemTestCaseMixIn, 
MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [WeldedBeam()] class TestOSY( BotorchTestCase, BaseTestProblemTestCaseMixIn, MultiObjectiveTestProblemTestCaseMixin, ConstrainedTestProblemTestCaseMixin, ): @property def functions(self) -> List[BaseTestProblem]: return [OSY()]
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.test_functions.sensitivity_analysis import Gsobol, Ishigami, Morris from botorch.utils.testing import BotorchTestCase class TestIshigami(BotorchTestCase): def testFunction(self): with self.assertRaises(ValueError): Ishigami(b=0.33) f = Ishigami(b=0.1) self.assertEqual(f.b, 0.1) f = Ishigami(b=0.05) self.assertEqual(f.b, 0.05) X = torch.tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]) m1, m2, m3 = f.compute_dgsm(X) for m in [m1, m2, m3]: self.assertEqual(len(m), 3) Z = f.evaluate_true(X) Ztrue = torch.tensor([5.8401, 7.4245]) self.assertAllClose(Z, Ztrue, atol=1e-3) self.assertIsNone(f._optimizers) with self.assertRaises(NotImplementedError): f.optimal_value class TestGsobol(BotorchTestCase): def testFunction(self): for dim in [6, 8, 15]: f = Gsobol(dim=dim) self.assertIsNotNone(f.a) self.assertEqual(len(f.a), dim) f = Gsobol(dim=3, a=[1, 2, 3]) self.assertEqual(f.a, [1, 2, 3]) X = torch.tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]) Z = f.evaluate_true(X) Ztrue = torch.tensor([2.5, 21.0]) self.assertAllClose(Z, Ztrue, atol=1e-3) self.assertIsNone(f._optimizers) with self.assertRaises(NotImplementedError): f.optimal_value class TestMorris(BotorchTestCase): def testFunction(self): f = Morris() X = torch.stack((torch.zeros(20), torch.ones(20))) Z = f.evaluate_true(X) Ztrue = torch.tensor([5163.0, -8137.0]) self.assertAllClose(Z, Ztrue, atol=1e-3) self.assertIsNone(f._optimizers) with self.assertRaises(NotImplementedError): f.optimal_value
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.test_functions.base import BaseTestProblem, ConstrainedBaseTestProblem from botorch.utils.testing import BotorchTestCase from torch import Tensor class DummyTestProblem(BaseTestProblem): dim = 2 _bounds = [(0, 1), (2, 3)] def evaluate_true(self, X: Tensor) -> Tensor: return -X.pow(2).sum(dim=-1) class DummyConstrainedTestProblem(DummyTestProblem, ConstrainedBaseTestProblem): num_constraints = 1 def evaluate_slack_true(self, X: Tensor) -> Tensor: return 0.25 - X.sum(dim=-1, keepdim=True) class TestBaseTestProblems(BotorchTestCase): def test_base_test_problem(self): for dtype in (torch.float, torch.double): problem = DummyTestProblem() self.assertIsNone(problem.noise_std) self.assertFalse(problem.negate) bnds_expected = torch.tensor([(0, 2), (1, 3)], dtype=torch.float) self.assertTrue(torch.equal(problem.bounds, bnds_expected)) problem = problem.to(device=self.device, dtype=dtype) bnds_expected = bnds_expected.to(device=self.device, dtype=dtype) self.assertTrue(torch.equal(problem.bounds, bnds_expected)) X = torch.rand(2, 2, device=self.device, dtype=dtype) Y = problem(X) self.assertAllClose(Y, -X.pow(2).sum(dim=-1)) problem = DummyTestProblem(negate=True, noise_std=0.1) self.assertEqual(problem.noise_std, 0.1) self.assertTrue(problem.negate) def test_constrained_base_test_problem(self): for dtype in (torch.float, torch.double): problem = DummyConstrainedTestProblem().to(device=self.device, dtype=dtype) X = torch.tensor([[0.4, 0.6], [0.1, 0.1]]) feas = problem.is_feasible(X=X) self.assertFalse(feas[0].item()) self.assertTrue(feas[1].item()) problem = DummyConstrainedTestProblem(noise_std=0.0).to( device=self.device, dtype=dtype ) feas = problem.is_feasible(X=X) self.assertFalse(feas[0].item()) self.assertTrue(feas[1].item())
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List import torch from botorch.acquisition import LinearMCObjective, ScalarizedPosteriorTransform from botorch.acquisition.acquisition import AcquisitionFunction from botorch.acquisition.analytic import ExpectedImprovement from botorch.acquisition.monte_carlo import qExpectedImprovement from botorch.acquisition.proximal import ProximalAcquisitionFunction from botorch.exceptions.errors import UnsupportedError from botorch.models import ModelListGP, SingleTaskGP from botorch.models.gpytorch import GPyTorchModel from botorch.models.model import Model from botorch.models.transforms.input import Normalize from botorch.utils.testing import BotorchTestCase from torch.distributions.multivariate_normal import MultivariateNormal class DummyModel(GPyTorchModel): num_outputs = 1 def __init__(self): # noqa: D107 super(GPyTorchModel, self).__init__() def subset_output(self, idcs: List[int]) -> Model: pass class DummyAcquisitionFunction(AcquisitionFunction): def forward(self, X): pass class NegativeAcquisitionFunction(AcquisitionFunction): def forward(self, X): return torch.ones(*X.shape[:-1]) * -1.0 class TestProximalAcquisitionFunction(BotorchTestCase): def test_proximal(self): for dtype in (torch.float, torch.double): # test single point evaluation with and without input transform normalize = Normalize( 3, bounds=torch.tensor(((0.0, 0.0, 0.0), (2.0, 2.0, 2.0))) ) for input_transform, x_scale in [(None, 1), (normalize, 2)]: train_X = torch.rand(5, 3, device=self.device, dtype=dtype) * x_scale train_Y = train_X.norm(dim=-1, keepdim=True) # test with and without transformed weights for transformed_weighting in [True, False]: # test with single outcome model model = SingleTaskGP( train_X, train_Y, input_transform=input_transform ) model = model.to(device=self.device, dtype=dtype).eval() EI = ExpectedImprovement(model, best_f=0.0) proximal_weights = torch.ones(3, device=self.device, dtype=dtype) last_X = train_X[-1] test_X = torch.rand(1, 3, device=self.device, dtype=dtype) EI_prox = ProximalAcquisitionFunction( EI, proximal_weights=proximal_weights, transformed_weighting=transformed_weighting, ) # softplus transformed value of the acquisition function ei = EI(test_X) # modify last_X/test_X depending on transformed_weighting proximal_test_X = test_X.clone() if transformed_weighting: if input_transform is not None: last_X = input_transform(train_X[-1]) proximal_test_X = input_transform(test_X) mv_normal = MultivariateNormal(last_X, torch.diag(proximal_weights)) test_prox_weight = torch.exp( mv_normal.log_prob(proximal_test_X) ) / torch.exp(mv_normal.log_prob(last_X)) ei_prox = EI_prox(test_X) self.assertAllClose(ei_prox, ei * test_prox_weight) self.assertEqual(ei_prox.shape, torch.Size([1])) # test with beta specified EI_prox_beta = ProximalAcquisitionFunction( EI, proximal_weights=proximal_weights, transformed_weighting=transformed_weighting, beta=1.0, ) # SoftPlus transformed value of the acquisition function ei = torch.nn.functional.softplus(EI(test_X), beta=1.0) # modify last_X/test_X depending on transformed_weighting proximal_test_X = test_X.clone() if transformed_weighting: if input_transform is not None: last_X = input_transform(train_X[-1]) proximal_test_X = input_transform(test_X) mv_normal = MultivariateNormal(last_X, torch.diag(proximal_weights)) test_prox_weight = torch.exp( 
mv_normal.log_prob(proximal_test_X) - mv_normal.log_prob(last_X) ) ei_prox_beta = EI_prox_beta(test_X) self.assertAllClose(ei_prox_beta, ei * test_prox_weight) self.assertEqual(ei_prox_beta.shape, torch.Size([1])) # test t-batch with broadcasting test_X = torch.rand(4, 1, 3, device=self.device, dtype=dtype) proximal_test_X = test_X.clone() if transformed_weighting: if input_transform is not None: last_X = input_transform(train_X[-1]) proximal_test_X = input_transform(test_X) ei = EI(test_X) mv_normal = MultivariateNormal(last_X, torch.diag(proximal_weights)) test_prox_weight = torch.exp( mv_normal.log_prob(proximal_test_X) ) / torch.exp(mv_normal.log_prob(last_X)) ei_prox = EI_prox(test_X) self.assertTrue( torch.allclose(ei_prox, ei * test_prox_weight.flatten()) ) self.assertEqual(ei_prox.shape, torch.Size([4])) # test q-based MC acquisition function qEI = qExpectedImprovement(model, best_f=0.0) test_X = torch.rand(4, 1, 3, device=self.device, dtype=dtype) proximal_test_X = test_X.clone() if transformed_weighting: if input_transform is not None: last_X = input_transform(train_X[-1]) proximal_test_X = input_transform(test_X) qEI_prox = ProximalAcquisitionFunction( qEI, proximal_weights=proximal_weights, transformed_weighting=transformed_weighting, ) qei = qEI(test_X) mv_normal = MultivariateNormal(last_X, torch.diag(proximal_weights)) test_prox_weight = torch.exp( mv_normal.log_prob(proximal_test_X) ) / torch.exp(mv_normal.log_prob(last_X)) qei_prox = qEI_prox(test_X) self.assertTrue( torch.allclose(qei_prox, qei * test_prox_weight.flatten()) ) self.assertEqual(qei_prox.shape, torch.Size([4])) # test acquisition function with # negative values w/o SoftPlus transform specified negative_acqf = NegativeAcquisitionFunction(model) bad_neg_prox = ProximalAcquisitionFunction( negative_acqf, proximal_weights=proximal_weights ) with self.assertRaisesRegex( RuntimeError, "Cannot use proximal biasing for negative" ): bad_neg_prox(test_X) # test gradient test_X = torch.rand( 1, 3, device=self.device, dtype=dtype, requires_grad=True ) ei_prox = EI_prox(test_X) ei_prox.backward() # test model without train_inputs bad_model = DummyModel() with self.assertRaises(UnsupportedError): ProximalAcquisitionFunction( ExpectedImprovement(bad_model, 0.0), proximal_weights ) # test proximal weights that do not match training_inputs train_X = torch.rand(5, 1, 3, device=self.device, dtype=dtype) train_Y = train_X.norm(dim=-1, keepdim=True) model = SingleTaskGP(train_X, train_Y).to(device=self.device).eval() with self.assertRaises(ValueError): ProximalAcquisitionFunction( ExpectedImprovement(model, 0.0), proximal_weights[:1] ) with self.assertRaises(ValueError): ProximalAcquisitionFunction( ExpectedImprovement(model, 0.0), torch.rand(3, 3, device=self.device, dtype=dtype), ) # test for x_pending points pending_acq = DummyAcquisitionFunction(model) pending_acq.set_X_pending(torch.rand(3, 3, device=self.device, dtype=dtype)) with self.assertRaises(UnsupportedError): ProximalAcquisitionFunction(pending_acq, proximal_weights) # test model with multi-batch training inputs train_X = torch.rand(5, 2, 3, device=self.device, dtype=dtype) train_Y = train_X.norm(dim=-1, keepdim=True) bad_single_task = ( SingleTaskGP(train_X, train_Y).to(device=self.device).eval() ) with self.assertRaises(UnsupportedError): ProximalAcquisitionFunction( ExpectedImprovement(bad_single_task, 0.0), proximal_weights ) # test a multi-output SingleTaskGP model train_X = torch.rand(5, 3, device=self.device, dtype=dtype) train_Y = torch.rand(5, 2, 
device=self.device, dtype=dtype) multi_output_model = SingleTaskGP(train_X, train_Y).to(device=self.device) ptransform = ScalarizedPosteriorTransform( weights=torch.ones(2, dtype=dtype, device=self.device) ) ei = ExpectedImprovement( multi_output_model, 0.0, posterior_transform=ptransform ) acq = ProximalAcquisitionFunction(ei, proximal_weights) acq(test_X) def test_proximal_model_list(self): for dtype in (torch.float, torch.double): proximal_weights = torch.ones(3, device=self.device, dtype=dtype) # test with model-list model for complex objective optimization train_X = torch.rand(5, 3, device=self.device, dtype=dtype) train_Y = train_X.norm(dim=-1, keepdim=True) gp = SingleTaskGP(train_X, train_Y).to(device=self.device) model = ModelListGP(gp, gp) scalarized_posterior_transform = ScalarizedPosteriorTransform( torch.ones(2, device=self.device, dtype=dtype) ) mc_linear_objective = LinearMCObjective( torch.ones(2, device=self.device, dtype=dtype) ) EI = ExpectedImprovement( model, best_f=0.0, posterior_transform=scalarized_posterior_transform ) test_X = torch.rand(1, 3, device=self.device, dtype=dtype) EI_prox = ProximalAcquisitionFunction(EI, proximal_weights=proximal_weights) ei = EI(test_X) mv_normal = MultivariateNormal(train_X[-1], torch.diag(proximal_weights)) test_prox_weight = torch.exp(mv_normal.log_prob(test_X)) / torch.exp( mv_normal.log_prob(train_X[-1]) ) # test calculation ei_prox = EI_prox(test_X) self.assertAllClose(ei_prox, ei * test_prox_weight) self.assertEqual(ei_prox.shape, torch.Size([1])) # test MC acquisition function qEI = qExpectedImprovement(model, best_f=0.0, objective=mc_linear_objective) test_X = torch.rand(4, 1, 3, device=self.device, dtype=dtype) qEI_prox = ProximalAcquisitionFunction( qEI, proximal_weights=proximal_weights ) qei = qEI(test_X) mv_normal = MultivariateNormal(train_X[-1], torch.diag(proximal_weights)) test_prox_weight = torch.exp(mv_normal.log_prob(test_X)) / torch.exp( mv_normal.log_prob(train_X[-1]) ) qei_prox = qEI_prox(test_X) self.assertAllClose(qei_prox, qei * test_prox_weight.flatten()) self.assertEqual(qei_prox.shape, torch.Size([4])) # test gradient test_X = torch.rand( 1, 3, device=self.device, dtype=dtype, requires_grad=True ) ei_prox = EI_prox(test_X) ei_prox.backward() # test proximal weights that do not match training_inputs expected_err_msg = ( "`proximal_weights` must be a one dimensional tensor with " "same feature dimension as model." 
) with self.assertRaisesRegex(ValueError, expected_err_msg): ProximalAcquisitionFunction( ExpectedImprovement( model, 0.0, posterior_transform=scalarized_posterior_transform ), proximal_weights[:1], ) with self.assertRaisesRegex(ValueError, expected_err_msg): ProximalAcquisitionFunction( ExpectedImprovement( model, 0.0, posterior_transform=scalarized_posterior_transform ), torch.rand(3, 3, device=self.device, dtype=dtype), ) # test for x_pending points pending_acq = DummyAcquisitionFunction(model) pending_acq.set_X_pending(torch.rand(3, 3, device=self.device, dtype=dtype)) with self.assertRaises(UnsupportedError): ProximalAcquisitionFunction(pending_acq, proximal_weights) # test model with multi-batch training inputs train_X = torch.rand(5, 2, 3, device=self.device, dtype=dtype) train_Y = train_X.norm(dim=-1, keepdim=True) bad_model = ModelListGP( SingleTaskGP(train_X, train_Y).to(device=self.device), SingleTaskGP(train_X, train_Y).to(device=self.device), ) with self.assertRaises(UnsupportedError): ProximalAcquisitionFunction( ExpectedImprovement( bad_model, 0.0, posterior_transform=scalarized_posterior_transform, ), proximal_weights, ) # try using unequal training sets train_X = torch.rand(5, 3, device=self.device, dtype=dtype) train_Y = train_X.norm(dim=-1, keepdim=True) bad_model = ModelListGP( SingleTaskGP(train_X[:-1], train_Y[:-1]).to(device=self.device), SingleTaskGP(train_X, train_Y).to(device=self.device), ) with self.assertRaises(UnsupportedError): ProximalAcquisitionFunction( ExpectedImprovement( bad_model, 0.0, posterior_transform=scalarized_posterior_transform, ), proximal_weights, ) # try with unequal input transforms train_X = torch.rand(5, 3, device=self.device, dtype=dtype) train_Y = train_X.norm(dim=-1, keepdim=True) bad_model = ModelListGP( SingleTaskGP(train_X, train_Y, input_transform=Normalize(3)).to( device=self.device ), SingleTaskGP(train_X, train_Y).to(device=self.device), ) with self.assertRaises(UnsupportedError): ProximalAcquisitionFunction( ExpectedImprovement( bad_model, 0.0, posterior_transform=scalarized_posterior_transform, ), proximal_weights, )
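# --- Usage sketch (not part of the test file above) ---
# Wrapping an analytic acquisition function with proximal biasing, as the
# tests above do; the training data is random and purely illustrative.
import torch

from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.proximal import ProximalAcquisitionFunction
from botorch.models import SingleTaskGP

train_X = torch.rand(5, 3)
train_Y = train_X.norm(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y).eval()

EI = ExpectedImprovement(model, best_f=train_Y.max().item())
# proximal_weights define a diagonal Gaussian centered on the most recent
# training point; the wrapped value is EI(x) weighted by that Gaussian
# (softplus-transformed first when `beta` is given).
EI_prox = ProximalAcquisitionFunction(EI, proximal_weights=torch.ones(3), beta=1.0)
value = EI_prox(torch.rand(1, 3))  # acquisition value, shape (1,)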
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from itertools import product import torch from botorch.acquisition.predictive_entropy_search import qPredictiveEntropySearch from botorch.models.gp_regression import SingleTaskGP from botorch.models.model_list_gp_regression import ModelListGP from botorch.models.transforms.outcome import Standardize from botorch.utils.testing import BotorchTestCase def get_model(train_X, train_Y, use_model_list, standardize_model): num_objectives = train_Y.shape[-1] if standardize_model: if use_model_list: outcome_transform = Standardize(m=1) else: outcome_transform = Standardize(m=num_objectives) else: outcome_transform = None if use_model_list: model = ModelListGP( *[ SingleTaskGP( train_X=train_X, train_Y=train_Y[:, i : i + 1], outcome_transform=outcome_transform, ) for i in range(num_objectives) ] ) else: model = SingleTaskGP( train_X=train_X, train_Y=train_Y, outcome_transform=outcome_transform, ) return model class TestQPredictiveEntropySearch(BotorchTestCase): def test_predictive_entropy_search(self): torch.manual_seed(1) tkwargs = {"device": self.device} num_objectives = 1 for (dtype, use_model_list, standardize_model, maximize,) in product( (torch.float, torch.double), (False, True), (False, True), (False, True), ): tkwargs["dtype"] = dtype input_dim = 2 train_X = torch.rand(4, input_dim, **tkwargs) train_Y = torch.rand(4, num_objectives, **tkwargs) model = get_model(train_X, train_Y, use_model_list, standardize_model) num_samples = 20 optimal_inputs = torch.rand(num_samples, input_dim, **tkwargs) # test acquisition X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)] for i in range(len(X_pending_list)): X_pending = X_pending_list[i] acq = qPredictiveEntropySearch( model=model, optimal_inputs=optimal_inputs, maximize=maximize, X_pending=X_pending, ) test_Xs = [ torch.rand(4, 1, input_dim, **tkwargs), torch.rand(4, 3, input_dim, **tkwargs), torch.rand(4, 5, 1, input_dim, **tkwargs), torch.rand(4, 5, 3, input_dim, **tkwargs), ] for j in range(len(test_Xs)): acq_X = acq(test_Xs[j]) # assess shape self.assertTrue(acq_X.shape == test_Xs[j].shape[:-2])
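# --- Usage sketch (not part of the test file above) ---
# Constructing qPredictiveEntropySearch as the test above does. In practice
# `optimal_inputs` would be samples of the posterior optimizer; random
# tensors stand in for them here.
import torch

from botorch.acquisition.predictive_entropy_search import qPredictiveEntropySearch
from botorch.models.gp_regression import SingleTaskGP

train_X = torch.rand(4, 2)
train_Y = torch.rand(4, 1)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)

optimal_inputs = torch.rand(20, 2)  # placeholder optimizer samples
acq = qPredictiveEntropySearch(model=model, optimal_inputs=optimal_inputs)
vals = acq(torch.rand(4, 1, 2))  # batch of q=1 candidate sets -> shape (4,)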
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools from unittest import mock import torch from botorch.acquisition.objective import GenericMCObjective from botorch.acquisition.utils import ( compute_best_feasible_objective, expand_trace_observations, get_acquisition_function, get_infeasible_cost, get_optimal_samples, project_to_sample_points, project_to_target_fidelity, prune_inferior_points, ) from botorch.exceptions.errors import DeprecationError, UnsupportedError from botorch.models import SingleTaskGP from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior class TestGetAcquisitionFunctionDeprecation(BotorchTestCase): def test_get_acquisition_function_deprecation(self): msg = ( "`get_acquisition_function` has been moved to" " `botorch.acquisition.factory`." ) with self.assertRaisesRegex(DeprecationError, msg): get_acquisition_function() class TestConstraintUtils(BotorchTestCase): def test_compute_best_feasible_objective(self): for dtype in (torch.float, torch.double): with self.subTest(dtype=dtype): tkwargs = {"dtype": dtype, "device": self.device} n = 5 X = torch.arange(n, **tkwargs).view(-1, 1) for batch_shape, sample_shape in itertools.product( (torch.Size([]), torch.Size([2])), (torch.Size([1]), torch.Size([3])), ): means = torch.arange(n, **tkwargs).view(-1, 1) if len(batch_shape) > 0: view_means = means.view(1, *means.shape) means = view_means.expand(batch_shape + means.shape) if sample_shape[0] == 1: samples = means.unsqueeze(0) else: samples = torch.stack([means, means + 1, means + 4], dim=0) variances = torch.tensor( [0.09, 0.25, 0.36, 0.25, 0.09], **tkwargs ).view(-1, 1) mm = MockModel(MockPosterior(mean=means, variance=variances)) # testing all feasible points obj = samples.squeeze(-1) constraints = [lambda samples: -torch.ones_like(samples[..., 0])] best_f = compute_best_feasible_objective( samples=samples, obj=obj, constraints=constraints ) self.assertAllClose(best_f, obj.amax(dim=-1, keepdim=True)) # testing with some infeasible points con_cutoff = 3.0 best_f = compute_best_feasible_objective( samples=samples, obj=obj, constraints=[ lambda samples: samples[..., 0] - (con_cutoff + 1 / 2) ], model=mm, X_baseline=X, ) if sample_shape[0] == 3: # under some samples, all baseline points are infeasible, so # the best_f is set to the negative infeasible cost # for samples where no point is feasible expected_best_f = torch.tensor( [ 3.0, 3.0, -get_infeasible_cost( X=X, model=mm, ).item(), ], **tkwargs, ).view(-1, 1) if len(batch_shape) > 0: expected_best_f = expected_best_f.unsqueeze(1) expected_best_f = expected_best_f.expand( *sample_shape, *batch_shape, 1 ) else: expected_best_f = torch.full( sample_shape + batch_shape + torch.Size([1]), con_cutoff, **tkwargs, ) self.assertAllClose(best_f, expected_best_f) # test some feasible points with infeasible obj if sample_shape[0] == 3: best_f = compute_best_feasible_objective( samples=samples, obj=obj, constraints=[ lambda samples: samples[..., 0] - (con_cutoff + 1 / 2) ], infeasible_obj=torch.ones(1, **tkwargs), ) expected_best_f[-1] = 1 self.assertAllClose(best_f, expected_best_f) # testing with no feasible points and infeasible obj infeasible_obj = torch.tensor(torch.pi, **tkwargs) expected_best_f = torch.full( sample_shape + batch_shape + torch.Size([1]), torch.pi, **tkwargs, ) best_f = compute_best_feasible_objective( samples=samples, obj=obj,
constraints=[lambda X: torch.ones_like(X[..., 0])], infeasible_obj=infeasible_obj, ) self.assertAllClose(best_f, expected_best_f) # testing with no feasible points and not infeasible obj def objective(Y, X): return Y.squeeze(-1) - 5.0 best_f = compute_best_feasible_objective( samples=samples, obj=obj, constraints=[lambda X: torch.ones_like(X[..., 0])], model=mm, X_baseline=X, objective=objective, ) expected_best_f = torch.full( sample_shape + batch_shape + torch.Size([1]), -get_infeasible_cost(X=X, model=mm, objective=objective).item(), **tkwargs, ) self.assertAllClose(best_f, expected_best_f) with self.assertRaisesRegex(ValueError, "Must specify `model`"): best_f = compute_best_feasible_objective( samples=means, obj=obj, constraints=[lambda X: torch.ones_like(X[..., 0])], X_baseline=X, ) with self.assertRaisesRegex( ValueError, "Must specify `X_baseline`" ): best_f = compute_best_feasible_objective( samples=means, obj=obj, constraints=[lambda X: torch.ones_like(X[..., 0])], model=mm, ) def test_get_infeasible_cost(self): for dtype in (torch.float, torch.double): tkwargs = {"dtype": dtype, "device": self.device} X = torch.ones(5, 1, **tkwargs) means = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], **tkwargs).view(-1, 1) variances = torch.tensor([0.09, 0.25, 0.36, 0.25, 0.09], **tkwargs).view( -1, 1 ) mm = MockModel(MockPosterior(mean=means, variance=variances)) # means - 6 * std = [-0.8, -1, -0.6, 1, 3.2]. After applying the # objective, the minimum becomes -6.0, so 6.0 should be returned. M = get_infeasible_cost( X=X, model=mm, objective=lambda Y, X: Y.squeeze(-1) - 5.0 ) self.assertAllClose(M, torch.tensor([6.0], **tkwargs)) M = get_infeasible_cost( X=X, model=mm, objective=lambda Y, X: Y.squeeze(-1) - 5.0 - X[0, 0] ) self.assertAllClose(M, torch.tensor([7.0], **tkwargs)) # test it with using also X in the objective # Test default objective (squeeze last dim). M2 = get_infeasible_cost(X=X, model=mm) self.assertAllClose(M2, torch.tensor([1.0], **tkwargs)) # Test multi-output. m_ = means.repeat(1, 2) m_[:, 1] -= 10 mm = MockModel(MockPosterior(mean=m_, variance=variances.expand(-1, 2))) M3 = get_infeasible_cost(X=X, model=mm) self.assertAllClose(M3, torch.tensor([1.0, 11.0], **tkwargs)) # With a batched model. 
means = means.expand(2, 4, -1, -1) variances = variances.expand(2, 4, -1, -1) mm = MockModel(MockPosterior(mean=means, variance=variances)) M4 = get_infeasible_cost(X=X, model=mm) self.assertAllClose(M4, torch.tensor([1.0], **tkwargs)) class TestPruneInferiorPoints(BotorchTestCase): def test_prune_inferior_points(self): for dtype in (torch.float, torch.double): X = torch.rand(3, 2, device=self.device, dtype=dtype) # the event shape is `q x t` = 3 x 1 samples = torch.tensor( [[-1.0], [0.0], [1.0]], device=self.device, dtype=dtype ) mm = MockModel(MockPosterior(samples=samples)) # test that a batched X raises errors with self.assertRaises(UnsupportedError): prune_inferior_points(model=mm, X=X.expand(2, 3, 2)) # test marginalize_dim mm2 = MockModel(MockPosterior(samples=samples.expand(2, 3, 1))) X_pruned = prune_inferior_points(model=mm2, X=X, marginalize_dim=-3) with self.assertRaises(UnsupportedError): # test error raised when marginalize_dim is not specified with # a batch model prune_inferior_points(model=mm2, X=X) self.assertTrue(torch.equal(X_pruned, X[[-1]])) # test that a batched model raises errors when there are multiple batch dims mm2 = MockModel(MockPosterior(samples=samples.expand(1, 2, 3, 1))) with self.assertRaises(UnsupportedError): prune_inferior_points(model=mm2, X=X) # test that invalid max_frac is checked properly with self.assertRaises(ValueError): prune_inferior_points(model=mm, X=X, max_frac=1.1) # test basic behaviour X_pruned = prune_inferior_points(model=mm, X=X) self.assertTrue(torch.equal(X_pruned, X[[-1]])) # test custom objective neg_id_obj = GenericMCObjective(lambda Y, X: -(Y.squeeze(-1))) X_pruned = prune_inferior_points(model=mm, X=X, objective=neg_id_obj) self.assertTrue(torch.equal(X_pruned, X[[0]])) # test non-repeated samples (requires mocking out MockPosterior's rsample) samples = torch.tensor( [[[3.0], [0.0], [0.0]], [[0.0], [2.0], [0.0]], [[0.0], [0.0], [1.0]]], device=self.device, dtype=dtype, ) with mock.patch.object(MockPosterior, "rsample", return_value=samples): mm = MockModel(MockPosterior(samples=samples)) X_pruned = prune_inferior_points(model=mm, X=X) self.assertTrue(torch.equal(X_pruned, X)) # test max_frac limiting with mock.patch.object(MockPosterior, "rsample", return_value=samples): mm = MockModel(MockPosterior(samples=samples)) X_pruned = prune_inferior_points(model=mm, X=X, max_frac=2 / 3) if self.device.type == "cuda": # sorting has different order on cuda self.assertTrue(torch.equal(X_pruned, torch.stack([X[2], X[1]], dim=0))) else: self.assertTrue(torch.equal(X_pruned, X[:2])) # test that zero-probability is in fact pruned samples[2, 0, 0] = 10 with mock.patch.object(MockPosterior, "rsample", return_value=samples): mm = MockModel(MockPosterior(samples=samples)) X_pruned = prune_inferior_points(model=mm, X=X) self.assertTrue(torch.equal(X_pruned, X[:2])) class TestFidelityUtils(BotorchTestCase): def test_project_to_target_fidelity(self): for batch_shape, dtype in itertools.product( ([], [2]), (torch.float, torch.double) ): X = torch.rand(*batch_shape, 3, 4, device=self.device, dtype=dtype) # test default behavior X_proj = project_to_target_fidelity(X) ones = torch.ones(*X.shape[:-1], 1, device=self.device, dtype=dtype) self.assertTrue(torch.equal(X_proj[..., :, [-1]], ones)) self.assertTrue(torch.equal(X_proj[..., :-1], X[..., :-1])) # test custom target fidelity target_fids = {2: 0.5} X_proj = project_to_target_fidelity(X, target_fidelities=target_fids) self.assertTrue(torch.equal(X_proj[..., :, [2]], 0.5 * ones)) # test multiple 
target fidelities target_fids = {2: 0.5, 0: 0.1} X_proj = project_to_target_fidelity(X, target_fidelities=target_fids) self.assertTrue(torch.equal(X_proj[..., :, [0]], 0.1 * ones)) self.assertTrue(torch.equal(X_proj[..., :, [2]], 0.5 * ones)) # test gradients X.requires_grad_(True) X_proj = project_to_target_fidelity(X, target_fidelities=target_fids) out = (X_proj**2).sum() out.backward() self.assertTrue(torch.all(X.grad[..., [0, 2]] == 0)) self.assertTrue(torch.equal(X.grad[..., [1, 3]], 2 * X[..., [1, 3]])) def test_expand_trace_observations(self): for batch_shape, dtype in itertools.product( ([], [2]), (torch.float, torch.double) ): q, d = 3, 4 X = torch.rand(*batch_shape, q, d, device=self.device, dtype=dtype) # test nullop behavior self.assertTrue(torch.equal(expand_trace_observations(X), X)) self.assertTrue( torch.equal(expand_trace_observations(X, fidelity_dims=[1]), X) ) # test default behavior num_tr = 2 X_expanded = expand_trace_observations(X, num_trace_obs=num_tr) self.assertEqual( X_expanded.shape, torch.Size(batch_shape + [q * (1 + num_tr), d]) ) for i in range(num_tr): X_sub = X_expanded[..., q * i : q * (i + 1), :] self.assertTrue(torch.equal(X_sub[..., :-1], X[..., :-1])) X_sub_expected = (1 - i / (num_tr + 1)) * X[..., :q, -1] self.assertTrue(torch.equal(X_sub[..., -1], X_sub_expected)) # test custom fidelity dims fdims = [0, 2] num_tr = 3 X_expanded = expand_trace_observations( X, fidelity_dims=fdims, num_trace_obs=num_tr ) self.assertEqual( X_expanded.shape, torch.Size(batch_shape + [q * (1 + num_tr), d]) ) for j, i in itertools.product([1, 3], range(num_tr)): X_sub = X_expanded[..., q * i : q * (i + 1), j] self.assertTrue(torch.equal(X_sub, X[..., j])) for j, i in itertools.product(fdims, range(num_tr)): X_sub = X_expanded[..., q * i : q * (i + 1), j] X_sub_expected = (1 - i / (1 + num_tr)) * X[..., :q, j] self.assertTrue(torch.equal(X_sub, X_sub_expected)) # test gradients num_tr = 2 fdims = [1] X.requires_grad_(True) X_expanded = expand_trace_observations( X, fidelity_dims=fdims, num_trace_obs=num_tr ) out = X_expanded.sum() out.backward() grad_exp = torch.full_like(X, 1 + num_tr) grad_exp[..., fdims] = 1 + sum( (i + 1) / (num_tr + 1) for i in range(num_tr) ) self.assertAllClose(X.grad, grad_exp) def test_project_to_sample_points(self): for batch_shape, dtype in itertools.product( ([], [2]), (torch.float, torch.double) ): q, d, p, d_prime = 1, 12, 7, 4 X = torch.rand(*batch_shape, q, d, device=self.device, dtype=dtype) sample_points = torch.rand(p, d_prime, device=self.device, dtype=dtype) X_augmented = project_to_sample_points(X=X, sample_points=sample_points) self.assertEqual(X_augmented.shape, torch.Size(batch_shape + [p, d])) if batch_shape == [2]: self.assertAllClose(X_augmented[0, :, -d_prime:], sample_points) else: self.assertAllClose(X_augmented[:, -d_prime:], sample_points) class TestGetOptimalSamples(BotorchTestCase): def test_get_optimal_samples(self): dims = 3 dtype = torch.float64 for_testing_speed_kwargs = {"raw_samples": 50, "num_restarts": 3} num_optima = 7 batch_shape = (3,) bounds = torch.tensor([[0, 1]] * dims, dtype=dtype).T X = torch.rand(*batch_shape, 4, dims, dtype=dtype) Y = torch.sin(X).sum(dim=-1, keepdim=True).to(dtype) model = SingleTaskGP(X, Y) X_opt, f_opt = get_optimal_samples( model, bounds, num_optima=num_optima, **for_testing_speed_kwargs ) X_opt, f_opt_min = get_optimal_samples( model, bounds, num_optima=num_optima, maximize=False, **for_testing_speed_kwargs, ) correct_X_shape = (num_optima,) + batch_shape + (dims,) correct_f_shape 
= (num_optima,) + batch_shape + (1,) self.assertEqual(X_opt.shape, correct_X_shape) self.assertEqual(f_opt.shape, correct_f_shape) # asserting that the solutions found by minimizing the samples are smaller # than those found by maximizing them self.assertTrue(torch.all(f_opt_min < f_opt))
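# --- Usage sketch (not part of the test file above) ---
# Two of the helpers exercised above: projecting candidates onto target
# fidelities and drawing (approximate) optima from posterior samples of a
# model; sizes and values are arbitrary.
import torch

from botorch.acquisition.utils import get_optimal_samples, project_to_target_fidelity
from botorch.models import SingleTaskGP

X = torch.rand(2, 3, 4)
# Pin column 2 of every candidate to fidelity 1.0; other columns pass through.
X_proj = project_to_target_fidelity(X, target_fidelities={2: 1.0})

train_X = torch.rand(8, 3, dtype=torch.float64)
train_Y = torch.sin(train_X).sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
bounds = torch.tensor([[0.0, 1.0]] * 3, dtype=torch.float64).T
X_opt, f_opt = get_optimal_samples(
    model, bounds, num_optima=4, raw_samples=32, num_restarts=2
)  # X_opt: (4, 3), f_opt: (4, 1)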
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.acquisition import ( ExpectedImprovement, qExpectedImprovement, qMultiStepLookahead, ) from botorch.acquisition.multi_step_lookahead import make_best_f, warmstart_multistep from botorch.acquisition.objective import IdentityMCObjective from botorch.exceptions.errors import UnsupportedError from botorch.models import SingleTaskGP from botorch.sampling import SobolQMCNormalSampler from botorch.utils.testing import BotorchTestCase class TestMultiStepLookahead(BotorchTestCase): def test_qMS_init(self): d = 2 q = 1 num_data = 3 q_batch_sizes = [1, 1, 1] num_fantasies = [2, 2, 1] t_batch_size = [2] for dtype in (torch.float, torch.double): bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype) bounds = bounds.repeat(1, d) train_X = torch.rand(num_data, d, device=self.device, dtype=dtype) train_Y = torch.rand(num_data, 1, device=self.device, dtype=dtype) model = SingleTaskGP(train_X, train_Y) # exactly one of samplers or num_fantasies with self.assertRaises(UnsupportedError): qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, valfunc_cls=[qExpectedImprovement] * 4, valfunc_argfacs=[make_best_f] * 4, inner_mc_samples=[2] * 4, ) # cannot use qMS as its own valfunc_cls with self.assertRaises(UnsupportedError): qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, valfunc_cls=[qMultiStepLookahead] * 4, valfunc_argfacs=[make_best_f] * 4, num_fantasies=num_fantasies, inner_mc_samples=[2] * 4, ) # construct using samplers samplers = [ SobolQMCNormalSampler(sample_shape=torch.Size([nf])) for nf in num_fantasies ] qMS = qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, valfunc_cls=[qExpectedImprovement] * 4, valfunc_argfacs=[make_best_f] * 4, inner_mc_samples=[2] * 4, samplers=samplers, ) self.assertEqual(qMS.num_fantasies, num_fantasies) # use default valfunc_cls, valfun_argfacs, inner_mc_samples qMS = qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, samplers=samplers, ) self.assertEqual(len(qMS._valfunc_cls), 4) self.assertEqual(len(qMS.inner_samplers), 4) self.assertEqual(len(qMS._valfunc_argfacs), 4) # _construct_inner_samplers error catching tests below # AnalyticAcquisitionFunction with MCAcquisitionObjective with self.assertRaises(UnsupportedError): qMultiStepLookahead( model=model, objective=IdentityMCObjective(), batch_sizes=q_batch_sizes, valfunc_cls=[ExpectedImprovement] * 4, valfunc_argfacs=[make_best_f] * 4, num_fantasies=num_fantasies, ) # AnalyticAcquisitionFunction and q > 1 with self.assertRaises(UnsupportedError): qMultiStepLookahead( model=model, batch_sizes=[2, 2, 2], valfunc_cls=[ExpectedImprovement] * 4, valfunc_argfacs=[make_best_f] * 4, num_fantasies=num_fantasies, inner_mc_samples=[2] * 4, ) # AnalyticAcquisitionFunction and inner_mc_samples with self.assertWarns(Warning): qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, valfunc_cls=[ExpectedImprovement] * 4, valfunc_argfacs=[make_best_f] * 4, num_fantasies=num_fantasies, inner_mc_samples=[2] * 4, ) # test warmstarting qMS = qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, samplers=samplers, ) q_prime = qMS.get_augmented_q_batch_size(q) eval_X = torch.rand( t_batch_size + [q_prime, d], device=self.device, dtype=dtype ) warmstarted_X = warmstart_multistep( acq_function=qMS, bounds=bounds, num_restarts=5, raw_samples=10, 
full_optimizer=eval_X, ) self.assertEqual(warmstarted_X.shape, torch.Size([5, q_prime, d])) with self.assertRaisesRegex( UnsupportedError, "`qMultiStepLookahead` got a non-MC `objective`. This is not supported." " Use `posterior_transform` and `objective=None` instead.", ): qMultiStepLookahead(model=model, batch_sizes=q_batch_sizes, objective="cat") def test_qMS(self): d = 2 q = 1 num_data = 3 q_batch_sizes = [1, 1, 1] num_fantasies = [2, 2, 1] t_batch_size = [2] for dtype in (torch.float, torch.double): bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype) bounds = bounds.repeat(1, d) train_X = torch.rand(num_data, d, device=self.device, dtype=dtype) train_Y = torch.rand(num_data, 1, device=self.device, dtype=dtype) model = SingleTaskGP(train_X, train_Y) # default evaluation tests qMS = qMultiStepLookahead( model=model, batch_sizes=[1, 1, 1], num_fantasies=num_fantasies, ) q_prime = qMS.get_augmented_q_batch_size(q) eval_X = torch.rand( t_batch_size + [q_prime, d], device=self.device, dtype=dtype ) result = qMS(eval_X) self.assertEqual(result.shape, torch.Size(t_batch_size)) qMS = qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, valfunc_cls=[qExpectedImprovement] * 4, valfunc_argfacs=[make_best_f] * 4, num_fantasies=num_fantasies, inner_mc_samples=[2] * 4, ) result = qMS(eval_X) self.assertEqual(result.shape, torch.Size(t_batch_size)) # get induced fantasy model, with collapse_fantasy_base_samples fant_model = qMS.get_induced_fantasy_model(eval_X) self.assertEqual( fant_model.train_inputs[0].shape, torch.Size( num_fantasies[::-1] + t_batch_size + [num_data + sum(q_batch_sizes), d] ), ) # without collapsing fantasy base samples qMS = qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, valfunc_cls=[qExpectedImprovement] * 4, valfunc_argfacs=[make_best_f] * 4, num_fantasies=num_fantasies, inner_mc_samples=[2] * 4, collapse_fantasy_base_samples=False, ) q_prime = qMS.get_augmented_q_batch_size(q) eval_X = torch.rand( t_batch_size + [q_prime, d], device=self.device, dtype=dtype ) result = qMS(eval_X) self.assertEqual(result.shape, torch.Size(t_batch_size)) self.assertEqual( qMS.samplers[0]._get_batch_range(model.posterior(eval_X)), (-3, -2) ) # get induced fantasy model, without collapse_fantasy_base_samples fant_model = qMS.get_induced_fantasy_model(eval_X) self.assertEqual( fant_model.train_inputs[0].shape, torch.Size( num_fantasies[::-1] + t_batch_size + [num_data + sum(q_batch_sizes), d] ), ) # X_pending X_pending = torch.rand(5, d, device=self.device, dtype=dtype) qMS = qMultiStepLookahead( model=model, batch_sizes=q_batch_sizes, valfunc_cls=[qExpectedImprovement] * 4, valfunc_argfacs=[make_best_f] * 4, num_fantasies=num_fantasies, inner_mc_samples=[2] * 4, X_pending=X_pending, ) q_prime = qMS.get_augmented_q_batch_size(q) eval_X = torch.rand( t_batch_size + [q_prime, d], device=self.device, dtype=dtype ) result = qMS(eval_X) self.assertEqual(result.shape, torch.Size(t_batch_size)) # add dummy base_weights to samplers samplers = [ SobolQMCNormalSampler(sample_shape=torch.Size([nf])) for nf in num_fantasies ] for s in samplers: s.base_weights = torch.ones( s.sample_shape[0], 1, device=self.device, dtype=dtype ) qMS = qMultiStepLookahead( model=model, batch_sizes=[1, 1, 1], samplers=samplers, ) q_prime = qMS.get_augmented_q_batch_size(q) eval_X = torch.rand( t_batch_size + [q_prime, d], device=self.device, dtype=dtype ) result = qMS(eval_X) self.assertEqual(result.shape, torch.Size(t_batch_size)) # extract candidates cand = qMS.extract_candidates(eval_X)
self.assertEqual(cand.shape, torch.Size(t_batch_size + [q, d]))
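# --- Usage sketch (not part of the test file above) ---
# Minimal qMultiStepLookahead construction and evaluation following the
# pattern of the tests above; all sizes are arbitrary.
import torch

from botorch.acquisition import qMultiStepLookahead
from botorch.models import SingleTaskGP

train_X = torch.rand(3, 2)
train_Y = torch.rand(3, 1)
model = SingleTaskGP(train_X, train_Y)

qMS = qMultiStepLookahead(
    model=model,
    batch_sizes=[1, 1, 1],    # q-batch size at each lookahead step
    num_fantasies=[2, 2, 1],  # fantasy samples per step
)
q = 1
q_prime = qMS.get_augmented_q_batch_size(q)  # q plus the lookahead batch sizes
eval_X = torch.rand(2, q_prime, 2)           # t-batch of augmented candidate sets
vals = qMS(eval_X)                            # shape (2,)
cand = qMS.extract_candidates(eval_X)         # shape (2, q, 2)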
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings from copy import deepcopy from functools import partial from itertools import product from math import pi from unittest import mock import torch from botorch import settings from botorch.acquisition.monte_carlo import ( MCAcquisitionFunction, qExpectedImprovement, qNoisyExpectedImprovement, qProbabilityOfImprovement, qSimpleRegret, qUpperConfidenceBound, SampleReducingMCAcquisitionFunction, ) from botorch.acquisition.objective import ( ConstrainedMCObjective, GenericMCObjective, IdentityMCObjective, PosteriorTransform, ScalarizedPosteriorTransform, ) from botorch.exceptions import BotorchWarning, UnsupportedError from botorch.models import SingleTaskGP from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler from botorch.utils.low_rank import sample_cached_cholesky from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior from botorch.utils.transforms import standardize from torch import Tensor class DummyMCAcquisitionFunction(MCAcquisitionFunction): def forward(self, X): pass class DummyReducingMCAcquisitionFunction(SampleReducingMCAcquisitionFunction): def _sample_forward(self, X): pass class DummyNonScalarizingPosteriorTransform(PosteriorTransform): scalarize = False def evaluate(self, Y): pass # pragma: no cover def forward(self, posterior): pass # pragma: no cover def infeasible_con(samples: Tensor) -> Tensor: return torch.ones_like(samples[..., 0]) def feasible_con(samples: Tensor) -> Tensor: return -torch.ones_like(samples[..., 0]) class TestMCAcquisitionFunction(BotorchTestCase): def test_abstract_raises(self): for acqf_class in (MCAcquisitionFunction, SampleReducingMCAcquisitionFunction): with self.assertRaises(TypeError): acqf_class() # raise if model is multi-output, but no outcome transform or objective # are given no = "botorch.utils.testing.MockModel.num_outputs" with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 2 mm = MockModel(MockPosterior()) for dummy in ( DummyMCAcquisitionFunction, DummyReducingMCAcquisitionFunction, ): with self.assertRaises(UnsupportedError): dummy(model=mm) # raise if model is multi-output, but outcome transform does not # scalarize and no objective is given with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 2 mm = MockModel(MockPosterior()) ptf = DummyNonScalarizingPosteriorTransform() with self.assertRaises(UnsupportedError): dummy(model=mm, posterior_transform=ptf) class TestQExpectedImprovement(BotorchTestCase): def test_q_expected_improvement(self): for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} # the event shape is `b x q x t` = 1 x 1 x 1 samples = torch.zeros(1, 1, 1, **tkwargs) mm = MockModel(MockPosterior(samples=samples)) # X is `q x d` = 1 x 1.
X is a dummy and unused b/c of mocking X = torch.zeros(1, 1, **tkwargs) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([2])) acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler) # test initialization for k in ["objective", "sampler"]: self.assertIn(k, acqf._modules) res = acqf(X) self.assertEqual(res.item(), 0.0) # test shifting best_f value acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler) res = acqf(X) self.assertEqual(res.item(), 1.0) # TODO: Test batched best_f, batched model, batched evaluation # basic test, no resample sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345) acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) self.assertEqual(res.item(), 0.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1])) bs = acqf.sampler.base_samples.clone() res = acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # basic test, qmc sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2])) acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) self.assertEqual(res.item(), 0.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # basic test for X_pending and warning acqf.set_X_pending() self.assertIsNone(acqf.X_pending) acqf.set_X_pending(None) self.assertIsNone(acqf.X_pending) acqf.set_X_pending(X) self.assertEqual(acqf.X_pending, X) mm._posterior._samples = torch.zeros(1, 2, 1, **tkwargs) res = acqf(X) X2 = torch.zeros(1, 1, 1, **tkwargs, requires_grad=True) with warnings.catch_warnings(record=True) as ws, settings.debug(True): acqf.set_X_pending(X2) self.assertEqual(acqf.X_pending, X2) self.assertEqual( sum(issubclass(w.category, BotorchWarning) for w in ws), 1 ) def test_q_expected_improvement_batch(self): for dtype in (torch.float, torch.double): # the event shape is `b x q x t` = 2 x 2 x 1 samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype) samples[0, 0, 0] = 1.0 mm = MockModel(MockPosterior(samples=samples)) # X is a dummy and unused b/c of mocking X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype) # test batch mode sampler = IIDNormalSampler(sample_shape=torch.Size([2])) acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) # test batch model, batched best_f values sampler = IIDNormalSampler(sample_shape=torch.Size([3])) acqf = qExpectedImprovement( model=mm, best_f=torch.Tensor([0, 0]), sampler=sampler ) res = acqf(X) self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) # test shifting best_f value acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler) res = acqf(X) self.assertEqual(res[0].item(), 2.0) self.assertEqual(res[1].item(), 1.0) # test batch mode sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345) acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) # 1-dim batch self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) res = acqf(X.expand(2, 2, 1)) # 2-dim batch self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) # the base samples should have the batch dim collapsed 
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])) bs = acqf.sampler.base_samples.clone() acqf(X.expand(2, 2, 1)) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # test batch mode, qmc sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2])) acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # TODO: Test different objectives (incl. constraints) class TestQNoisyExpectedImprovement(BotorchTestCase): def test_q_noisy_expected_improvement(self): for dtype in (torch.float, torch.double): # the event shape is `b x q x t` = 1 x 2 x 1 samples_noisy = torch.tensor([0.0, 1.0], device=self.device, dtype=dtype) samples_noisy = samples_noisy.view(1, 2, 1) # X_baseline is `q' x d` = 1 x 1 X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype) mm_noisy = MockModel(MockPosterior(samples=samples_noisy)) # X is `q x d` = 1 x 1 X = torch.zeros(1, 1, device=self.device, dtype=dtype) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([2])) acqf = qNoisyExpectedImprovement( model=mm_noisy, X_baseline=X_baseline, sampler=sampler, prune_baseline=False, cache_root=False, ) res = acqf(X) self.assertEqual(res.item(), 1.0) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345) acqf = qNoisyExpectedImprovement( model=mm_noisy, X_baseline=X_baseline, sampler=sampler, prune_baseline=False, cache_root=False, ) res = acqf(X) self.assertEqual(res.item(), 1.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # basic test, qmc sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2])) acqf = qNoisyExpectedImprovement( model=mm_noisy, X_baseline=X_baseline, sampler=sampler, prune_baseline=False, cache_root=False, ) res = acqf(X) self.assertEqual(res.item(), 1.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # basic test for X_pending and warning sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2])) samples_noisy_pending = torch.tensor( [1.0, 0.0, 0.0], device=self.device, dtype=dtype ) samples_noisy_pending = samples_noisy_pending.view(1, 3, 1) mm_noisy_pending = MockModel(MockPosterior(samples=samples_noisy_pending)) acqf = qNoisyExpectedImprovement( model=mm_noisy_pending, X_baseline=X_baseline, sampler=sampler, prune_baseline=False, cache_root=False, ) acqf.set_X_pending() self.assertIsNone(acqf.X_pending) acqf.set_X_pending(None) self.assertIsNone(acqf.X_pending) acqf.set_X_pending(X) self.assertEqual(acqf.X_pending, X) res = acqf(X) X2 = torch.zeros( 1, 1, 1, device=self.device, dtype=dtype, requires_grad=True ) with warnings.catch_warnings(record=True) as ws, settings.debug(True): acqf.set_X_pending(X2) self.assertEqual(acqf.X_pending, X2) self.assertEqual( sum(issubclass(w.category, BotorchWarning) for w in ws), 1 ) def test_q_noisy_expected_improvement_batch(self): for dtype in (torch.float, torch.double): # the event shape is `b x q x t` = 2 x 3 x 1 samples_noisy = torch.zeros(2, 3, 1, device=self.device, dtype=dtype) samples_noisy[0, -1, 0] = 1.0 mm_noisy = 
MockModel(MockPosterior(samples=samples_noisy)) # X is `q x d` = 1 x 1 X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype) X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype) # test batch mode sampler = IIDNormalSampler(sample_shape=torch.Size([2])) acqf = qNoisyExpectedImprovement( model=mm_noisy, X_baseline=X_baseline, sampler=sampler, prune_baseline=False, cache_root=False, ) res = acqf(X) self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) # test batch mode sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345) acqf = qNoisyExpectedImprovement( model=mm_noisy, X_baseline=X_baseline, sampler=sampler, prune_baseline=False, cache_root=False, ) res = acqf(X) # 1-dim batch self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) res = acqf(X.expand(2, 2, 1)) # 2-dim batch self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) # the base samples should have the batch dim collapsed self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1])) bs = acqf.sampler.base_samples.clone() acqf(X.expand(2, 2, 1)) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # test batch mode, qmc sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2])) acqf = qNoisyExpectedImprovement( model=mm_noisy, X_baseline=X_baseline, sampler=sampler, prune_baseline=False, cache_root=False, ) res = acqf(X) self.assertEqual(res[0].item(), 1.0) self.assertEqual(res[1].item(), 0.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) def test_prune_baseline(self): no = "botorch.utils.testing.MockModel.num_outputs" prune = "botorch.acquisition.monte_carlo.prune_inferior_points" for dtype in (torch.float, torch.double): X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype) X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype) with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(MockPosterior(samples=X_baseline)) with mock.patch(prune, return_value=X_pruned) as mock_prune: acqf = qNoisyExpectedImprovement( model=mm, X_baseline=X_baseline, prune_baseline=True, cache_root=False, ) mock_prune.assert_called_once() self.assertTrue(torch.equal(acqf.X_baseline, X_pruned)) with mock.patch(prune, return_value=X_pruned) as mock_prune: acqf = qNoisyExpectedImprovement( model=mm, X_baseline=X_baseline, prune_baseline=True, marginalize_dim=-3, cache_root=False, ) _, kwargs = mock_prune.call_args self.assertEqual(kwargs["marginalize_dim"], -3) def test_cache_root(self): sample_cached_path = ( "botorch.acquisition.cached_cholesky.sample_cached_cholesky" ) raw_state_dict = { "likelihood.noise_covar.raw_noise": torch.tensor( [[0.0895], [0.2594]], dtype=torch.float64 ), "mean_module.raw_constant": torch.tensor( [-0.4545, -0.1285], dtype=torch.float64 ), "covar_module.raw_outputscale": torch.tensor( [1.4876, 1.4897], dtype=torch.float64 ), "covar_module.base_kernel.raw_lengthscale": torch.tensor( [[[-0.7202, -0.2868]], [[-0.8794, -1.2877]]], dtype=torch.float64 ), } # test batched models (e.g. 
for MCMC) for train_batch_shape, m, dtype in product( (torch.Size([]), torch.Size([3])), (1, 2), (torch.float, torch.double) ): state_dict = deepcopy(raw_state_dict) for k, v in state_dict.items(): if m == 1: v = v[0] if len(train_batch_shape) > 0: v = v.unsqueeze(0).expand(*train_batch_shape, *v.shape) state_dict[k] = v tkwargs = {"device": self.device, "dtype": dtype} if m == 2: objective = GenericMCObjective(lambda Y, X: Y.sum(dim=-1)) else: objective = None for k, v in state_dict.items(): state_dict[k] = v.to(**tkwargs) all_close_kwargs = ( { "atol": 1e-1, "rtol": 0.0, } if dtype == torch.float else {"atol": 1e-4, "rtol": 0.0} ) torch.manual_seed(1234) train_X = torch.rand(*train_batch_shape, 3, 2, **tkwargs) train_Y = ( torch.sin(train_X * 2 * pi) + torch.randn(*train_batch_shape, 3, 2, **tkwargs) )[..., :m] train_Y = standardize(train_Y) model = SingleTaskGP( train_X, train_Y, ) if len(train_batch_shape) > 0: X_baseline = train_X[0] else: X_baseline = train_X model.load_state_dict(state_dict, strict=False) sampler = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0) torch.manual_seed(0) acqf = qNoisyExpectedImprovement( model=model, X_baseline=X_baseline, sampler=sampler, objective=objective, prune_baseline=False, cache_root=True, ) orig_base_samples = acqf.base_sampler.base_samples.detach().clone() sampler2 = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0) sampler2.base_samples = orig_base_samples torch.manual_seed(0) acqf_no_cache = qNoisyExpectedImprovement( model=model, X_baseline=X_baseline, sampler=sampler2, objective=objective, prune_baseline=False, cache_root=False, ) for q, batch_shape in product( (1, 3), (torch.Size([]), torch.Size([3]), torch.Size([4, 3])) ): acqf.q_in = -1 acqf_no_cache.q_in = -1 test_X = ( 0.3 + 0.05 * torch.randn(*batch_shape, q, 2, **tkwargs) ).requires_grad_(True) with mock.patch( sample_cached_path, wraps=sample_cached_cholesky ) as mock_sample_cached: torch.manual_seed(0) val = acqf(test_X) mock_sample_cached.assert_called_once() val.sum().backward() base_samples = acqf.sampler.base_samples.detach().clone() X_grad = test_X.grad.clone() test_X2 = test_X.detach().clone().requires_grad_(True) acqf_no_cache.sampler.base_samples = base_samples with mock.patch( sample_cached_path, wraps=sample_cached_cholesky ) as mock_sample_cached: torch.manual_seed(0) val2 = acqf_no_cache(test_X2) mock_sample_cached.assert_not_called() self.assertAllClose(val, val2, **all_close_kwargs) val2.sum().backward() self.assertAllClose(X_grad, test_X2.grad, **all_close_kwargs) # test we fall back to standard sampling for # ill-conditioned covariances acqf._baseline_L = torch.zeros_like(acqf._baseline_L) with warnings.catch_warnings(record=True) as ws, settings.debug(True): with torch.no_grad(): acqf(test_X) self.assertEqual(sum(issubclass(w.category, BotorchWarning) for w in ws), 1) # test w/ posterior transform X_baseline = torch.rand(2, 1) model = SingleTaskGP(X_baseline, torch.randn(2, 1)) pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1])) with mock.patch.object( qNoisyExpectedImprovement, "_compute_root_decomposition", ) as mock_cache_root: acqf = qNoisyExpectedImprovement( model=model, X_baseline=X_baseline, sampler=IIDNormalSampler(sample_shape=torch.Size([1])), posterior_transform=pt, prune_baseline=False, cache_root=True, ) tf_post = model.posterior(X_baseline, posterior_transform=pt) self.assertTrue( torch.allclose( tf_post.mean, mock_cache_root.call_args[-1]["posterior"].mean ) ) # testing constraints n, d, m = 8, 1, 3 X_baseline = torch.rand(n, 
d) model = SingleTaskGP(X_baseline, torch.randn(n, m)) # batched model nei_args = { "model": model, "X_baseline": X_baseline, "prune_baseline": False, "cache_root": True, "posterior_transform": ScalarizedPosteriorTransform(weights=torch.ones(m)), "sampler": SobolQMCNormalSampler(sample_shape=torch.Size([5])), } acqf = qNoisyExpectedImprovement(**nei_args) X = torch.randn_like(X_baseline) for con in [feasible_con, infeasible_con]: with self.subTest(con=con): target = "botorch.acquisition.utils.get_infeasible_cost" infcost = torch.tensor([3], device=self.device, dtype=dtype) with mock.patch(target, return_value=infcost): cacqf = qNoisyExpectedImprovement(**nei_args, constraints=[con]) _, obj = cacqf._get_samples_and_objectives(X) best_feas_f = cacqf.compute_best_f(obj) if con is feasible_con: self.assertAllClose(best_feas_f, acqf.compute_best_f(obj)) else: self.assertAllClose( best_feas_f, torch.full_like(obj[..., [0]], -infcost.item()) ) # TODO: Test different objectives (incl. constraints) class TestQProbabilityOfImprovement(BotorchTestCase): def test_q_probability_of_improvement(self): for dtype in (torch.float, torch.double): # the event shape is `b x q x t` = 1 x 1 x 1 samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype) mm = MockModel(MockPosterior(samples=samples)) # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking X = torch.zeros(1, 1, device=self.device, dtype=dtype) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([2])) acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) self.assertEqual(res.item(), 0.5) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345) acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) self.assertEqual(res.item(), 0.5) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1])) bs = acqf.sampler.base_samples.clone() res = acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # basic test, qmc sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2])) acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) self.assertEqual(res.item(), 0.5) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # basic test for X_pending and warning acqf.set_X_pending() self.assertIsNone(acqf.X_pending) acqf.set_X_pending(None) self.assertIsNone(acqf.X_pending) acqf.set_X_pending(X) self.assertEqual(acqf.X_pending, X) mm._posterior._samples = mm._posterior._samples.expand(-1, 2, -1) res = acqf(X) X2 = torch.zeros( 1, 1, 1, device=self.device, dtype=dtype, requires_grad=True ) with warnings.catch_warnings(record=True) as ws, settings.debug(True): acqf.set_X_pending(X2) self.assertEqual(acqf.X_pending, X2) self.assertEqual( sum(issubclass(w.category, BotorchWarning) for w in ws), 1 ) def test_q_probability_of_improvement_batch(self): # the event shape is `b x q x t` = 2 x 2 x 1 for dtype in (torch.float, torch.double): samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype) samples[0, 0, 0] = 1.0 mm = MockModel(MockPosterior(samples=samples)) # X is a dummy and unused b/c of mocking X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype) # test batch mode sampler = IIDNormalSampler(sample_shape=torch.Size([2])) acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler) res = acqf(X) self.assertEqual(res[0].item(), 1.0) 
            self.assertEqual(res[1].item(), 0.5)

            # test batch model, batched best_f values
            sampler = IIDNormalSampler(sample_shape=torch.Size([3]))
            acqf = qProbabilityOfImprovement(
                model=mm, best_f=torch.Tensor([0, 0]), sampler=sampler
            )
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, -1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, -1, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

    # TODO: Test different objectives (incl. constraints)


class TestQSimpleRegret(BotorchTestCase):
    def test_q_simple_regret(self):
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)

            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            mm._posterior._samples = mm._posterior._samples.expand(1, 2, 1)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(
                    sum(issubclass(w.category, BotorchWarning) for w in ws), 1
                )

    def test_q_simple_regret_batch(self):
        # the event shape is `b x q x t` = 2 x 2 x 1
        for dtype in (torch.float, torch.double):
            samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            samples[0, 0, 0] = 1.0
            mm = MockModel(MockPosterior(samples=samples))
            # X is a dummy and unused b/c of mocking
            X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, -1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, -1, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

    # TODO: Test different objectives (incl. constraints)


class TestQUpperConfidenceBound(BotorchTestCase):
    def test_q_upper_confidence_bound(self):
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)

            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            mm._posterior._samples = mm._posterior._samples.expand(1, 2, 1)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(
                    sum(issubclass(w.category, BotorchWarning) for w in ws), 1
                )

    def test_q_upper_confidence_bound_batch(self):
        # TODO: T41739913 Implement tests for all MCAcquisitionFunctions
        for dtype in (torch.float, torch.double):
            samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            samples[0, 0, 0] = 1.0
            mm = MockModel(MockPosterior(samples=samples))
            # X is a dummy and unused b/c of mocking
            X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, -1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, -1, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertTrue(torch.equal(acqf.X_pending, X))
            mm._posterior._samples = torch.zeros(
                2, 4, 1, device=self.device, dtype=dtype
            )
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(
                    sum(issubclass(w.category, BotorchWarning) for w in ws), 1
                )

    # TODO: Test different objectives (incl. constraints)


class TestMCAcquisitionFunctionWithConstraints(BotorchTestCase):
    def test_mc_acquisition_function_with_constraints(self):
        for dtype in (torch.float, torch.double):
            with self.subTest(dtype=dtype):
                num_samples, n, q, d, m = 5, 4, 1, 3, 1
                X = torch.randn(n, q, d, device=self.device, dtype=dtype)
                samples = torch.randn(
                    num_samples, n, q, m, device=self.device, dtype=dtype
                )
                mm = MockModel(MockPosterior(samples=samples))
                nei_args = {
                    "model": mm,
                    "X_baseline": X,
                    "prune_baseline": False,
                }
                for acqf_constructor in [
                    partial(qProbabilityOfImprovement, model=mm, best_f=0.0),
                    partial(qExpectedImprovement, model=mm, best_f=0.0),
                    # cache_root=True not supported by MockModel, see test_cache_root
                    partial(qNoisyExpectedImprovement, cache_root=False, **nei_args),
                    partial(qNoisyExpectedImprovement, cache_root=True, **nei_args),
                ]:
                    acqf = acqf_constructor()
                    mm._posterior._samples = (
                        torch.cat((samples, samples), dim=-2)
                        if isinstance(acqf, qNoisyExpectedImprovement)
                        else samples
                    )
                    with self.subTest(acqf_class=type(acqf)):
                        for con in [feasible_con, infeasible_con]:
                            cacqf = acqf_constructor(constraints=[con])
                            # for NEI test
                            target = "botorch.acquisition.utils.get_infeasible_cost"
                            inf_cost = torch.tensor(3, device=self.device, dtype=dtype)
                            with mock.patch(target, return_value=inf_cost):
                                vals = cacqf(X)
                            # NOTE: this is only true for q = 1
                            expected_vals = acqf(X) * (con(samples) < 0).squeeze()
                            self.assertAllClose(vals, expected_vals)
                        with self.assertRaisesRegex(
                            ValueError,
                            "ConstrainedMCObjective as well as constraints passed",
                        ):
                            acqf_constructor(
                                constraints=[feasible_con],
                                objective=ConstrainedMCObjective(
                                    objective=IdentityMCObjective(),
                                    constraints=[feasible_con],
                                ),
                            )

                # Forcing negative samples, which will throw an error with simple
                # regret because the acquisition utility is negative.
                samples = -torch.rand(n, q, m, device=self.device, dtype=dtype)
                mm = MockModel(MockPosterior(samples=samples))
                cacqf = qSimpleRegret(model=mm, constraints=[feasible_con])
                with self.assertRaisesRegex(
                    ValueError,
                    "Constraint-weighting requires unconstrained "
                    "acquisition values to be non-negative",
                ):
                    cacqf(X)

                # Test highlighting both common and different behavior of the old
                # `ConstrainedMCObjective` and new `constraints` implementation.
                # 1. Highlighting difference: q = 1
                samples = torch.randn(n, q, m, device=self.device, dtype=dtype)
                mm = MockModel(MockPosterior(samples=samples))
                constrained_objective = ConstrainedMCObjective(
                    objective=IdentityMCObjective(),
                    constraints=[infeasible_con],
                    infeasible_cost=0.0,
                )
                # The old `ConstrainedMCObjective`-based implementation does not scale
                # the best_f value by the feasibility indicator, while the new
                # `constraints`-based implementation does. Therefore, the old version
                # yields an acquisition value of 1, even though the constraint is not
                # satisfied.
                best_f = -1.0
                old_acqf = qExpectedImprovement(
                    model=mm, best_f=best_f, objective=constrained_objective
                )
                new_acqf = qExpectedImprovement(
                    model=mm, best_f=best_f, constraints=[infeasible_con]
                )
                old_val = old_acqf(X)
                self.assertAllClose(old_val, torch.ones_like(old_val))
                new_val = new_acqf(X)
                self.assertAllClose(new_val, torch.zeros_like(new_val))
                # 2. Highlighting commonality:
                # When best_f = 0 and infeasible_cost = 0, both implementations yield
                # the same results.
                constrained_objective = ConstrainedMCObjective(
                    objective=IdentityMCObjective(),
                    constraints=[feasible_con],
                    infeasible_cost=0.0,
                )
                best_f = 0.0
                old_acqf = qExpectedImprovement(
                    model=mm, best_f=best_f, objective=constrained_objective
                )
                new_acqf = qExpectedImprovement(
                    model=mm, best_f=best_f, constraints=[feasible_con]
                )
                old_val = old_acqf(X)
                new_val = new_acqf(X)
                self.assertAllClose(new_val, old_val)
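# Illustrative sketch (not BoTorch's implementation): the q = 1 identity that the
# constrained tests above rely on is that a feasibility indicator multiplies the
# unconstrained acquisition utility sample-wise. A minimal standalone version in
# plain torch, using a hypothetical hard 0/1 indicator in place of BoTorch's
# smoothed (sigmoid-based) indicator:
import torch


def hard_feasibility_weight(utility: torch.Tensor, con_vals: torch.Tensor) -> torch.Tensor:
    # `utility`: unconstrained per-sample acquisition utilities, shape `sample x n`.
    # `con_vals`: constraint callable outputs of the same shape; negative = feasible.
    return utility * (con_vals < 0).to(utility)


_utility = torch.rand(5, 4)
_feasible = -torch.ones(5, 4)  # all feasible -> weights are 1 everywhere
assert torch.equal(hard_feasibility_weight(_utility, _feasible), _utility)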
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Callable, Optional
from unittest import mock

import torch
from botorch.acquisition.cost_aware import InverseCostWeightedUtility
from botorch.acquisition.max_value_entropy_search import (
    _sample_max_value_Gumbel,
    _sample_max_value_Thompson,
    qLowerBoundMaxValueEntropy,
    qMaxValueEntropy,
    qMultiFidelityLowerBoundMaxValueEntropy,
    qMultiFidelityMaxValueEntropy,
)
from botorch.acquisition.objective import (
    PosteriorTransform,
    ScalarizedPosteriorTransform,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from torch import Tensor


class MESMockModel(MockModel):
    r"""Mock object that implements dummy methods and feeds through specified outputs"""

    def __init__(self, num_outputs=1, batch_shape=None):
        r"""
        Args:
            num_outputs: The number of outputs.
            batch_shape: The batch shape of the model. For details see
                `botorch.models.model.Model.batch_shape`.
        """
        super().__init__(None)
        self._num_outputs = num_outputs
        self._batch_shape = torch.Size() if batch_shape is None else batch_shape

    def posterior(
        self,
        X: Tensor,
        observation_noise: bool = False,
        posterior_transform: Optional[PosteriorTransform] = None,
    ) -> MockPosterior:
        m_shape = X.shape[:-1]
        r_shape = list(X.shape[:-2]) + [1, 1]
        mvn = MultivariateNormal(
            mean=torch.zeros(m_shape, dtype=X.dtype, device=X.device),
            covariance_matrix=torch.eye(
                m_shape[-1], dtype=X.dtype, device=X.device
            ).repeat(r_shape),
        )
        if self.num_outputs > 1:
            mvn = MultitaskMultivariateNormal.from_independent_mvns(
                mvns=[mvn] * self.num_outputs
            )
        posterior = GPyTorchPosterior(mvn)
        if posterior_transform is not None:
            return posterior_transform(posterior)
        return posterior

    def forward(self, X: Tensor) -> MultivariateNormal:
        return self.posterior(X).distribution

    @property
    def batch_shape(self) -> torch.Size:
        return self._batch_shape

    @property
    def num_outputs(self) -> int:
        return self._num_outputs


class NoBatchShapeMESMockModel(MESMockModel):
    # For some reason it's really hard to mock this property to raise a
    # NotImplementedError, so let's just make a class for it.
    @property
    def batch_shape(self) -> torch.Size:
        raise NotImplementedError


class TestMaxValueEntropySearch(BotorchTestCase):
    def test_q_max_value_entropy(self):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            with self.assertRaises(TypeError):
                qMaxValueEntropy(mm)
            candidate_set = torch.rand(1000, 2, device=self.device, dtype=dtype)

            # test error in case of batch GP model
            mm = MESMockModel(batch_shape=torch.Size([2]))
            with self.assertRaises(NotImplementedError):
                qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
            mm = MESMockModel()
            train_inputs = torch.rand(5, 10, 2, device=self.device, dtype=dtype)
            with self.assertRaises(NotImplementedError):
                qMaxValueEntropy(
                    mm, candidate_set, num_mv_samples=10, train_inputs=train_inputs
                )

            # test that init works if batch_shape is not implemented on the model
            mm = NoBatchShapeMESMockModel()
            qMaxValueEntropy(
                mm,
                candidate_set,
                num_mv_samples=10,
            )

            # test error when number of outputs > 1 and no transform is given.
            mm = MESMockModel()
            mm._num_outputs = 2
            with self.assertRaises(UnsupportedError):
                qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)

            # test with X_pending is None
            mm = MESMockModel()
            train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
            mm.train_inputs = (train_inputs,)
            qMVE = qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)

            # test initialization
            self.assertEqual(qMVE.num_fantasies, 16)
            self.assertEqual(qMVE.num_mv_samples, 10)
            self.assertIsInstance(qMVE.sampler, SobolQMCNormalSampler)
            self.assertEqual(qMVE.sampler.sample_shape, torch.Size([128]))
            self.assertIsInstance(qMVE.fantasies_sampler, SobolQMCNormalSampler)
            self.assertEqual(qMVE.fantasies_sampler.sample_shape, torch.Size([16]))
            self.assertEqual(qMVE.use_gumbel, True)
            self.assertEqual(qMVE.posterior_max_values.shape, torch.Size([10, 1]))

            # test evaluation
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            self.assertEqual(qMVE(X).shape, torch.Size([1]))

            # test set X pending to None in case of _init_model exists
            qMVE.set_X_pending(None)
            self.assertEqual(qMVE.model, qMVE._init_model)

            # test with use_gumbel = False
            qMVE = qMaxValueEntropy(
                mm, candidate_set, num_mv_samples=10, use_gumbel=False
            )
            self.assertEqual(qMVE(X).shape, torch.Size([1]))

            # test with X_pending is not None
            with mock.patch.object(
                MESMockModel, "fantasize", return_value=mm
            ) as patch_f:
                qMVE = qMaxValueEntropy(
                    mm,
                    candidate_set,
                    num_mv_samples=10,
                    X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
                )
                patch_f.assert_called_once()

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(
                weights=torch.ones(2, device=self.device, dtype=dtype)
            )
            for gumbel in (True, False):
                qMVE = qMaxValueEntropy(
                    mm,
                    candidate_set,
                    num_mv_samples=10,
                    use_gumbel=gumbel,
                    posterior_transform=pt,
                )
                self.assertEqual(qMVE(X).shape, torch.Size([1]))

    def test_q_lower_bound_max_value_entropy(self):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            with self.assertRaises(TypeError):
                qLowerBoundMaxValueEntropy(mm)
            candidate_set = torch.rand(1000, 2, device=self.device, dtype=dtype)

            # test error in case of batch GP model
            # train_inputs = torch.rand(5, 10, 2, device=self.device, dtype=dtype)
            # mm.train_inputs = (train_inputs,)
            mm = MESMockModel(batch_shape=torch.Size([2]))
            with self.assertRaises(NotImplementedError):
                qLowerBoundMaxValueEntropy(mm, candidate_set, num_mv_samples=10)

            # test error when number of outputs > 1 and no transform
            mm = MESMockModel()
            mm._num_outputs = 2
            with self.assertRaises(UnsupportedError):
                qLowerBoundMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
            mm._num_outputs = 1

            # test with X_pending is None
            mm = MESMockModel()
            train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
            mm.train_inputs = (train_inputs,)
            qGIBBON = qLowerBoundMaxValueEntropy(mm, candidate_set, num_mv_samples=10)

            # test initialization
            self.assertEqual(qGIBBON.num_mv_samples, 10)
            self.assertEqual(qGIBBON.use_gumbel, True)
            self.assertEqual(qGIBBON.posterior_max_values.shape, torch.Size([10, 1]))

            # test evaluation
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            self.assertEqual(qGIBBON(X).shape, torch.Size([1]))

            # test with use_gumbel = False
            qGIBBON = qLowerBoundMaxValueEntropy(
                mm, candidate_set, num_mv_samples=10, use_gumbel=False
            )
            self.assertEqual(qGIBBON(X).shape, torch.Size([1]))

            # test with X_pending is not None
            qGIBBON = qLowerBoundMaxValueEntropy(
                mm,
                candidate_set,
                num_mv_samples=10,
                use_gumbel=False,
                X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
            )
            self.assertEqual(qGIBBON(X).shape, torch.Size([1]))

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(
                weights=torch.ones(2, device=self.device, dtype=dtype)
            )
            qGIBBON = qLowerBoundMaxValueEntropy(
                mm,
                candidate_set,
                num_mv_samples=10,
                use_gumbel=False,
                X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
                posterior_transform=pt,
            )
            with self.assertRaisesRegex(UnsupportedError, "X_pending is not None"):
                qGIBBON(X)

    def test_q_multi_fidelity_max_value_entropy(
        self, acqf_class=qMultiFidelityMaxValueEntropy
    ):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
            mm.train_inputs = (train_inputs,)
            candidate_set = torch.rand(10, 2, device=self.device, dtype=dtype)
            qMF_MVE = acqf_class(
                model=mm, candidate_set=candidate_set, num_mv_samples=10
            )

            # test initialization
            self.assertEqual(qMF_MVE.num_fantasies, 16)
            self.assertEqual(qMF_MVE.num_mv_samples, 10)
            self.assertIsInstance(qMF_MVE.sampler, SobolQMCNormalSampler)
            self.assertIsInstance(qMF_MVE.cost_sampler, SobolQMCNormalSampler)
            self.assertEqual(qMF_MVE.sampler.sample_shape, torch.Size([128]))
            self.assertIsInstance(qMF_MVE.fantasies_sampler, SobolQMCNormalSampler)
            self.assertEqual(qMF_MVE.fantasies_sampler.sample_shape, torch.Size([16]))
            self.assertIsInstance(qMF_MVE.expand, Callable)
            self.assertIsInstance(qMF_MVE.project, Callable)
            self.assertIsNone(qMF_MVE.X_pending)
            self.assertEqual(qMF_MVE.posterior_max_values.shape, torch.Size([10, 1]))
            self.assertIsInstance(
                qMF_MVE.cost_aware_utility, InverseCostWeightedUtility
            )

            # test evaluation
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            self.assertEqual(qMF_MVE(X).shape, torch.Size([1]))

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(
                weights=torch.ones(2, device=self.device, dtype=dtype)
            )
            qMF_MVE = acqf_class(
                model=mm,
                candidate_set=candidate_set,
                num_mv_samples=10,
                posterior_transform=pt,
            )
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            self.assertEqual(qMF_MVE(X).shape, torch.Size([1]))

    def test_q_multi_fidelity_lower_bound_max_value_entropy(self):
        # Same test as for MF-MES since GIBBON only changes in the way it computes the
        # information gain.
        self.test_q_multi_fidelity_max_value_entropy(
            acqf_class=qMultiFidelityLowerBoundMaxValueEntropy
        )

    def test_sample_max_value_Gumbel(self):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            candidate_set = torch.rand(3, 10, 2, device=self.device, dtype=dtype)
            samples = _sample_max_value_Gumbel(mm, candidate_set, 5)
            self.assertEqual(samples.shape, torch.Size([5, 3]))

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(
                weights=torch.ones(2, device=self.device, dtype=dtype)
            )
            samples = _sample_max_value_Gumbel(
                mm, candidate_set, 5, posterior_transform=pt
            )
            self.assertEqual(samples.shape, torch.Size([5, 3]))

    def test_sample_max_value_Thompson(self):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            candidate_set = torch.rand(3, 10, 2, device=self.device, dtype=dtype)
            samples = _sample_max_value_Thompson(mm, candidate_set, 5)
            self.assertEqual(samples.shape, torch.Size([5, 3]))

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(
                weights=torch.ones(2, device=self.device, dtype=dtype)
            )
            samples = _sample_max_value_Thompson(
                mm, candidate_set, 5, posterior_transform=pt
            )
            self.assertEqual(samples.shape, torch.Size([5, 3]))
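# Hedged sketch of the idea behind `_sample_max_value_Gumbel` above (an independent
# re-derivation of the standard MES construction, not BoTorch's code): approximate
# the distribution of max_i f(x_i) over a candidate set via the product-of-CDFs
# independence approximation, fit a Gumbel(loc, scale) to two of its quantiles, and
# sample from it. `mean`/`std` are assumed posterior marginals at candidate points.
import math

import torch


def sample_max_value_gumbel_sketch(
    mean: torch.Tensor, std: torch.Tensor, num_samples: int
) -> torch.Tensor:
    normal = torch.distributions.Normal(mean, std)

    def cdf_max(y: torch.Tensor) -> torch.Tensor:
        # independence approximation: P(max <= y) ~= prod_i Phi((y - mu_i) / sigma_i)
        return normal.cdf(y).prod()

    def quantile(p: float) -> torch.Tensor:
        # bisection on the approximate max-CDF
        lo, hi = (mean - 5 * std).min(), (mean + 5 * std).max()
        for _ in range(60):
            mid = (lo + hi) / 2
            lo, hi = (mid, hi) if cdf_max(mid) < p else (lo, mid)
        return (lo + hi) / 2

    # match two quantiles of a Gumbel: Q(p) = loc - scale * log(-log(p))
    y1, y2 = quantile(0.25), quantile(0.75)
    c1, c2 = math.log(-math.log(0.25)), math.log(-math.log(0.75))
    scale = (y2 - y1) / (c1 - c2)
    loc = y1 + scale * c1
    u = torch.rand(num_samples, dtype=mean.dtype).clamp(1e-9, 1 - 1e-9)
    return loc - scale * torch.log(-torch.log(u))


_mv = sample_max_value_gumbel_sketch(torch.zeros(10), torch.ones(10), 5)
assert _mv.shape == (5,)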
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from itertools import product

import torch
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.acquisition.prior_guided import PriorGuidedAcquisitionFunction
from botorch.models import SingleTaskGP
from botorch.utils.testing import BotorchTestCase
from botorch.utils.transforms import match_batch_shape
from torch.nn import Module


class DummyPrior(Module):
    def forward(self, X):
        p = torch.distributions.Normal(0, 1)
        # sum over d dimensions
        return p.log_prob(X).sum(dim=-1).exp()


def get_val_prob(test_X, test_X_exp, af, prior):
    with torch.no_grad():
        val = af(test_X)
        prob = prior(test_X_exp)
    return val, prob


def get_weighted_val(ei_val, prob, exponent, use_log):
    if use_log:
        return prob * exponent + ei_val
    return prob.pow(exponent) * ei_val


class TestPriorGuidedAcquisitionFunction(BotorchTestCase):
    def setUp(self):
        super().setUp()
        self.prior = DummyPrior()
        self.train_X = torch.rand(5, 3, dtype=torch.double, device=self.device)
        self.train_Y = self.train_X.norm(dim=-1, keepdim=True)

    def test_prior_guided_analytic_acquisition_function(self):
        for dtype in (torch.float, torch.double):
            model = SingleTaskGP(
                self.train_X.to(dtype=dtype), self.train_Y.to(dtype=dtype)
            )
            ei = ExpectedImprovement(model, best_f=0.0)
            for batch_shape, use_log, exponent in product(
                ([], [2]),
                (False, True),
                (1.0, 2.0),
            ):
                af = PriorGuidedAcquisitionFunction(
                    acq_function=ei,
                    prior_module=self.prior,
                    log=use_log,
                    prior_exponent=exponent,
                )
                test_X = torch.rand(
                    *batch_shape, 1, 3, dtype=dtype, device=self.device
                )
                test_X_exp = test_X.unsqueeze(0) if batch_shape == [] else test_X
                with torch.no_grad():
                    ei_val = ei(test_X_exp).unsqueeze(-1)
                val, prob = get_val_prob(test_X, test_X_exp, af, self.prior)
                weighted_val = get_weighted_val(ei_val, prob, exponent, use_log)
                expected_val = weighted_val.squeeze(-1)
                self.assertTrue(torch.allclose(val, expected_val))
            # test that q > 1 with a non-SampleReducing AF raises an exception
            msg = (
                "q-batches with q>1 are only supported using "
                "SampleReducingMCAcquisitionFunction."
            )
            test_X = torch.rand(2, 3, dtype=dtype, device=self.device)
            with self.assertRaisesRegex(NotImplementedError, msg):
                af(test_X)

    def test_prior_guided_mc_acquisition_function(self):
        for dtype in (torch.float, torch.double):
            model = SingleTaskGP(
                self.train_X.to(dtype=dtype), self.train_Y.to(dtype=dtype)
            )
            ei = qExpectedImprovement(model, best_f=0.0)
            for batch_shape, q, use_log, exponent in product(
                ([], [2]),
                (1, 2),
                (False, True),
                (1.0, 2.0),
            ):
                af = PriorGuidedAcquisitionFunction(
                    acq_function=ei,
                    prior_module=self.prior,
                    log=use_log,
                    prior_exponent=exponent,
                )
                test_X = torch.rand(
                    *batch_shape, q, 3, dtype=dtype, device=self.device
                )
                test_X_exp = test_X.unsqueeze(0) if batch_shape == [] else test_X
                val, prob = get_val_prob(test_X, test_X_exp, af, self.prior)
                ei_val = ei._non_reduced_forward(test_X_exp)
                weighted_val = get_weighted_val(ei_val, prob, exponent, use_log)
                expected_val = ei._sample_reduction(ei._q_reduction(weighted_val))
                self.assertTrue(torch.allclose(val, expected_val))
            # test set_X_pending
            X_pending = torch.rand(2, 3, dtype=dtype, device=self.device)
            af.X_pending = X_pending
            self.assertTrue(torch.equal(X_pending, af.X_pending))
            # unsqueeze batch dim
            test_X_exp_with_pending = torch.cat(
                [test_X_exp, match_batch_shape(X_pending, test_X_exp)], dim=-2
            )
            with torch.no_grad():
                val = af(test_X)
                prob = self.prior(test_X_exp_with_pending)
                ei_val = ei._non_reduced_forward(test_X_exp_with_pending)
            if use_log:
                weighted_val = prob * exponent + ei_val
            else:
                weighted_val = prob.pow(exponent) * ei_val
            expected_val = ei._sample_reduction(ei._q_reduction(weighted_val))
            self.assertTrue(torch.equal(val, expected_val))
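# Hedged sketch of the weighting rule the tests above verify (non-log case): the
# prior-guided value is the base acquisition value scaled by the prior probability
# raised to `prior_exponent`. Standalone torch version using the same standard-normal
# prior shape as `DummyPrior`; `base_val` is a stand-in for a real acquisition value.
import torch


def prior_weighted_value(
    base_val: torch.Tensor, X: torch.Tensor, exponent: float = 2.0
) -> torch.Tensor:
    prior = torch.distributions.Normal(0.0, 1.0)
    prob = prior.log_prob(X).sum(dim=-1).exp()  # density summed over d, as above
    return prob.pow(exponent) * base_val


_X = torch.randn(4, 1, 3)
_val = prior_weighted_value(torch.rand(4, 1), _X)  # shape `4 x 1`, like base_val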
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import warnings
from copy import deepcopy
from itertools import product
from math import pi
from unittest import mock

import torch
from botorch import settings
from botorch.acquisition import (
    LogImprovementMCAcquisitionFunction,
    qLogExpectedImprovement,
    qLogNoisyExpectedImprovement,
)
from botorch.acquisition.input_constructors import ACQF_INPUT_CONSTRUCTOR_REGISTRY
from botorch.acquisition.monte_carlo import (
    qExpectedImprovement,
    qNoisyExpectedImprovement,
)
from botorch.acquisition.objective import (
    ConstrainedMCObjective,
    GenericMCObjective,
    IdentityMCObjective,
    PosteriorTransform,
    ScalarizedPosteriorTransform,
)
from botorch.exceptions import BotorchWarning, UnsupportedError
from botorch.exceptions.errors import BotorchError
from botorch.models import SingleTaskGP
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.low_rank import sample_cached_cholesky
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from botorch.utils.transforms import standardize
from torch import Tensor


def infeasible_con(samples: Tensor) -> Tensor:
    return torch.ones_like(samples[..., 0])


def feasible_con(samples: Tensor) -> Tensor:
    return -torch.ones_like(samples[..., 0])


class DummyLogImprovementAcquisitionFunction(LogImprovementMCAcquisitionFunction):
    def _sample_forward(self, X):
        pass


class DummyNonScalarizingPosteriorTransform(PosteriorTransform):
    scalarize = False

    def evaluate(self, Y):
        pass  # pragma: no cover

    def forward(self, posterior):
        pass  # pragma: no cover


class TestLogImprovementAcquisitionFunction(BotorchTestCase):
    def test_abstract_raises(self):
        with self.assertRaises(TypeError):
            LogImprovementMCAcquisitionFunction()
        # raise if model is multi-output, but no outcome transform or objective
        # are given
        no = "botorch.utils.testing.MockModel.num_outputs"
        with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
            mock_num_outputs.return_value = 2
            mm = MockModel(MockPosterior())
            with self.assertRaises(UnsupportedError):
                DummyLogImprovementAcquisitionFunction(model=mm)
        # raise if model is multi-output, but outcome transform does not
        # scalarize and no objective is given
        with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
            mock_num_outputs.return_value = 2
            mm = MockModel(MockPosterior())
            ptf = DummyNonScalarizingPosteriorTransform()
            with self.assertRaises(UnsupportedError):
                DummyLogImprovementAcquisitionFunction(
                    model=mm, posterior_transform=ptf
                )

        mm = MockModel(MockPosterior())
        objective = ConstrainedMCObjective(
            IdentityMCObjective(),
            constraints=[lambda samples: torch.zeros_like(samples[..., 0])],
        )
        with self.assertRaisesRegex(
            BotorchError,
            "Log-Improvement should not be used with `ConstrainedMCObjective`.",
        ):
            DummyLogImprovementAcquisitionFunction(model=mm, objective=objective)


class TestQLogExpectedImprovement(BotorchTestCase):
    def test_q_log_expected_improvement(self):
        self.assertIn(qLogExpectedImprovement, ACQF_INPUT_CONSTRUCTOR_REGISTRY.keys())
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, **tkwargs)

            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            log_acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            self.assertFalse(acqf._fat)  # different default behavior
            self.assertTrue(log_acqf._fat)
            # test initialization
            for k in ["objective", "sampler"]:
                self.assertIn(k, acqf._modules)
                self.assertIn(k, log_acqf._modules)

            res = acqf(X).item()
            self.assertEqual(res, 0.0)
            exp_log_res = log_acqf(X).exp().item()
            # Due to the smooth approximation, the value at zero should be close to,
            # but not exactly zero, and upper-bounded by the tau hyperparameter.
            self.assertTrue(0 < exp_log_res)
            self.assertTrue(exp_log_res <= log_acqf.tau_relu)

            # test shifting best_f value downward to see non-zero improvement
            best_f = -1
            acqf = qExpectedImprovement(model=mm, best_f=best_f, sampler=sampler)
            log_acqf = qLogExpectedImprovement(
                model=mm, best_f=best_f, sampler=sampler
            )
            res, exp_log_res = acqf(X), log_acqf(X).exp()
            expected_val = -best_f
            self.assertEqual(res.dtype, dtype)
            self.assertEqual(res.device.type, self.device.type)
            self.assertEqual(res.item(), expected_val)
            # Further away from zero, the value is numerically indistinguishable with
            # single precision arithmetic.
            self.assertEqual(exp_log_res.dtype, dtype)
            self.assertEqual(exp_log_res.device.type, self.device.type)
            self.assertTrue(expected_val <= exp_log_res.item())
            self.assertTrue(exp_log_res.item() <= expected_val + log_acqf.tau_relu)

            # test shifting best_f value upward to see advantage of LogEI
            best_f = 1
            acqf = qExpectedImprovement(model=mm, best_f=best_f, sampler=sampler)
            log_acqf = qLogExpectedImprovement(
                model=mm, best_f=best_f, sampler=sampler
            )
            res, log_res = acqf(X), log_acqf(X)
            exp_log_res = log_res.exp()
            expected_val = 0
            self.assertEqual(res.item(), expected_val)
            self.assertTrue(expected_val <= exp_log_res.item())
            self.assertTrue(exp_log_res.item() <= expected_val + log_acqf.tau_relu)
            # However, the log value is large and negative with non-vanishing gradients
            self.assertGreater(-1, log_res.item())
            self.assertGreater(log_res.item(), -100)

            # NOTE: The following tests are adapted from the qEI tests.
            # basic test, no resample
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertTrue(0 < res.exp().item())
            self.assertTrue(res.exp().item() < acqf.tau_relu)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertTrue(0 < res.exp().item())
            self.assertTrue(res.exp().item() < acqf.tau_relu)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            mm._posterior._samples = torch.zeros(1, 2, 1, **tkwargs)
            res = acqf(X)
            X2 = torch.zeros(1, 1, 1, **tkwargs, requires_grad=True)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(
                    sum(issubclass(w.category, BotorchWarning) for w in ws), 1
                )

            # testing with illegal taus
            with self.assertRaisesRegex(ValueError, "tau_max is not a scalar:"):
                qLogExpectedImprovement(
                    model=mm, best_f=0, tau_max=torch.tensor([1, 2])
                )
            with self.assertRaisesRegex(ValueError, "tau_relu is non-positive:"):
                qLogExpectedImprovement(model=mm, best_f=0, tau_relu=-2)

    def test_q_log_expected_improvement_batch(self):
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 2 x 2 x 1
            samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            samples[0, 0, 0] = 1.0
            mm = MockModel(MockPosterior(samples=samples))
            # X is a dummy and unused b/c of mocking
            X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
            acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            exp_log_res = acqf(X).exp()
            # with no approximations (qEI): self.assertEqual(res[0].item(), 1.0)
            # in the batch case, the values get adjusted upward by at most tau_relu
            self.assertEqual(exp_log_res.dtype, dtype)
            self.assertEqual(exp_log_res.device.type, self.device.type)
            self.assertTrue(1.0 <= exp_log_res[0].item())
            self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
            # self.assertAllClose(exp_log_res[0], torch.ones_like(exp_log_res[0]), )
            # with no approximations (qEI): self.assertEqual(res[1].item(), 0.0)
            self.assertTrue(0 < exp_log_res[1].item())
            self.assertTrue(exp_log_res[1].item() <= acqf.tau_relu)

            # test batch model, batched best_f values
            sampler = IIDNormalSampler(sample_shape=torch.Size([3]))
            acqf = qLogExpectedImprovement(
                model=mm, best_f=torch.Tensor([0, 0]), sampler=sampler
            )
            exp_log_res = acqf(X).exp()
            # with no approximations (qEI): self.assertEqual(res[0].item(), 1.0)
            self.assertTrue(1.0 <= exp_log_res[0].item())
            self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
            # with no approximations (qEI): self.assertEqual(res[1].item(), 0.0)
            self.assertTrue(0 < exp_log_res[1].item())
            self.assertTrue(exp_log_res[1].item() <= acqf.tau_relu)

            # test shifting best_f value
            acqf = qLogExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
            exp_log_res = acqf(X).exp()
            # with no approximations (qEI): self.assertEqual(res[0].item(), 2.0)
            # TODO: figure out numerically stable tests and principled tolerances
            # With q > 1, maximum value can get moved down due to L_q-norm
            # approximation of the maximum over the q-batch.
            safe_upper_lower_bound = 1.999
            self.assertTrue(safe_upper_lower_bound <= exp_log_res[0].item())
            self.assertTrue(
                exp_log_res[0].item() <= 2.0 + acqf.tau_relu + acqf.tau_max
            )
            # with no approximations (qEI): self.assertEqual(res[1].item(), 1.0)
            self.assertTrue(1.0 <= exp_log_res[1].item())
            # occurring ~tau_max error when all candidates in a q-batch have the
            # same acquisition value
            self.assertTrue(
                exp_log_res[1].item() <= 1.0 + acqf.tau_relu + acqf.tau_max
            )

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            # res = acqf(X)  # 1-dim batch
            exp_log_res = acqf(X).exp()  # 1-dim batch
            # with no approximations (qEI): self.assertEqual(res[0].item(), 1.0)
            safe_upper_lower_bound = 0.999
            self.assertTrue(safe_upper_lower_bound <= exp_log_res[0].item())
            self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
            # with no approximations (qEI): self.assertEqual(res[1].item(), 0.0)
            self.assertTrue(0.0 <= exp_log_res[1].item())
            self.assertTrue(exp_log_res[1].item() <= 0.0 + acqf.tau_relu)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            exp_log_res = acqf(X.expand(2, 2, 1)).exp()  # 2-dim batch
            # self.assertEqual(res[0].item(), 1.0)
            safe_upper_lower_bound = 0.999
            self.assertTrue(safe_upper_lower_bound <= exp_log_res[0].item())
            self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
            # self.assertEqual(res[1].item(), 0.0)
            self.assertTrue(0.0 <= exp_log_res[1].item())
            self.assertTrue(exp_log_res[1].item() <= 0.0 + acqf.tau_relu)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 2, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = qLogExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            exp_log_res = acqf(X).exp()
            # self.assertEqual(res[0].item(), 1.0)
            safe_upper_lower_bound = 0.999
            self.assertTrue(safe_upper_lower_bound <= exp_log_res[0].item())
            self.assertTrue(exp_log_res[0].item() <= 1.0 + acqf.tau_relu)
            # self.assertEqual(res[1].item(), 0.0)
            self.assertTrue(0.0 <= exp_log_res[1].item())
            self.assertTrue(exp_log_res[1].item() <= 0.0 + acqf.tau_relu)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

    # TODO: Test different objectives (incl. constraints)


class TestQLogNoisyExpectedImprovement(BotorchTestCase):
    def test_q_log_noisy_expected_improvement(self):
        self.assertIn(
            qLogNoisyExpectedImprovement, ACQF_INPUT_CONSTRUCTOR_REGISTRY.keys()
        )
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 2 x 1
            samples_noisy = torch.tensor([0.0, 1.0], device=self.device, dtype=dtype)
            samples_noisy = samples_noisy.view(1, 2, 1)
            # X_baseline is `q' x d` = 1 x 1
            X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
            mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
            # X is `q x d` = 1 x 1
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)

            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
            kwargs = {
                "model": mm_noisy,
                "X_baseline": X_baseline,
                "sampler": sampler,
                "prune_baseline": False,
                "cache_root": False,
            }
            acqf = qNoisyExpectedImprovement(**kwargs)
            log_acqf = qLogNoisyExpectedImprovement(**kwargs)
            res = acqf(X)
            self.assertEqual(res.item(), 1.0)
            log_res = log_acqf(X)
            self.assertEqual(log_res.dtype, dtype)
            self.assertEqual(log_res.device.type, self.device.type)
            self.assertAllClose(log_res.exp().item(), 1.0)

            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            kwargs = {
                "model": mm_noisy,
                "X_baseline": X_baseline,
                "sampler": sampler,
                "prune_baseline": False,
                "cache_root": False,
            }
            log_acqf = qLogNoisyExpectedImprovement(**kwargs)
            log_res = log_acqf(X)
            self.assertEqual(log_res.exp().item(), 1.0)
            self.assertEqual(
                log_acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])
            )
            bs = log_acqf.sampler.base_samples.clone()
            log_acqf(X)
            self.assertTrue(torch.equal(log_acqf.sampler.base_samples, bs))

            # basic test, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            kwargs = {
                "model": mm_noisy,
                "X_baseline": X_baseline,
                "sampler": sampler,
                "prune_baseline": False,
                "cache_root": False,
            }
            log_acqf = qLogNoisyExpectedImprovement(**kwargs)
            log_res = log_acqf(X)
            self.assertEqual(log_res.exp().item(), 1.0)
            self.assertEqual(
                log_acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1])
            )
            bs = log_acqf.sampler.base_samples.clone()
            log_acqf(X)
            self.assertTrue(torch.equal(log_acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            samples_noisy_pending = torch.tensor(
                [1.0, 0.0, 0.0], device=self.device, dtype=dtype
            )
            samples_noisy_pending = samples_noisy_pending.view(1, 3, 1)
            mm_noisy_pending = MockModel(MockPosterior(samples=samples_noisy_pending))
            kwargs = {
                "model": mm_noisy_pending,
                "X_baseline": X_baseline,
                "sampler": sampler,
                "prune_baseline": False,
                "cache_root": False,
            }
            # copy for log version
            log_acqf = qLogNoisyExpectedImprovement(**kwargs)
            log_acqf.set_X_pending()
            self.assertIsNone(log_acqf.X_pending)
            log_acqf.set_X_pending(None)
            self.assertIsNone(log_acqf.X_pending)
            log_acqf.set_X_pending(X)
            self.assertEqual(log_acqf.X_pending, X)
            log_acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                log_acqf.set_X_pending(X2)
                self.assertEqual(log_acqf.X_pending, X2)
                self.assertEqual(
                    sum(issubclass(w.category, BotorchWarning) for w in ws), 1
                )

    def test_q_noisy_expected_improvement_batch(self):
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 2 x 3 x 1
            samples_noisy = torch.zeros(2, 3, 1, device=self.device, dtype=dtype)
            samples_noisy[0, -1, 0] = 1.0
            mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
            # X is `q x d` = 1 x 1
            X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
            kwargs = {
                "model": mm_noisy,
                "X_baseline": X_baseline,
                "sampler": sampler,
                "prune_baseline": False,
                "cache_root": False,
            }
            acqf = qLogNoisyExpectedImprovement(**kwargs)
            res = acqf(X).exp()
            expected_res = torch.tensor([1.0, 0.0], dtype=dtype, device=self.device)
            self.assertAllClose(res, expected_res, atol=acqf.tau_relu)
            self.assertGreater(res[1].item(), 0.0)
            self.assertGreater(acqf.tau_relu, res[1].item())

            # test batch mode
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = qLogNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                prune_baseline=False,
                cache_root=False,
            )
            res = acqf(X).exp()  # 1-dim batch
            expected_res = torch.tensor([1.0, 0.0], dtype=dtype, device=self.device)
            self.assertAllClose(res, expected_res, atol=acqf.tau_relu)
            self.assertGreater(res[1].item(), 0.0)
            self.assertGreater(acqf.tau_relu, res[1].item())
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 2, 1)).exp()  # 2-dim batch
            expected_res = torch.tensor([1.0, 0.0], dtype=dtype, device=self.device)
            self.assertAllClose(res, expected_res, atol=acqf.tau_relu)
            self.assertGreater(res[1].item(), 0.0)
            self.assertGreater(acqf.tau_relu, res[1].item())
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 2, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = qLogNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                prune_baseline=False,
                cache_root=False,
            )
            res = acqf(X).exp()
            expected_res = torch.tensor([1.0, 0.0], dtype=dtype, device=self.device)
            self.assertAllClose(res, expected_res, atol=acqf.tau_relu)
            self.assertGreater(res[1].item(), 0.0)
            self.assertGreater(acqf.tau_relu, res[1].item())
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

    def test_prune_baseline(self):
        no = "botorch.utils.testing.MockModel.num_outputs"
        prune = "botorch.acquisition.logei.prune_inferior_points"
        for dtype in (torch.float, torch.double):
            X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
            X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype)
            with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(MockPosterior(samples=X_baseline))
                with mock.patch(prune, return_value=X_pruned) as mock_prune:
                    acqf = qLogNoisyExpectedImprovement(
                        model=mm,
                        X_baseline=X_baseline,
                        prune_baseline=True,
                        cache_root=False,
                    )
                mock_prune.assert_called_once()
                self.assertTrue(torch.equal(acqf.X_baseline, X_pruned))
                with mock.patch(prune, return_value=X_pruned) as mock_prune:
                    acqf = qLogNoisyExpectedImprovement(
                        model=mm,
                        X_baseline=X_baseline,
                        prune_baseline=True,
                        marginalize_dim=-3,
                        cache_root=False,
                    )
                _, kwargs = mock_prune.call_args
                self.assertEqual(kwargs["marginalize_dim"], -3)

    def test_cache_root(self):
        sample_cached_path = (
            "botorch.acquisition.cached_cholesky.sample_cached_cholesky"
        )
        raw_state_dict = {
            "likelihood.noise_covar.raw_noise": torch.tensor(
                [[0.0895], [0.2594]], dtype=torch.float64
            ),
            "mean_module.raw_constant": torch.tensor(
                [-0.4545, -0.1285], dtype=torch.float64
            ),
            "covar_module.raw_outputscale": torch.tensor(
                [1.4876, 1.4897], dtype=torch.float64
            ),
            "covar_module.base_kernel.raw_lengthscale": torch.tensor(
                [[[-0.7202, -0.2868]], [[-0.8794, -1.2877]]], dtype=torch.float64
            ),
        }
        # test batched models (e.g. for MCMC)
        for train_batch_shape, m, dtype in product(
            (torch.Size([]), torch.Size([3])), (1, 2), (torch.float, torch.double)
        ):
            state_dict = deepcopy(raw_state_dict)
            for k, v in state_dict.items():
                if m == 1:
                    v = v[0]
                if len(train_batch_shape) > 0:
                    v = v.unsqueeze(0).expand(*train_batch_shape, *v.shape)
                state_dict[k] = v
            tkwargs = {"device": self.device, "dtype": dtype}
            if m == 2:
                objective = GenericMCObjective(lambda Y, X: Y.sum(dim=-1))
            else:
                objective = None
            for k, v in state_dict.items():
                state_dict[k] = v.to(**tkwargs)
            all_close_kwargs = (
                {
                    "atol": 1e-1,
                    "rtol": 0.0,
                }
                if dtype == torch.float
                else {"atol": 1e-4, "rtol": 0.0}
            )
            torch.manual_seed(1234)
            train_X = torch.rand(*train_batch_shape, 3, 2, **tkwargs)
            train_Y = (
                torch.sin(train_X * 2 * pi)
                + torch.randn(*train_batch_shape, 3, 2, **tkwargs)
            )[..., :m]
            train_Y = standardize(train_Y)
            model = SingleTaskGP(
                train_X,
                train_Y,
            )
            if len(train_batch_shape) > 0:
                X_baseline = train_X[0]
            else:
                X_baseline = train_X
            model.load_state_dict(state_dict, strict=False)
            sampler = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0)
            torch.manual_seed(0)
            acqf = qLogNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=sampler,
                objective=objective,
                prune_baseline=False,
                cache_root=True,
            )

            orig_base_samples = acqf.base_sampler.base_samples.detach().clone()
            sampler2 = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0)
            sampler2.base_samples = orig_base_samples
            torch.manual_seed(0)
            acqf_no_cache = qLogNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=sampler2,
                objective=objective,
                prune_baseline=False,
                cache_root=False,
            )
            for q, batch_shape in product(
                (1, 3), (torch.Size([]), torch.Size([3]), torch.Size([4, 3]))
            ):
                acqf.q_in = -1
                acqf_no_cache.q_in = -1
                test_X = (
                    0.3 + 0.05 * torch.randn(*batch_shape, q, 2, **tkwargs)
                ).requires_grad_(True)
                with mock.patch(
                    sample_cached_path, wraps=sample_cached_cholesky
                ) as mock_sample_cached:
                    torch.manual_seed(0)
                    val = acqf(test_X).exp()
                    mock_sample_cached.assert_called_once()
                val.sum().backward()
                base_samples = acqf.sampler.base_samples.detach().clone()
                X_grad = test_X.grad.clone()
                test_X2 = test_X.detach().clone().requires_grad_(True)
                acqf_no_cache.sampler.base_samples = base_samples
                with mock.patch(
                    sample_cached_path, wraps=sample_cached_cholesky
                ) as mock_sample_cached:
                    torch.manual_seed(0)
                    val2 = acqf_no_cache(test_X2).exp()
                mock_sample_cached.assert_not_called()
                self.assertAllClose(val, val2, **all_close_kwargs)
                val2.sum().backward()
                self.assertAllClose(X_grad, test_X2.grad, **all_close_kwargs)
            # test we fall back to standard sampling for
            # ill-conditioned covariances
            acqf._baseline_L = torch.zeros_like(acqf._baseline_L)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                with torch.no_grad():
                    acqf(test_X)
            self.assertEqual(
                sum(issubclass(w.category, BotorchWarning) for w in ws), 1
            )

        # test w/ posterior transform
        X_baseline = torch.rand(2, 1)
        model = SingleTaskGP(X_baseline, torch.randn(2, 1))
        pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
        with mock.patch.object(
            qLogNoisyExpectedImprovement,
            "_compute_root_decomposition",
        ) as mock_cache_root:
            acqf = qLogNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=IIDNormalSampler(sample_shape=torch.Size([1])),
                posterior_transform=pt,
                prune_baseline=False,
                cache_root=True,
            )
            tf_post = model.posterior(X_baseline, posterior_transform=pt)
            self.assertTrue(
                torch.allclose(
                    tf_post.mean, mock_cache_root.call_args[-1]["posterior"].mean
                )
            )

        # testing constraints
        n, d, m = 8, 1, 3
        X_baseline = torch.rand(n, d)
        model = SingleTaskGP(X_baseline, torch.randn(n, m))  # batched model
        nei_args = {
            "model": model,
            "X_baseline": X_baseline,
            "prune_baseline": False,
            "cache_root": True,
            "posterior_transform": ScalarizedPosteriorTransform(weights=torch.ones(m)),
            "sampler": SobolQMCNormalSampler(torch.Size([5])),
        }
        acqf = qLogNoisyExpectedImprovement(**nei_args)
        X = torch.randn_like(X_baseline)
        for con in [feasible_con, infeasible_con]:
            with self.subTest(con=con):
                target = "botorch.acquisition.utils.get_infeasible_cost"
                infcost = torch.tensor([3], device=self.device, dtype=dtype)
                with mock.patch(target, return_value=infcost):
                    cacqf = qLogNoisyExpectedImprovement(**nei_args, constraints=[con])

                _, obj = cacqf._get_samples_and_objectives(X)
                best_feas_f = cacqf.compute_best_f(obj)
                if con is feasible_con:
                    self.assertAllClose(best_feas_f, acqf.compute_best_f(obj))
                else:
                    self.assertAllClose(
                        best_feas_f, torch.full_like(obj[..., [0]], -infcost.item())
                    )

    # TODO: Test different objectives (incl. constraints)
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import warnings

import torch
from botorch import settings
from botorch.acquisition.decoupled import DecoupledAcquisitionFunction
from botorch.exceptions import BotorchTensorDimensionError, BotorchWarning
from botorch.logging import shape_to_str
from botorch.models import ModelListGP, SingleTaskGP
from botorch.utils.testing import BotorchTestCase


class DummyDecoupledAcquisitionFunction(DecoupledAcquisitionFunction):
    def forward(self, X):
        pass


class TestDecoupledAcquisitionFunction(BotorchTestCase):
    def test_decoupled_acquisition_function(self):
        msg = (
            "Can't instantiate abstract class DecoupledAcquisitionFunction"
            " with abstract method forward"
        )
        with self.assertRaisesRegex(TypeError, msg):
            DecoupledAcquisitionFunction()
        # test raises error if model is not ModelList
        msg = "DummyDecoupledAcquisitionFunction requires using a ModelList."
        model = SingleTaskGP(
            torch.rand(1, 3, device=self.device), torch.rand(1, 2, device=self.device)
        )
        with self.assertRaisesRegex(ValueError, msg):
            DummyDecoupledAcquisitionFunction(model=model)
        m = SingleTaskGP(
            torch.rand(1, 3, device=self.device), torch.rand(1, 1, device=self.device)
        )
        model = ModelListGP(m, m)

        # basic test
        af = DummyDecoupledAcquisitionFunction(model=model)
        self.assertIs(af.model, model)
        self.assertIsNone(af.X_evaluation_mask)
        self.assertIsNone(af.X_pending)

        # test set X_evaluation_mask
        # test wrong number of outputs
        eval_mask = torch.randint(0, 2, (2, 3), device=self.device).bool()
        msg = (
            "Expected X_evaluation_mask to be `q x m`, but got shape"
            f" {shape_to_str(eval_mask.shape)}."
        )
        with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
            af.X_evaluation_mask = eval_mask
        # test more than 2 dimensions
        eval_mask.unsqueeze_(0)
        msg = (
            "Expected X_evaluation_mask to be `q x m`, but got shape"
            f" {shape_to_str(eval_mask.shape)}."
        )
        with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
            af.X_evaluation_mask = eval_mask

        # set eval_mask
        eval_mask = eval_mask[0, :, :2]
        af.X_evaluation_mask = eval_mask
        self.assertIs(af.X_evaluation_mask, eval_mask)

        # test set_X_pending
        X_pending = torch.rand(1, 1, device=self.device)
        msg = (
            "If `self.X_evaluation_mask` is not None, then "
            "`X_pending_evaluation_mask` must be provided."
        )
        with self.assertRaisesRegex(ValueError, msg):
            af.set_X_pending(X_pending=X_pending)
        af.X_evaluation_mask = None
        X_pending = X_pending.requires_grad_(True)
        with warnings.catch_warnings(record=True) as ws, settings.debug(True):
            af.set_X_pending(X_pending)
            self.assertEqual(af.X_pending, X_pending)
            self.assertEqual(
                sum(issubclass(w.category, BotorchWarning) for w in ws), 1
            )
        self.assertIsNone(af.X_evaluation_mask)

        # test setting X_pending with X_pending_evaluation_mask
        X_pending = torch.rand(3, 1, device=self.device)
        # test raises exception
        # wrong number of outputs, wrong number of dims, wrong number of rows
        for shape in ([3, 1], [1, 3, 2], [1, 2]):
            eval_mask = torch.randint(0, 2, shape, device=self.device).bool()
            msg = (
                f"Expected `X_pending_evaluation_mask` of shape `{X_pending.shape[0]} "
                f"x {model.num_outputs}`, but got "
                f"{shape_to_str(eval_mask.shape)}."
            )
            with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
                af.set_X_pending(
                    X_pending=X_pending, X_pending_evaluation_mask=eval_mask
                )

        eval_mask = torch.randint(0, 2, (3, 2), device=self.device).bool()
        af.set_X_pending(X_pending=X_pending, X_pending_evaluation_mask=eval_mask)
        self.assertTrue(torch.equal(af.X_pending, X_pending))
        self.assertIs(af.X_pending_evaluation_mask, eval_mask)

        # test construct_evaluation_mask
        # X_evaluation_mask is None
        X = torch.rand(4, 5, 2, device=self.device)
        X_eval_mask = af.construct_evaluation_mask(X=X)
        expected_eval_mask = torch.cat(
            [torch.ones(X.shape[1:], dtype=torch.bool, device=self.device), eval_mask],
            dim=0,
        )
        self.assertTrue(torch.equal(X_eval_mask, expected_eval_mask))
        # test X_evaluation_mask is not None
        # test wrong shape
        af.X_evaluation_mask = torch.zeros(1, 2, dtype=bool, device=self.device)
        msg = "Expected the -2 dimension of X and X_evaluation_mask to match."
        with self.assertRaisesRegex(BotorchTensorDimensionError, msg):
            af.construct_evaluation_mask(X=X)
        af.X_evaluation_mask = torch.randint(0, 2, (5, 2), device=self.device).bool()
        X_eval_mask = af.construct_evaluation_mask(X=X)
        expected_eval_mask = torch.cat([af.X_evaluation_mask, eval_mask], dim=0)
        self.assertTrue(torch.equal(X_eval_mask, expected_eval_mask))

        # test setting X_pending as None
        af.set_X_pending(X_pending=None, X_pending_evaluation_mask=None)
        self.assertIsNone(af.X_pending)
        self.assertIsNone(af.X_pending_evaluation_mask)
        # test construct_evaluation_mask when X_pending is None
        self.assertTrue(
            torch.equal(af.construct_evaluation_mask(X=X), af.X_evaluation_mask)
        )
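# Hedged sketch of the mask construction `construct_evaluation_mask` is tested for
# above: the joint `(q + q_pending) x m` evaluation mask is the mask for the new
# candidates (all-True when unset) stacked on top of the pending candidates' mask.
# Standalone torch version:
from typing import Optional

import torch


def construct_evaluation_mask_sketch(
    q: int,
    m: int,
    X_evaluation_mask: Optional[torch.Tensor] = None,
    X_pending_evaluation_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    mask = (
        X_evaluation_mask
        if X_evaluation_mask is not None
        else torch.ones(q, m, dtype=torch.bool)
    )
    if X_pending_evaluation_mask is not None:
        mask = torch.cat([mask, X_pending_evaluation_mask], dim=0)
    return mask


_pending = torch.randint(0, 2, (3, 2)).bool()
assert construct_evaluation_mask_sketch(5, 2, None, _pending).shape == (5 + 3, 2)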
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import warnings

import torch
from botorch import settings
from botorch.acquisition.cost_aware import (
    CostAwareUtility,
    GenericCostAwareUtility,
    InverseCostWeightedUtility,
)
from botorch.exceptions.warnings import CostAwareWarning
from botorch.models.deterministic import GenericDeterministicModel
from botorch.sampling import IIDNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior


class TestCostAwareUtilities(BotorchTestCase):
    def test_abstract_raises(self):
        with self.assertRaises(TypeError):
            CostAwareUtility()

    def test_GenericCostAwareUtility(self):
        def cost(X, deltas, **kwargs):
            return deltas.mean(dim=-1) / X[..., 1].sum(dim=-1)

        for dtype in (torch.float, torch.double):
            u = GenericCostAwareUtility(cost)
            X = torch.rand(3, 2, device=self.device, dtype=dtype)
            deltas = torch.rand(5, 3, device=self.device, dtype=dtype)
            self.assertIsInstance(u, GenericCostAwareUtility)
            self.assertTrue(torch.equal(u(X, deltas), cost(X, deltas)))
            X = torch.rand(4, 3, 2, device=self.device, dtype=dtype)
            deltas = torch.rand(5, 4, 3, device=self.device, dtype=dtype)
            self.assertIsInstance(u, GenericCostAwareUtility)
            self.assertTrue(torch.equal(u(X, deltas), cost(X, deltas)))

    def test_InverseCostWeightedUtility(self):
        for batch_shape in ([], [2]):
            for dtype in (torch.float, torch.double):
                # the event shape is `batch_shape x q x t`
                mean = 1 + torch.rand(
                    *batch_shape, 2, 1, device=self.device, dtype=dtype
                )
                mm = MockModel(MockPosterior(mean=mean))
                X = torch.randn(*batch_shape, 3, 2, device=self.device, dtype=dtype)
                deltas = torch.rand(4, *batch_shape, device=self.device, dtype=dtype)

                # test that sampler is required if use_mean=False
                icwu = InverseCostWeightedUtility(mm, use_mean=False)
                with self.assertRaises(RuntimeError):
                    icwu(X, deltas)

                # check warning for negative cost
                mm = MockModel(MockPosterior(mean=mean.clamp_max(-1e-6)))
                icwu = InverseCostWeightedUtility(mm)
                with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                    icwu(X, deltas)
                self.assertTrue(
                    any(issubclass(w.category, CostAwareWarning) for w in ws)
                )

                # basic test
                mm = MockModel(MockPosterior(mean=mean))
                icwu = InverseCostWeightedUtility(mm)
                ratios = icwu(X, deltas)
                self.assertTrue(
                    torch.equal(ratios, deltas / mean.squeeze(-1).sum(dim=-1))
                )

                # sampling test
                samples = 1 + torch.rand(  # event shape is q x m
                    *batch_shape, 3, 1, device=self.device, dtype=dtype
                )
                mm = MockModel(MockPosterior(samples=samples))
                icwu = InverseCostWeightedUtility(mm, use_mean=False)
                ratios = icwu(
                    X, deltas, sampler=IIDNormalSampler(sample_shape=torch.Size([4]))
                )
                self.assertTrue(
                    torch.equal(ratios, deltas / samples.squeeze(-1).sum(dim=-1))
                )

                # test min cost
                mm = MockModel(MockPosterior(mean=mean))
                icwu = InverseCostWeightedUtility(mm, min_cost=1.5)
                ratios = icwu(X, deltas)
                self.assertTrue(
                    torch.equal(
                        ratios, deltas / mean.clamp_min(1.5).squeeze(-1).sum(dim=-1)
                    )
                )

                # test evaluation_mask
                multi_output_mean = torch.cat([mean, 2 * mean], dim=-1)

                def cost_fn(X):
                    return multi_output_mean

                mm = GenericDeterministicModel(f=cost_fn, num_outputs=2)
                icwu = InverseCostWeightedUtility(mm)
                eval_mask = torch.zeros(3, 2, dtype=torch.bool, device=self.device)
                eval_mask[:, 1] = True  # 1 objective is evaluated
                ratios = icwu(X, deltas, X_evaluation_mask=eval_mask)
                self.assertTrue(
                    torch.equal(
                        ratios, deltas / multi_output_mean[..., 1].sum(dim=-1)
                    )
                )
                eval_mask[:, 0] = True  # both objectives are evaluated
                ratios = icwu(X, deltas, X_evaluation_mask=eval_mask)
                self.assertAllClose(
                    ratios, deltas / multi_output_mean.sum(dim=(-1, -2))
                )
                # test eval_mask where not all rows are the same
                eval_mask[0, 1] = False
                msg = (
                    "Currently, all candidates must be evaluated "
                    "on the same outputs."
                )
                with self.assertRaisesRegex(NotImplementedError, msg):
                    icwu(X, deltas, X_evaluation_mask=eval_mask)
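# Hedged sketch of the ratio these tests check: InverseCostWeightedUtility divides
# the improvement `deltas` by the total predicted cost of the candidate batch,
# clamping the cost from below for numerical safety (mirroring the `min_cost`
# argument and the `use_mean=True` path above; the default value here is assumed
# for illustration). Standalone torch version:
import torch


def inverse_cost_weighted_sketch(
    deltas: torch.Tensor, cost_mean: torch.Tensor, min_cost: float = 1e-2
) -> torch.Tensor:
    # `cost_mean`: `batch_shape x q x 1` posterior mean cost; clamp, then sum over q.
    total_cost = cost_mean.clamp_min(min_cost).squeeze(-1).sum(dim=-1)
    return deltas / total_cost


_deltas = torch.rand(4, 2)
_cost = 1 + torch.rand(2, 3, 1)  # batch_shape=2, q=3, one cost output
assert inverse_cost_weighted_sketch(_deltas, _cost).shape == (4, 2)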
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

import math
from typing import Any, Callable, Sequence, Type
from unittest import mock
from unittest.mock import MagicMock

import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import (
    ExpectedImprovement,
    LogExpectedImprovement,
    LogNoisyExpectedImprovement,
    LogProbabilityOfImprovement,
    NoisyExpectedImprovement,
    PosteriorMean,
    ProbabilityOfImprovement,
    UpperConfidenceBound,
)
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.input_constructors import (
    _field_is_shared,
    _register_acqf_input_constructor,
    acqf_input_constructor,
    ACQF_INPUT_CONSTRUCTOR_REGISTRY,
    construct_inputs_mf_base,
    get_acqf_input_constructor,
    get_best_f_analytic,
    get_best_f_mc,
)
from botorch.acquisition.joint_entropy_search import qJointEntropySearch
from botorch.acquisition.knowledge_gradient import (
    qKnowledgeGradient,
    qMultiFidelityKnowledgeGradient,
)
from botorch.acquisition.logei import (
    qLogExpectedImprovement,
    qLogNoisyExpectedImprovement,
    TAU_MAX,
    TAU_RELU,
)
from botorch.acquisition.max_value_entropy_search import (
    qMaxValueEntropy,
    qMultiFidelityMaxValueEntropy,
)
from botorch.acquisition.monte_carlo import (
    qExpectedImprovement,
    qNoisyExpectedImprovement,
    qProbabilityOfImprovement,
    qSimpleRegret,
    qUpperConfidenceBound,
)
from botorch.acquisition.multi_objective import (
    ExpectedHypervolumeImprovement,
    qExpectedHypervolumeImprovement,
    qNoisyExpectedHypervolumeImprovement,
)
from botorch.acquisition.multi_objective.multi_output_risk_measures import (
    MultiOutputExpectation,
)
from botorch.acquisition.multi_objective.objective import (
    IdentityAnalyticMultiOutputObjective,
    IdentityMCMultiOutputObjective,
    WeightedMCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import get_default_partitioning_alpha
from botorch.acquisition.objective import (
    LinearMCObjective,
    ScalarizedPosteriorTransform,
)
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
from botorch.acquisition.utils import (
    expand_trace_observations,
    project_to_target_fidelity,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models import FixedNoiseGP, MultiTaskGP, SingleTaskGP
from botorch.models.deterministic import FixedSingleSampleModel
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.constraints import get_outcome_constraint_transforms
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    FastNondominatedPartitioning,
    NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior


class DummyAcquisitionFunction(AcquisitionFunction):
    ...


class InputConstructorBaseTestCase(BotorchTestCase):
    def setUp(self, suppress_input_warnings: bool = True) -> None:
        super().setUp(suppress_input_warnings=suppress_input_warnings)
        self.mock_model = MockModel(
            posterior=MockPosterior(mean=None, variance=None, base_shape=(1,))
        )
        X1 = torch.rand(3, 2)
        X2 = torch.rand(3, 2)
        Y1 = torch.rand(3, 1)
        Y2 = torch.rand(3, 1)
        self.blockX_blockY = SupervisedDataset.dict_from_iter(X1, Y1)
        self.blockX_multiY = SupervisedDataset.dict_from_iter(X1, (Y1, Y2))
        self.multiX_multiY = SupervisedDataset.dict_from_iter((X1, X2), (Y1, Y2))
        self.bounds = 2 * [(0.0, 1.0)]


class TestInputConstructorUtils(InputConstructorBaseTestCase):
    def test_field_is_shared(self) -> None:
        self.assertTrue(_field_is_shared(self.blockX_multiY, "X"))
        self.assertFalse(_field_is_shared(self.blockX_multiY, "Y"))
        with self.assertRaisesRegex(AttributeError, "has no field"):
            self.assertFalse(_field_is_shared(self.blockX_multiY, "foo"))

    def test_get_best_f_analytic(self) -> None:
        with self.assertRaisesRegex(
            NotImplementedError, "Currently only block designs are supported."
        ):
            get_best_f_analytic(training_data=self.multiX_multiY)
        best_f = get_best_f_analytic(training_data=self.blockX_blockY)
        self.assertEqual(best_f, get_best_f_analytic(self.blockX_blockY[0]))
        best_f_expected = self.blockX_blockY[0].Y.squeeze().max()
        self.assertEqual(best_f, best_f_expected)
        with self.assertRaisesRegex(
            NotImplementedError,
            "Analytic acquisition functions currently only work with "
            "multi-output models if provided with a",
        ):
            get_best_f_analytic(training_data=self.blockX_multiY)
        weights = torch.rand(2)
        post_tf = ScalarizedPosteriorTransform(weights=weights)
        best_f_tf = get_best_f_analytic(
            training_data=self.blockX_multiY, posterior_transform=post_tf
        )
        multi_Y = torch.cat([d.Y for d in self.blockX_multiY.values()], dim=-1)
        best_f_expected = post_tf.evaluate(multi_Y).max()
        self.assertEqual(best_f_tf, best_f_expected)

    def test_get_best_f_mc(self) -> None:
        with self.assertRaisesRegex(
            NotImplementedError, "Currently only block designs are supported."
        ):
            get_best_f_mc(training_data=self.multiX_multiY)
        best_f = get_best_f_mc(training_data=self.blockX_blockY)
        self.assertEqual(best_f, get_best_f_mc(self.blockX_blockY[0]))
        best_f_expected = self.blockX_blockY[0].Y.max(dim=0).values
        self.assertAllClose(best_f, best_f_expected)
        with self.assertRaisesRegex(UnsupportedError, "require an objective"):
            get_best_f_mc(training_data=self.blockX_multiY)
        obj = LinearMCObjective(weights=torch.rand(2))
        best_f = get_best_f_mc(training_data=self.blockX_multiY, objective=obj)
        multi_Y = torch.cat([d.Y for d in self.blockX_multiY.values()], dim=-1)
        best_f_expected = (multi_Y @ obj.weights).amax(dim=-1, keepdim=True)
        self.assertAllClose(best_f, best_f_expected)
        post_tf = ScalarizedPosteriorTransform(weights=torch.ones(2))
        best_f = get_best_f_mc(
            training_data=self.blockX_multiY, posterior_transform=post_tf
        )
        best_f_expected = (multi_Y.sum(dim=-1)).amax(dim=-1, keepdim=True)
        self.assertAllClose(best_f, best_f_expected)

    @mock.patch("botorch.acquisition.input_constructors.optimize_acqf")
    def test_optimize_objective(self, mock_optimize_acqf):
        from botorch.acquisition.input_constructors import optimize_objective

        mock_model = self.mock_model
        bounds = torch.rand(2, len(self.bounds))
        A = torch.rand(1, bounds.shape[-1])
        b = torch.zeros([1, 1])
        idx = A[0].nonzero(as_tuple=False).squeeze()
        inequality_constraints = ((idx, -A[0, idx], -b[0, 0]),)

        with self.subTest("scalarObjective_linearConstraints"):
            post_tf = ScalarizedPosteriorTransform(
                weights=torch.rand(bounds.shape[-1])
            )
            _ = optimize_objective(
                model=mock_model,
                bounds=bounds,
                q=1,
                posterior_transform=post_tf,
                linear_constraints=(A, b),
                fixed_features=None,
            )
            kwargs = mock_optimize_acqf.call_args[1]
            self.assertIsInstance(kwargs["acq_function"], PosteriorMean)
            self.assertTrue(torch.equal(kwargs["bounds"], bounds))
            self.assertEqual(len(kwargs["inequality_constraints"]), 1)
            for a, b in zip(
                kwargs["inequality_constraints"][0], inequality_constraints[0]
            ):
                self.assertTrue(torch.equal(a, b))

        with self.subTest("mcObjective_fixedFeatures"):
            _ = optimize_objective(
                model=mock_model,
                bounds=bounds,
                q=1,
                objective=LinearMCObjective(weights=torch.rand(bounds.shape[-1])),
                fixed_features={0: 0.5},
            )
            kwargs = mock_optimize_acqf.call_args[1]
            self.assertIsInstance(
                kwargs["acq_function"], FixedFeatureAcquisitionFunction
            )
            self.assertIsInstance(kwargs["acq_function"].acq_func, qSimpleRegret)
            self.assertTrue(torch.equal(kwargs["bounds"], bounds[:, 1:]))

    def test__allow_only_specific_variable_kwargs__raises(self) -> None:
        input_constructor = get_acqf_input_constructor(ExpectedImprovement)
        with self.assertRaisesRegex(
            TypeError,
            "Unexpected keyword argument `hat` when constructing input arguments",
        ):
            input_constructor(
                model=self.mock_model, training_data=self.blockX_blockY, hat="car"
            )

    def test__register_acqf_input_constructor(self) -> None:
        with self.assertRaisesRegex(RuntimeError, "not registered"):
            get_acqf_input_constructor(DummyAcquisitionFunction)
        dummy_constructor = MagicMock()
        _register_acqf_input_constructor(
            acqf_cls=DummyAcquisitionFunction,
            input_constructor=dummy_constructor,
        )
        input_constructor = get_acqf_input_constructor(DummyAcquisitionFunction)
        self.assertIs(input_constructor, dummy_constructor)
        # Clean up changes to the global registry (leads to failure of other tests).
ACQF_INPUT_CONSTRUCTOR_REGISTRY.pop(DummyAcquisitionFunction) class TestAnalyticAcquisitionFunctionInputConstructors(InputConstructorBaseTestCase): def test_acqf_input_constructor(self) -> None: with self.assertRaisesRegex(RuntimeError, "not registered"): get_acqf_input_constructor(DummyAcquisitionFunction) with self.assertRaisesRegex(ValueError, "duplicate"): acqf_input_constructor(ExpectedImprovement)(lambda x: x) def test_construct_inputs_posterior_mean(self) -> None: c = get_acqf_input_constructor(PosteriorMean) mock_model = self.mock_model kwargs = c(model=mock_model, training_data=self.blockX_blockY) self.assertIs(kwargs["model"], mock_model) self.assertIsNone(kwargs["posterior_transform"]) # test instantiation acqf = PosteriorMean(**kwargs) self.assertIs(acqf.model, mock_model) post_tf = ScalarizedPosteriorTransform(weights=torch.rand(1)) kwargs = c( model=mock_model, training_data=self.blockX_blockY, posterior_transform=post_tf, ) self.assertIs(kwargs["model"], mock_model) self.assertIs(kwargs["posterior_transform"], post_tf) # test instantiation acqf = PosteriorMean(**kwargs) self.assertIs(acqf.model, mock_model) def test_construct_inputs_best_f(self) -> None: for acqf_cls in [ ExpectedImprovement, LogExpectedImprovement, ProbabilityOfImprovement, LogProbabilityOfImprovement, ]: with self.subTest(acqf_cls=acqf_cls): c = get_acqf_input_constructor(acqf_cls) mock_model = self.mock_model kwargs = c( model=mock_model, training_data=self.blockX_blockY, maximize=False ) best_f_expected = self.blockX_blockY[0].Y.squeeze().max() self.assertIs(kwargs["model"], mock_model) self.assertIsNone(kwargs["posterior_transform"]) self.assertEqual(kwargs["best_f"], best_f_expected) self.assertFalse(kwargs["maximize"]) acqf = acqf_cls(**kwargs) self.assertIs(acqf.model, mock_model) kwargs = c( model=mock_model, training_data=self.blockX_blockY, best_f=0.1 ) self.assertIs(kwargs["model"], mock_model) self.assertIsNone(kwargs["posterior_transform"]) self.assertEqual(kwargs["best_f"], 0.1) self.assertTrue(kwargs["maximize"]) acqf = acqf_cls(**kwargs) self.assertIs(acqf.model, mock_model) def test_construct_inputs_ucb(self) -> None: c = get_acqf_input_constructor(UpperConfidenceBound) mock_model = self.mock_model kwargs = c(model=mock_model, training_data=self.blockX_blockY) self.assertIs(kwargs["model"], mock_model) self.assertIsNone(kwargs["posterior_transform"]) self.assertEqual(kwargs["beta"], 0.2) self.assertTrue(kwargs["maximize"]) acqf = UpperConfidenceBound(**kwargs) self.assertIs(mock_model, acqf.model) kwargs = c( model=mock_model, training_data=self.blockX_blockY, beta=0.1, maximize=False ) self.assertIs(kwargs["model"], mock_model) self.assertIsNone(kwargs["posterior_transform"]) self.assertEqual(kwargs["beta"], 0.1) self.assertFalse(kwargs["maximize"]) acqf = UpperConfidenceBound(**kwargs) self.assertIs(mock_model, acqf.model) def test_construct_inputs_noisy_ei(self) -> None: for acqf_cls in [NoisyExpectedImprovement, LogNoisyExpectedImprovement]: with self.subTest(acqf_cls=acqf_cls): c = get_acqf_input_constructor(acqf_cls) mock_model = FixedNoiseGP( train_X=torch.rand((2, 2)), train_Y=torch.rand((2, 1)), train_Yvar=torch.rand((2, 1)), ) kwargs = c(model=mock_model, training_data=self.blockX_blockY) self.assertEqual(kwargs["model"], mock_model) self.assertTrue( torch.equal(kwargs["X_observed"], self.blockX_blockY[0].X) ) self.assertEqual(kwargs["num_fantasies"], 20) self.assertTrue(kwargs["maximize"]) acqf = acqf_cls(**kwargs) self.assertTrue(acqf.maximize) kwargs = c( model=mock_model, 
                    training_data=self.blockX_blockY,
                    num_fantasies=10,
                    maximize=False,
                )
                self.assertEqual(kwargs["model"], mock_model)
                self.assertTrue(
                    torch.equal(kwargs["X_observed"], self.blockX_blockY[0].X)
                )
                self.assertEqual(kwargs["num_fantasies"], 10)
                self.assertFalse(kwargs["maximize"])
                acqf = acqf_cls(**kwargs)
                self.assertFalse(acqf.maximize)
                with self.assertRaisesRegex(ValueError, "Field `X` must be shared"):
                    c(model=mock_model, training_data=self.multiX_multiY)

    def test_construct_inputs_constrained_analytic_eubo(self) -> None:
        # create a dummy ModelListGP
        n = 10
        X = torch.linspace(0, 0.95, n).unsqueeze(dim=-1)
        Y1, Y2 = torch.sin(X * (2 * math.pi)), torch.cos(X * (2 * math.pi))
        # 3 tasks
        train_X = torch.cat(
            [torch.nn.functional.pad(X, (1, 0), value=i) for i in range(3)]
        )
        # train_Y needs one block of n observations per task, i.e. shape
        # (3n, 1) to match the 3n rows of train_X; the third block simply
        # reuses Y1
        train_Y = torch.cat([Y1, Y2, Y1])
        # model list of 2 multi-task GPs with 3 tasks each, so num_outputs is 6
        model = ModelListGP(
            *[MultiTaskGP(train_X, train_Y, task_feature=0) for i in range(2)]
        )
        self.assertEqual(model.num_outputs, 6)
        c = get_acqf_input_constructor(AnalyticExpectedUtilityOfBestOption)
        mock_pref_model = self.mock_model
        # assume we only have a preference model with 2 outcomes
        mock_pref_model.dim = 2
        mock_pref_model.datapoints = torch.tensor([])
        # test basic construction
        kwargs = c(model=model, pref_model=mock_pref_model)
        self.assertIsInstance(kwargs["outcome_model"], FixedSingleSampleModel)
        self.assertIs(kwargs["pref_model"], mock_pref_model)
        self.assertIsNone(kwargs["previous_winner"])
        # test instantiation
        AnalyticExpectedUtilityOfBestOption(**kwargs)
        # test previous_winner
        previous_winner = torch.randn(mock_pref_model.dim)
        kwargs = c(
            model=model,
            pref_model=mock_pref_model,
            previous_winner=previous_winner,
        )
        self.assertTrue(torch.equal(kwargs["previous_winner"], previous_winner))
        # test instantiation
        AnalyticExpectedUtilityOfBestOption(**kwargs)
        # test sample_multiplier
        torch.manual_seed(123)
        kwargs = c(
            model=model,
            pref_model=mock_pref_model,
            sample_multiplier=1e6,
        )
        # w is drawn from a std normal and then scaled by sample_multiplier,
        # so with a multiplier of 1e6 its entries all but surely exceed 10.0
        self.assertTrue((kwargs["outcome_model"].w.abs() > 10.0).all())
        # check that w's dimension agrees with the preference model
        self.assertEqual(kwargs["outcome_model"].w.shape[-1], mock_pref_model.dim)


class TestMCAcquisitionFunctionInputConstructors(InputConstructorBaseTestCase):
    def test_construct_inputs_mc_base(self) -> None:
        c = get_acqf_input_constructor(qSimpleRegret)
        mock_model = self.mock_model
        kwargs = c(model=mock_model, training_data=self.blockX_blockY)
        self.assertIs(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["sampler"])
        acqf = qSimpleRegret(**kwargs)
        self.assertIs(acqf.model, mock_model)
        X_pending = torch.rand(2, 2)
        objective = LinearMCObjective(torch.rand(2))
        kwargs = c(
            model=mock_model,
            training_data=self.blockX_blockY,
            objective=objective,
            X_pending=X_pending,
        )
        self.assertIs(kwargs["model"], mock_model)
        self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertIsNone(kwargs["sampler"])
        acqf = qSimpleRegret(**kwargs)
        self.assertIs(acqf.model, mock_model)
        # TODO: Test passing through of sampler

    def test_construct_inputs_qEI(self) -> None:
        c = get_acqf_input_constructor(qExpectedImprovement)
        mock_model = self.mock_model
        kwargs = c(model=mock_model, training_data=self.blockX_blockY)
        self.assertIs(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
self.assertIsNone(kwargs["X_pending"]) self.assertIsNone(kwargs["sampler"]) self.assertIsNone(kwargs["constraints"]) self.assertIsInstance(kwargs["eta"], float) self.assertLess(kwargs["eta"], 1) acqf = qExpectedImprovement(**kwargs) self.assertIs(acqf.model, mock_model) X_pending = torch.rand(2, 2) objective = LinearMCObjective(torch.rand(2)) kwargs = c( model=mock_model, training_data=self.blockX_multiY, objective=objective, X_pending=X_pending, ) self.assertIs(kwargs["model"], mock_model) self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights)) self.assertTrue(torch.equal(kwargs["X_pending"], X_pending)) self.assertIsNone(kwargs["sampler"]) self.assertIsInstance(kwargs["eta"], float) self.assertLess(kwargs["eta"], 1) acqf = qExpectedImprovement(**kwargs) self.assertIs(acqf.model, mock_model) multi_Y = torch.cat([d.Y for d in self.blockX_multiY.values()], dim=-1) best_f_expected = objective(multi_Y).max() self.assertEqual(kwargs["best_f"], best_f_expected) # Check explicitly specifying `best_f`. best_f_expected = best_f_expected - 1 # Random value. kwargs = c( model=mock_model, training_data=self.blockX_multiY, objective=objective, X_pending=X_pending, best_f=best_f_expected, ) self.assertEqual(kwargs["best_f"], best_f_expected) acqf = qExpectedImprovement(**kwargs) self.assertIs(acqf.model, mock_model) self.assertEqual(acqf.best_f, best_f_expected) # test passing constraints outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]])) constraints = get_outcome_constraint_transforms( outcome_constraints=outcome_constraints ) kwargs = c( model=mock_model, training_data=self.blockX_multiY, objective=objective, X_pending=X_pending, best_f=best_f_expected, constraints=constraints, ) self.assertIs(kwargs["constraints"], constraints) acqf = qExpectedImprovement(**kwargs) self.assertEqual(acqf.best_f, best_f_expected) # testing qLogEI input constructor log_constructor = get_acqf_input_constructor(qLogExpectedImprovement) log_kwargs = log_constructor( model=mock_model, training_data=self.blockX_blockY, objective=objective, X_pending=X_pending, best_f=best_f_expected, constraints=constraints, ) # includes strict superset of kwargs tested above self.assertLessEqual(kwargs.items(), log_kwargs.items()) self.assertIn("fat", log_kwargs) self.assertIn("tau_max", log_kwargs) self.assertEqual(log_kwargs["tau_max"], TAU_MAX) self.assertIn("tau_relu", log_kwargs) self.assertEqual(log_kwargs["tau_relu"], TAU_RELU) self.assertIs(log_kwargs["constraints"], constraints) acqf = qLogExpectedImprovement(**log_kwargs) self.assertIs(acqf.model, mock_model) self.assertIs(acqf.objective, objective) def test_construct_inputs_qNEI(self) -> None: c = get_acqf_input_constructor(qNoisyExpectedImprovement) mock_model = SingleTaskGP( train_X=torch.rand((2, 2)), train_Y=torch.rand((2, 1)) ) kwargs = c(model=mock_model, training_data=self.blockX_blockY) self.assertIs(kwargs["model"], mock_model) self.assertIsNone(kwargs["objective"]) self.assertIsNone(kwargs["X_pending"]) self.assertIsNone(kwargs["sampler"]) self.assertTrue(kwargs["prune_baseline"]) self.assertTrue(torch.equal(kwargs["X_baseline"], self.blockX_blockY[0].X)) self.assertIsNone(kwargs["constraints"]) self.assertIsInstance(kwargs["eta"], float) self.assertLess(kwargs["eta"], 1) acqf = qNoisyExpectedImprovement(**kwargs) self.assertIs(acqf.model, mock_model) with self.assertRaisesRegex(ValueError, "Field `X` must be shared"): c(model=mock_model, training_data=self.multiX_multiY) X_baseline = torch.rand(2, 2) outcome_constraints = 
(torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]])) constraints = get_outcome_constraint_transforms( outcome_constraints=outcome_constraints ) kwargs = c( model=mock_model, training_data=self.blockX_blockY, X_baseline=X_baseline, prune_baseline=False, constraints=constraints, ) self.assertEqual(kwargs["model"], mock_model) self.assertIsNone(kwargs["objective"]) self.assertIsNone(kwargs["X_pending"]) self.assertIsNone(kwargs["sampler"]) self.assertFalse(kwargs["prune_baseline"]) self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline)) self.assertIsInstance(kwargs["eta"], float) self.assertLess(kwargs["eta"], 1) self.assertIs(kwargs["constraints"], constraints) acqf = qNoisyExpectedImprovement(**kwargs) self.assertIs(acqf.model, mock_model) # testing qLogNEI input constructor log_constructor = get_acqf_input_constructor(qLogNoisyExpectedImprovement) log_kwargs = log_constructor( model=mock_model, training_data=self.blockX_blockY, X_baseline=X_baseline, prune_baseline=False, constraints=constraints, ) # includes strict superset of kwargs tested above self.assertLessEqual(kwargs.items(), log_kwargs.items()) self.assertIn("fat", log_kwargs) self.assertIn("tau_max", log_kwargs) self.assertEqual(log_kwargs["tau_max"], TAU_MAX) self.assertIn("tau_relu", log_kwargs) self.assertEqual(log_kwargs["tau_relu"], TAU_RELU) self.assertIs(log_kwargs["constraints"], constraints) acqf = qLogNoisyExpectedImprovement(**log_kwargs) self.assertIs(acqf.model, mock_model) def test_construct_inputs_qPI(self) -> None: c = get_acqf_input_constructor(qProbabilityOfImprovement) mock_model = self.mock_model kwargs = c(model=mock_model, training_data=self.blockX_blockY) self.assertEqual(kwargs["model"], mock_model) self.assertIsNone(kwargs["objective"]) self.assertIsNone(kwargs["X_pending"]) self.assertIsNone(kwargs["sampler"]) self.assertEqual(kwargs["tau"], 1e-3) self.assertIsNone(kwargs["constraints"]) self.assertIsInstance(kwargs["eta"], float) self.assertLess(kwargs["eta"], 1) acqf = qProbabilityOfImprovement(**kwargs) self.assertIs(acqf.model, mock_model) X_pending = torch.rand(2, 2) objective = LinearMCObjective(torch.rand(2)) kwargs = c( model=mock_model, training_data=self.blockX_multiY, objective=objective, X_pending=X_pending, tau=1e-2, ) self.assertEqual(kwargs["model"], mock_model) self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights)) self.assertTrue(torch.equal(kwargs["X_pending"], X_pending)) self.assertIsNone(kwargs["sampler"]) self.assertEqual(kwargs["tau"], 1e-2) self.assertIsInstance(kwargs["eta"], float) self.assertLess(kwargs["eta"], 1) multi_Y = torch.cat([d.Y for d in self.blockX_multiY.values()], dim=-1) best_f_expected = objective(multi_Y).max() self.assertEqual(kwargs["best_f"], best_f_expected) acqf = qProbabilityOfImprovement(**kwargs) self.assertIs(acqf.model, mock_model) self.assertIs(acqf.objective, objective) # Check explicitly specifying `best_f`. best_f_expected = best_f_expected - 1 # Random value. 
outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]])) constraints = get_outcome_constraint_transforms( outcome_constraints=outcome_constraints ) kwargs = c( model=mock_model, training_data=self.blockX_multiY, objective=objective, X_pending=X_pending, tau=1e-2, best_f=best_f_expected, constraints=constraints, ) self.assertEqual(kwargs["best_f"], best_f_expected) self.assertIs(kwargs["constraints"], constraints) acqf = qProbabilityOfImprovement(**kwargs) self.assertIs(acqf.model, mock_model) self.assertIs(acqf.objective, objective) def test_construct_inputs_qUCB(self) -> None: c = get_acqf_input_constructor(qUpperConfidenceBound) mock_model = self.mock_model kwargs = c(model=mock_model, training_data=self.blockX_blockY) self.assertEqual(kwargs["model"], mock_model) self.assertIsNone(kwargs["objective"]) self.assertIsNone(kwargs["X_pending"]) self.assertIsNone(kwargs["sampler"]) self.assertEqual(kwargs["beta"], 0.2) acqf = qUpperConfidenceBound(**kwargs) self.assertIs(acqf.model, mock_model) X_pending = torch.rand(2, 2) objective = LinearMCObjective(torch.rand(2)) kwargs = c( model=mock_model, training_data=self.blockX_blockY, objective=objective, X_pending=X_pending, beta=0.1, ) self.assertEqual(kwargs["model"], mock_model) self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights)) self.assertTrue(torch.equal(kwargs["X_pending"], X_pending)) self.assertIsNone(kwargs["sampler"]) self.assertEqual(kwargs["beta"], 0.1) acqf = qUpperConfidenceBound(**kwargs) self.assertIs(acqf.model, mock_model) class TestMultiObjectiveAcquisitionFunctionInputConstructors( InputConstructorBaseTestCase ): def test_construct_inputs_EHVI(self) -> None: c = get_acqf_input_constructor(ExpectedHypervolumeImprovement) mock_model = mock.Mock() objective_thresholds = torch.rand(6) # test error on non-block designs with self.assertRaisesRegex(ValueError, "Field `X` must be shared"): c( model=mock_model, training_data=self.multiX_multiY, objective_thresholds=objective_thresholds, ) # test error on unsupported outcome constraints with self.assertRaises(NotImplementedError): c( model=mock_model, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, constraints=mock.Mock(), ) # test with Y_pmean supplied explicitly Y_pmean = torch.rand(3, 6) kwargs = c( model=mock_model, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, Y_pmean=Y_pmean, ) self.assertEqual(kwargs["model"], mock_model) self.assertIsInstance(kwargs["objective"], IdentityAnalyticMultiOutputObjective) self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds)) partitioning = kwargs["partitioning"] alpha_expected = get_default_partitioning_alpha(6) self.assertIsInstance(partitioning, NondominatedPartitioning) self.assertEqual(partitioning.alpha, alpha_expected) self.assertTrue(torch.equal(partitioning._neg_ref_point, -objective_thresholds)) Y_pmean = torch.rand(3, 2) objective_thresholds = torch.rand(2) kwargs = c( model=mock_model, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, Y_pmean=Y_pmean, ) partitioning = kwargs["partitioning"] self.assertIsInstance(partitioning, FastNondominatedPartitioning) self.assertTrue(torch.equal(partitioning.ref_point, objective_thresholds)) # test with custom objective weights = torch.rand(2) obj = WeightedMCMultiOutputObjective(weights=weights) kwargs = c( model=mock_model, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, objective=obj, Y_pmean=Y_pmean, alpha=0.05, ) 
self.assertEqual(kwargs["model"], mock_model) self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective) ref_point_expected = objective_thresholds * weights self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected)) partitioning = kwargs["partitioning"] self.assertIsInstance(partitioning, NondominatedPartitioning) self.assertEqual(partitioning.alpha, 0.05) self.assertTrue(torch.equal(partitioning._neg_ref_point, -ref_point_expected)) # Test without providing Y_pmean (computed from model) mean = torch.rand(1, 2) variance = torch.ones(1, 1) mm = MockModel(MockPosterior(mean=mean, variance=variance)) kwargs = c( model=mm, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, ) self.assertIsInstance(kwargs["objective"], IdentityAnalyticMultiOutputObjective) self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds)) partitioning = kwargs["partitioning"] self.assertIsInstance(partitioning, FastNondominatedPartitioning) self.assertTrue(torch.equal(partitioning.ref_point, objective_thresholds)) self.assertTrue(torch.equal(partitioning._neg_Y, -mean)) # Test with risk measures. for use_preprocessing in (True, False): obj = MultiOutputExpectation( n_w=3, preprocessing_function=WeightedMCMultiOutputObjective( torch.tensor([-1.0, -1.0]) ) if use_preprocessing else None, ) kwargs = c( model=mm, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, objective=obj, ) expected_obj_t = ( -objective_thresholds if use_preprocessing else objective_thresholds ) self.assertIs(kwargs["objective"], obj) self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t)) partitioning = kwargs["partitioning"] self.assertIsInstance(partitioning, FastNondominatedPartitioning) self.assertTrue(torch.equal(partitioning.ref_point, expected_obj_t)) def test_construct_inputs_qEHVI(self) -> None: c = get_acqf_input_constructor(qExpectedHypervolumeImprovement) objective_thresholds = torch.rand(2) # Test defaults mm = SingleTaskGP(torch.rand(1, 2), torch.rand(1, 2)) mean = mm.posterior(self.blockX_blockY[0].X).mean kwargs = c( model=mm, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, ) self.assertIsInstance(kwargs["objective"], IdentityMCMultiOutputObjective) ref_point_expected = objective_thresholds self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected)) partitioning = kwargs["partitioning"] self.assertIsInstance(partitioning, FastNondominatedPartitioning) self.assertTrue(torch.equal(partitioning.ref_point, ref_point_expected)) self.assertTrue(torch.equal(partitioning._neg_Y, -mean)) sampler = kwargs["sampler"] self.assertIsInstance(sampler, SobolQMCNormalSampler) self.assertEqual(sampler.sample_shape, torch.Size([128])) self.assertIsNone(kwargs["X_pending"]) self.assertIsNone(kwargs["constraints"]) self.assertEqual(kwargs["eta"], 1e-3) # Test IID sampler kwargs = c( model=mm, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, qmc=False, mc_samples=64, ) sampler = kwargs["sampler"] self.assertIsInstance(sampler, IIDNormalSampler) self.assertEqual(sampler.sample_shape, torch.Size([64])) # Test outcome constraints and custom inputs mean = torch.tensor([[1.0, 0.25], [0.5, 1.0]]) variance = torch.ones(1, 1) mm = MockModel(MockPosterior(mean=mean, variance=variance)) weights = torch.rand(2) obj = WeightedMCMultiOutputObjective(weights=weights) outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]])) constraints = get_outcome_constraint_transforms( 
outcome_constraints=outcome_constraints ) X_pending = torch.rand(1, 2) kwargs = c( model=mm, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, objective=obj, constraints=constraints, X_pending=X_pending, alpha=0.05, eta=1e-2, ) self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective) ref_point_expected = objective_thresholds * weights self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected)) partitioning = kwargs["partitioning"] self.assertIsInstance(partitioning, NondominatedPartitioning) self.assertEqual(partitioning.alpha, 0.05) self.assertTrue(torch.equal(partitioning._neg_ref_point, -ref_point_expected)) Y_expected = mean[:1] * weights self.assertTrue(torch.equal(partitioning._neg_Y, -Y_expected)) self.assertTrue(torch.equal(kwargs["X_pending"], X_pending)) self.assertIs(kwargs["constraints"], constraints) self.assertEqual(kwargs["eta"], 1e-2) # Test check for block designs with self.assertRaisesRegex(ValueError, "Field `X` must be shared"): c( model=mm, training_data=self.multiX_multiY, objective_thresholds=objective_thresholds, objective=obj, constraints=constraints, X_pending=X_pending, alpha=0.05, eta=1e-2, ) # Test custom sampler custom_sampler = SobolQMCNormalSampler(sample_shape=torch.Size([16]), seed=1234) kwargs = c( model=mm, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, sampler=custom_sampler, ) sampler = kwargs["sampler"] self.assertIsInstance(sampler, SobolQMCNormalSampler) self.assertEqual(sampler.sample_shape, torch.Size([16])) self.assertEqual(sampler.seed, 1234) def test_construct_inputs_qNEHVI(self) -> None: c = get_acqf_input_constructor(qNoisyExpectedHypervolumeImprovement) objective_thresholds = torch.rand(2) # Test defaults kwargs = c( model=SingleTaskGP(torch.rand(1, 2), torch.rand(1, 2)), training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, ) ref_point_expected = objective_thresholds self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected)) self.assertTrue(torch.equal(kwargs["X_baseline"], self.blockX_blockY[0].X)) self.assertIsInstance(kwargs["sampler"], SobolQMCNormalSampler) self.assertEqual(kwargs["sampler"].sample_shape, torch.Size([128])) self.assertIsInstance(kwargs["objective"], IdentityMCMultiOutputObjective) self.assertIsNone(kwargs["constraints"]) self.assertIsNone(kwargs["X_pending"]) self.assertEqual(kwargs["eta"], 1e-3) self.assertTrue(kwargs["prune_baseline"]) self.assertEqual(kwargs["alpha"], 0.0) self.assertTrue(kwargs["cache_pending"]) self.assertEqual(kwargs["max_iep"], 0) self.assertTrue(kwargs["incremental_nehvi"]) self.assertTrue(kwargs["cache_root"]) # Test check for block designs mock_model = mock.Mock() mock_model.num_outputs = 2 with self.assertRaisesRegex(ValueError, "Field `X` must be shared"): c( model=mock_model, training_data=self.multiX_multiY, objective_thresholds=objective_thresholds, ) # Test custom inputs weights = torch.rand(2) objective = WeightedMCMultiOutputObjective(weights=weights) X_baseline = torch.rand(2, 2) sampler = IIDNormalSampler(sample_shape=torch.Size([4])) outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]])) constraints = get_outcome_constraint_transforms( outcome_constraints=outcome_constraints ) X_pending = torch.rand(1, 2) kwargs = c( model=mock_model, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, objective=objective, X_baseline=X_baseline, sampler=sampler, constraints=constraints, X_pending=X_pending, eta=1e-2, prune_baseline=True, 
            alpha=0.0,
            cache_pending=False,
            max_iep=1,
            incremental_nehvi=False,
            cache_root=False,
        )
        ref_point_expected = objective(objective_thresholds)
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))
        sampler_ = kwargs["sampler"]
        self.assertIsInstance(sampler_, IIDNormalSampler)
        self.assertEqual(sampler_.sample_shape, torch.Size([4]))
        self.assertEqual(kwargs["objective"], objective)
        self.assertIs(kwargs["constraints"], constraints)
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertEqual(kwargs["eta"], 1e-2)
        self.assertTrue(kwargs["prune_baseline"])
        self.assertEqual(kwargs["alpha"], 0.0)
        self.assertFalse(kwargs["cache_pending"])
        self.assertEqual(kwargs["max_iep"], 1)
        self.assertFalse(kwargs["incremental_nehvi"])
        self.assertFalse(kwargs["cache_root"])
        # Test with risk measures.
        with self.assertRaisesRegex(UnsupportedError, "feasibility-weighted"):
            kwargs = c(
                model=mock_model,
                training_data=self.blockX_blockY,
                objective_thresholds=objective_thresholds,
                objective=MultiOutputExpectation(n_w=3),
                constraints=constraints,
            )
        for use_preprocessing in (True, False):
            obj = MultiOutputExpectation(
                n_w=3,
                preprocessing_function=WeightedMCMultiOutputObjective(
                    torch.tensor([-1.0, -1.0])
                )
                if use_preprocessing
                else None,
            )
            kwargs = c(
                model=mock_model,
                training_data=self.blockX_blockY,
                objective_thresholds=objective_thresholds,
                objective=obj,
            )
            expected_obj_t = (
                -objective_thresholds if use_preprocessing else objective_thresholds
            )
            self.assertIs(kwargs["objective"], obj)
            self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t))
        # Test default alpha for many objectives.
        mock_model.num_outputs = 5
        kwargs = c(
            model=mock_model,
            training_data=self.blockX_blockY,
            objective_thresholds=objective_thresholds,
        )
        self.assertEqual(kwargs["alpha"], 0.0)

    def test_construct_inputs_kg(self) -> None:
        current_value = torch.tensor(1.23)
        with mock.patch(
            target="botorch.acquisition.input_constructors.optimize_objective",
            return_value=(None, current_value),
        ):
            from botorch.acquisition import input_constructors

            func = input_constructors.get_acqf_input_constructor(qKnowledgeGradient)
            kwargs = func(
                model=mock.Mock(),
                training_data=self.blockX_blockY,
                objective=LinearMCObjective(torch.rand(2)),
                bounds=self.bounds,
                num_fantasies=33,
            )
            self.assertEqual(kwargs["num_fantasies"], 33)
            self.assertEqual(kwargs["current_value"], current_value)

    def test_construct_inputs_mes(self) -> None:
        func = get_acqf_input_constructor(qMaxValueEntropy)
        model = SingleTaskGP(train_X=torch.ones((3, 2)), train_Y=torch.zeros((3, 1)))
        kwargs = func(
            model=model,
            training_data=self.blockX_blockY,
            objective=LinearMCObjective(torch.rand(2)),
            bounds=self.bounds,
            candidate_size=17,
            maximize=False,
        )
        self.assertFalse(kwargs["maximize"])
        self.assertGreaterEqual(kwargs["candidate_set"].min(), 0.0)
        self.assertLessEqual(kwargs["candidate_set"].max(), 1.0)
        self.assertEqual(
            [int(s) for s in kwargs["candidate_set"].shape], [17, len(self.bounds)]
        )
        acqf = qMaxValueEntropy(**kwargs)
        self.assertIs(acqf.model, model)

    def test_construct_inputs_mf_base(self) -> None:
        target_fidelities = {0: 0.123}
        fidelity_weights = {0: 0.456}
        cost_intercept = 0.789
        num_trace_observations = 0
        with self.subTest("test_fully_specified"):
            kwargs = construct_inputs_mf_base(
                target_fidelities=target_fidelities,
                fidelity_weights=fidelity_weights,
                cost_intercept=cost_intercept,
                num_trace_observations=num_trace_observations,
            )
            X = torch.rand(3, 2)
            self.assertIsInstance(kwargs["expand"], Callable)
self.assertTrue( torch.equal( kwargs["expand"](X), expand_trace_observations( X=X, fidelity_dims=sorted(target_fidelities), num_trace_obs=num_trace_observations, ), ) ) self.assertIsInstance(kwargs["project"], Callable) self.assertTrue( torch.equal( kwargs["project"](X), project_to_target_fidelity(X, target_fidelities=target_fidelities), ) ) cm = kwargs["cost_aware_utility"].cost_model w = torch.tensor(list(fidelity_weights.values()), dtype=cm.weights.dtype) self.assertEqual(cm.fixed_cost, cost_intercept) self.assertAllClose(cm.weights, w) with self.subTest("test_missing_fidelity_weights"): kwargs = construct_inputs_mf_base( target_fidelities=target_fidelities, cost_intercept=cost_intercept, ) cm = kwargs["cost_aware_utility"].cost_model self.assertAllClose(cm.weights, torch.ones_like(cm.weights)) with self.subTest("test_mismatched_weights"): with self.assertRaisesRegex( RuntimeError, "Must provide the same indices for" ): construct_inputs_mf_base( target_fidelities={0: 1.0}, fidelity_weights={1: 0.5}, cost_intercept=cost_intercept, ) def test_construct_inputs_mfkg(self) -> None: constructor_args = { "model": None, "training_data": self.blockX_blockY, "objective": None, "bounds": self.bounds, "num_fantasies": 123, "target_fidelities": {0: 0.987}, "fidelity_weights": {0: 0.654}, "cost_intercept": 0.321, } with mock.patch( target="botorch.acquisition.input_constructors.construct_inputs_mf_base", return_value={"foo": 0}, ), mock.patch( target="botorch.acquisition.input_constructors.construct_inputs_qKG", return_value={"bar": 1}, ): from botorch.acquisition import input_constructors input_constructor = input_constructors.get_acqf_input_constructor( qMultiFidelityKnowledgeGradient ) inputs_mfkg = input_constructor(**constructor_args) inputs_test = {"foo": 0, "bar": 1} self.assertEqual(inputs_mfkg, inputs_test) def test_construct_inputs_mfmes(self) -> None: target_fidelities = {0: 0.987} constructor_args = { "model": None, "training_data": self.blockX_blockY, "objective": None, "bounds": self.bounds, "num_fantasies": 123, "candidate_size": 17, "target_fidelities": target_fidelities, "fidelity_weights": {0: 0.654}, "cost_intercept": 0.321, } current_value = torch.tensor(1.23) with mock.patch( target="botorch.acquisition.input_constructors.construct_inputs_mf_base", return_value={"foo": 0}, ), mock.patch( target="botorch.acquisition.input_constructors.construct_inputs_qMES", return_value={"bar": 1}, ), mock.patch( target="botorch.acquisition.input_constructors.optimize_objective", return_value=(None, current_value), ): from botorch.acquisition import input_constructors input_constructor = input_constructors.get_acqf_input_constructor( qMultiFidelityMaxValueEntropy ) inputs_mfmes = input_constructor(**constructor_args) inputs_test = { "foo": 0, "bar": 1, "current_value": current_value, "target_fidelities": target_fidelities, } self.assertEqual(inputs_mfmes, inputs_test) def test_construct_inputs_jes(self) -> None: func = get_acqf_input_constructor(qJointEntropySearch) # we need to run optimize_posterior_samples, so we sort of need # a real model as there is no other (apparent) option model = SingleTaskGP(self.blockX_blockY[0].X, self.blockX_blockY[0].Y) kwargs = func( model=model, training_data=self.blockX_blockY, objective=LinearMCObjective(torch.rand(2)), bounds=self.bounds, num_optima=17, maximize=False, ) self.assertFalse(kwargs["maximize"]) self.assertEqual(self.blockX_blockY[0].X.dtype, kwargs["optimal_inputs"].dtype) self.assertEqual(len(kwargs["optimal_inputs"]), 17) 
self.assertEqual(len(kwargs["optimal_outputs"]), 17) # asserting that, for the non-batch case, the optimal inputs are # of shape N x D and outputs are N x 1 self.assertEqual(len(kwargs["optimal_inputs"].shape), 2) self.assertEqual(len(kwargs["optimal_outputs"].shape), 2) qJointEntropySearch(**kwargs) class TestInstantiationFromInputConstructor(InputConstructorBaseTestCase): def _test_constructor_base( self, classes: Sequence[Type[AcquisitionFunction]], **input_constructor_kwargs: Any, ) -> None: for cls_ in classes: with self.subTest(cls_.__name__, cls_=cls_): acqf_kwargs = get_acqf_input_constructor(cls_)( **input_constructor_kwargs ) # no assertions; we are just testing that this doesn't error cls_(**acqf_kwargs) def test_constructors_like_PosteriorMean(self) -> None: classes = [PosteriorMean, UpperConfidenceBound, qUpperConfidenceBound] self._test_constructor_base(classes=classes, model=self.mock_model) def test_constructors_like_ExpectedImprovement(self) -> None: classes = [ ExpectedImprovement, LogExpectedImprovement, ProbabilityOfImprovement, LogProbabilityOfImprovement, NoisyExpectedImprovement, LogNoisyExpectedImprovement, qExpectedImprovement, qLogExpectedImprovement, qNoisyExpectedImprovement, qLogNoisyExpectedImprovement, qProbabilityOfImprovement, ] model = FixedNoiseGP( train_X=torch.rand((4, 2)), train_Y=torch.rand((4, 1)), train_Yvar=torch.ones((4, 1)), ) self._test_constructor_base( classes=classes, model=model, training_data=self.blockX_blockY ) def test_constructors_like_qNEHVI(self) -> None: objective_thresholds = torch.tensor([0.1, 0.2]) model = SingleTaskGP(train_X=torch.rand((3, 2)), train_Y=torch.rand((3, 2))) # The EHVI and qEHVI input constructors are not working classes = [ qNoisyExpectedHypervolumeImprovement, # ExpectedHypervolumeImprovement, # qExpectedHypervolumeImprovement, ] self._test_constructor_base( classes=classes, model=model, training_data=self.blockX_blockY, objective_thresholds=objective_thresholds, ) def test_constructors_like_qMaxValueEntropy(self) -> None: bounds = torch.ones((1, 2)) classes = [qMaxValueEntropy, qKnowledgeGradient] self._test_constructor_base( classes=classes, model=SingleTaskGP(train_X=torch.rand((3, 1)), train_Y=torch.rand((3, 1))), training_data=self.blockX_blockY, bounds=bounds, ) def test_constructors_like_qMultiFidelityKnowledgeGradient(self) -> None: classes = [ qMultiFidelityKnowledgeGradient, # currently the input constructor for qMFMVG is not working # qMultiFidelityMaxValueEntropy ] self._test_constructor_base( classes=classes, model=SingleTaskGP(train_X=torch.rand((3, 1)), train_Y=torch.rand((3, 1))), training_data=self.blockX_blockY, bounds=torch.ones((1, 2)), target_fidelities={0: 0.987}, ) def test_eubo(self) -> None: model = SingleTaskGP(train_X=torch.rand((3, 2)), train_Y=torch.rand((3, 2))) pref_model = self.mock_model pref_model.dim = 2 pref_model.datapoints = torch.tensor([]) classes = [AnalyticExpectedUtilityOfBestOption] self._test_constructor_base( classes=classes, model=model, pref_model=pref_model, ) def test_qjes(self) -> None: model = SingleTaskGP(self.blockX_blockY[0].X, self.blockX_blockY[0].Y) self._test_constructor_base( classes=[qJointEntropySearch], model=model, bounds=self.bounds, )
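

def _example_input_constructor_roundtrip():
    # A minimal end-to-end sketch (not part of the test classes above): the
    # registry maps an acquisition class to a callable that assembles its
    # constructor kwargs from a model plus training data. The shapes below are
    # ad-hoc assumptions for illustration only.
    train_X = torch.rand(5, 2)
    train_Y = torch.rand(5, 1)
    model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
    training_data = SupervisedDataset.dict_from_iter(train_X, train_Y)
    input_constructor = get_acqf_input_constructor(qExpectedImprovement)
    kwargs = input_constructor(model=model, training_data=training_data)
    # `kwargs` now contains entries such as `model` and `best_f`, ready to
    # unpack into the acquisition function constructor
    return qExpectedImprovement(**kwargs)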
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from unittest import mock

import torch
from botorch.acquisition import logei, monte_carlo
from botorch.acquisition.factory import get_acquisition_function
from botorch.acquisition.multi_objective import (
    MCMultiOutputObjective,
    monte_carlo as moo_monte_carlo,
)
from botorch.acquisition.objective import (
    MCAcquisitionObjective,
    ScalarizedPosteriorTransform,
)
from botorch.acquisition.utils import compute_best_feasible_objective
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    FastNondominatedPartitioning,
    NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultivariateNormal
from torch import Tensor


class DummyMCObjective(MCAcquisitionObjective):
    def forward(self, samples: Tensor, X=None) -> Tensor:
        return samples.sum(-1)


class DummyMCMultiOutputObjective(MCMultiOutputObjective):
    def forward(self, samples: Tensor, X=None) -> Tensor:
        return samples


class TestGetAcquisitionFunction(BotorchTestCase):
    def setUp(self):
        super().setUp()
        self.model = MockModel(MockPosterior())
        self.objective = DummyMCObjective()
        self.X_observed = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.X_pending = torch.tensor([[1.0, 3.0, 4.0]])
        self.mc_samples = 250
        self.qmc = True
        self.ref_point = [0.0, 0.0]
        self.mo_objective = DummyMCMultiOutputObjective()
        # one observation of 2 multi-objective outcomes, shape (1 x 2)
        self.Y = torch.tensor([[1.0, 2.0]])
        self.seed = 1

    @mock.patch(f"{monte_carlo.__name__}.qExpectedImprovement")
    def test_GetQEI(self, mock_acqf):
        n = len(self.X_observed)
        mean = torch.arange(n, dtype=torch.double).view(-1, 1)
        var = torch.ones_like(mean)
        self.model = MockModel(MockPosterior(mean=mean, variance=var))
        common_kwargs = {
            "model": self.model,
            "objective": self.objective,
            "X_observed": self.X_observed,
            "X_pending": self.X_pending,
            "mc_samples": self.mc_samples,
            "seed": self.seed,
        }
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            **common_kwargs,
            marginalize_dim=0,
        )
        self.assertEqual(acqf, mock_acqf.return_value)
        best_f = (
            self.objective(self.model.posterior(self.X_observed).mean).max().item()
        )
        mock_acqf.assert_called_once_with(
            model=self.model,
            best_f=best_f,
            sampler=mock.ANY,
            objective=self.objective,
            posterior_transform=None,
            X_pending=self.X_pending,
            constraints=None,
            eta=1e-3,
        )
        # test batched model
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
        common_kwargs.update({"model": self.model})
        acqf = get_acquisition_function(
            acquisition_function_name="qEI", **common_kwargs
        )
        self.assertEqual(acqf, mock_acqf.return_value)
        # test batched model without marginalize dim
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
        # test w/ posterior transform
        pm = torch.tensor([1.0, 2.0])
        mvn = MultivariateNormal(pm, torch.eye(2))
        self.model._posterior.distribution = mvn
        self.model._posterior._mean = pm.unsqueeze(-1)
        common_kwargs.update({"model": self.model})
        pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            **common_kwargs,
            posterior_transform=pt,
            marginalize_dim=0,
        )
self.assertEqual(mock_acqf.call_args[-1]["best_f"].item(), -1.0) # with constraints upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5 constraints = [lambda samples: samples[..., 0] - upper_bound] eta = math.pi * 1e-2 # testing non-standard eta acqf = get_acquisition_function( acquisition_function_name="qEI", **common_kwargs, marginalize_dim=0, constraints=constraints, eta=eta, ) self.assertEqual(acqf, mock_acqf.return_value) best_feasible_f = compute_best_feasible_objective( samples=mean, obj=self.objective(mean), constraints=constraints, model=self.model, objective=self.objective, X_baseline=self.X_observed, ) mock_acqf.assert_called_with( model=self.model, best_f=best_feasible_f, sampler=mock.ANY, objective=self.objective, posterior_transform=None, X_pending=self.X_pending, constraints=constraints, eta=eta, ) @mock.patch(f"{logei.__name__}.qLogExpectedImprovement") def test_GetQLogEI(self, mock_acqf): n = len(self.X_observed) mean = torch.arange(n, dtype=torch.double).view(-1, 1) var = torch.ones_like(mean) self.model = MockModel(MockPosterior(mean=mean, variance=var)) common_kwargs = { "model": self.model, "objective": self.objective, "X_observed": self.X_observed, "X_pending": self.X_pending, "mc_samples": self.mc_samples, "seed": self.seed, } acqf = get_acquisition_function( acquisition_function_name="qLogEI", **common_kwargs, marginalize_dim=0, ) self.assertEqual(acqf, mock_acqf.return_value) best_f = self.objective(self.model.posterior(self.X_observed).mean).max().item() mock_acqf.assert_called_once_with( model=self.model, best_f=best_f, sampler=mock.ANY, objective=self.objective, posterior_transform=None, X_pending=self.X_pending, constraints=None, eta=1e-3, ) # test batched model self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1))) common_kwargs.update({"model": self.model}) acqf = get_acquisition_function( acquisition_function_name="qLogEI", **common_kwargs ) self.assertEqual(acqf, mock_acqf.return_value) # test batched model without marginalize dim args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending)) # test w/ posterior transform pm = torch.tensor([1.0, 2.0]) mvn = MultivariateNormal(pm, torch.eye(2)) self.model._posterior.distribution = mvn self.model._posterior._mean = pm.unsqueeze(-1) common_kwargs.update({"model": self.model}) pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1])) acqf = get_acquisition_function( acquisition_function_name="qLogEI", **common_kwargs, posterior_transform=pt, marginalize_dim=0, ) self.assertEqual(mock_acqf.call_args[-1]["best_f"].item(), -1.0) # with constraints upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5 constraints = [lambda samples: samples[..., 0] - upper_bound] eta = math.pi * 1e-2 # testing non-standard eta acqf = get_acquisition_function( acquisition_function_name="qLogEI", **common_kwargs, marginalize_dim=0, constraints=constraints, eta=eta, ) self.assertEqual(acqf, mock_acqf.return_value) best_feasible_f = compute_best_feasible_objective( samples=mean, obj=self.objective(mean), constraints=constraints, model=self.model, objective=self.objective, X_baseline=self.X_observed, ) mock_acqf.assert_called_with( model=self.model, best_f=best_feasible_f, sampler=mock.ANY, objective=self.objective, posterior_transform=None, X_pending=self.X_pending, constraints=constraints, eta=eta, ) 
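
    # Note: the constrained variants above (and in the tests that follow)
    # share one pattern: when `constraints` are passed, the factory recomputes
    # `best_f` via `compute_best_feasible_objective` before delegating to the
    # (mocked) acquisition class.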
    @mock.patch(f"{monte_carlo.__name__}.qProbabilityOfImprovement")
    def test_GetQPI(self, mock_acqf):
        # basic test
        n = len(self.X_observed)
        mean = torch.arange(n, dtype=torch.double).view(-1, 1)
        var = torch.ones_like(mean)
        self.model = MockModel(MockPosterior(mean=mean, variance=var))
        acqf = get_acquisition_function(
            acquisition_function_name="qPI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
        )
        self.assertEqual(acqf, mock_acqf.return_value)
        best_f = (
            self.objective(self.model.posterior(self.X_observed).mean).max().item()
        )
        mock_acqf.assert_called_once_with(
            model=self.model,
            best_f=best_f,
            sampler=mock.ANY,
            objective=self.objective,
            posterior_transform=None,
            X_pending=self.X_pending,
            tau=1e-3,
            constraints=None,
            eta=1e-3,
        )
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
        # test with different tau
        acqf = get_acquisition_function(
            acquisition_function_name="qPI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=2,
            tau=1.0,
        )
        self.assertEqual(mock_acqf.call_count, 2)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs["tau"], 1.0)
        sampler = kwargs["sampler"]
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 2)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
        # test batched model
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
        acqf = get_acquisition_function(
            acquisition_function_name="qPI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
        )
        self.assertEqual(acqf, mock_acqf.return_value)
        # with constraints
        n = len(self.X_observed)
        mean = torch.arange(n, dtype=torch.double).view(-1, 1)
        var = torch.ones_like(mean)
        self.model = MockModel(MockPosterior(mean=mean, variance=var))
        upper_bound = self.Y[0, 0] + 1 / 2  # = 1.5
        constraints = [lambda samples: samples[..., 0] - upper_bound]
        eta = math.pi * 1e-2  # testing non-standard eta
        acqf = get_acquisition_function(
            acquisition_function_name="qPI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            marginalize_dim=0,
            constraints=constraints,
            eta=eta,
        )
        self.assertEqual(acqf, mock_acqf.return_value)
        best_feasible_f = compute_best_feasible_objective(
            samples=mean,
            obj=self.objective(mean),
            constraints=constraints,
            model=self.model,
            objective=self.objective,
            X_baseline=self.X_observed,
        )
        mock_acqf.assert_called_with(
            model=self.model,
            best_f=best_feasible_f,
            sampler=mock.ANY,
            objective=self.objective,
            posterior_transform=None,
            X_pending=self.X_pending,
            tau=1e-3,
            constraints=constraints,
            eta=eta,
        )

    @mock.patch(f"{monte_carlo.__name__}.qNoisyExpectedImprovement")
    def test_GetQNEI(self, mock_acqf):
        # basic test
        n = len(self.X_observed)
        mean = torch.arange(n, dtype=torch.double).view(-1, 1)
        var = torch.ones_like(mean)
        self.model = MockModel(MockPosterior(mean=mean,
variance=var)) common_kwargs = { "model": self.model, "objective": self.objective, "X_observed": self.X_observed, "X_pending": self.X_pending, "mc_samples": self.mc_samples, "seed": self.seed, } acqf = get_acquisition_function( acquisition_function_name="qNEI", **common_kwargs, marginalize_dim=0, ) self.assertEqual(acqf, mock_acqf.return_value) self.assertEqual(mock_acqf.call_count, 1) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed)) self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending)) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) self.assertEqual(kwargs["marginalize_dim"], 0) self.assertEqual(kwargs["cache_root"], True) # test with cache_root = False acqf = get_acquisition_function( acquisition_function_name="qNEI", **common_kwargs, marginalize_dim=0, cache_root=False, ) self.assertEqual(acqf, mock_acqf.return_value) self.assertEqual(mock_acqf.call_count, 2) args, kwargs = mock_acqf.call_args self.assertEqual(kwargs["cache_root"], False) # test with non-qmc, no X_pending common_kwargs.update({"X_pending": None}) acqf = get_acquisition_function( acquisition_function_name="qNEI", **common_kwargs, ) self.assertEqual(mock_acqf.call_count, 3) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed)) self.assertEqual(kwargs["X_pending"], None) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed)) # with constraints upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5 constraints = [lambda samples: samples[..., 0] - upper_bound] eta = math.pi * 1e-2 # testing non-standard eta common_kwargs.update({"X_pending": self.X_pending}) acqf = get_acquisition_function( acquisition_function_name="qNEI", **common_kwargs, marginalize_dim=0, constraints=constraints, eta=eta, ) self.assertEqual(acqf, mock_acqf.return_value) mock_acqf.assert_called_with( model=self.model, X_baseline=self.X_observed, sampler=mock.ANY, objective=self.objective, posterior_transform=None, X_pending=self.X_pending, prune_baseline=True, marginalize_dim=0, cache_root=True, constraints=constraints, eta=eta, ) @mock.patch(f"{logei.__name__}.qLogNoisyExpectedImprovement") def test_GetQLogNEI(self, mock_acqf): # basic test n = len(self.X_observed) mean = torch.arange(n, dtype=torch.double).view(-1, 1) var = torch.ones_like(mean) self.model = MockModel(MockPosterior(mean=mean, variance=var)) common_kwargs = { "model": self.model, "objective": self.objective, "X_observed": self.X_observed, "X_pending": self.X_pending, "mc_samples": self.mc_samples, "seed": self.seed, } acqf = get_acquisition_function( acquisition_function_name="qLogNEI", **common_kwargs, marginalize_dim=0, ) self.assertEqual(acqf, mock_acqf.return_value) self.assertEqual(mock_acqf.call_count, 1) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed)) self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending)) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) self.assertEqual(kwargs["marginalize_dim"], 0) self.assertEqual(kwargs["cache_root"], True) # test with cache_root = False acqf = get_acquisition_function( 
acquisition_function_name="qLogNEI", **common_kwargs, marginalize_dim=0, cache_root=False, ) self.assertEqual(acqf, mock_acqf.return_value) self.assertEqual(mock_acqf.call_count, 2) args, kwargs = mock_acqf.call_args self.assertEqual(kwargs["cache_root"], False) # test with non-qmc, no X_pending common_kwargs.update({"X_pending": None}) acqf = get_acquisition_function( acquisition_function_name="qLogNEI", **common_kwargs, ) self.assertEqual(mock_acqf.call_count, 3) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed)) self.assertEqual(kwargs["X_pending"], None) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed)) # with constraints upper_bound = self.Y[0, 0] + 1 / 2 # = 1.5 constraints = [lambda samples: samples[..., 0] - upper_bound] eta = math.pi * 1e-2 # testing non-standard eta common_kwargs.update({"X_pending": self.X_pending}) acqf = get_acquisition_function( acquisition_function_name="qLogNEI", **common_kwargs, marginalize_dim=0, constraints=constraints, eta=eta, ) self.assertEqual(acqf, mock_acqf.return_value) mock_acqf.assert_called_with( model=self.model, X_baseline=self.X_observed, sampler=mock.ANY, objective=self.objective, posterior_transform=None, X_pending=self.X_pending, prune_baseline=True, marginalize_dim=0, cache_root=True, constraints=constraints, eta=eta, ) @mock.patch(f"{monte_carlo.__name__}.qSimpleRegret") def test_GetQSR(self, mock_acqf): # basic test acqf = get_acquisition_function( acquisition_function_name="qSR", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, ) self.assertEqual(acqf, mock_acqf.return_value) mock_acqf.assert_called_once_with( model=self.model, sampler=mock.ANY, objective=self.objective, posterior_transform=None, X_pending=self.X_pending, ) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending)) # test with non-qmc acqf = get_acquisition_function( acquisition_function_name="qSR", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=2, ) self.assertEqual(mock_acqf.call_count, 2) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 2) self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending)) @mock.patch(f"{monte_carlo.__name__}.qUpperConfidenceBound") def test_GetQUCB(self, mock_acqf): # make sure beta is specified with self.assertRaises(ValueError): acqf = get_acquisition_function( acquisition_function_name="qUCB", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, ) acqf = get_acquisition_function( acquisition_function_name="qUCB", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, beta=0.3, ) self.assertEqual(acqf, mock_acqf.return_value) mock_acqf.assert_called_once_with( model=self.model, beta=0.3, sampler=mock.ANY, 
objective=self.objective, posterior_transform=None, X_pending=self.X_pending, ) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending)) # test with different beta, non-qmc acqf = get_acquisition_function( acquisition_function_name="qUCB", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=2, beta=0.2, ) self.assertEqual(mock_acqf.call_count, 2) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) self.assertEqual(kwargs["beta"], 0.2) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 2) self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending)) @mock.patch(f"{moo_monte_carlo.__name__}.qExpectedHypervolumeImprovement") def test_GetQEHVI(self, mock_acqf): # make sure ref_point is specified with self.assertRaises(ValueError): acqf = get_acquisition_function( acquisition_function_name="qEHVI", model=self.model, objective=self.mo_objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, Y=self.Y, ) # make sure Y is specified with self.assertRaises(ValueError): acqf = get_acquisition_function( acquisition_function_name="qEHVI", model=self.model, objective=self.mo_objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, ref_point=self.ref_point, ) # posterior transforms are not supported with self.assertRaises(NotImplementedError): acqf = get_acquisition_function( acquisition_function_name="qEHVI", model=self.model, objective=self.mo_objective, posterior_transform=ScalarizedPosteriorTransform(weights=torch.rand(2)), X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, ref_point=self.ref_point, ) acqf = get_acquisition_function( acquisition_function_name="qEHVI", model=self.model, objective=self.mo_objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, ref_point=self.ref_point, Y=self.Y, ) self.assertEqual(acqf, mock_acqf.return_value) mock_acqf.assert_called_once_with( constraints=None, eta=1e-3, model=self.model, objective=self.mo_objective, ref_point=self.ref_point, partitioning=mock.ANY, sampler=mock.ANY, X_pending=self.X_pending, ) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) acqf = get_acquisition_function( acquisition_function_name="qEHVI", model=self.model, objective=self.mo_objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=2, ref_point=self.ref_point, Y=self.Y, ) self.assertEqual(mock_acqf.call_count, 2) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) self.assertEqual(kwargs["ref_point"], self.ref_point) sampler = kwargs["sampler"] self.assertIsInstance(kwargs["objective"], DummyMCMultiOutputObjective) partitioning = kwargs["partitioning"] self.assertIsInstance(partitioning, FastNondominatedPartitioning) self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 2) # test that approximate partitioning is used when alpha > 0 acqf = get_acquisition_function(
acquisition_function_name="qEHVI", model=self.model, objective=self.mo_objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=2, ref_point=self.ref_point, Y=self.Y, alpha=0.1, ) _, kwargs = mock_acqf.call_args partitioning = kwargs["partitioning"] self.assertIsInstance(partitioning, NondominatedPartitioning) self.assertEqual(partitioning.alpha, 0.1) # test constraints acqf = get_acquisition_function( acquisition_function_name="qEHVI", model=self.model, objective=self.mo_objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, constraints=[lambda Y: Y[..., -1]], eta=1e-2, seed=2, ref_point=self.ref_point, Y=self.Y, ) _, kwargs = mock_acqf.call_args partitioning = kwargs["partitioning"] self.assertEqual(partitioning.pareto_Y.shape[0], 0) self.assertEqual(kwargs["eta"], 1e-2) @mock.patch(f"{moo_monte_carlo.__name__}.qNoisyExpectedHypervolumeImprovement") def test_GetQNEHVI(self, mock_acqf): # make sure ref_point is specified with self.assertRaises(ValueError): acqf = get_acquisition_function( acquisition_function_name="qNEHVI", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, ) acqf = get_acquisition_function( acquisition_function_name="qNEHVI", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, ref_point=self.ref_point, ) self.assertEqual(acqf, mock_acqf.return_value) mock_acqf.assert_called_once_with( constraints=None, eta=1e-3, model=self.model, X_baseline=self.X_observed, objective=self.objective, ref_point=self.ref_point, sampler=mock.ANY, prune_baseline=True, alpha=0.0, X_pending=self.X_pending, marginalize_dim=None, cache_root=True, ) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) sampler = kwargs["sampler"] self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 1) # test with non-qmc acqf = get_acquisition_function( acquisition_function_name="qNEHVI", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=2, ref_point=self.ref_point, ) self.assertEqual(mock_acqf.call_count, 2) args, kwargs = mock_acqf.call_args self.assertEqual(args, ()) self.assertEqual(kwargs["ref_point"], self.ref_point) sampler = kwargs["sampler"] ref_point = kwargs["ref_point"] self.assertEqual(ref_point, self.ref_point) self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples])) self.assertEqual(sampler.seed, 2) # test passing alpha acqf = get_acquisition_function( acquisition_function_name="qNEHVI", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=2, ref_point=self.ref_point, alpha=0.01, ) self.assertEqual(mock_acqf.call_count, 3) args, kwargs = mock_acqf.call_args self.assertEqual(kwargs["alpha"], 0.01) def test_GetUnknownAcquisitionFunction(self): with self.assertRaises(NotImplementedError): get_acquisition_function( acquisition_function_name="foo", model=self.model, objective=self.objective, X_observed=self.X_observed, X_pending=self.X_pending, mc_samples=self.mc_samples, seed=self.seed, )
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional import torch from botorch.acquisition.objective import LinearMCObjective from botorch.acquisition.risk_measures import ( CVaR, Expectation, RiskMeasureMCObjective, VaR, WorstCase, ) from botorch.utils.testing import BotorchTestCase from torch import Tensor class NotSoAbstractRiskMeasure(RiskMeasureMCObjective): def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor: prepared_samples = self._prepare_samples(samples) return prepared_samples.sum(dim=-1) class TestRiskMeasureMCObjective(BotorchTestCase): def test_risk_measure_mc_objective(self): # abstract raises with self.assertRaises(TypeError): RiskMeasureMCObjective(n_w=3) for dtype in (torch.float, torch.double): samples = torch.tensor( [[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]], device=self.device, dtype=dtype, ) obj = NotSoAbstractRiskMeasure(n_w=3) # MO samples without weights with self.assertRaises(RuntimeError): obj(torch.ones(3, 2, device=self.device, dtype=dtype)) # test _prepare_samples expected_samples = torch.tensor( [[[1.0, 0.5, 2.0], [3.0, 1.0, 5.0]]], device=self.device, dtype=dtype, ) prepared_samples = obj._prepare_samples(samples) self.assertTrue(torch.equal(prepared_samples, expected_samples)) # test batches samples = torch.rand(5, 3, 6, 1, device=self.device, dtype=dtype) expected_samples = samples.view(5, 3, 2, 3) prepared_samples = obj._prepare_samples(samples) self.assertTrue(torch.equal(prepared_samples, expected_samples)) # negating with preprocessing function. obj = NotSoAbstractRiskMeasure( n_w=3, preprocessing_function=LinearMCObjective( weights=torch.tensor([-1.0], device=self.device, dtype=dtype) ), ) prepared_samples = obj._prepare_samples(samples) self.assertTrue(torch.equal(prepared_samples, -expected_samples)) # MO with weights obj = NotSoAbstractRiskMeasure( n_w=2, preprocessing_function=LinearMCObjective( weights=torch.tensor([1.0, 2.0], device=self.device, dtype=dtype) ), ) samples = torch.tensor( [ [ [1.0, 2.0], [0.5, 0.7], [2.0, 1.5], [3.0, 4.0], [1.0, 0.0], [5.0, 3.0], ] ], device=self.device, dtype=dtype, ) expected_samples = torch.tensor( [[[5.0, 1.9], [5.0, 11.0], [1.0, 11.0]]], device=self.device, dtype=dtype, ) prepared_samples = obj._prepare_samples(samples) self.assertTrue(torch.equal(prepared_samples, expected_samples)) class TestCVaR(BotorchTestCase): def test_cvar(self): obj = CVaR(alpha=0.5, n_w=3) self.assertEqual(obj.alpha_idx, 1) with self.assertRaises(ValueError): CVaR(alpha=3, n_w=3) for dtype in (torch.float, torch.double): obj = CVaR(alpha=0.5, n_w=3) samples = torch.tensor( [[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]], device=self.device, dtype=dtype, ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor([[0.75, 2.0]], device=self.device, dtype=dtype), ) ) # w/ preprocessing function obj = CVaR( alpha=0.5, n_w=3, preprocessing_function=LinearMCObjective( weights=torch.tensor([-1.0], device=self.device, dtype=dtype) ), ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor([[-1.5, -4.0]], device=self.device, dtype=dtype), ) ) class TestVaR(BotorchTestCase): def test_var(self): for dtype in (torch.float, torch.double): obj = VaR(alpha=0.5, n_w=3) samples = torch.tensor( [[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]], device=self.device, dtype=dtype, ) rm_samples = obj(samples) 
self.assertTrue( torch.equal( rm_samples, torch.tensor([[1.0, 3.0]], device=self.device, dtype=dtype), ) ) # w/ preprocessing function obj = VaR( alpha=0.5, n_w=3, preprocessing_function=LinearMCObjective( weights=torch.tensor([-1.0], device=self.device, dtype=dtype) ), ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor([[-1.0, -3.0]], device=self.device, dtype=dtype), ) ) class TestWorstCase(BotorchTestCase): def test_worst_case(self): for dtype in (torch.float, torch.double): obj = WorstCase(n_w=3) samples = torch.tensor( [[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]], device=self.device, dtype=dtype, ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor([[0.5, 1.0]], device=self.device, dtype=dtype), ) ) # w/ preprocessing function obj = WorstCase( n_w=3, preprocessing_function=LinearMCObjective( weights=torch.tensor([-1.0], device=self.device, dtype=dtype) ), ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor([[-2.0, -5.0]], device=self.device, dtype=dtype), ) ) class TestExpectation(BotorchTestCase): def test_expectation(self): for dtype in (torch.float, torch.double): obj = Expectation(n_w=3) samples = torch.tensor( [[[1.0], [0.5], [1.5], [3.0], [1.0], [5.0]]], device=self.device, dtype=dtype, ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor([[1.0, 3.0]], device=self.device, dtype=dtype), ) ) # w/ preprocessing function samples = torch.tensor( [ [ [1.0, 3.0], [0.5, 1.0], [1.5, 2.0], [3.0, 1.0], [1.0, 2.0], [5.0, 3.0], ] ], device=self.device, dtype=dtype, ) obj = Expectation( n_w=3, preprocessing_function=LinearMCObjective( weights=torch.tensor([-1.0, 2.0], device=self.device, dtype=dtype) ), ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor([[3.0, 1.0]], device=self.device, dtype=dtype), ) )
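# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test suite: the risk measures above
# view the trailing sample dimension as q blocks of `n_w` perturbed
# evaluations per design and reduce each block. The sample values are taken
# from the tests, so the printed results match the expected tensors there.
import torch
from botorch.acquisition.risk_measures import CVaR, VaR, WorstCase

# q = 2 designs, each with n_w = 3 perturbation samples (q * n_w = 6).
samples = torch.tensor([[[1.0], [0.5], [2.0], [3.0], [1.0], [5.0]]])
print(CVaR(alpha=0.5, n_w=3)(samples))  # tensor([[0.7500, 2.0000]])
print(VaR(alpha=0.5, n_w=3)(samples))   # tensor([[1.0000, 3.0000]])
print(WorstCase(n_w=3)(samples))        # tensor([[0.5000, 1.0000]])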
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.acquisition.acquisition import ( AcquisitionFunction, MCSamplerMixin, MultiModelAcquisitionFunction, OneShotAcquisitionFunction, ) from botorch.models.model import ModelDict from botorch.sampling.normal import IIDNormalSampler from botorch.sampling.stochastic_samplers import StochasticSampler from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior class DummyMCAcqf(AcquisitionFunction, MCSamplerMixin): def __init__(self, model, sampler): r"""Dummy acqf for testing MCSamplerMixin.""" super().__init__(model) MCSamplerMixin.__init__(self, sampler) def forward(self, X): raise NotImplementedError class DummyMultiModelAcqf(MultiModelAcquisitionFunction): def forward(self, X): raise NotImplementedError class TestAcquisitionFunction(BotorchTestCase): def test_abstract_raises(self): with self.assertRaises(TypeError): AcquisitionFunction() class TestOneShotAcquisitionFunction(BotorchTestCase): def test_abstract_raises(self): with self.assertRaises(TypeError): OneShotAcquisitionFunction() class TestMCSamplerMixin(BotorchTestCase): def test_mc_sampler_mixin(self): mm = MockModel(MockPosterior(samples=torch.rand(1, 2))) acqf = DummyMCAcqf(model=mm, sampler=None) self.assertIsNone(acqf.sampler) samples = acqf.get_posterior_samples(mm._posterior) self.assertEqual(samples.shape, torch.Size([512, 1, 2])) self.assertIsInstance(acqf.sampler, StochasticSampler) sampler = IIDNormalSampler(sample_shape=torch.Size([2])) acqf.sampler = sampler self.assertIs(acqf.sampler, sampler) class TestMultiModelAcquisitionFunction(BotorchTestCase): def test_multi_model_acquisition_function(self): model_dict = ModelDict( m1=MockModel(MockPosterior()), m2=MockModel(MockPosterior()), ) with self.assertRaises(TypeError): MultiModelAcquisitionFunction(model_dict=model_dict) acqf = DummyMultiModelAcqf(model_dict=model_dict) self.assertIs(acqf.model_dict, model_dict)
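# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test suite: a minimal concrete MC
# acquisition function built on `MCSamplerMixin`, analogous to `DummyMCAcqf`
# above but with a working `forward`. Averaging posterior samples is a
# hypothetical reduction chosen purely for illustration.
import torch
from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin
from botorch.models import SingleTaskGP
from botorch.sampling.normal import IIDNormalSampler


class SampleMeanAcqf(AcquisitionFunction, MCSamplerMixin):
    def __init__(self, model, sampler=None):
        super().__init__(model)
        MCSamplerMixin.__init__(self, sampler)

    def forward(self, X):
        # X: batch_shape x q x d. Draw MC samples from the joint posterior
        # via the mixin, then reduce over the MC and q dimensions.
        samples = self.get_posterior_samples(self.model.posterior(X))
        return samples.mean(dim=0).squeeze(-1).sum(dim=-1)


# hypothetical usage
model = SingleTaskGP(
    torch.rand(8, 2, dtype=torch.double), torch.rand(8, 1, dtype=torch.double)
)
acqf = SampleMeanAcqf(model, sampler=IIDNormalSampler(sample_shape=torch.Size([64])))
vals = acqf(torch.rand(5, 1, 2, dtype=torch.double))  # shape: torch.Size([5])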
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from botorch.acquisition import qAnalyticProbabilityOfImprovement from botorch.acquisition.analytic import ( _compute_log_prob_feas, _ei_helper, _log_ei_helper, AnalyticAcquisitionFunction, ConstrainedExpectedImprovement, ExpectedImprovement, LogConstrainedExpectedImprovement, LogExpectedImprovement, LogNoisyExpectedImprovement, LogProbabilityOfImprovement, NoisyExpectedImprovement, PosteriorMean, ProbabilityOfImprovement, ScalarizedPosteriorMean, UpperConfidenceBound, ) from botorch.acquisition.objective import ( IdentityMCObjective, ScalarizedPosteriorTransform, ) from botorch.exceptions import UnsupportedError from botorch.models import FixedNoiseGP, SingleTaskGP from botorch.posteriors import GPyTorchPosterior from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal NEI_NOISE = [ [-0.099], [-0.004], [0.227], [-0.182], [0.018], [0.334], [-0.270], [0.156], [-0.237], [0.052], ] class DummyAnalyticAcquisitionFunction(AnalyticAcquisitionFunction): def forward(self, X): pass class TestAnalyticAcquisitionFunction(BotorchTestCase): def test_abstract_raises(self): with self.assertRaises(TypeError): AnalyticAcquisitionFunction() # raise if model is multi-output, but no posterior transform is given mean = torch.zeros(1, 2) variance = torch.ones(1, 2) mm = MockModel(MockPosterior(mean=mean, variance=variance)) with self.assertRaises(UnsupportedError): DummyAnalyticAcquisitionFunction(model=mm) class TestExpectedImprovement(BotorchTestCase): def test_expected_improvement(self): for dtype in (torch.float, torch.double): mean = torch.tensor([[-0.5]], device=self.device, dtype=dtype) variance = torch.ones(1, 1, device=self.device, dtype=dtype) mm = MockModel(MockPosterior(mean=mean, variance=variance)) # basic test module = ExpectedImprovement(model=mm, best_f=0.0) log_module = LogExpectedImprovement(model=mm, best_f=0.0) X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy ei, log_ei = module(X), log_module(X) ei_expected = torch.tensor(0.19780, device=self.device, dtype=dtype) self.assertAllClose(ei, ei_expected, atol=1e-4) self.assertAllClose(log_ei, ei_expected.log(), atol=1e-4) # test maximize module = ExpectedImprovement(model=mm, best_f=0.0, maximize=False) log_module = LogExpectedImprovement(model=mm, best_f=0.0, maximize=False) X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy ei, log_ei = module(X), log_module(X) ei_expected = torch.tensor(0.6978, device=self.device, dtype=dtype) self.assertAllClose(ei, ei_expected, atol=1e-4) self.assertAllClose(log_ei, ei_expected.log(), atol=1e-4) with self.assertRaises(UnsupportedError): module.set_X_pending(None) with self.assertRaises(UnsupportedError): log_module.set_X_pending(None) # test posterior transform (single-output) mean = torch.tensor([0.5], device=self.device, dtype=dtype) covar = torch.tensor([[0.16]], device=self.device, dtype=dtype) mvn = MultivariateNormal(mean, covar) p = GPyTorchPosterior(mvn) mm = MockModel(p) weights = torch.tensor([0.5], device=self.device, dtype=dtype) transform = ScalarizedPosteriorTransform(weights) ei = ExpectedImprovement( model=mm, best_f=0.0, posterior_transform=transform ) log_ei = LogExpectedImprovement( model=mm, best_f=0.0, posterior_transform=transform ) X = 
torch.rand(1, 2, device=self.device, dtype=dtype) ei_expected = torch.tensor(0.2601, device=self.device, dtype=dtype) self.assertAllClose(ei(X), ei_expected, atol=1e-4) self.assertAllClose(log_ei(X), ei_expected.log(), atol=1e-4) # test posterior transform (multi-output) mean = torch.tensor([[-0.25, 0.5]], device=self.device, dtype=dtype) covar = torch.tensor( [[[0.5, 0.125], [0.125, 0.5]]], device=self.device, dtype=dtype ) mvn = MultitaskMultivariateNormal(mean, covar) p = GPyTorchPosterior(mvn) mm = MockModel(p) weights = torch.tensor([2.0, 1.0], device=self.device, dtype=dtype) transform = ScalarizedPosteriorTransform(weights) ei = ExpectedImprovement( model=mm, best_f=0.0, posterior_transform=transform ) log_ei = LogExpectedImprovement( model=mm, best_f=0.0, posterior_transform=transform ) X = torch.rand(1, 2, device=self.device, dtype=dtype) ei_expected = torch.tensor([0.6910], device=self.device, dtype=dtype) self.assertAllClose(ei(X), ei_expected, atol=1e-4) self.assertAllClose(log_ei(X), ei_expected.log(), atol=1e-4) # making sure we compare the lower branch of _log_ei_helper to _ei_helper z = torch.tensor(-2.13, dtype=dtype, device=self.device) self.assertAllClose(_log_ei_helper(z), _ei_helper(z).log(), atol=1e-6) # numerical stress test for log EI digits = 100 if dtype == torch.float64 else 20 zero = torch.tensor([0], dtype=dtype, device=self.device) ten = torch.tensor(10, dtype=dtype, device=self.device) digits_tensor = torch.arange(0, digits, dtype=dtype, device=self.device) large_z = ten ** (digits_tensor) small_z = ten ** (-digits_tensor) # flipping the appropriate tensors so that elements are in increasing order test_z = [-large_z.flip(-1), -small_z, zero, small_z.flip(-1), large_z] for z in test_z: z.requires_grad = True y = _log_ei_helper(z) # noqa # check that y isn't NaN or Inf self.assertFalse(y.isnan().any()) self.assertFalse(y.isinf().any()) # function values should increase with z self.assertTrue((y.diff() >= 0).all()) # let's check the backward pass y.sum().backward() # check that gradients aren't NaN or Inf g = z.grad self.assertFalse(g.isnan().any()) self.assertFalse(g.isinf().any()) self.assertTrue((g >= 0).all()) # gradient is positive for all z with self.assertRaises(TypeError): _log_ei_helper(z.to(dtype=torch.float16)) def test_expected_improvement_batch(self): for dtype in (torch.float, torch.double): mean = torch.tensor([-0.5, 0.0, 0.5], device=self.device, dtype=dtype).view( 3, 1, 1 ) variance = torch.ones(3, 1, 1, device=self.device, dtype=dtype) mm = MockModel(MockPosterior(mean=mean, variance=variance)) module = ExpectedImprovement(model=mm, best_f=0.0) log_module = LogExpectedImprovement(model=mm, best_f=0.0) X = torch.empty(3, 1, 1, device=self.device, dtype=dtype) # dummy ei, log_ei = module(X), log_module(X) ei_expected = torch.tensor( [0.19780, 0.39894, 0.69780], device=self.device, dtype=dtype ) self.assertAllClose(ei, ei_expected, atol=1e-4) self.assertAllClose(log_ei, ei_expected.log(), atol=1e-4) # check for proper error if multi-output model mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype) variance2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype) mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2)) with self.assertRaises(UnsupportedError): ExpectedImprovement(model=mm2, best_f=0.0) with self.assertRaises(UnsupportedError): LogExpectedImprovement(model=mm2, best_f=0.0) # test posterior transform (single-output) mean = torch.tensor([[[0.5]], [[0.25]]], device=self.device, dtype=dtype) covar = torch.tensor(
[[[[0.16]]], [[[0.125]]]], device=self.device, dtype=dtype ) mvn = MultivariateNormal(mean, covar) p = GPyTorchPosterior(mvn) mm = MockModel(p) weights = torch.tensor([0.5], device=self.device, dtype=dtype) transform = ScalarizedPosteriorTransform(weights) ei = ExpectedImprovement( model=mm, best_f=0.0, posterior_transform=transform ) log_ei = LogExpectedImprovement( model=mm, best_f=0.0, posterior_transform=transform ) X = torch.rand(2, 1, 2, device=self.device, dtype=dtype) ei_expected = torch.tensor( [[0.2601], [0.1500]], device=self.device, dtype=dtype ) self.assertAllClose(ei(X), ei_expected, atol=1e-4) self.assertAllClose(log_ei(X), ei(X).log(), atol=1e-4) # test posterior transform (multi-output) mean = torch.tensor( [[[-0.25, 0.5]], [[0.2, -0.1]]], device=self.device, dtype=dtype ) covar = torch.tensor( [[[0.5, 0.125], [0.125, 0.5]], [[0.25, -0.1], [-0.1, 0.25]]], device=self.device, dtype=dtype, ) mvn = MultitaskMultivariateNormal(mean, covar) p = GPyTorchPosterior(mvn) mm = MockModel(p) weights = torch.tensor([2.0, 1.0], device=self.device, dtype=dtype) transform = ScalarizedPosteriorTransform(weights) ei = ExpectedImprovement( model=mm, best_f=0.0, posterior_transform=transform ) log_ei = LogExpectedImprovement( model=mm, best_f=0.0, posterior_transform=transform ) X = torch.rand(2, 1, 2, device=self.device, dtype=dtype) ei_expected = torch.tensor( [0.6910, 0.5371], device=self.device, dtype=dtype ) self.assertAllClose(ei(X), ei_expected, atol=1e-4) self.assertAllClose(log_ei(X), ei_expected.log(), atol=1e-4) # test bad posterior transform class with self.assertRaises(UnsupportedError): ExpectedImprovement( model=mm, best_f=0.0, posterior_transform=IdentityMCObjective() ) with self.assertRaises(UnsupportedError): LogExpectedImprovement( model=mm, best_f=0.0, posterior_transform=IdentityMCObjective() ) class TestPosteriorMean(BotorchTestCase): def test_posterior_mean(self): for dtype in (torch.float, torch.double): mean = torch.rand(3, 1, device=self.device, dtype=dtype) mm = MockModel(MockPosterior(mean=mean)) module = PosteriorMean(model=mm) X = torch.rand(3, 1, 2, device=self.device, dtype=dtype) pm = module(X) self.assertTrue(torch.equal(pm, mean.view(-1))) module = PosteriorMean(model=mm, maximize=False) X = torch.rand(3, 1, 2, device=self.device, dtype=dtype) pm = module(X) self.assertTrue(torch.equal(pm, -mean.view(-1))) # check for proper error if multi-output model mean2 = torch.rand(1, 2, device=self.device, dtype=dtype) mm2 = MockModel(MockPosterior(mean=mean2)) with self.assertRaises(UnsupportedError): PosteriorMean(model=mm2) def test_posterior_mean_batch(self): for dtype in (torch.float, torch.double): mean = torch.tensor([-0.5, 0.0, 0.5], device=self.device, dtype=dtype).view( 3, 1, 1 ) mm = MockModel(MockPosterior(mean=mean)) module = PosteriorMean(model=mm) X = torch.empty(3, 1, 1, device=self.device, dtype=dtype) pm = module(X) self.assertTrue(torch.equal(pm, mean.view(-1))) # check for proper error if multi-output model mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype) mm2 = MockModel(MockPosterior(mean=mean2)) with self.assertRaises(UnsupportedError): PosteriorMean(model=mm2) class TestProbabilityOfImprovement(BotorchTestCase): def test_probability_of_improvement(self): for dtype in (torch.float, torch.double): mean = torch.zeros(1, 1, device=self.device, dtype=dtype) variance = torch.ones(1, 1, device=self.device, dtype=dtype) mm = MockModel(MockPosterior(mean=mean, variance=variance)) kwargs = {"model": mm, "best_f": 1.96} module = 
ProbabilityOfImprovement(**kwargs) log_module = LogProbabilityOfImprovement(**kwargs) X = torch.zeros(1, 1, device=self.device, dtype=dtype) pi, log_pi = module(X), log_module(X) pi_expected = torch.tensor(0.0250, device=self.device, dtype=dtype) self.assertAllClose(pi, pi_expected, atol=1e-4) self.assertAllClose(log_pi.exp(), pi) kwargs = {"model": mm, "best_f": 1.96, "maximize": False} module = ProbabilityOfImprovement(**kwargs) log_module = LogProbabilityOfImprovement(**kwargs) X = torch.zeros(1, 1, device=self.device, dtype=dtype) pi, log_pi = module(X), log_module(X) pi_expected = torch.tensor(0.9750, device=self.device, dtype=dtype) self.assertAllClose(pi, pi_expected, atol=1e-4) self.assertAllClose(log_pi.exp(), pi) # check for proper error if multi-output model mean2 = torch.rand(1, 2, device=self.device, dtype=dtype) variance2 = torch.ones_like(mean2) mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2)) with self.assertRaises(UnsupportedError): ProbabilityOfImprovement(model=mm2, best_f=0.0) with self.assertRaises(UnsupportedError): LogProbabilityOfImprovement(model=mm2, best_f=0.0) def test_probability_of_improvement_batch(self): for dtype in (torch.float, torch.double): mean = torch.tensor([0.0, 0.67449], device=self.device, dtype=dtype).view( 2, 1, 1 ) variance = torch.ones_like(mean) mm = MockModel(MockPosterior(mean=mean, variance=variance)) module = ProbabilityOfImprovement(model=mm, best_f=0.0) log_module = LogProbabilityOfImprovement(model=mm, best_f=0.0) X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype) pi, log_pi = module(X), log_module(X) pi_expected = torch.tensor([0.5, 0.75], device=self.device, dtype=dtype) self.assertAllClose(pi, pi_expected, atol=1e-4) self.assertAllClose(log_pi.exp(), pi) # check for proper error if multi-output model mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype) variance2 = torch.ones_like(mean2) mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2)) with self.assertRaises(UnsupportedError): ProbabilityOfImprovement(model=mm2, best_f=0.0) with self.assertRaises(UnsupportedError): LogProbabilityOfImprovement(model=mm2, best_f=0.0) class TestqAnalyticProbabilityOfImprovement(BotorchTestCase): def test_q_analytic_probability_of_improvement(self): for dtype in (torch.float, torch.double): mean = torch.zeros(1, device=self.device, dtype=dtype) cov = torch.eye(n=1, device=self.device, dtype=dtype) mvn = MultivariateNormal(mean=mean, covariance_matrix=cov) posterior = GPyTorchPosterior(mvn) mm = MockModel(posterior) # basic test module = qAnalyticProbabilityOfImprovement(model=mm, best_f=1.96) X = torch.rand(1, 2, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor(0.0250, device=self.device, dtype=dtype) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) # basic test, maximize module = qAnalyticProbabilityOfImprovement( model=mm, best_f=1.96, maximize=False ) X = torch.rand(1, 2, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor(0.9750, device=self.device, dtype=dtype) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) # basic test, posterior transform (single-output) mean = torch.ones(1, device=self.device, dtype=dtype) cov = torch.eye(n=1, device=self.device, dtype=dtype) mvn = MultivariateNormal(mean=mean, covariance_matrix=cov) posterior = GPyTorchPosterior(mvn) mm = MockModel(posterior) weights = torch.tensor([0.5], device=self.device, dtype=dtype) transform = ScalarizedPosteriorTransform(weights) module = qAnalyticProbabilityOfImprovement( 
model=mm, best_f=0.0, posterior_transform=transform ) X = torch.rand(1, 2, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor(0.8413, device=self.device, dtype=dtype) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) # basic test, posterior transform (multi-output) mean = torch.ones(1, 2, device=self.device, dtype=dtype) cov = torch.eye(n=2, device=self.device, dtype=dtype).unsqueeze(0) mvn = MultitaskMultivariateNormal(mean=mean, covariance_matrix=cov) posterior = GPyTorchPosterior(mvn) mm = MockModel(posterior) weights = torch.ones(2, device=self.device, dtype=dtype) transform = ScalarizedPosteriorTransform(weights) module = qAnalyticProbabilityOfImprovement( model=mm, best_f=0.0, posterior_transform=transform ) X = torch.rand(1, 1, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor(0.9214, device=self.device, dtype=dtype) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) # basic test, q = 2 mean = torch.zeros(2, device=self.device, dtype=dtype) cov = torch.eye(n=2, device=self.device, dtype=dtype) mvn = MultivariateNormal(mean=mean, covariance_matrix=cov) posterior = GPyTorchPosterior(mvn) mm = MockModel(posterior) module = qAnalyticProbabilityOfImprovement(model=mm, best_f=1.96) X = torch.zeros(2, 2, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor(0.049375, device=self.device, dtype=dtype) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) def test_batch_q_analytic_probability_of_improvement(self): for dtype in (torch.float, torch.double): # test batch mode mean = torch.tensor([[0.0], [1.0]], device=self.device, dtype=dtype) cov = ( torch.eye(n=1, device=self.device, dtype=dtype) .unsqueeze(0) .repeat(2, 1, 1) ) mvn = MultivariateNormal(mean=mean, covariance_matrix=cov) posterior = GPyTorchPosterior(mvn) mm = MockModel(posterior) module = qAnalyticProbabilityOfImprovement(model=mm, best_f=0) X = torch.rand(2, 1, 1, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor([0.5, 0.8413], device=self.device, dtype=dtype) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) # test batched model and best_f values mean = torch.zeros(2, 1, device=self.device, dtype=dtype) cov = ( torch.eye(n=1, device=self.device, dtype=dtype) .unsqueeze(0) .repeat(2, 1, 1) ) mvn = MultivariateNormal(mean=mean, covariance_matrix=cov) posterior = GPyTorchPosterior(mvn) mm = MockModel(posterior) best_f = torch.tensor([0.0, -1.0], device=self.device, dtype=dtype) module = qAnalyticProbabilityOfImprovement(model=mm, best_f=best_f) X = torch.rand(2, 1, 1, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor([[0.5, 0.8413]], device=self.device, dtype=dtype) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) # test batched model, output transform (single output) mean = torch.tensor([[0.0], [1.0]], device=self.device, dtype=dtype) cov = ( torch.eye(n=1, device=self.device, dtype=dtype) .unsqueeze(0) .repeat(2, 1, 1) ) mvn = MultivariateNormal(mean=mean, covariance_matrix=cov) posterior = GPyTorchPosterior(mvn) mm = MockModel(posterior) weights = torch.tensor([0.5], device=self.device, dtype=dtype) transform = ScalarizedPosteriorTransform(weights) module = qAnalyticProbabilityOfImprovement( model=mm, best_f=0.0, posterior_transform=transform ) X = torch.rand(2, 1, 2, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor([0.5, 0.8413], device=self.device, dtype=dtype) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) # test batched 
model, output transform (multiple output) mean = torch.tensor( [[[1.0, 1.0]], [[0.0, 1.0]]], device=self.device, dtype=dtype ) cov = ( torch.eye(n=2, device=self.device, dtype=dtype) .unsqueeze(0) .repeat(2, 1, 1) ) mvn = MultitaskMultivariateNormal(mean=mean, covariance_matrix=cov) posterior = GPyTorchPosterior(mvn) mm = MockModel(posterior) weights = torch.ones(2, device=self.device, dtype=dtype) transform = ScalarizedPosteriorTransform(weights) module = qAnalyticProbabilityOfImprovement( model=mm, best_f=0.0, posterior_transform=transform ) X = torch.rand(2, 1, 2, device=self.device, dtype=dtype) pi = module(X) pi_expected = torch.tensor( [0.9214, 0.7602], device=self.device, dtype=dtype ) self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4)) # test bad posterior transform class with self.assertRaises(UnsupportedError): qAnalyticProbabilityOfImprovement( model=mm, best_f=0.0, posterior_transform=IdentityMCObjective() ) class TestUpperConfidenceBound(BotorchTestCase): def test_upper_confidence_bound(self): for dtype in (torch.float, torch.double): mean = torch.tensor([[0.5]], device=self.device, dtype=dtype) variance = torch.tensor([[1.0]], device=self.device, dtype=dtype) mm = MockModel(MockPosterior(mean=mean, variance=variance)) module = UpperConfidenceBound(model=mm, beta=1.0) X = torch.zeros(1, 1, device=self.device, dtype=dtype) ucb = module(X) ucb_expected = torch.tensor(1.5, device=self.device, dtype=dtype) self.assertAllClose(ucb, ucb_expected, atol=1e-4) module = UpperConfidenceBound(model=mm, beta=1.0, maximize=False) X = torch.zeros(1, 1, device=self.device, dtype=dtype) ucb = module(X) ucb_expected = torch.tensor(0.5, device=self.device, dtype=dtype) self.assertAllClose(ucb, ucb_expected, atol=1e-4) # check for proper error if multi-output model mean2 = torch.rand(1, 2, device=self.device, dtype=dtype) variance2 = torch.rand(1, 2, device=self.device, dtype=dtype) mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2)) with self.assertRaises(UnsupportedError): UpperConfidenceBound(model=mm2, beta=1.0) def test_upper_confidence_bound_batch(self): for dtype in (torch.float, torch.double): mean = torch.tensor([0.0, 0.5], device=self.device, dtype=dtype).view( 2, 1, 1 ) variance = torch.tensor([1.0, 4.0], device=self.device, dtype=dtype).view( 2, 1, 1 ) mm = MockModel(MockPosterior(mean=mean, variance=variance)) module = UpperConfidenceBound(model=mm, beta=1.0) X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype) ucb = module(X) ucb_expected = torch.tensor([1.0, 2.5], device=self.device, dtype=dtype) self.assertAllClose(ucb, ucb_expected, atol=1e-4) # check for proper error if multi-output model mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype) variance2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype) mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2)) with self.assertRaises(UnsupportedError): UpperConfidenceBound(model=mm2, beta=1.0) class TestConstrainedExpectedImprovement(BotorchTestCase): def test_constrained_expected_improvement(self): for dtype in (torch.float, torch.double): # one constraint mean = torch.tensor( [[-0.5, 0.0]], device=self.device, dtype=dtype ).unsqueeze(dim=-2) variance = torch.ones(1, 2, device=self.device, dtype=dtype).unsqueeze( dim=-2 ) mm = MockModel(MockPosterior(mean=mean, variance=variance)) kwargs = { "model": mm, "best_f": 0.0, "objective_index": 0, "constraints": {1: [None, 0]}, } module = ConstrainedExpectedImprovement(**kwargs) log_module = LogConstrainedExpectedImprovement(**kwargs) # test 
initialization for k in [ "con_lower_inds", "con_upper_inds", "con_both_inds", "con_both", "con_lower", "con_upper", ]: self.assertIn(k, module._buffers) self.assertIn(k, log_module._buffers) X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy ei = module(X) ei_expected_unconstrained = torch.tensor( [0.19780], device=self.device, dtype=dtype ) ei_expected = ei_expected_unconstrained * 0.5 self.assertAllClose(ei, ei_expected, atol=1e-4) log_ei = log_module(X) self.assertAllClose(log_ei, ei.log(), atol=1e-5) # testing LogCEI and CEI for lower, upper, and simultaneous bounds for bounds in [[None, 0], [0, None], [0, 1]]: kwargs["constraints"] = {1: bounds} module = ConstrainedExpectedImprovement(**kwargs) log_module = LogConstrainedExpectedImprovement(**kwargs) ei, log_ei = module(X), log_module(X) self.assertAllClose(log_ei, ei.log(), atol=1e-5) constructors = [ ConstrainedExpectedImprovement, LogConstrainedExpectedImprovement, ] for constructor in constructors: # check that error raised if no constraints with self.assertRaises(ValueError): module = constructor( model=mm, best_f=0.0, objective_index=0, constraints={} ) # check that error raised if objective is a constraint with self.assertRaises(ValueError): module = constructor( model=mm, best_f=0.0, objective_index=0, constraints={0: [None, 0]}, ) # check that error raised if constraint lower > upper with self.assertRaises(ValueError): module = constructor( model=mm, best_f=0.0, objective_index=0, constraints={0: [1, 0]} ) # three constraints N = torch.distributions.Normal(loc=0.0, scale=1.0) a = N.icdf(torch.tensor(0.75)) # get a so that P(-a <= N <= a) = 0.5 mean = torch.tensor( [[-0.5, 0.0, 5.0, 0.0]], device=self.device, dtype=dtype ).unsqueeze(dim=-2) variance = torch.ones(1, 4, device=self.device, dtype=dtype).unsqueeze( dim=-2 ) mm = MockModel(MockPosterior(mean=mean, variance=variance)) kwargs = { "model": mm, "best_f": 0.0, "objective_index": 0, "constraints": {1: [None, 0], 2: [5.0, None], 3: [-a, a]}, } module = ConstrainedExpectedImprovement(**kwargs) log_module = LogConstrainedExpectedImprovement(**kwargs) X = torch.empty(1, 1, device=self.device, dtype=dtype) # dummy ei = module(X) ei_expected_unconstrained = torch.tensor( [0.19780], device=self.device, dtype=dtype ) ei_expected = ei_expected_unconstrained * 0.5 * 0.5 * 0.5 self.assertAllClose(ei, ei_expected, atol=1e-4) # testing log module with regular implementation log_ei = log_module(X) self.assertAllClose(log_ei, ei_expected.log(), atol=1e-4) # test maximize kwargs = { "model": mm, "best_f": 0.0, "objective_index": 0, "constraints": {1: [None, 0]}, "maximize": False, } module_min = ConstrainedExpectedImprovement(**kwargs) log_module_min = LogConstrainedExpectedImprovement(**kwargs) ei_min = module_min(X) ei_expected_unconstrained_min = torch.tensor( [0.6978], device=self.device, dtype=dtype ) ei_expected_min = ei_expected_unconstrained_min * 0.5 self.assertAllClose(ei_min, ei_expected_min, atol=1e-4) log_ei_min = log_module_min(X) self.assertAllClose(log_ei_min, ei_min.log(), atol=1e-4) # test invalid constraints for constructor in constructors: with self.assertRaises(ValueError): constructor( model=mm, best_f=0.0, objective_index=0, constraints={1: [1.0, -1.0]}, ) # numerical stress test for _compute_log_prob_feas, which gets added to # log_ei in the forward pass, a quantity we already tested above # the limits here are determined by the largest power of ten x, such that # x - (b - a) < x # evaluates to true.
In this test, the bounds are a, b = -digits, digits. digits = 10 if dtype == torch.float64 else 5 zero = torch.tensor([0], dtype=dtype, device=self.device) ten = torch.tensor(10, dtype=dtype, device=self.device) digits_tensor = 1 + torch.arange( -digits, digits, dtype=dtype, device=self.device ) X_positive = ten ** (digits_tensor) # flipping -X_positive so that elements are in increasing order means = torch.cat((-X_positive.flip(-1), zero, X_positive)).unsqueeze(-1) means.requires_grad = True log_module = LogConstrainedExpectedImprovement( model=mm, best_f=0.0, objective_index=1, constraints={0: [-5, 5]}, ) log_prob = _compute_log_prob_feas( log_module, means=means, sigmas=torch.ones_like(means) ) log_prob.sum().backward() self.assertFalse(log_prob.isnan().any()) self.assertFalse(log_prob.isinf().any()) self.assertFalse(means.grad.isnan().any()) self.assertFalse(means.grad.isinf().any()) # probability of feasibility increases until X = 0, decreases from there on prob_diff = log_prob.diff() k = len(X_positive) eps = 1e-6 if dtype == torch.float32 else 1e-15 self.assertTrue((prob_diff[:k] > -eps).all()) self.assertTrue((means.grad[:k] > -eps).all()) # probability has stationary point at zero mean_grad_at_zero = means.grad[len(X_positive)] self.assertTrue( torch.allclose(mean_grad_at_zero, torch.zeros_like(mean_grad_at_zero)) ) # probability decreases after zero self.assertTrue((prob_diff[-k:] < eps).all()) self.assertTrue((means.grad[-k:] < eps).all()) def test_constrained_expected_improvement_batch(self): for dtype in (torch.float, torch.double): mean = torch.tensor( [[-0.5, 0.0, 5.0, 0.0], [0.0, 0.0, 5.0, 0.0], [0.5, 0.0, 5.0, 0.0]], device=self.device, dtype=dtype, ).unsqueeze(dim=-2) variance = torch.ones(3, 4, device=self.device, dtype=dtype).unsqueeze( dim=-2 ) N = torch.distributions.Normal(loc=0.0, scale=1.0) a = N.icdf(torch.tensor(0.75)) # get a so that P(-a <= N <= a) = 0.5 mm = MockModel(MockPosterior(mean=mean, variance=variance)) kwargs = { "model": mm, "best_f": 0.0, "objective_index": 0, "constraints": {1: [None, 0], 2: [5.0, None], 3: [-a, a]}, } module = ConstrainedExpectedImprovement(**kwargs) log_module = LogConstrainedExpectedImprovement(**kwargs) X = torch.empty(3, 1, 1, device=self.device, dtype=dtype) # dummy ei, log_ei = module(X), log_module(X) self.assertTrue(ei.shape == torch.Size([3])) self.assertTrue(log_ei.shape == torch.Size([3])) ei_expected_unconstrained = torch.tensor( [0.19780, 0.39894, 0.69780], device=self.device, dtype=dtype ) ei_expected = ei_expected_unconstrained * 0.5 * 0.5 * 0.5 self.assertAllClose(ei, ei_expected, atol=1e-4) self.assertAllClose(log_ei, ei.log(), atol=1e-4) class TestNoisyExpectedImprovement(BotorchTestCase): def _get_model(self, dtype=torch.float): state_dict = { "mean_module.raw_constant": torch.tensor([-0.0066]), "covar_module.raw_outputscale": torch.tensor(1.0143), "covar_module.base_kernel.raw_lengthscale": torch.tensor([[-0.99]]), "covar_module.base_kernel.lengthscale_prior.concentration": torch.tensor( 3.0 ), "covar_module.base_kernel.lengthscale_prior.rate": torch.tensor(6.0), "covar_module.outputscale_prior.concentration": torch.tensor(2.0), "covar_module.outputscale_prior.rate": torch.tensor(0.1500), } train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).unsqueeze( -1 ) train_y = torch.sin(train_x * (2 * math.pi)) noise = torch.tensor(NEI_NOISE, device=self.device, dtype=dtype) train_y += noise train_yvar = torch.full_like(train_y, 0.25**2) model = FixedNoiseGP(train_X=train_x, train_Y=train_y,
train_Yvar=train_yvar) model.load_state_dict(state_dict) model.to(train_x) model.eval() return model def test_noisy_expected_improvement(self): for dtype in (torch.float, torch.double): model = self._get_model(dtype=dtype) X_observed = model.train_inputs[0] nfan = 5 nEI = NoisyExpectedImprovement(model, X_observed, num_fantasies=nfan) LogNEI = LogNoisyExpectedImprovement(model, X_observed, num_fantasies=nfan) # before assigning, check that the attributes exist self.assertTrue(hasattr(LogNEI, "model")) self.assertTrue(hasattr(LogNEI, "best_f")) self.assertTrue(isinstance(LogNEI.model, FixedNoiseGP)) LogNEI.model = nEI.model # let the two share their values and fantasies LogNEI.best_f = nEI.best_f X_test = torch.tensor( [[[0.25]], [[0.75]]], device=X_observed.device, dtype=dtype, ) X_test_log = X_test.clone() X_test.requires_grad = True X_test_log.requires_grad = True val = nEI(X_test) # testing logNEI yields the same result (also checks dtype) log_val = LogNEI(X_test_log) exp_log_val = log_val.exp() # notably, val[1] is usually zero in this test, which is precisely what # gives rise to problems during optimization, and what logNEI avoids # since it generally takes a large negative number (<-2000) and has # strong gradient signals in this regime. rtol = 1e-12 if dtype == torch.double else 1e-6 atol = rtol self.assertAllClose(exp_log_val, val, atol=atol, rtol=rtol) # test basics self.assertEqual(val.dtype, dtype) self.assertEqual(val.device.type, X_observed.device.type) self.assertEqual(val.shape, torch.Size([2])) # test values self.assertGreater(val[0].item(), 8e-5) self.assertLess(val[1].item(), 1e-6) # test gradient val.sum().backward() self.assertGreater(X_test.grad[0].abs().item(), 8e-6) # testing gradient through exp of log computation exp_log_val.sum().backward() # testing that first gradient element coincides. The second is in the # regime where the naive implementation loses accuracy. atol = 2e-5 if dtype == torch.float32 else 1e-12 rtol = atol self.assertTrue( torch.allclose(X_test.grad[0], X_test_log.grad[0], atol=atol, rtol=rtol) ) # test non-FixedNoiseGP model other_model = SingleTaskGP(X_observed, model.train_targets.unsqueeze(-1)) for constructor in ( NoisyExpectedImprovement, LogNoisyExpectedImprovement, ): with self.assertRaises(UnsupportedError): constructor(other_model, X_observed, num_fantasies=5) # Test constructor with minimize acqf = constructor(model, X_observed, num_fantasies=5, maximize=False) # test evaluation without gradients enabled with torch.no_grad(): acqf(X_test) # testing gradients are only propagated if X_observed requires them # i.e.
kernel hyper-parameters are not tracked through to best_f X_observed.requires_grad = False acqf = constructor(model, X_observed, num_fantasies=5) self.assertFalse(acqf.best_f.requires_grad) X_observed.requires_grad = True acqf = constructor(model, X_observed, num_fantasies=5) self.assertTrue(acqf.best_f.requires_grad) class TestScalarizedPosteriorMean(BotorchTestCase): def test_scalarized_posterior_mean(self): for dtype in (torch.float, torch.double): mean = torch.tensor([[0.25], [0.5]], device=self.device, dtype=dtype) mm = MockModel(MockPosterior(mean=mean)) weights = torch.tensor([0.5, 1.0], device=self.device, dtype=dtype) module = ScalarizedPosteriorMean(model=mm, weights=weights) X = torch.empty(1, 1, device=self.device, dtype=dtype) pm = module(X) self.assertTrue( torch.allclose(pm, (mean.squeeze(-1) * module.weights).sum(dim=-1)) ) def test_scalarized_posterior_mean_batch(self): for dtype in (torch.float, torch.double): mean = torch.tensor( [[-0.5, 1.0], [0.0, 1.0], [0.5, 1.0]], device=self.device, dtype=dtype ).view(3, 2, 1) mm = MockModel(MockPosterior(mean=mean)) weights = torch.tensor([0.5, 1.0], device=self.device, dtype=dtype) module = ScalarizedPosteriorMean(model=mm, weights=weights) X = torch.empty(3, 1, 1, device=self.device, dtype=dtype) pm = module(X) self.assertTrue( torch.allclose(pm, (mean.squeeze(-1) * module.weights).sum(dim=-1)) )
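# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test suite: evaluating the analytic
# `ExpectedImprovement` tested above against its numerically stabler
# `LogExpectedImprovement` counterpart on a small GP. Data are hypothetical;
# analytic acquisition functions expect candidates of shape
# `batch_shape x 1 x d`.
import torch
from botorch.acquisition.analytic import ExpectedImprovement, LogExpectedImprovement
from botorch.models import SingleTaskGP

train_X = torch.rand(6, 2, dtype=torch.double)  # hypothetical training data
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

ei = ExpectedImprovement(model=model, best_f=train_Y.max())
log_ei = LogExpectedImprovement(model=model, best_f=train_Y.max())

X = torch.rand(4, 1, 2, dtype=torch.double)  # 4 candidate points, q = 1
# Wherever EI is not vanishingly small, exp(logEI) agrees with EI.
assert torch.allclose(log_ei(X).exp(), ei(X), atol=1e-6)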
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import warnings from typing import Optional import torch from botorch import settings from botorch.acquisition import LearnedObjective from botorch.acquisition.objective import ( ConstrainedMCObjective, ExpectationPosteriorTransform, GenericMCObjective, IdentityMCObjective, LEARNED_OBJECTIVE_PREF_MODEL_MIXED_DTYPE_WARN, LinearMCObjective, MCAcquisitionObjective, PosteriorTransform, ScalarizedPosteriorTransform, ) from botorch.exceptions.errors import UnsupportedError from botorch.exceptions.warnings import _get_single_precision_warning, InputDataWarning from botorch.models.deterministic import PosteriorMeanModel from botorch.models.pairwise_gp import PairwiseGP from botorch.models.transforms.input import Normalize from botorch.posteriors import GPyTorchPosterior from botorch.sampling.normal import SobolQMCNormalSampler from botorch.utils import apply_constraints from botorch.utils.testing import _get_test_posterior, BotorchTestCase from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal from linear_operator.operators.dense_linear_operator import to_linear_operator from torch import Tensor def generic_obj_deprecated(samples: Tensor) -> Tensor: return torch.log(torch.sum(samples**2, dim=-1)) def generic_obj(samples: Tensor, X=None) -> Tensor: return generic_obj_deprecated(samples) def infeasible_con(samples: Tensor) -> Tensor: return torch.ones(samples.shape[0:-1], device=samples.device, dtype=samples.dtype) def feasible_con(samples: Tensor) -> Tensor: return -( torch.ones(samples.shape[0:-1], device=samples.device, dtype=samples.dtype) ) class TestPosteriorTransform(BotorchTestCase): def test_abstract_raises(self): with self.assertRaises(TypeError): PosteriorTransform() class TestScalarizedPosteriorTransform(BotorchTestCase): def test_scalarized_posterior_transform(self): for batch_shape, m, dtype in itertools.product( ([], [3]), (1, 2), (torch.float, torch.double) ): offset = torch.rand(1).item() weights = torch.randn(m, device=self.device, dtype=dtype) obj = ScalarizedPosteriorTransform(weights=weights, offset=offset) posterior = _get_test_posterior( batch_shape, m=m, device=self.device, dtype=dtype ) mean, covar = ( posterior.distribution.mean, posterior.distribution.covariance_matrix, ) new_posterior = obj(posterior) exp_size = torch.Size(batch_shape + [1, 1]) self.assertEqual(new_posterior.mean.shape, exp_size) new_mean_exp = offset + mean @ weights self.assertAllClose(new_posterior.mean[..., -1], new_mean_exp) self.assertEqual(new_posterior.variance.shape, exp_size) new_covar_exp = ((covar @ weights) @ weights).unsqueeze(-1) self.assertTrue( torch.allclose(new_posterior.variance[..., -1], new_covar_exp) ) # test error with self.assertRaises(ValueError): ScalarizedPosteriorTransform(weights=torch.rand(2, m)) # test evaluate Y = torch.rand(2, m, device=self.device, dtype=dtype) val = obj.evaluate(Y) val_expected = offset + Y @ weights self.assertTrue(torch.equal(val, val_expected)) class TestExpectationPosteriorTransform(BotorchTestCase): def test_init(self): # Without weights. tf = ExpectationPosteriorTransform(n_w=5) self.assertEqual(tf.n_w, 5) self.assertAllClose(tf.weights, torch.ones(5, 1) * 0.2) # Errors with weights. 
with self.assertRaisesRegex(ValueError, "a tensor of size"): ExpectationPosteriorTransform(n_w=3, weights=torch.ones(5, 1)) with self.assertRaisesRegex(ValueError, "non-negative"): ExpectationPosteriorTransform(n_w=3, weights=-torch.ones(3, 1)) # Successful init with weights. weights = torch.tensor([[1.0, 2.0], [2.0, 4.0], [3.0, 6.0]]) tf = ExpectationPosteriorTransform(n_w=3, weights=weights) self.assertAllClose(tf.weights, weights / torch.tensor([6.0, 12.0])) def test_evaluate(self): for dtype in (torch.float, torch.double): tkwargs = {"dtype": dtype, "device": self.device} # Without weights. tf = ExpectationPosteriorTransform(n_w=3) Y = torch.rand(3, 6, 2, **tkwargs) self.assertTrue( torch.allclose(tf.evaluate(Y), Y.view(3, 2, 3, 2).mean(dim=-2)) ) # With weights - weights intentionally doesn't use tkwargs. weights = torch.tensor([[1.0, 2.0], [2.0, 1.0]]) tf = ExpectationPosteriorTransform(n_w=2, weights=weights) expected = (Y.view(3, 3, 2, 2) * weights.to(Y)).sum(dim=-2) / 3.0 self.assertAllClose(tf.evaluate(Y), expected) def test_expectation_posterior_transform(self): tkwargs = {"dtype": torch.float, "device": self.device} # Without weights, simple expectation, single output, no batch. # q = 2, n_w = 3. org_loc = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], **tkwargs) org_covar = torch.tensor( [ [1.0, 0.8, 0.7, 0.3, 0.2, 0.1], [0.8, 1.0, 0.9, 0.25, 0.15, 0.1], [0.7, 0.9, 1.0, 0.2, 0.2, 0.05], [0.3, 0.25, 0.2, 1.0, 0.7, 0.6], [0.2, 0.15, 0.2, 0.7, 1.0, 0.7], [0.1, 0.1, 0.05, 0.6, 0.7, 1.0], ], **tkwargs, ) org_mvn = MultivariateNormal(org_loc, to_linear_operator(org_covar)) org_post = GPyTorchPosterior(distribution=org_mvn) tf = ExpectationPosteriorTransform(n_w=3) tf_post = tf(org_post) self.assertIsInstance(tf_post, GPyTorchPosterior) self.assertEqual(tf_post.sample().shape, torch.Size([1, 2, 1])) tf_mvn = tf_post.distribution self.assertIsInstance(tf_mvn, MultivariateNormal) expected_loc = torch.tensor([2.0, 5.0], **tkwargs) # This is the average of each 3 x 3 block. expected_covar = torch.tensor([[0.8667, 0.1722], [0.1722, 0.7778]], **tkwargs) self.assertAllClose(tf_mvn.loc, expected_loc) self.assertAllClose(tf_mvn.covariance_matrix, expected_covar, atol=1e-3) # With weights, 2 outputs, batched. tkwargs = {"dtype": torch.double, "device": self.device} # q = 2, n_w = 2, m = 2, leading to 8 values for loc and 8x8 cov. org_loc = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], **tkwargs) # We have 2 4x4 matrices with 0s as filler. Each block is for one outcome. # Each 2x2 sub block corresponds to `n_w`. org_covar = torch.tensor( [ [1.0, 0.8, 0.3, 0.2, 0.0, 0.0, 0.0, 0.0], [0.8, 1.4, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0], [0.3, 0.2, 1.2, 0.5, 0.0, 0.0, 0.0, 0.0], [0.2, 0.1, 0.5, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.7, 0.4, 0.3], [0.0, 0.0, 0.0, 0.0, 0.7, 0.8, 0.3, 0.2], [0.0, 0.0, 0.0, 0.0, 0.4, 0.3, 1.4, 0.5], [0.0, 0.0, 0.0, 0.0, 0.3, 0.2, 0.5, 1.2], ], **tkwargs, ) # Making it batched by adding two more batches, mostly the same. org_loc = org_loc.repeat(3, 1) org_loc[1] += 100 org_loc[2] += 1000 org_covar = org_covar.repeat(3, 1, 1) # Construct the transform with weights. weights = torch.tensor([[1.0, 3.0], [2.0, 1.0]]) tf = ExpectationPosteriorTransform(n_w=2, weights=weights) # Construct the posterior. org_mvn = MultitaskMultivariateNormal( # The return of mvn.loc and the required input are different. # We constructed it according to the output of mvn.loc, # reshaping here to have the required `b x n x t` shape. 
org_loc.view(3, 2, 4).transpose(-2, -1), to_linear_operator(org_covar), interleaved=True, # To test the error. ) org_post = GPyTorchPosterior(distribution=org_mvn) # Error if interleaved. with self.assertRaisesRegex(UnsupportedError, "interleaved"): tf(org_post) # Construct the non-interleaved posterior. org_mvn = MultitaskMultivariateNormal( org_loc.view(3, 2, 4).transpose(-2, -1), to_linear_operator(org_covar), interleaved=False, ) org_post = GPyTorchPosterior(distribution=org_mvn) self.assertTrue(torch.equal(org_mvn.loc, org_loc)) tf_post = tf(org_post) self.assertIsInstance(tf_post, GPyTorchPosterior) self.assertEqual(tf_post.sample().shape, torch.Size([1, 3, 2, 2])) tf_mvn = tf_post.distribution self.assertIsInstance(tf_mvn, MultitaskMultivariateNormal) expected_loc = torch.tensor([[1.6667, 3.6667, 5.25, 7.25]], **tkwargs).repeat( 3, 1 ) expected_loc[1] += 100 expected_loc[2] += 1000 # This is the weighted average of each 2 x 2 block. expected_covar = torch.tensor( [ [1.0889, 0.1667, 0.0, 0.0], [0.1667, 0.8, 0.0, 0.0], [0.0, 0.0, 0.875, 0.35], [0.0, 0.0, 0.35, 1.05], ], **tkwargs, ).repeat(3, 1, 1) self.assertAllClose(tf_mvn.loc, expected_loc, atol=1e-3) self.assertAllClose(tf_mvn.covariance_matrix, expected_covar, atol=1e-3) class TestMCAcquisitionObjective(BotorchTestCase): def test_abstract_raises(self): with self.assertRaises(TypeError): MCAcquisitionObjective() def test_verify_output_shape(self): obj = IdentityMCObjective() self.assertTrue(obj._verify_output_shape) samples = torch.zeros(2, 3, 1) X = torch.ones(2, 1) # No error if X is not given. obj(samples=samples) # Error if X is given, 2 != 3 with self.assertRaises(RuntimeError): obj(samples=samples, X=X) # No error if _verify_output_shape=False obj._verify_output_shape = False obj(samples=samples, X=X) class TestGenericMCObjective(BotorchTestCase): def test_generic_mc_objective(self): for dtype in (torch.float, torch.double): obj = GenericMCObjective(generic_obj) samples = torch.randn(1, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), generic_obj(samples))) samples = torch.randn(2, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), generic_obj(samples))) samples = torch.randn(3, 1, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), generic_obj(samples))) samples = torch.randn(3, 2, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), generic_obj(samples))) def test_generic_mc_objective_deprecated(self): for dtype in (torch.float, torch.double): with warnings.catch_warnings(record=True) as ws, settings.debug(True): obj = GenericMCObjective(generic_obj_deprecated) warning_msg = ( "The `objective` callable of `GenericMCObjective` is expected to " "take two arguments. Passing a callable that expects a single " "argument will result in an error in future versions." 
) self.assertTrue( any(issubclass(w.category, DeprecationWarning) for w in ws) ) self.assertTrue(any(warning_msg in str(w.message) for w in ws)) samples = torch.randn(1, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), generic_obj(samples))) samples = torch.randn(2, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), generic_obj(samples))) samples = torch.randn(3, 1, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), generic_obj(samples))) samples = torch.randn(3, 2, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), generic_obj(samples))) class TestConstrainedMCObjective(BotorchTestCase): def test_constrained_mc_objective(self): for dtype in (torch.float, torch.double): # one feasible constraint obj = ConstrainedMCObjective( objective=generic_obj, constraints=[feasible_con] ) samples = torch.randn(1, device=self.device, dtype=dtype) constrained_obj = apply_constraints( obj=generic_obj(samples), constraints=[feasible_con], samples=samples, infeasible_cost=0.0, ) self.assertTrue(torch.equal(obj(samples), constrained_obj)) # one infeasible constraint obj = ConstrainedMCObjective( objective=generic_obj, constraints=[infeasible_con] ) samples = torch.randn(2, device=self.device, dtype=dtype) constrained_obj = apply_constraints( obj=generic_obj(samples), constraints=[infeasible_con], samples=samples, infeasible_cost=0.0, ) self.assertTrue(torch.equal(obj(samples), constrained_obj)) # one feasible, one infeasible obj = ConstrainedMCObjective( objective=generic_obj, constraints=[feasible_con, infeasible_con] ) samples = torch.randn(2, 1, device=self.device, dtype=dtype) constrained_obj = apply_constraints( obj=generic_obj(samples), constraints=[feasible_con, infeasible_con], samples=samples, infeasible_cost=torch.tensor([0.0], device=self.device, dtype=dtype), ) # one feasible, one infeasible different etas obj = ConstrainedMCObjective( objective=generic_obj, constraints=[feasible_con, infeasible_con], eta=torch.tensor([1, 10]), ) samples = torch.randn(2, 1, device=self.device, dtype=dtype) constrained_obj = apply_constraints( obj=generic_obj(samples), constraints=[feasible_con, infeasible_con], samples=samples, eta=torch.tensor([1, 10]), infeasible_cost=torch.tensor([0.0], device=self.device, dtype=dtype), ) self.assertTrue(torch.equal(obj(samples), constrained_obj)) # one feasible, one infeasible, infeasible_cost obj = ConstrainedMCObjective( objective=generic_obj, constraints=[feasible_con, infeasible_con], infeasible_cost=5.0, ) samples = torch.randn(3, 2, device=self.device, dtype=dtype) constrained_obj = apply_constraints( obj=generic_obj(samples), constraints=[feasible_con, infeasible_con], samples=samples, infeasible_cost=5.0, ) self.assertTrue(torch.equal(obj(samples), constrained_obj)) # one feasible, one infeasible, infeasible_cost, different eta obj = ConstrainedMCObjective( objective=generic_obj, constraints=[feasible_con, infeasible_con], infeasible_cost=5.0, eta=torch.tensor([1, 10]), ) samples = torch.randn(3, 2, device=self.device, dtype=dtype) constrained_obj = apply_constraints( obj=generic_obj(samples), constraints=[feasible_con, infeasible_con], samples=samples, infeasible_cost=5.0, eta=torch.tensor([1, 10]), ) self.assertTrue(torch.equal(obj(samples), constrained_obj)) # one feasible, one infeasible, infeasible_cost, higher dimension obj = ConstrainedMCObjective( objective=generic_obj, constraints=[feasible_con, infeasible_con], infeasible_cost=torch.tensor([5.0], 
device=self.device, dtype=dtype), ) samples = torch.randn(4, 3, 2, device=self.device, dtype=dtype) constrained_obj = apply_constraints( obj=generic_obj(samples), constraints=[feasible_con, infeasible_con], samples=samples, infeasible_cost=5.0, ) self.assertTrue(torch.equal(obj(samples), constrained_obj)) class TestIdentityMCObjective(BotorchTestCase): def test_identity_mc_objective(self): for dtype in (torch.float, torch.double): obj = IdentityMCObjective() # single-element tensor samples = torch.randn(1, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), samples[0])) # single-dimensional non-squeezable tensor samples = torch.randn(2, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), samples)) # two-dimensional squeezable tensor samples = torch.randn(3, 1, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), samples.squeeze(-1))) # two-dimensional non-squeezable tensor samples = torch.randn(3, 2, device=self.device, dtype=dtype) self.assertTrue(torch.equal(obj(samples), samples)) class TestLinearMCObjective(BotorchTestCase): def test_linear_mc_objective(self) -> None: # Test passes for each seed torch.manual_seed(torch.randint(high=1000, size=(1,))) for dtype in (torch.float, torch.double): weights = torch.rand(3, device=self.device, dtype=dtype) obj = LinearMCObjective(weights=weights) samples = torch.randn(4, 2, 3, device=self.device, dtype=dtype) atol = 1e-8 if dtype == torch.double else 3e-8 rtol = 1e-5 if dtype == torch.double else 4e-5 self.assertAllClose(obj(samples), samples @ weights, atol=atol, rtol=rtol) samples = torch.randn(5, 4, 2, 3, device=self.device, dtype=dtype) self.assertAllClose( obj(samples), samples @ weights, atol=atol, rtol=rtol, ) # make sure this errors if sample output dimensions are incompatible shape_mismatch_msg = "Output shape of samples not equal to that of weights" with self.assertRaisesRegex(RuntimeError, shape_mismatch_msg): obj(samples=torch.randn(2, device=self.device, dtype=dtype)) with self.assertRaisesRegex(RuntimeError, shape_mismatch_msg): obj(samples=torch.randn(1, device=self.device, dtype=dtype)) # make sure we can't construct objectives with multi-dim. weights weights_1d_msg = "weights must be a one-dimensional tensor." 
with self.assertRaisesRegex(ValueError, expected_regex=weights_1d_msg): LinearMCObjective( weights=torch.rand(2, 3, device=self.device, dtype=dtype) ) with self.assertRaisesRegex(ValueError, expected_regex=weights_1d_msg): LinearMCObjective( weights=torch.tensor(1.0, device=self.device, dtype=dtype) ) class TestLearnedObjective(BotorchTestCase): def setUp(self, suppress_input_warnings: bool = False) -> None: super().setUp(suppress_input_warnings=suppress_input_warnings) self.x_dim = 2 def _get_pref_model( self, dtype: Optional[torch.dtype] = None, input_transform: Optional[Normalize] = None, ) -> PairwiseGP: train_X = torch.rand((2, self.x_dim), dtype=dtype) train_comps = torch.LongTensor([[0, 1]]) pref_model = PairwiseGP(train_X, train_comps, input_transform=input_transform) return pref_model def test_learned_preference_objective(self) -> None: pref_model = self._get_pref_model(dtype=torch.float64) og_sample_shape = 3 batch_size = 2 n = 8 test_X = torch.rand( torch.Size((og_sample_shape, batch_size, n, self.x_dim)), dtype=torch.float64, ) # test default setting where sampler = # IIDNormalSampler(sample_shape=torch.Size([1])) with self.subTest("default sampler"): pref_obj = LearnedObjective(pref_model=pref_model) first_call_output = pref_obj(test_X) self.assertEqual( first_call_output.shape, torch.Size([og_sample_shape, batch_size, n]) ) # test when sampler has num_samples = 16 with self.subTest("SobolQMCNormalSampler"): num_samples = 16 pref_obj = LearnedObjective( pref_model=pref_model, sampler=SobolQMCNormalSampler(sample_shape=torch.Size([num_samples])), ) self.assertEqual( pref_obj(test_X).shape, torch.Size([num_samples * og_sample_shape, batch_size, n]), ) # test posterior mean with self.subTest("PosteriorMeanModel"): mean_pref_model = PosteriorMeanModel(model=pref_model) pref_obj = LearnedObjective(pref_model=mean_pref_model) self.assertEqual( pref_obj(test_X).shape, torch.Size([og_sample_shape, batch_size, n]) ) # cannot use a deterministic model together with a sampler with self.subTest("deterministic model"), self.assertRaises(AssertionError): LearnedObjective( pref_model=mean_pref_model, sampler=SobolQMCNormalSampler(sample_shape=torch.Size([num_samples])), ) def test_dtype_compatibility_with_PairwiseGP(self) -> None: og_sample_shape = 3 batch_size = 2 n = 8 test_X = torch.rand( torch.Size((og_sample_shape, batch_size, n, self.x_dim)), ) for pref_model_dtype, test_x_dtype, expected_output_dtype in [ (torch.float64, torch.float64, torch.float64), (torch.float32, torch.float32, torch.float32), (torch.float64, torch.float32, torch.float64), ]: with self.subTest( "numerical behavior", pref_model_dtype=pref_model_dtype, test_x_dtype=test_x_dtype, expected_output_dtype=expected_output_dtype, ): # Ignore a single-precision warning in PairwiseGP # and mixed-precision warning tested below with warnings.catch_warnings(): warnings.filterwarnings( "ignore", category=InputDataWarning, message=_get_single_precision_warning(str(torch.float32)), ) pref_model = self._get_pref_model( dtype=pref_model_dtype, input_transform=Normalize(d=2), ) pref_obj = LearnedObjective(pref_model=pref_model) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", category=InputDataWarning, message=LEARNED_OBJECTIVE_PREF_MODEL_MIXED_DTYPE_WARN, ) first_call_output = pref_obj(test_X.to(dtype=test_x_dtype)) second_call_output = pref_obj(test_X.to(dtype=test_x_dtype)) self.assertEqual(first_call_output.dtype, expected_output_dtype) self.assertTrue(torch.equal(first_call_output, second_call_output)) with 
self.subTest("mixed precision warning"): # should warn and test should pass with warnings.catch_warnings(): warnings.simplefilter("ignore", category=InputDataWarning) pref_model = self._get_pref_model( dtype=torch.float64, input_transform=Normalize(d=2) ) pref_obj = LearnedObjective(pref_model=pref_model) with self.assertWarnsRegex( InputDataWarning, LEARNED_OBJECTIVE_PREF_MODEL_MIXED_DTYPE_WARN ): first_call_output = pref_obj(test_X)
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.acquisition.penalized import (
    GaussianPenalty,
    group_lasso_regularizer,
    GroupLassoPenalty,
    L0Approximation,
    L0PenaltyApprox,
    L0PenaltyApproxObjective,
    L1Penalty,
    L1PenaltyObjective,
    L2Penalty,
    PenalizedAcquisitionFunction,
    PenalizedMCObjective,
)
from botorch.exceptions import UnsupportedError
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from torch import Tensor


def generic_obj(samples: Tensor, X=None) -> Tensor:
    return torch.log(torch.sum(samples**2, dim=-1))


class TestL2Penalty(BotorchTestCase):
    def test_l2_penalty(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            init_point = torch.tensor([1.0, 1.0, 1.0], **tkwargs)
            l2_module = L2Penalty(init_point=init_point)

            # testing a batch of two points
            sample_point = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], **tkwargs)

            diff_norm_squared = (
                torch.linalg.norm((sample_point - init_point), ord=2, dim=-1) ** 2
            )
            real_value = diff_norm_squared.max(dim=-1).values
            computed_value = l2_module(sample_point)
            self.assertEqual(computed_value.item(), real_value.item())


class TestL1Penalty(BotorchTestCase):
    def test_l1_penalty(self):
        for dtype in (torch.float, torch.double):
            init_point = torch.tensor([1.0, 1.0, 1.0], device=self.device, dtype=dtype)
            l1_module = L1Penalty(init_point=init_point)

            # testing a batch of two points
            sample_point = torch.tensor(
                [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], device=self.device, dtype=dtype
            )

            diff_l1_norm = torch.linalg.norm((sample_point - init_point), ord=1, dim=-1)
            real_value = diff_l1_norm.max(dim=-1).values
            computed_value = l1_module(sample_point)
            self.assertEqual(computed_value.item(), real_value.item())


class TestGaussianPenalty(BotorchTestCase):
    def test_gaussian_penalty(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            init_point = torch.tensor([1.0, 1.0, 1.0], **tkwargs)
            sigma = 0.1
            gaussian_module = GaussianPenalty(init_point=init_point, sigma=sigma)

            # testing a batch of two points
            sample_point = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], **tkwargs)

            diff_norm_squared = (
                torch.linalg.norm((sample_point - init_point), ord=2, dim=-1) ** 2
            )
            max_l2_distance = diff_norm_squared.max(dim=-1).values
            real_value = torch.exp(max_l2_distance / 2 / sigma**2)

            computed_value = gaussian_module(sample_point)
            self.assertEqual(computed_value.item(), real_value.item())


class TestGroupLassoPenalty(BotorchTestCase):
    def test_group_lasso_penalty(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            init_point = torch.tensor([0.5, 0.5, 0.5], **tkwargs)
            groups = [[0, 2], [1]]
            group_lasso_module = GroupLassoPenalty(init_point=init_point, groups=groups)

            # testing a single point
            sample_point = torch.tensor([[1.0, 2.0, 3.0]], **tkwargs)
            real_value = group_lasso_regularizer(
                sample_point - init_point, groups
            )  # torch.tensor([5.105551242828369], **tkwargs)
            computed_value = group_lasso_module(sample_point)
            self.assertEqual(computed_value.item(), real_value.item())

            # testing unsupported input dim: X.shape[-2] > 1
            sample_point_2 = torch.tensor([[1.0, 2.0, 3.0],
[2.0, 3.0, 4.0]], **tkwargs) with self.assertRaises(NotImplementedError): group_lasso_module(sample_point_2) class TestL0Approximation(BotorchTestCase): def test_L0Approximation(self): for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} target_point = torch.zeros(2, **tkwargs) # test init l0 = L0Approximation(target_point=target_point, **tkwargs) self.assertTrue(torch.equal(l0.target_point, target_point)) self.assertAllClose(l0.a.data, torch.tensor(1.0, **tkwargs)) # verify L0 norm self.assertTrue( torch.equal( l0(torch.zeros(2, **tkwargs)).data, torch.tensor([0], **tkwargs) ) ) # check two-dim input tensors X self.assertTrue( torch.equal( l0(torch.zeros(3, 2, **tkwargs)).data, torch.zeros(3, 1, **tkwargs) ) ) # test raise when X and target_point have mismatched shape with self.assertRaises(ValueError): l0(torch.zeros(3, **tkwargs)) # test init with different a l0 = L0Approximation(target_point=target_point, a=2.0, **tkwargs) self.assertAllClose(l0.a.data, torch.tensor(2.0, **tkwargs)) self.assertAllClose( l0(torch.ones(2, **tkwargs)).data, torch.tensor([0.2350], **tkwargs), rtol=1e-04, ) # reset a l0.a.data.fill_(0.5) self.assertTrue(torch.equal(l0.a.data, torch.tensor(0.5, **tkwargs))) self.assertAllClose( l0(torch.ones(2, **tkwargs)).data, torch.tensor([1.7293], **tkwargs), rtol=1e-04, ) def test_L0PenaltyApproxObjective(self): for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} target_point = torch.zeros(2, **tkwargs) # test init l0_obj = L0PenaltyApproxObjective(target_point=target_point, **tkwargs) self.assertTrue(torch.equal(l0_obj.target_point, target_point)) self.assertAllClose(l0_obj.a.data, torch.tensor(1.0, **tkwargs)) # check two-dim input tensors X self.assertTrue( torch.equal( l0_obj(torch.zeros(3, 2, **tkwargs)).data, torch.zeros(1, 3, **tkwargs), ) ) # check "batch_shape x q x dim" input tensors X batch_shape = 16 self.assertTrue( torch.equal( l0_obj(torch.zeros(batch_shape, 3, 2, **tkwargs)).data, torch.zeros(1, batch_shape, 3, **tkwargs), ) ) def test_L0PenaltyApprox(self): for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} target_point = torch.zeros(2, **tkwargs) # test init l0_acqf = L0PenaltyApprox(target_point=target_point, **tkwargs) self.assertTrue(torch.equal(l0_acqf.target_point, target_point)) self.assertAllClose(l0_acqf.a.data, torch.tensor(1.0, **tkwargs)) # check two-dim input tensors X self.assertTrue( torch.equal( l0_acqf(torch.zeros(3, 2, **tkwargs)).data, torch.tensor(0, **tkwargs), ) ) # check "batch_shape x q x dim" input tensors X batch_shape = 16 self.assertTrue( torch.equal( l0_acqf(torch.zeros(batch_shape, 3, 2, **tkwargs)).data, torch.zeros(batch_shape, **tkwargs), ) ) class TestPenalizedAcquisitionFunction(BotorchTestCase): def test_penalized_acquisition_function(self): for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} mock_model = MockModel( MockPosterior( mean=torch.tensor([[1.0]], **tkwargs), variance=torch.tensor([[1.0]], **tkwargs), ) ) init_point = torch.tensor([0.5, 0.5, 0.5], **tkwargs) groups = [[0, 2], [1]] raw_acqf = ExpectedImprovement(model=mock_model, best_f=1.0) penalty = GroupLassoPenalty(init_point=init_point, groups=groups) lmbda = 0.1 acqf = PenalizedAcquisitionFunction( raw_acqf=raw_acqf, penalty_func=penalty, regularization_parameter=lmbda ) sample_point = torch.tensor([[1.0, 2.0, 3.0]], **tkwargs) raw_value = raw_acqf(sample_point) penalty_value = 
penalty(sample_point) real_value = raw_value - lmbda * penalty_value computed_value = acqf(sample_point) self.assertTrue(torch.equal(real_value, computed_value)) # testing X_pending for analytic raw_acqfn (EI) X_pending = torch.tensor([0.1, 0.2, 0.3], **tkwargs) with self.assertRaises(UnsupportedError): acqf.set_X_pending(X_pending) # testing X_pending for non-analytic raw_acqfn (EI) sampler = IIDNormalSampler(sample_shape=torch.Size([2])) raw_acqf_2 = qExpectedImprovement( model=mock_model, best_f=0, sampler=sampler ) init_point = torch.tensor([1.0, 1.0, 1.0], **tkwargs) l2_module = L2Penalty(init_point=init_point) acqf_2 = PenalizedAcquisitionFunction( raw_acqf=raw_acqf_2, penalty_func=l2_module, regularization_parameter=lmbda, ) X_pending = torch.tensor([0.1, 0.2, 0.3], **tkwargs) acqf_2.set_X_pending(X_pending) self.assertTrue(torch.equal(acqf_2.X_pending, X_pending)) class TestL1PenaltyObjective(BotorchTestCase): def test_l1_penalty(self): for dtype in (torch.float, torch.double): init_point = torch.tensor([1.0, 1.0, 1.0], device=self.device, dtype=dtype) l1_module = L1PenaltyObjective(init_point=init_point) # testing a batch of two points sample_point = torch.tensor( [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], device=self.device, dtype=dtype ) real_values = torch.linalg.norm( (sample_point - init_point), ord=1, dim=-1 ).unsqueeze(dim=0) computed_values = l1_module(sample_point) self.assertTrue(torch.equal(real_values, computed_values)) class TestPenalizedMCObjective(BotorchTestCase): def test_penalized_mc_objective(self): for dtype in (torch.float, torch.double): init_point = torch.tensor( [0.0, 0.0, 0.0, 0.0, 0.0], device=self.device, dtype=dtype ) l1_penalty_obj = L1PenaltyObjective(init_point=init_point) obj = PenalizedMCObjective( objective=generic_obj, penalty_objective=l1_penalty_obj, regularization_parameter=0.1, ) # test self.expand_dim self.assertIsNone(obj.expand_dim) # test 'd' Tensor X samples = torch.randn(4, 3, device=self.device, dtype=dtype) X = torch.randn(4, 5, device=self.device, dtype=dtype) penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X) self.assertTrue(torch.equal(obj(samples, X), penalized_obj)) # test 'q x d' Tensor X samples = torch.randn(4, 2, 3, device=self.device, dtype=dtype) X = torch.randn(2, 5, device=self.device, dtype=dtype) penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X) self.assertTrue(torch.equal(obj(samples, X), penalized_obj)) # test 'batch-shape x q x d' Tensor X samples = torch.randn(4, 3, 2, 3, device=self.device, dtype=dtype) X = torch.randn(3, 2, 5, device=self.device, dtype=dtype) penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X) self.assertTrue(torch.equal(obj(samples, X), penalized_obj)) # test passing expand_dim expand_dim = -2 obj2 = PenalizedMCObjective( objective=generic_obj, penalty_objective=l1_penalty_obj, regularization_parameter=0.1, expand_dim=expand_dim, ) self.assertEqual(obj2.expand_dim, -2) # test 'd' Tensor X mcmc_samples = 8 # MCMC_dim = -3 samples = torch.randn(mcmc_samples, 4, 3, device=self.device, dtype=dtype) X = torch.randn(4, 5, device=self.device, dtype=dtype) penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X).unsqueeze( expand_dim ) self.assertTrue(torch.equal(obj2(samples, X), penalized_obj)) # test 'q x d' Tensor X # MCMC_dim = -3 samples = torch.randn( 4, mcmc_samples, 2, 3, device=self.device, dtype=dtype ) X = torch.randn(2, 5, device=self.device, dtype=dtype) penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X).unsqueeze( expand_dim ) 
self.assertTrue(torch.equal(obj2(samples, X), penalized_obj)) # test 'batch-shape x q x d' Tensor X # MCMC_dim = -3 samples = torch.randn( 4, 3, mcmc_samples, 2, 3, device=self.device, dtype=dtype ) X = torch.randn(3, 2, 5, device=self.device, dtype=dtype) penalized_obj = generic_obj(samples) - 0.1 * l1_penalty_obj(X).unsqueeze( expand_dim ) self.assertTrue(torch.equal(obj2(samples, X), penalized_obj))
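# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration; not part of the original test suite).
# The tests above check the composition
#     penalized(samples, X) = objective(samples, X) - lambda * penalty(X),
# which can be reproduced directly with the public APIs exercised above. The
# dimensions and the regularization parameter are illustrative assumptions.
import torch
from botorch.acquisition.penalized import L1PenaltyObjective, PenalizedMCObjective

init_point = torch.zeros(5)
l1_penalty_obj = L1PenaltyObjective(init_point=init_point)
obj = PenalizedMCObjective(
    objective=lambda Y, X: torch.log(torch.sum(Y**2, dim=-1)),
    penalty_objective=l1_penalty_obj,
    regularization_parameter=0.1,
)
samples = torch.randn(4, 3)  # MC samples, outcome dimension last
X = torch.randn(4, 5)  # matching 5-dim design points
expected = torch.log(torch.sum(samples**2, dim=-1)) - 0.1 * l1_penalty_obj(X)
assert torch.allclose(obj(samples, X), expected)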
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from contextlib import ExitStack
from unittest import mock

import torch
from botorch.acquisition.analytic import PosteriorMean, ScalarizedPosteriorMean
from botorch.acquisition.cost_aware import GenericCostAwareUtility
from botorch.acquisition.knowledge_gradient import (
    _get_value_function,
    _split_fantasy_points,
    ProjectedAcquisitionFunction,
    qKnowledgeGradient,
    qMultiFidelityKnowledgeGradient,
)
from botorch.acquisition.monte_carlo import qExpectedImprovement, qSimpleRegret
from botorch.acquisition.objective import (
    GenericMCObjective,
    ScalarizedPosteriorTransform,
)
from botorch.acquisition.utils import project_to_sample_points
from botorch.exceptions.errors import UnsupportedError
from botorch.generation.gen import gen_candidates_scipy
from botorch.models import SingleTaskGP
from botorch.optim.optimize import optimize_acqf
from botorch.optim.utils import _filter_kwargs
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultitaskMultivariateNormal

from .test_monte_carlo import DummyNonScalarizingPosteriorTransform

NO = "botorch.utils.testing.MockModel.num_outputs"


def mock_util(X, deltas):
    return 0.5 * deltas.sum(dim=0)


class TestQKnowledgeGradient(BotorchTestCase):
    def test_initialize_q_knowledge_gradient(self):
        for dtype in (torch.float, torch.double):
            mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean))
            # test error when specifying neither sampler nor num_fantasies
            with self.assertRaises(ValueError):
                qKnowledgeGradient(model=mm, num_fantasies=None)
            # test error when sampler and num_fantasies arg are inconsistent
            sampler = IIDNormalSampler(sample_shape=torch.Size([16]))
            with self.assertRaises(ValueError):
                qKnowledgeGradient(model=mm, num_fantasies=32, sampler=sampler)
            # test default construction
            qKG = qKnowledgeGradient(model=mm, num_fantasies=32)
            self.assertEqual(qKG.num_fantasies, 32)
            self.assertIsInstance(qKG.sampler, SobolQMCNormalSampler)
            self.assertEqual(qKG.sampler.sample_shape, torch.Size([32]))
            self.assertIsNone(qKG.objective)
            self.assertIsNone(qKG.inner_sampler)
            self.assertIsNone(qKG.X_pending)
            self.assertIsNone(qKG.current_value)
            self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 32 + 3)
            # test custom construction
            obj = GenericMCObjective(lambda Y, X: Y.mean(dim=-1))
            sampler = IIDNormalSampler(sample_shape=torch.Size([16]))
            X_pending = torch.zeros(2, 2, device=self.device, dtype=dtype)
            qKG = qKnowledgeGradient(
                model=mm,
                num_fantasies=16,
                sampler=sampler,
                objective=obj,
                X_pending=X_pending,
            )
            self.assertEqual(qKG.num_fantasies, 16)
            self.assertEqual(qKG.sampler, sampler)
            self.assertEqual(qKG.sampler.sample_shape, torch.Size([16]))
            self.assertEqual(qKG.objective, obj)
            self.assertIsInstance(qKG.inner_sampler, SobolQMCNormalSampler)
            self.assertEqual(qKG.inner_sampler.sample_shape, torch.Size([128]))
            self.assertTrue(torch.equal(qKG.X_pending, X_pending))
            self.assertIsNone(qKG.current_value)
            self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 16 + 3)
            # test assignment of num_fantasies from sampler if not provided
            qKG = qKnowledgeGradient(model=mm, num_fantasies=None, sampler=sampler)
            self.assertEqual(qKG.sampler.sample_shape,
torch.Size([16])) # test custom construction with inner sampler and current value inner_sampler = SobolQMCNormalSampler(sample_shape=torch.Size([256])) current_value = torch.zeros(1, device=self.device, dtype=dtype) qKG = qKnowledgeGradient( model=mm, num_fantasies=8, objective=obj, inner_sampler=inner_sampler, current_value=current_value, ) self.assertEqual(qKG.num_fantasies, 8) self.assertEqual(qKG.sampler.sample_shape, torch.Size([8])) self.assertEqual(qKG.objective, obj) self.assertIsInstance(qKG.inner_sampler, SobolQMCNormalSampler) self.assertEqual(qKG.inner_sampler, inner_sampler) self.assertIsNone(qKG.X_pending) self.assertTrue(torch.equal(qKG.current_value, current_value)) self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 8 + 3) # test construction with posterior_transform qKG_s = qKnowledgeGradient( model=mm, num_fantasies=16, sampler=sampler, posterior_transform=ScalarizedPosteriorTransform(weights=torch.rand(2)), ) self.assertIsNone(qKG_s.inner_sampler) self.assertIsInstance( qKG_s.posterior_transform, ScalarizedPosteriorTransform ) # test error if multi-output model and no objective or posterior transform mean2 = torch.zeros(1, 2, device=self.device, dtype=dtype) mm2 = MockModel(MockPosterior(mean=mean2)) with self.assertRaises(UnsupportedError): qKnowledgeGradient(model=mm2) # test error if multi-output model and no objective and posterior transform # does not scalarize with self.assertRaises(UnsupportedError): qKnowledgeGradient( model=mm2, posterior_transform=DummyNonScalarizingPosteriorTransform(), ) with self.assertRaisesRegex( UnsupportedError, "Objectives that are not an `MCAcquisitionObjective` are not " "supported.", ): qKnowledgeGradient(model=mm, objective="car") def test_evaluate_q_knowledge_gradient(self): # Stop gap measure to avoid test failures on Ampere devices # TODO: Find an elegant way of disallowing tf32 for botorch/gpytorch # without blanket-disallowing it for all of torch. 
torch.backends.cuda.matmul.allow_tf32 = False for dtype in (torch.float, torch.double): # basic test n_f = 4 mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype) variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype) mfm = MockModel(MockPosterior(mean=mean, variance=variance)) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qKG = qKnowledgeGradient(model=mm, num_fantasies=n_f) X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype) val = qKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1])) self.assertAllClose(val, mean.mean(), atol=1e-4) self.assertTrue(torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :])) # batched evaluation b = 2 mean = torch.rand(n_f, b, 1, device=self.device, dtype=dtype) variance = torch.rand(n_f, b, 1, device=self.device, dtype=dtype) mfm = MockModel(MockPosterior(mean=mean, variance=variance)) X = torch.rand(b, n_f + 1, 1, device=self.device, dtype=dtype) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qKG = qKnowledgeGradient(model=mm, num_fantasies=n_f) val = qKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1])) self.assertTrue( torch.allclose(val, mean.mean(dim=0).squeeze(-1), atol=1e-4) ) self.assertTrue(torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :])) # pending points and current value X_pending = torch.rand(2, 1, device=self.device, dtype=dtype) mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype) variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype) mfm = MockModel(MockPosterior(mean=mean, variance=variance)) current_value = torch.rand(1, device=self.device, dtype=dtype) X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qKG = qKnowledgeGradient( model=mm, num_fantasies=n_f, X_pending=X_pending, current_value=current_value, ) val = qKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1])) expected = (mean.mean() - current_value).reshape([]) self.assertAllClose(val, expected, atol=1e-4) self.assertTrue(torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :])) # test objective (inner MC sampling) objective = GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1)) samples = torch.randn(3, 1, 1, device=self.device, dtype=dtype) mfm = MockModel(MockPosterior(samples=samples)) X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qKG = qKnowledgeGradient( model=mm, num_fantasies=n_f, objective=objective ) val = qKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1])) self.assertAllClose(val, objective(samples).mean(), atol=1e-4) self.assertTrue(torch.equal(qKG.extract_candidates(X), 
X[..., :-n_f, :])) # test scalarized posterior transform weights = torch.rand(2, device=self.device, dtype=dtype) post_tf = ScalarizedPosteriorTransform(weights=weights) mean = torch.tensor([1.0, 0.5], device=self.device, dtype=dtype).expand( n_f, 1, 2 ) cov = torch.tensor( [[1.0, 0.1], [0.1, 0.5]], device=self.device, dtype=dtype ).expand(n_f, 2, 2) posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov)) mfm = MockModel(posterior) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 2 mm = MockModel(None) qKG = qKnowledgeGradient( model=mm, num_fantasies=n_f, posterior_transform=post_tf ) val = qKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1])) val_expected = (mean * weights).sum(-1).mean(0)[0] self.assertAllClose(val, val_expected) def test_evaluate_kg(self): # a thorough test using real model and dtype double d = 2 dtype = torch.double bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype).repeat(1, d) train_X = torch.rand(3, d, device=self.device, dtype=dtype) train_Y = torch.rand(3, 1, device=self.device, dtype=dtype) model = SingleTaskGP(train_X, train_Y) qKG = qKnowledgeGradient( model=model, num_fantasies=2, objective=None, X_pending=torch.rand(2, d, device=self.device, dtype=dtype), current_value=torch.rand(1, device=self.device, dtype=dtype), ) X = torch.rand(4, 3, d, device=self.device, dtype=dtype) options = {"num_inner_restarts": 2, "raw_inner_samples": 3} val = qKG.evaluate( X, bounds=bounds, num_restarts=2, raw_samples=3, options=options ) # verify output shape self.assertEqual(val.size(), torch.Size([4])) # verify dtype self.assertEqual(val.dtype, dtype) # test i) no dimension is squeezed out, ii) dtype float, iii) MC objective, # and iv) t_batch_mode_transform dtype = torch.float bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype) train_X = torch.rand(1, 1, device=self.device, dtype=dtype) train_Y = torch.rand(1, 1, device=self.device, dtype=dtype) model = SingleTaskGP(train_X, train_Y) qKG = qKnowledgeGradient( model=model, num_fantasies=1, objective=GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1)), ) X = torch.rand(1, 1, device=self.device, dtype=dtype) options = {"num_inner_restarts": 1, "raw_inner_samples": 1} val = qKG.evaluate( X, bounds=bounds, num_restarts=1, raw_samples=1, options=options ) # verify output shape self.assertEqual(val.size(), torch.Size([1])) # verify dtype self.assertEqual(val.dtype, dtype) class TestQMultiFidelityKnowledgeGradient(BotorchTestCase): def test_initialize_qMFKG(self): for dtype in (torch.float, torch.double): mean = torch.zeros(1, 1, device=self.device, dtype=dtype) mm = MockModel(MockPosterior(mean=mean)) # test error when not specifying current_value with self.assertRaises(UnsupportedError): qMultiFidelityKnowledgeGradient( model=mm, num_fantasies=None, cost_aware_utility=mock.Mock() ) # test default construction mock_cau = mock.Mock() current_value = torch.zeros(1, device=self.device, dtype=dtype) qMFKG = qMultiFidelityKnowledgeGradient( model=mm, num_fantasies=32, current_value=current_value, cost_aware_utility=mock_cau, ) self.assertEqual(qMFKG.num_fantasies, 32) self.assertIsInstance(qMFKG.sampler, SobolQMCNormalSampler) self.assertEqual(qMFKG.sampler.sample_shape, torch.Size([32])) self.assertIsNone(qMFKG.objective) self.assertIsNone(qMFKG.inner_sampler) 
self.assertIsNone(qMFKG.X_pending) self.assertEqual(qMFKG.get_augmented_q_batch_size(q=3), 32 + 3) self.assertEqual(qMFKG.cost_aware_utility, mock_cau) self.assertTrue(torch.equal(qMFKG.current_value, current_value)) self.assertIsNone(qMFKG._cost_sampler) X = torch.rand(2, 3, device=self.device, dtype=dtype) self.assertTrue(torch.equal(qMFKG.project(X), X)) self.assertTrue(torch.equal(qMFKG.expand(X), X)) self.assertIsNone(qMFKG.valfunc_cls) self.assertIsNone(qMFKG.valfunc_argfac) # make sure cost sampling logic works self.assertIsInstance(qMFKG.cost_sampler, SobolQMCNormalSampler) self.assertEqual(qMFKG.cost_sampler.sample_shape, torch.Size([32])) def test_evaluate_qMFKG(self): for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} # basic test n_f = 4 current_value = torch.rand(1, **tkwargs) cau = GenericCostAwareUtility(mock_util) mean = torch.rand(n_f, 1, 1, **tkwargs) variance = torch.rand(n_f, 1, 1, **tkwargs) mfm = MockModel(MockPosterior(mean=mean, variance=variance)) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qMFKG = qMultiFidelityKnowledgeGradient( model=mm, num_fantasies=n_f, current_value=current_value, cost_aware_utility=cau, ) X = torch.rand(n_f + 1, 1, **tkwargs) val = qMFKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1])) val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0) self.assertAllClose(val, val_exp, atol=1e-4) self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :])) # batched evaluation b = 2 current_value = torch.rand(b, **tkwargs) cau = GenericCostAwareUtility(mock_util) mean = torch.rand(n_f, b, 1, **tkwargs) variance = torch.rand(n_f, b, 1, **tkwargs) mfm = MockModel(MockPosterior(mean=mean, variance=variance)) X = torch.rand(b, n_f + 1, 1, **tkwargs) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qMFKG = qMultiFidelityKnowledgeGradient( model=mm, num_fantasies=n_f, current_value=current_value, cost_aware_utility=cau, ) val = qMFKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1])) val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0) self.assertAllClose(val, val_exp, atol=1e-4) self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :])) # pending points and current value mean = torch.rand(n_f, 1, 1, **tkwargs) variance = torch.rand(n_f, 1, 1, **tkwargs) X_pending = torch.rand(2, 1, **tkwargs) mfm = MockModel(MockPosterior(mean=mean, variance=variance)) current_value = torch.rand(1, **tkwargs) X = torch.rand(n_f + 1, 1, **tkwargs) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qMFKG = qMultiFidelityKnowledgeGradient( model=mm, num_fantasies=n_f, X_pending=X_pending, current_value=current_value, cost_aware_utility=cau, ) val = qMFKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1])) val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0) 
self.assertAllClose(val, val_exp, atol=1e-4) self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :])) # test objective (inner MC sampling) objective = GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1)) samples = torch.randn(3, 1, 1, **tkwargs) mfm = MockModel(MockPosterior(samples=samples)) X = torch.rand(n_f + 1, 1, **tkwargs) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qMFKG = qMultiFidelityKnowledgeGradient( model=mm, num_fantasies=n_f, objective=objective, current_value=current_value, cost_aware_utility=cau, ) val = qMFKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1])) val_exp = mock_util(X, objective(samples) - current_value).mean(dim=0) self.assertAllClose(val, val_exp, atol=1e-4) self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :])) # test valfunc_cls and valfunc_argfac d, p, d_prime = 4, 3, 2 samples = torch.ones(3, 1, 1, **tkwargs) mean = torch.tensor([[0.25], [0.5], [0.75]], **tkwargs).expand( n_f, 1, -1, -1 ) weights = torch.tensor([0.5, 1.0, 1.0], **tkwargs) mfm = MockModel(MockPosterior(mean=mean, samples=samples)) X = torch.rand(n_f * d + d, d, **tkwargs) sample_points = torch.rand(p, d_prime, **tkwargs) with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f: with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) qMFKG = qMultiFidelityKnowledgeGradient( model=mm, num_fantasies=n_f, project=lambda X: project_to_sample_points(X, sample_points), valfunc_cls=ScalarizedPosteriorMean, valfunc_argfac=lambda model: {"weights": weights}, ) val = qMFKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4])) val_exp = torch.tensor([1.375], **tkwargs) self.assertAllClose(val, val_exp, atol=1e-4) patch_f.reset_mock() # Make posterior sample shape agree with X mfm._posterior._samples = torch.ones(1, 3, 1, **tkwargs) qMFKG = qMultiFidelityKnowledgeGradient( model=mm, num_fantasies=n_f, project=lambda X: project_to_sample_points(X, sample_points), valfunc_cls=qExpectedImprovement, valfunc_argfac=lambda model: {"best_f": 0.0}, ) val = qMFKG(X) patch_f.assert_called_once() cargs, ckwargs = patch_f.call_args self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4])) val_exp = torch.tensor(1.0, device=self.device, dtype=dtype) self.assertAllClose(val, val_exp, atol=1e-4) def test_fixed_evaluation_qMFKG(self): # mock test qMFKG.evaluate() with expand, project & cost aware utility for dtype in (torch.float, torch.double): mean = torch.zeros(1, 1, 1, device=self.device, dtype=dtype) mm = MockModel(MockPosterior(mean=mean)) cau = GenericCostAwareUtility(mock_util) n_f = 4 mean = torch.rand(n_f, 2, 1, 1, device=self.device, dtype=dtype) variance = torch.rand(n_f, 2, 1, 1, device=self.device, dtype=dtype) mfm = MockModel(MockPosterior(mean=mean, variance=variance)) with ExitStack() as es: patch_f = es.enter_context( mock.patch.object(MockModel, "fantasize", return_value=mfm) ) mock_num_outputs = es.enter_context( mock.patch(NO, new_callable=mock.PropertyMock) ) es.enter_context( mock.patch( "botorch.optim.optimize.optimize_acqf", return_value=( torch.ones(1, 1, 1, device=self.device, dtype=dtype), torch.ones(1, device=self.device, dtype=dtype), ), ), ) 
                es.enter_context(
                    mock.patch(
                        "botorch.generation.gen.gen_candidates_scipy",
                        return_value=(
                            torch.ones(1, 1, 1, device=self.device, dtype=dtype),
                            torch.ones(1, device=self.device, dtype=dtype),
                        ),
                    ),
                )
                mock_num_outputs.return_value = 1
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    X_pending=torch.rand(1, 1, 1, device=self.device, dtype=dtype),
                    current_value=torch.zeros(1, device=self.device, dtype=dtype),
                    cost_aware_utility=cau,
                    project=lambda X: torch.zeros_like(X),
                    expand=lambda X: torch.ones_like(X),
                )
                val = qMFKG.evaluate(
                    X=torch.zeros(1, 1, 1, device=self.device, dtype=dtype),
                    bounds=torch.tensor(
                        [[0.0], [1.0]], device=self.device, dtype=dtype
                    ),
                    num_restarts=1,
                    raw_samples=1,
                )
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertTrue(
                    torch.equal(
                        ckwargs["X"],
                        torch.ones(1, 2, 1, device=self.device, dtype=dtype),
                    )
                )
                self.assertEqual(
                    val, cau(None, torch.ones(1, device=self.device, dtype=dtype))
                )
                # test with defaults - should see no errors
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                )
                qMFKG.evaluate(
                    X=torch.zeros(1, 1, 1, device=self.device, dtype=dtype),
                    bounds=torch.tensor(
                        [[0.0], [1.0]], device=self.device, dtype=dtype
                    ),
                    num_restarts=1,
                    raw_samples=1,
                )

    def test_optimize_w_posterior_transform(self):
        # This is mainly testing that we can optimize without errors.
        for dtype in (torch.float, torch.double):
            tkwargs = {"dtype": dtype, "device": self.device}
            mean = torch.tensor([1.0, 0.5], **tkwargs).expand(2, 1, 2)
            cov = torch.tensor([[1.0, 0.1], [0.1, 0.5]], **tkwargs).expand(2, 2, 2)
            posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
            model = MockModel(posterior)
            n_f = 4
            mean = torch.tensor([1.0, 0.5], **tkwargs).expand(n_f, 2, 1, 2)
            cov = torch.tensor([[1.0, 0.1], [0.1, 0.5]], **tkwargs).expand(n_f, 2, 2, 2)
            posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
            mfm = MockModel(posterior)
            bounds = torch.zeros(2, 2, **tkwargs)
            bounds[1] = 1
            options = {"num_inner_restarts": 2, "raw_inner_samples": 2}
            with mock.patch.object(MockModel, "fantasize", return_value=mfm):
                kg = qMultiFidelityKnowledgeGradient(
                    model=model,
                    num_fantasies=n_f,
                    posterior_transform=ScalarizedPosteriorTransform(
                        weights=torch.rand(2, **tkwargs)
                    ),
                )
                # Mocking this to get around grad issues.
with mock.patch( f"{optimize_acqf.__module__}.gen_candidates_scipy", return_value=( torch.zeros(2, n_f + 1, 2, **tkwargs), torch.zeros(2, **tkwargs), ), ), mock.patch( f"{optimize_acqf.__module__}._filter_kwargs", wraps=lambda f, **kwargs: _filter_kwargs( function=gen_candidates_scipy, **kwargs ), ): candidate, value = optimize_acqf( acq_function=kg, bounds=bounds, q=1, num_restarts=2, raw_samples=2, options=options, ) self.assertTrue(torch.equal(candidate, torch.zeros(1, 2, **tkwargs))) class TestKGUtils(BotorchTestCase): def test_get_value_function(self): with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 1 mm = MockModel(None) # test PosteriorMean vf = _get_value_function(mm) # test initialization self.assertIn("model", vf._modules) self.assertEqual(vf._modules["model"], mm) self.assertIsInstance(vf, PosteriorMean) self.assertIsNone(vf.posterior_transform) # test SimpleRegret obj = GenericMCObjective(lambda Y, X: Y.sum(dim=-1)) sampler = IIDNormalSampler(sample_shape=torch.Size([2])) vf = _get_value_function(model=mm, objective=obj, sampler=sampler) self.assertIsInstance(vf, qSimpleRegret) self.assertEqual(vf.objective, obj) self.assertEqual(vf.sampler, sampler) # test with project mock_project = mock.Mock( return_value=torch.ones(1, 1, 1, device=self.device) ) vf = _get_value_function( model=mm, objective=obj, sampler=sampler, project=mock_project, ) self.assertIsInstance(vf, ProjectedAcquisitionFunction) self.assertEqual(vf.objective, obj) self.assertEqual(vf.sampler, sampler) self.assertEqual(vf.project, mock_project) test_X = torch.rand(1, 1, 1, device=self.device) with mock.patch.object( vf, "base_value_function", __class__=torch.nn.Module, return_value=None ) as patch_bvf: vf(test_X) mock_project.assert_called_once_with(test_X) patch_bvf.assert_called_once_with( torch.ones(1, 1, 1, device=self.device) ) def test_split_fantasy_points(self): for dtype in (torch.float, torch.double): X = torch.randn(5, 3, device=self.device, dtype=dtype) # test error when passing inconsistent n_f with self.assertRaises(ValueError): _split_fantasy_points(X, n_f=6) # basic test X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=2) self.assertEqual(X_actual.shape, torch.Size([3, 3])) self.assertEqual(X_fantasies.shape, torch.Size([2, 1, 3])) self.assertTrue(torch.equal(X_actual, X[:3, :])) self.assertTrue(torch.equal(X_fantasies, X[3:, :].unsqueeze(-2))) # batched test X = torch.randn(2, 5, 3, device=self.device, dtype=dtype) X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=2) self.assertEqual(X_actual.shape, torch.Size([2, 3, 3])) self.assertEqual(X_fantasies.shape, torch.Size([2, 2, 1, 3])) self.assertTrue(torch.equal(X_actual, X[..., :3, :])) X_fantasies_exp = X[..., 3:, :].unsqueeze(-2).permute(1, 0, 2, 3) self.assertTrue(torch.equal(X_fantasies, X_fantasies_exp))
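# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration; not part of the original test suite).
# qKnowledgeGradient evaluates an augmented q-batch of size q + num_fantasies,
# where the trailing num_fantasies rows are the fantasy points; this is the
# slicing convention that extract_candidates and _split_fantasy_points (tested
# above) rely on. Re-derived here in plain torch with illustrative shapes.
import torch

n_f, q, d = 4, 1, 2
X_aug = torch.rand(q + n_f, d)  # the augmented q-batch passed to the acqf
X_actual = X_aug[..., :-n_f, :]  # the q candidates actually being optimized
X_fantasies = X_aug[..., -n_f:, :]  # one fantasy point per fantasy sample
assert X_actual.shape == torch.Size([q, d])
assert X_fantasies.shape == torch.Size([n_f, d])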
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.acquisition.analytic import ExpectedImprovement from botorch.acquisition.fixed_feature import ( FixedFeatureAcquisitionFunction, get_device_of_sequence, get_dtype_of_sequence, ) from botorch.acquisition.monte_carlo import qExpectedImprovement from botorch.models import SingleTaskGP from botorch.utils.testing import BotorchTestCase, MockAcquisitionFunction class TestFixedFeatureAcquisitionFunction(BotorchTestCase): def test_fixed_features(self) -> None: train_X = torch.rand(5, 3, device=self.device) train_Y = train_X.norm(dim=-1, keepdim=True) model = SingleTaskGP(train_X, train_Y).to(device=self.device).eval() for q in [1, 2]: qEI = qExpectedImprovement(model, best_f=0.0) # test single point test_X = torch.rand(q, 3, device=self.device) qEI_ff = FixedFeatureAcquisitionFunction( qEI, d=3, columns=[2], values=test_X[..., -1:] ) qei = qEI(test_X) qei_ff = qEI_ff(test_X[..., :-1]) self.assertAllClose(qei, qei_ff) # test list input with float and scalar tensor for value in [0.5, torch.tensor(0.5)]: qEI_ff = FixedFeatureAcquisitionFunction( qEI, d=3, columns=[2], values=[value] ) qei_ff = qEI_ff(test_X[..., :-1]) test_X_clone = test_X.clone() test_X_clone[..., 2] = value qei = qEI(test_X_clone) self.assertAllClose(qei, qei_ff) # test list input with Tensor and float qEI_ff = FixedFeatureAcquisitionFunction( qEI, d=3, columns=[0, 2], values=[test_X[..., [0]], value] ) qei_ff = qEI_ff(test_X[..., [1]]) self.assertAllClose(qei, qei_ff) # test t-batch with broadcasting and list of floats test_X = torch.rand(q, 3, device=self.device).expand(4, q, 3) qei = qEI(test_X) qEI_ff = FixedFeatureAcquisitionFunction( qEI, d=3, columns=[2], values=test_X[0, :, -1:] ) qei_ff = qEI_ff(test_X[..., :-1]) self.assertAllClose(qei, qei_ff) # test t-batch with broadcasting and list of floats and Tensor # test list input with float and scalar tensor for value in [0.5, torch.tensor(0.5)]: qEI_ff = FixedFeatureAcquisitionFunction( qEI, d=3, columns=[0, 2], values=[test_X[0, :, [0]], value] ) qei_ff = qEI_ff(test_X[..., [1]]) test_X_clone = test_X.clone() test_X_clone[..., 2] = value qei = qEI(test_X_clone) self.assertAllClose(qei, qei_ff) # test X_pending X_pending = torch.rand(2, 3, device=self.device) qEI.set_X_pending(X_pending) qEI_ff = FixedFeatureAcquisitionFunction( qEI, d=3, columns=[2], values=test_X[..., -1:] ) self.assertAllClose(qEI.X_pending, qEI_ff.X_pending) # test setting X_pending from qEI_ff # (set target value to be last dim of X_pending and check if the # constructed X_pending on qEI is the full X_pending) X_pending = torch.rand(2, 3, device=self.device) qEI.X_pending = None qEI_ff = FixedFeatureAcquisitionFunction( qEI, d=3, columns=[2], values=X_pending[..., -1:] ) qEI_ff.set_X_pending(X_pending[..., :-1]) self.assertAllClose(qEI.X_pending, X_pending) # test setting to None qEI_ff.X_pending = None self.assertIsNone(qEI_ff.X_pending) # test gradient test_X = torch.rand(1, 3, device=self.device, requires_grad=True) qei = qEI(test_X) qEI_ff = FixedFeatureAcquisitionFunction( qEI, d=3, columns=[2], values=test_X[..., [2]].detach() ) test_X_ff = test_X[..., :-1].detach().clone().requires_grad_(True) qei_ff = qEI_ff(test_X_ff) self.assertAllClose(qei, qei_ff) qei.backward() qei_ff.backward() self.assertAllClose(test_X.grad[..., :-1], test_X_ff.grad) # test list input with float and 
scalar tensor
            for value in [0.5, torch.tensor(0.5)]:
                # computing with fixed features
                test_X_ff = test_X[..., [1]].detach().clone().requires_grad_(True)
                qEI_ff = FixedFeatureAcquisitionFunction(
                    qEI, d=3, columns=[0, 2], values=[test_X[..., [0]].detach(), value]
                )
                qei_ff = qEI_ff(test_X_ff)
                qei_ff.backward()
                # computing ground truth
                test_X_clone = test_X.detach().clone()
                test_X_clone[..., 2] = value
                test_X_clone.requires_grad_(True)
                qei = qEI(test_X_clone)
                qei.backward()
                self.assertAllClose(test_X_clone.grad[..., [1]], test_X_ff.grad)

            # test error b/c of incompatible input shapes
            with self.assertRaises(ValueError):
                qEI_ff(test_X)

        # test error when there is no X_pending (analytic EI)
        test_X = torch.rand(q, 3, device=self.device)
        analytic_EI = ExpectedImprovement(model, best_f=0.0)
        EI_ff = FixedFeatureAcquisitionFunction(
            analytic_EI, d=3, columns=[2], values=test_X[..., -1:]
        )
        with self.assertRaises(ValueError):
            EI_ff.X_pending

    def test_values_dtypes(self) -> None:
        acqf = MockAcquisitionFunction()
        for input, d, expected_dtype in [
            (torch.tensor([0.0], dtype=torch.float32), 1, torch.float32),
            (torch.tensor([0.0], dtype=torch.float64), 1, torch.float64),
            (
                [
                    torch.tensor([0.0], dtype=torch.float32),
                    torch.tensor([0.0], dtype=torch.float64),
                ],
                2,
                torch.float64,
            ),
            ([0.0], 1, torch.float64),
            ([torch.tensor(0.0, dtype=torch.float32), 0.0], 2, torch.float64),
        ]:
            with self.subTest(input=input, d=d, expected_dtype=expected_dtype):
                self.assertEqual(get_dtype_of_sequence(input), expected_dtype)
                ff = FixedFeatureAcquisitionFunction(
                    acqf, d=d, columns=[2], values=input
                )
                self.assertEqual(ff.values.dtype, expected_dtype)

    def test_values_devices(self) -> None:
        acqf = MockAcquisitionFunction()
        cpu = torch.device("cpu")
        cuda = torch.device("cuda")
        test_cases = [
            (torch.tensor([0.0], device=cpu), 1, cpu),
            ([0.0], 1, cpu),
            ([0.0, torch.tensor([0.0], device=cpu)], 2, cpu),
        ]
        # Can only properly test this when running CUDA tests
        if self.device.type == "cuda":
            test_cases = test_cases + [
                (torch.tensor([0.0], device=cuda), 1, cuda),
                (
                    [
                        torch.tensor([0.0], device=cpu),
                        torch.tensor([0.0], device=cuda),
                    ],
                    2,
                    cuda,
                ),
                ([0.0], 1, cpu),
                ([torch.tensor(0.0, device=cuda), 0.0], 2, cuda),
            ]
        for input, d, expected_device in test_cases:
            with self.subTest(input=input, d=d, expected_device=expected_device):
                self.assertEqual(get_device_of_sequence(input), expected_device)
                ff = FixedFeatureAcquisitionFunction(
                    acqf, d=d, columns=[2], values=input
                )
                self.assertEqual(ff.values.device, expected_device)
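# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration; not part of the original test suite).
# FixedFeatureAcquisitionFunction, exercised above, optimizes over a reduced
# input and re-inserts the pinned columns before calling the wrapped
# acquisition function. The core column reassembly is plain index assignment;
# the dimensions and pinned value below are illustrative assumptions.
import torch

d, q = 3, 2
columns = [2]  # pin feature 2
values = torch.tensor([0.5])  # the fixed value for that column
X_reduced = torch.rand(q, d - len(columns))  # what the optimizer sees
free_columns = [i for i in range(d) if i not in columns]
X_full = torch.empty(q, d)
X_full[..., free_columns] = X_reduced  # scatter the free features back
X_full[..., columns] = values  # broadcast the pinned value into column 2
assert torch.equal(X_full[..., 2], torch.full((q,), 0.5))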
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings from unittest import mock import torch from botorch import settings from botorch.acquisition.cached_cholesky import CachedCholeskyMCAcquisitionFunction from botorch.acquisition.monte_carlo import MCAcquisitionFunction from botorch.acquisition.objective import GenericMCObjective from botorch.exceptions.warnings import BotorchWarning from botorch.models import SingleTaskGP from botorch.models.deterministic import GenericDeterministicModel from botorch.models.higher_order_gp import HigherOrderGP from botorch.models.model import ModelList from botorch.models.transforms.outcome import Log from botorch.sampling.normal import IIDNormalSampler from botorch.utils.low_rank import extract_batch_covar from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior from linear_operator.utils.errors import NanError, NotPSDError CHOLESKY_PATH = "linear_operator.operators._linear_operator.psd_safe_cholesky" EXTRACT_BATCH_COVAR_PATH = "botorch.acquisition.cached_cholesky.extract_batch_covar" class DummyCachedCholeskyAcqf( MCAcquisitionFunction, CachedCholeskyMCAcquisitionFunction ): def forward(self, X): return X class TestCachedCholeskyMCAcquisitionFunction(BotorchTestCase): def test_setup(self): mean = torch.zeros(1, 1) variance = torch.ones(1, 1) mm = MockModel(MockPosterior(mean=mean, variance=variance)) # basic test w/ invalid model. sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler) acqf._setup(model=mm) self.assertFalse(acqf._cache_root) with self.assertWarnsRegex(RuntimeWarning, "cache_root"): acqf._setup(model=mm, cache_root=True) self.assertFalse(acqf._cache_root) # Unsupported outcome transform. stgp = SingleTaskGP( torch.zeros(1, 1), torch.zeros(1, 1), outcome_transform=Log() ) with self.assertWarnsRegex(RuntimeWarning, "cache_root"): acqf._setup(model=stgp, cache_root=True) self.assertFalse(acqf._cache_root) # ModelList is not supported. model_list = ModelList(SingleTaskGP(torch.zeros(1, 1), torch.zeros(1, 1))) with self.assertWarnsRegex(RuntimeWarning, "cache_root"): acqf._setup(model=model_list, cache_root=True) self.assertFalse(acqf._cache_root) # basic test w/ supported model. 
stgp = SingleTaskGP(torch.zeros(1, 1), torch.zeros(1, 1)) acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler) acqf._setup(model=stgp, cache_root=True) self.assertTrue(acqf._cache_root) # test the base_samples are set to None self.assertIsNone(acqf.sampler.base_samples) # test model that uses matheron's rule and sampler.batch_range != (0, -1) hogp = HigherOrderGP(torch.zeros(1, 1), torch.zeros(1, 1, 1)).eval() acqf = DummyCachedCholeskyAcqf(model=hogp, sampler=sampler) with self.assertWarnsRegex(RuntimeWarning, "cache_root"): acqf._setup(model=hogp, cache_root=True) # test deterministic model model = GenericDeterministicModel(f=lambda X: X) acqf = DummyCachedCholeskyAcqf(model=model, sampler=sampler) acqf._setup(model=model, cache_root=True) self.assertFalse(acqf._cache_root) def test_cache_root_decomposition(self): tkwargs = {"device": self.device} for dtype in (torch.float, torch.double): tkwargs["dtype"] = dtype # test mt-mvn train_x = torch.rand(2, 1, **tkwargs) train_y = torch.rand(2, 2, **tkwargs) test_x = torch.rand(2, 1, **tkwargs) model = SingleTaskGP(train_x, train_y) sampler = IIDNormalSampler(sample_shape=torch.Size([1])) with torch.no_grad(): posterior = model.posterior(test_x) acqf = DummyCachedCholeskyAcqf( model=model, sampler=sampler, objective=GenericMCObjective(lambda Y: Y[..., 0]), ) baseline_L = torch.eye(2, **tkwargs) with mock.patch( EXTRACT_BATCH_COVAR_PATH, wraps=extract_batch_covar ) as mock_extract_batch_covar: with mock.patch( CHOLESKY_PATH, return_value=baseline_L ) as mock_cholesky: baseline_L_acqf = acqf._compute_root_decomposition( posterior=posterior ) mock_extract_batch_covar.assert_called_once_with( posterior.distribution ) mock_cholesky.assert_called_once() # test mvn model = SingleTaskGP(train_x, train_y[:, :1]) with torch.no_grad(): posterior = model.posterior(test_x) with mock.patch(EXTRACT_BATCH_COVAR_PATH) as mock_extract_batch_covar: with mock.patch( CHOLESKY_PATH, return_value=baseline_L ) as mock_cholesky: baseline_L_acqf = acqf._compute_root_decomposition( posterior=posterior ) mock_extract_batch_covar.assert_not_called() mock_cholesky.assert_called_once() self.assertTrue(torch.equal(baseline_L_acqf, baseline_L)) def test_get_f_X_samples(self): tkwargs = {"device": self.device} for dtype in (torch.float, torch.double): tkwargs["dtype"] = dtype mean = torch.zeros(5, 1, **tkwargs) variance = torch.ones(5, 1, **tkwargs) mm = MockModel( MockPosterior( mean=mean, variance=variance, samples=torch.rand(5, 1, **tkwargs) ) ) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler) with self.assertWarnsRegex(RuntimeWarning, "cache_root"): acqf._setup(model=mm, cache_root=True) self.assertFalse(acqf._cache_root) acqf._cache_root = True q = 3 baseline_L = torch.eye(5 - q, **tkwargs) acqf._baseline_L = baseline_L posterior = mm.posterior(torch.rand(5, 1, **tkwargs)) # basic test rv = torch.rand(1, 5, 1, **tkwargs) with mock.patch( "botorch.acquisition.cached_cholesky.sample_cached_cholesky", return_value=rv, ) as mock_sample_cached_cholesky: samples = acqf._get_f_X_samples(posterior=posterior, q_in=q) mock_sample_cached_cholesky.assert_called_once_with( posterior=posterior, baseline_L=acqf._baseline_L, q=q, base_samples=acqf.sampler.base_samples, sample_shape=acqf.sampler.sample_shape, ) self.assertTrue(torch.equal(rv, samples)) # test fall back when sampling from cached cholesky fails for error_cls in (NanError, NotPSDError): base_samples = torch.rand(1, 5, 1, **tkwargs) 
                acqf.sampler.base_samples = base_samples
                acqf._baseline_L = baseline_L
                with mock.patch(
                    "botorch.acquisition.cached_cholesky.sample_cached_cholesky",
                    side_effect=error_cls,
                ) as mock_sample_cached_cholesky:
                    with warnings.catch_warnings(record=True) as ws, settings.debug(
                        True
                    ):
                        samples = acqf._get_f_X_samples(posterior=posterior, q_in=q)
                    mock_sample_cached_cholesky.assert_called_once_with(
                        posterior=posterior,
                        baseline_L=acqf._baseline_L,
                        q=q,
                        base_samples=base_samples,
                        sample_shape=acqf.sampler.sample_shape,
                    )
                    self.assertTrue(issubclass(ws[0].category, BotorchWarning))
                    self.assertEqual(samples.shape, torch.Size([1, q, 1]))
            # test HOGP
            hogp = HigherOrderGP(torch.zeros(2, 1), torch.zeros(2, 1, 1)).eval()
            acqf = DummyCachedCholeskyAcqf(model=hogp, sampler=sampler)
            acqf._setup(model=hogp, cache_root=True)
            mock_samples = torch.rand(5, 1, 1, **tkwargs)
            posterior = MockPosterior(
                mean=mean, variance=variance, samples=mock_samples
            )
            samples = acqf._get_f_X_samples(posterior=posterior, q_in=q)
            self.assertTrue(torch.equal(samples, mock_samples[2:].unsqueeze(0)))
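# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration; not part of the original test suite).
# The cache_root machinery tested above boils down to: factor the baseline
# posterior covariance once (L @ L.T = K) and reuse the root L across
# acquisition evaluations instead of re-decomposing each time. A minimal
# torch analogue, with a synthetic PSD matrix standing in for the covariance:
import torch

A = torch.randn(5, 5)
K = A @ A.mT + 1e-3 * torch.eye(5)  # synthetic PSD "baseline" covariance
baseline_L = torch.linalg.cholesky(K)  # computed once and cached
z = torch.randn(5)  # a fresh base sample per acquisition evaluation
correlated_draw = baseline_L @ z  # reuse the cached root; no new factorization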
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.acquisition.acquisition import AcquisitionFunction from botorch.acquisition.preference import ( AnalyticExpectedUtilityOfBestOption, PairwiseBayesianActiveLearningByDisagreement, ) from botorch.exceptions.errors import UnsupportedError from botorch.models import SingleTaskGP from botorch.models.deterministic import FixedSingleSampleModel from botorch.models.pairwise_gp import PairwiseGP from botorch.utils.testing import BotorchTestCase class TestPreferenceAcquisitionFunctions(BotorchTestCase): def setUp(self) -> None: super().setUp() self.twargs = {"dtype": torch.double} self.X_dim = 3 self.Y_dim = 2 X = torch.rand(2, self.X_dim, **self.twargs) Y = torch.rand(2, self.Y_dim, **self.twargs) comps = torch.tensor([[1, 0]], dtype=torch.long) self.model = SingleTaskGP(X, Y) self.pref_model_on_X = PairwiseGP(X, comps) self.pref_model_on_Y = PairwiseGP(Y, comps) self.deterministic_model = FixedSingleSampleModel(model=self.model) def pairwise_preference_acqf_test( self, acqf_class: AcquisitionFunction, test_previous_winner: bool ): for outcome_model in [self.deterministic_model, None]: pref_model = ( self.pref_model_on_X if outcome_model is None else self.pref_model_on_Y ) # Test with an outcome model and a preference model acqf = acqf_class(pref_model=pref_model, outcome_model=outcome_model) # test forward with different number of points X1 = torch.rand(1, self.X_dim, **self.twargs) X2 = torch.rand(2, self.X_dim, **self.twargs) X3 = torch.rand(3, self.X_dim, **self.twargs) # q = 1 with self.assertRaises((UnsupportedError, AssertionError)): acqf(X1) # q = 2 acqf(X2) # q > 2 with self.assertRaises((UnsupportedError, AssertionError)): acqf(X3) if test_previous_winner: previous_winner = ( torch.rand(1, self.X_dim, **self.twargs) if outcome_model is None else torch.rand(1, self.Y_dim, **self.twargs) ) acqf = acqf_class( pref_model=pref_model, outcome_model=outcome_model, previous_winner=previous_winner, ) # q = 1 acqf(X1) # q = 2 with self.assertRaises((UnsupportedError, AssertionError)): acqf(X2) # q > 2 with self.assertRaises((UnsupportedError, AssertionError)): acqf(X3) def test_analytic_eubo(self): self.pairwise_preference_acqf_test( acqf_class=AnalyticExpectedUtilityOfBestOption, test_previous_winner=True, ) def test_analytic_bald(self): self.pairwise_preference_acqf_test( acqf_class=PairwiseBayesianActiveLearningByDisagreement, test_previous_winner=False, )
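# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration; not part of the original test suite).
# The preference tests above rely on PairwiseGP's comparison encoding: each
# row of `comps` holds two indices into the training inputs, with the
# preferred (winning) item listed first. The data below is illustrative.
import torch
from botorch.models.pairwise_gp import PairwiseGP

train_X = torch.rand(3, 2, dtype=torch.float64)
# item 1 beat item 0, and item 2 beat item 1
comps = torch.tensor([[1, 0], [2, 1]], dtype=torch.long)
pref_model = PairwiseGP(train_X, comps)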
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from itertools import product

import torch
from botorch.acquisition.joint_entropy_search import qJointEntropySearch
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase


def get_model(train_X, train_Y, use_model_list, standardize_model):
    num_objectives = train_Y.shape[-1]

    if standardize_model:
        if use_model_list:
            outcome_transform = Standardize(m=1)
        else:
            outcome_transform = Standardize(m=num_objectives)
    else:
        outcome_transform = None

    if use_model_list:
        model = ModelListGP(
            *[
                SingleTaskGP(
                    train_X=train_X,
                    train_Y=train_Y[:, i : i + 1],
                    outcome_transform=outcome_transform,
                )
                for i in range(num_objectives)
            ]
        )
    else:
        model = SingleTaskGP(
            train_X=train_X,
            train_Y=train_Y,
            outcome_transform=outcome_transform,
        )

    return model


class TestQJointEntropySearch(BotorchTestCase):
    def test_joint_entropy_search(self):
        torch.manual_seed(1)
        tkwargs = {"device": self.device}
        estimation_types = ("LB", "MC")
        num_objectives = 1
        for (
            dtype,
            estimation_type,
            use_model_list,
            standardize_model,
            maximize,
            condition_noiseless,
        ) in product(
            (torch.float, torch.double),
            estimation_types,
            (False, True),
            (False, True),
            (False, True),
            (False, True),
        ):
            tkwargs["dtype"] = dtype
            input_dim = 2
            train_X = torch.rand(4, input_dim, **tkwargs)
            train_Y = torch.rand(4, num_objectives, **tkwargs)
            model = get_model(train_X, train_Y, use_model_list, standardize_model)

            num_samples = 20
            optimal_inputs = torch.rand(num_samples, input_dim, **tkwargs)
            optimal_outputs = torch.rand(num_samples, num_objectives, **tkwargs)

            # test acquisition
            X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)]
            for i in range(len(X_pending_list)):
                X_pending = X_pending_list[i]

                acq = qJointEntropySearch(
                    model=model,
                    optimal_inputs=optimal_inputs,
                    optimal_outputs=optimal_outputs,
                    estimation_type=estimation_type,
                    num_samples=64,
                    X_pending=X_pending,
                    condition_noiseless=condition_noiseless,
                    maximize=maximize,
                )
                self.assertIsInstance(acq.sampler, SobolQMCNormalSampler)

                test_Xs = [
                    torch.rand(4, 1, input_dim, **tkwargs),
                    torch.rand(4, 3, input_dim, **tkwargs),
                    torch.rand(4, 5, 1, input_dim, **tkwargs),
                    torch.rand(4, 5, 3, input_dim, **tkwargs),
                ]

                for j in range(len(test_Xs)):
                    acq_X = acq(test_Xs[j])
                    # assess shape
                    self.assertTrue(acq_X.shape == test_Xs[j].shape[:-2])

                with self.assertRaises(ValueError):
                    acq = qJointEntropySearch(
                        model=model,
                        optimal_inputs=optimal_inputs,
                        optimal_outputs=optimal_outputs,
                        estimation_type="NO_EST",
                        num_samples=64,
                        X_pending=X_pending,
                        condition_noiseless=condition_noiseless,
                        maximize=maximize,
                    )
                    acq_X = acq(test_Xs[j])

        # Support for fully Bayesian models is not yet implemented. Thus, we
        # throw an error for now.
        fully_bayesian_model = SaasFullyBayesianSingleTaskGP(train_X, train_Y)
        with self.assertRaises(NotImplementedError):
            acq = qJointEntropySearch(
                model=fully_bayesian_model,
                optimal_inputs=optimal_inputs,
                optimal_outputs=optimal_outputs,
                estimation_type="LB",
            )
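# A hedged end-to-end sketch (not from the repo): constructing qJointEntropySearch
# against a real SingleTaskGP. In practice `optimal_inputs`/`optimal_outputs`
# would be Thompson samples of the optimizer/optimum; random stand-ins are
# assumed here, exactly as in the test above.
import torch
from botorch.acquisition.joint_entropy_search import qJointEntropySearch
from botorch.models.gp_regression import SingleTaskGP

train_X = torch.rand(8, 2, dtype=torch.double)
train_Y = torch.rand(8, 1, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)
optimal_inputs = torch.rand(16, 2, dtype=torch.double)
optimal_outputs = torch.rand(16, 1, dtype=torch.double)
jes = qJointEntropySearch(
    model=model,
    optimal_inputs=optimal_inputs,
    optimal_outputs=optimal_outputs,
    estimation_type="LB",  # lower-bound estimator, as exercised above
)
values = jes(torch.rand(5, 1, 2, dtype=torch.double))  # one value per t-batch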
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from unittest import mock

import torch
from botorch.acquisition.active_learning import (
    PairwiseMCPosteriorVariance,
    qNegIntegratedPosteriorVariance,
)
from botorch.acquisition.objective import (
    GenericMCObjective,
    ScalarizedPosteriorTransform,
)
from botorch.models.pairwise_gp import PairwiseGP
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from gpytorch.distributions import MultitaskMultivariateNormal


class TestQNegIntegratedPosteriorVariance(BotorchTestCase):
    def test_init(self):
        mm = MockModel(MockPosterior(mean=torch.rand(2, 1)))
        mc_points = torch.rand(2, 2)
        qNIPV = qNegIntegratedPosteriorVariance(model=mm, mc_points=mc_points)
        sampler = qNIPV.sampler
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([1]))
        self.assertTrue(torch.equal(mc_points, qNIPV.mc_points))
        self.assertIsNone(qNIPV.X_pending)
        self.assertIsNone(qNIPV.posterior_transform)
        sampler = IIDNormalSampler(sample_shape=torch.Size([2]))
        qNIPV = qNegIntegratedPosteriorVariance(
            model=mm, mc_points=mc_points, sampler=sampler
        )
        self.assertIsInstance(qNIPV.sampler, IIDNormalSampler)
        self.assertEqual(qNIPV.sampler.sample_shape, torch.Size([2]))

    def test_q_neg_int_post_variance(self):
        no = "botorch.utils.testing.MockModel.num_outputs"
        for dtype in (torch.float, torch.double):
            # basic test
            mean = torch.zeros(4, 1, device=self.device, dtype=dtype)
            variance = torch.rand(4, 1, device=self.device, dtype=dtype)
            mc_points = torch.rand(10, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            with mock.patch.object(MockModel, "fantasize", return_value=mfm):
                with mock.patch(
                    no, new_callable=mock.PropertyMock
                ) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    # TODO: Make this work with arbitrary models
                    mm = MockModel(None)
                    qNIPV = qNegIntegratedPosteriorVariance(
                        model=mm, mc_points=mc_points
                    )
                    X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
                    val = qNIPV(X)
                    self.assertAllClose(val, -(variance.mean()), atol=1e-4)
            # batched model
            mean = torch.zeros(2, 4, 1, device=self.device, dtype=dtype)
            variance = torch.rand(2, 4, 1, device=self.device, dtype=dtype)
            mc_points = torch.rand(2, 10, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            with mock.patch.object(MockModel, "fantasize", return_value=mfm):
                with mock.patch(
                    no, new_callable=mock.PropertyMock
                ) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    # TODO: Make this work with arbitrary models
                    mm = MockModel(None)
                    qNIPV = qNegIntegratedPosteriorVariance(
                        model=mm, mc_points=mc_points
                    )
                    # TODO: Allow broadcasting for batch evaluation
                    X = torch.empty(2, 1, 1, device=self.device, dtype=dtype)  # dummy
                    val = qNIPV(X)
                    val_exp = -variance.mean(dim=-2).squeeze(-1)
                    self.assertAllClose(val, val_exp, atol=1e-4)
            # multi-output model
            mean = torch.zeros(4, 2, device=self.device, dtype=dtype)
            variance = torch.rand(4, 2, device=self.device, dtype=dtype)
            cov = torch.diag_embed(variance.view(-1))
            f_posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
            mc_points = torch.rand(10, 1, device=self.device, dtype=dtype)
            mfm = MockModel(f_posterior)
            with mock.patch.object(MockModel, "fantasize", return_value=mfm):
                with mock.patch(
                    no, new_callable=mock.PropertyMock
                ) as mock_num_outputs:
                    mock_num_outputs.return_value = 2
                    mm = MockModel(None)
                    weights = torch.tensor([0.5, 0.5], device=self.device, dtype=dtype)
                    qNIPV = qNegIntegratedPosteriorVariance(
                        model=mm,
                        mc_points=mc_points,
                        posterior_transform=ScalarizedPosteriorTransform(
                            weights=weights
                        ),
                    )
                    X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
                    val = qNIPV(X)
                    self.assertTrue(
                        torch.allclose(val, -0.5 * variance.mean(), atol=1e-4)
                    )
            # batched multi-output model
            mean = torch.zeros(4, 3, 1, 2, device=self.device, dtype=dtype)
            variance = torch.rand(4, 3, 1, 2, device=self.device, dtype=dtype)
            cov = torch.diag_embed(variance.view(4, 3, -1))
            f_posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, cov))
            mc_points = torch.rand(4, 1, device=self.device, dtype=dtype)
            mfm = MockModel(f_posterior)
            with mock.patch.object(MockModel, "fantasize", return_value=mfm):
                with mock.patch(
                    no, new_callable=mock.PropertyMock
                ) as mock_num_outputs:
                    mock_num_outputs.return_value = 2
                    mm = MockModel(None)
                    weights = torch.tensor([0.5, 0.5], device=self.device, dtype=dtype)
                    qNIPV = qNegIntegratedPosteriorVariance(
                        model=mm,
                        mc_points=mc_points,
                        posterior_transform=ScalarizedPosteriorTransform(
                            weights=weights
                        ),
                    )
                    X = torch.empty(3, 1, 1, device=self.device, dtype=dtype)  # dummy
                    val = qNIPV(X)
                    val_exp = -0.5 * variance.mean(dim=0).view(3, -1).mean(dim=-1)
                    self.assertAllClose(val, val_exp, atol=1e-4)


class TestPairwiseMCPosteriorVariance(BotorchTestCase):
    def test_pairwise_mc_post_var(self):
        train_X = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0]])
        train_comp = torch.tensor([[0, 1]], dtype=torch.long)
        model = PairwiseGP(train_X, train_comp)

        # example link function
        probit = torch.distributions.normal.Normal(0, 1).cdf
        probit_obj = GenericMCObjective(objective=lambda Y, X: probit(Y.squeeze(-1)))
        pv = PairwiseMCPosteriorVariance(model=model, objective=probit_obj)

        n_test_pair = 8
        good_X_2 = torch.rand((n_test_pair, 2, 3))
        good_X_4 = torch.rand((n_test_pair, 4, 3))
        bad_X = torch.rand((n_test_pair, 3, 3))

        # ensure q is a multiple of 2
        with self.assertRaises(RuntimeError):
            pv(bad_X)

        self.assertEqual(pv(good_X_2).shape, torch.Size([n_test_pair]))
        self.assertEqual(pv(good_X_4).shape, torch.Size([n_test_pair]))
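# A small illustrative sketch (assumption-laden, not part of the test file):
# qNegIntegratedPosteriorVariance with a real SingleTaskGP instead of the mocks
# used above. `mc_points` is a random integration grid over the unit cube.
import torch
from botorch.acquisition.active_learning import qNegIntegratedPosteriorVariance
from botorch.models.gp_regression import SingleTaskGP

train_X = torch.rand(10, 2, dtype=torch.double)
train_Y = torch.rand(10, 1, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)
mc_points = torch.rand(128, 2, dtype=torch.double)
qnipv = qNegIntegratedPosteriorVariance(model=model, mc_points=mc_points)
# higher values correspond to lower integrated posterior variance
value = qnipv(torch.rand(1, 2, dtype=torch.double))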
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from itertools import product

import torch
from botorch.acquisition.multi_objective.predictive_entropy_search import (
    _safe_update_omega,
    _update_damping,
    qMultiObjectivePredictiveEntropySearch,
)
from botorch.exceptions import UnsupportedError
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.utils.testing import BotorchTestCase


def dummy_sample_pareto_sets(model, num_pareto_samples, num_pareto_points):
    m = model.models[0] if isinstance(model, ModelListGP) else model
    input_dim = m.train_inputs[0].shape[-1]
    tkwargs = {"dtype": m.train_inputs[0].dtype, "device": m.train_inputs[0].device}
    return torch.rand(
        num_pareto_samples,
        num_pareto_points,
        input_dim,
        **tkwargs,
    )


def get_model(train_X, train_Y, use_model_list, standardize_model):
    num_objectives = train_Y.shape[-1]

    if standardize_model:
        if use_model_list:
            outcome_transform = Standardize(m=1)
        else:
            outcome_transform = Standardize(m=num_objectives)
    else:
        outcome_transform = None

    if use_model_list:
        model = ModelListGP(
            *[
                SingleTaskGP(
                    train_X=train_X,
                    train_Y=train_Y[:, i : i + 1],
                    outcome_transform=outcome_transform,
                )
                for i in range(num_objectives)
            ]
        )
    else:
        model = SingleTaskGP(
            train_X=train_X,
            train_Y=train_Y,
            outcome_transform=outcome_transform,
        )

    return model


class TestQMultiObjectivePredictiveEntropySearch(BotorchTestCase):
    def test_initialization_errors(self):
        torch.manual_seed(1)
        tkwargs = {"device": self.device}
        standardize_model = False
        for (
            dtype,
            num_objectives,
            use_model_list,
        ) in product(
            (torch.float, torch.double),
            (1, 2, 3),
            (False, True),
        ):
            tkwargs["dtype"] = dtype
            # test batched model
            train_X = torch.rand(4, 3, 2, **tkwargs)
            train_Y = torch.rand(4, 3, num_objectives, **tkwargs)
            model = get_model(train_X, train_Y, use_model_list, standardize_model)

            num_pareto_samples = 3
            if num_objectives > 1:
                num_pareto_points = 4
            else:
                num_pareto_points = 1

            pareto_sets = dummy_sample_pareto_sets(
                model, num_pareto_samples, num_pareto_points
            )

            # test batch model error
            with self.assertRaises(NotImplementedError):
                qMultiObjectivePredictiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets,
                )

            # test wrong Pareto set shape
            train_X = torch.rand(1, 2, **tkwargs)
            train_Y = torch.rand(1, num_objectives, **tkwargs)
            model = get_model(train_X, train_Y, use_model_list, standardize_model)

            pareto_sets = dummy_sample_pareto_sets(
                model, num_pareto_samples, num_pareto_points
            )

            with self.assertRaises(UnsupportedError):
                qMultiObjectivePredictiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets.unsqueeze(0),
                )
            with self.assertRaises(UnsupportedError):
                qMultiObjectivePredictiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets.unsqueeze(-1),
                )

    def test_moo_predictive_entropy_search(self, use_model_list=False, maximize=False):
        torch.manual_seed(1)
        tkwargs = {"device": self.device}
        for (
            dtype,
            num_objectives,
            standardize_model,
        ) in product(
            (torch.float, torch.double),
            (1, 2, 3),
            (False, True),
        ):
            tkwargs["dtype"] = dtype
            input_dim = 2
            train_X = torch.rand(4, input_dim, **tkwargs)
            train_Y = torch.rand(4, num_objectives, **tkwargs)
            model = get_model(train_X, train_Y, use_model_list, standardize_model)

            num_pareto_samples = 3
            num_pareto_points = 1 if num_objectives == 1 else 4

            pareto_sets = dummy_sample_pareto_sets(
                model, num_pareto_samples, num_pareto_points
            )

            # test acquisition
            X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)]
            for i in range(len(X_pending_list)):
                X_pending = X_pending_list[i]
                acq = qMultiObjectivePredictiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets,
                    maximize=maximize,
                    X_pending=X_pending,
                )

                test_Xs = [
                    torch.rand(4, 1, input_dim, **tkwargs),
                    torch.rand(4, 3, input_dim, **tkwargs),
                    torch.rand(4, 5, 1, input_dim, **tkwargs),
                    torch.rand(4, 5, 3, input_dim, **tkwargs),
                ]

                for test_X in test_Xs:
                    acq_X = acq(test_X)
                    # assess shape
                    self.assertTrue(acq_X.shape == test_X.shape[:-2])

    def test_moo_predictive_entropy_search_maximize(self):
        self.test_moo_predictive_entropy_search(maximize=True)

    def test_moo_predictive_entropy_search_model_list(self):
        self.test_moo_predictive_entropy_search(use_model_list=True)

    def test_moo_predictive_entropy_search_model_list_maximize(self):
        self.test_moo_predictive_entropy_search(use_model_list=True, maximize=True)

    def test_update_damping(self):
        # test error when old and new covariance are not positive semi-definite
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            cov_old = torch.ones(1, 2, 2, **tkwargs)
            cov_new = torch.ones(1, 2, 2, **tkwargs)
            damping_factor = torch.ones(1, **tkwargs)
            jitter = 0.0
            with self.assertRaises(ValueError):
                _update_damping(
                    nat_cov=cov_old,
                    nat_cov_new=cov_new,
                    damping_factor=damping_factor,
                    jitter=jitter,
                )

    def test_safe_omega_update(self):
        tkwargs = {"device": self.device}
        # test that the omega update is skipped when EP fails because the
        # jitter is too small. This naturally depends on the precision.
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            N = 1
            P = 3
            M = 2
            mean_f = torch.zeros(2, M, N + P, **tkwargs)
            cov_f = torch.ones(2, M, N + P, N + P, **tkwargs)
            omega_f_nat_mean = torch.zeros(2, M, N + P, P, 2, **tkwargs)
            omega_f_nat_cov = torch.zeros(2, M, N + P, P, 2, 2, **tkwargs)
            maximize = True
            jitter = 0.0

            # The inversion of a factor of `cov_f` will fail and raise a
            # `torch._C._LinAlgError`, so the update should be skipped.
            omega_f_nat_mean_new, omega_f_nat_cov_new = _safe_update_omega(
                mean_f=mean_f,
                cov_f=cov_f,
                omega_f_nat_mean=omega_f_nat_mean,
                omega_f_nat_cov=omega_f_nat_cov,
                N=N,
                P=P,
                M=M,
                maximize=maximize,
                jitter=jitter,
            )

            self.assertTrue(torch.equal(omega_f_nat_mean, omega_f_nat_mean_new))
            self.assertTrue(torch.equal(omega_f_nat_cov, omega_f_nat_cov_new))
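# A hedged end-to-end sketch (not in the repo): qMultiObjectivePredictiveEntropySearch
# with random Pareto-set placeholders, mirroring `dummy_sample_pareto_sets` above.
# The EP routine run inside the constructor can be numerically delicate on random
# data, hence the fixed seed.
import torch
from botorch.acquisition.multi_objective.predictive_entropy_search import (
    qMultiObjectivePredictiveEntropySearch,
)
from botorch.models.gp_regression import SingleTaskGP

torch.manual_seed(1)
train_X = torch.rand(4, 2, dtype=torch.double)
train_Y = torch.rand(4, 2, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)
pareto_sets = torch.rand(3, 4, 2, dtype=torch.double)  # samples x points x d
pes = qMultiObjectivePredictiveEntropySearch(model=model, pareto_sets=pareto_sets)
values = pes(torch.rand(5, 1, 2, dtype=torch.double))  # one value per t-batch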
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import warnings
from itertools import product
from unittest import mock

import torch
from botorch.acquisition.multi_objective.objective import (
    MCMultiOutputObjective,
    UnstandardizeMCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import (
    compute_sample_box_decomposition,
    get_default_partitioning_alpha,
    prune_inferior_points_multi_objective,
    random_search_optimizer,
    sample_optimal_points,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.utils.gp_sampling import get_gp_samples
from botorch.utils.multi_objective import is_non_dominated
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from torch import Tensor


class TestUtils(BotorchTestCase):
    def test_get_default_partitioning_alpha(self):
        for m in range(2, 7):
            expected_val = 0.0 if m < 5 else 10 ** (-8 + m)
            self.assertEqual(
                expected_val, get_default_partitioning_alpha(num_objectives=m)
            )
        # In `BotorchTestCase.setUp` warnings are filtered, so here we
        # remove the filter to ensure a warning is issued as expected.
        warnings.resetwarnings()
        with warnings.catch_warnings(record=True) as ws:
            self.assertEqual(0.1, get_default_partitioning_alpha(num_objectives=7))
        self.assertEqual(len(ws), 1)


class DummyMCMultiOutputObjective(MCMultiOutputObjective):
    def forward(self, samples: Tensor) -> Tensor:
        return samples


class TestMultiObjectiveUtils(BotorchTestCase):
    def setUp(self):
        super().setUp()
        self.model = mock.MagicMock()
        self.objective = DummyMCMultiOutputObjective()
        self.X_observed = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.X_pending = torch.tensor([[1.0, 3.0, 4.0]])
        self.mc_samples = 250
        self.qmc = True
        self.ref_point = [0.0, 0.0]
        self.Y = torch.tensor([[1.0, 2.0]])
        self.seed = 1

    def test_prune_inferior_points_multi_objective(self):
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            X = torch.rand(3, 2, **tkwargs)
            ref_point = torch.tensor([0.25, 0.25], **tkwargs)
            # the event shape is `q x m` = 3 x 2
            samples = torch.tensor([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]], **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            # test that a batched X raises errors
            with self.assertRaises(UnsupportedError):
                prune_inferior_points_multi_objective(
                    model=mm, X=X.expand(2, 3, 2), ref_point=ref_point
                )
            # test that a batched model raises errors (event shape is `q x m` = 3 x m)
            mm2 = MockModel(MockPosterior(samples=samples.expand(2, 3, 2)))
            with self.assertRaises(UnsupportedError):
                prune_inferior_points_multi_objective(
                    model=mm2, X=X, ref_point=ref_point
                )
            # test that invalid max_frac is checked properly
            with self.assertRaises(ValueError):
                prune_inferior_points_multi_objective(
                    model=mm, X=X, max_frac=1.1, ref_point=ref_point
                )
            # test basic behaviour
            X_pruned = prune_inferior_points_multi_objective(
                model=mm, X=X, ref_point=ref_point
            )
            self.assertTrue(torch.equal(X_pruned, X[[-1]]))
            # test unstd objective
            unstd_obj = UnstandardizeMCMultiOutputObjective(
                Y_mean=samples.mean(dim=0), Y_std=samples.std(dim=0), outcomes=[0, 1]
            )
            X_pruned = prune_inferior_points_multi_objective(
                model=mm, X=X, ref_point=ref_point, objective=unstd_obj
            )
            self.assertTrue(torch.equal(X_pruned, X[[-1]]))
            # test constraints
            samples_constrained = torch.tensor(
                [[1.0, 2.0, -1.0], [2.0, 1.0, -1.0], [3.0, 4.0, 1.0]], **tkwargs
            )
            mm_constrained = MockModel(MockPosterior(samples=samples_constrained))
            X_pruned = prune_inferior_points_multi_objective(
                model=mm_constrained,
                X=X,
                ref_point=ref_point,
                objective=unstd_obj,
                constraints=[lambda Y: Y[..., -1]],
            )
            self.assertTrue(torch.equal(X_pruned, X[:2]))

            # test non-repeated samples (requires mocking out MockPosterior's rsample)
            samples = torch.tensor(
                [[[3.0], [0.0], [0.0]], [[0.0], [2.0], [0.0]], [[0.0], [0.0], [1.0]]],
                device=self.device,
                dtype=dtype,
            )
            with mock.patch.object(MockPosterior, "rsample", return_value=samples):
                mm = MockModel(MockPosterior(samples=samples))
                X_pruned = prune_inferior_points_multi_objective(
                    model=mm, X=X, ref_point=ref_point
                )
            self.assertTrue(torch.equal(X_pruned, X))
            # test max_frac limiting
            with mock.patch.object(MockPosterior, "rsample", return_value=samples):
                mm = MockModel(MockPosterior(samples=samples))
                X_pruned = prune_inferior_points_multi_objective(
                    model=mm, X=X, ref_point=ref_point, max_frac=2 / 3
                )
            if self.device.type == "cuda":
                # sorting has different order on cuda
                self.assertTrue(
                    torch.equal(X_pruned, X[[2, 1]]) or torch.equal(X_pruned, X[[1, 2]])
                )
            else:
                self.assertTrue(torch.equal(X_pruned, X[:2]))
            # test that zero-probability points are in fact pruned
            samples[2, 0, 0] = 10
            with mock.patch.object(MockPosterior, "rsample", return_value=samples):
                mm = MockModel(MockPosterior(samples=samples))
                X_pruned = prune_inferior_points_multi_objective(
                    model=mm, X=X, ref_point=ref_point
                )
            self.assertTrue(torch.equal(X_pruned, X[:2]))
            # test marginalize_dim and constraints
            samples = torch.tensor([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]], **tkwargs)
            samples = samples.unsqueeze(-3).expand(
                *samples.shape[:-2],
                2,
                *samples.shape[-2:],
            )
            mm = MockModel(MockPosterior(samples=samples))
            X_pruned = prune_inferior_points_multi_objective(
                model=mm,
                X=X,
                ref_point=ref_point,
                objective=unstd_obj,
                constraints=[lambda Y: Y[..., -1] - 3.0],
                marginalize_dim=-3,
            )
            self.assertTrue(torch.equal(X_pruned, X[:2]))

    def test_compute_sample_box_decomposition(self):
        tkwargs = {"device": self.device}
        for dtype, maximize in product((torch.float, torch.double), (True, False)):
            tkwargs["dtype"] = dtype

            # test error when inputting incorrect Pareto front
            X = torch.rand(4, 3, 2, 1, **tkwargs)
            with self.assertRaises(UnsupportedError):
                compute_sample_box_decomposition(pareto_fronts=X, maximize=maximize)

            # test single and multi-objective setting
            for num_objectives in (1, 5):
                X = torch.rand(4, 3, num_objectives, **tkwargs)
                bd1 = compute_sample_box_decomposition(
                    pareto_fronts=X, maximize=maximize
                )

                # assess shape
                self.assertTrue(bd1.ndim == 4)
                self.assertTrue(bd1.shape[-1] == num_objectives)
                self.assertTrue(bd1.shape[-3] == 2)
                if num_objectives == 1:
                    self.assertTrue(bd1.shape[-2] == 1)

                # assess whether upper bound is greater than lower bound
                self.assertTrue(torch.all(bd1[:, 1, ...] - bd1[:, 0, ...] >= 0))

                # test constrained setting
                num_constraints = 7
                bd2 = compute_sample_box_decomposition(
                    pareto_fronts=X,
                    maximize=maximize,
                    num_constraints=num_constraints,
                )

                # assess shape
                self.assertTrue(bd2.ndim == 4)
                self.assertTrue(bd2.shape[-1] == num_objectives + num_constraints)
                self.assertTrue(bd2.shape[-2] == bd1.shape[-2] + 1)
                self.assertTrue(bd2.shape[-3] == 2)

                # assess whether upper bound is greater than lower bound
                self.assertTrue(torch.all(bd2[:, 1, ...] - bd2[:, 0, ...] >= 0))

                # the constraint padding should not change the box-decomposition
                # if the box-decomposition procedure is not random
                self.assertTrue(torch.equal(bd1, bd2[..., 0:-1, 0:num_objectives]))

                # test with a specified optimum
                opt_X = 2.0 if maximize else -3.0
                X[:, 0, :] = opt_X
                bd3 = compute_sample_box_decomposition(
                    pareto_fronts=X, maximize=maximize
                )

                # check optimum
                if maximize:
                    self.assertTrue(torch.all(bd3[:, 1, ...] == opt_X))
                else:
                    self.assertTrue(torch.all(bd3[:, 0, ...] == opt_X))


def get_model(
    dtype,
    device,
    num_points,
    input_dim,
    num_objectives,
    use_model_list,
    standardize_model,
):
    torch.manual_seed(123)
    tkwargs = {"dtype": dtype, "device": device}
    train_X = torch.rand(num_points, input_dim, **tkwargs)
    train_Y = torch.rand(num_points, num_objectives, **tkwargs)

    if standardize_model:
        if use_model_list:
            outcome_transform = Standardize(m=1)
        else:
            outcome_transform = Standardize(m=num_objectives)
    else:
        outcome_transform = None

    if use_model_list and num_objectives > 1:
        model = ModelListGP(
            *[
                SingleTaskGP(
                    train_X=train_X,
                    train_Y=train_Y[:, i : i + 1],
                    outcome_transform=outcome_transform,
                )
                for i in range(num_objectives)
            ]
        )
    else:
        model = SingleTaskGP(
            train_X=train_X,
            train_Y=train_Y,
            outcome_transform=outcome_transform,
        )

    return model.eval(), train_X, train_Y


class TestThompsonSampling(BotorchTestCase):
    def test_random_search_optimizer(self):
        torch.manual_seed(1)
        input_dim = 3
        num_initial = 5
        tkwargs = {"device": self.device}
        optimizer_kwargs = {
            "pop_size": 1000,
            "max_tries": 5,
        }

        for (
            dtype,
            maximize,
            num_objectives,
            use_model_list,
            standardize_model,
        ) in product(
            (torch.float, torch.double),
            (True, False),
            (1, 2),
            (False, True),
            (False, True),
        ):
            tkwargs["dtype"] = dtype
            num_points = num_objectives

            model, X, Y = get_model(
                num_points=num_initial,
                input_dim=input_dim,
                num_objectives=num_objectives,
                use_model_list=use_model_list,
                standardize_model=standardize_model,
                **tkwargs,
            )

            model_sample = get_gp_samples(
                model=model,
                num_outputs=num_objectives,
                n_samples=1,
            )

            input_dim = X.shape[-1]
            # fake bounds
            bounds = torch.zeros((2, input_dim), **tkwargs)
            bounds[1] = 1.0

            pareto_set, pareto_front = random_search_optimizer(
                model=model_sample,
                bounds=bounds,
                num_points=num_points,
                maximize=maximize,
                **optimizer_kwargs,
            )

            # check shape
            self.assertTrue(pareto_set.ndim == 2)
            self.assertTrue(pareto_front.ndim == 2)
            self.assertTrue(pareto_set.shape[-1] == X.shape[-1])
            self.assertTrue(pareto_front.shape[-1] == Y.shape[-1])
            self.assertTrue(pareto_front.shape[-2] == pareto_set.shape[-2])
            num_optimal_points = pareto_front.shape[-2]

            # check if samples are non-dominated
            weight = 1.0 if maximize else -1.0
            count = torch.sum(is_non_dominated(Y=weight * pareto_front))
            self.assertTrue(count == num_optimal_points)

        # Ask for more optimal points than query evaluations
        with self.assertRaises(RuntimeError):
            random_search_optimizer(
                model=model_sample,
                bounds=bounds,
                num_points=20,
                maximize=maximize,
                max_tries=1,
                pop_size=10,
            )

    def test_sample_optimal_points(self):
        torch.manual_seed(1)
        input_dim = 3
        num_initial = 5
        tkwargs = {"device": self.device}
        optimizer_kwargs = {
            "pop_size": 100,
            "max_tries": 1,
        }
        num_samples = 2
        num_points = 1

        for (
            dtype,
            maximize,
            num_objectives,
            opt_kwargs,
            use_model_list,
            standardize_model,
        ) in product(
            (torch.float, torch.double),
            (True, False),
            (1, 2),
            (optimizer_kwargs, None),
            (False, True),
            (False, True),
        ):
            tkwargs["dtype"] = dtype

            model, X, Y = get_model(
                num_points=num_initial,
                input_dim=input_dim,
                num_objectives=num_objectives,
                use_model_list=use_model_list,
                standardize_model=standardize_model,
                **tkwargs,
            )

            input_dim = X.shape[-1]
            bounds = torch.zeros((2, input_dim), **tkwargs)
            bounds[1] = 1.0

            # check the error when asking for too many optimal points
            if num_objectives == 1:
                with self.assertRaises(UnsupportedError):
                    sample_optimal_points(
                        model=model,
                        bounds=bounds,
                        num_samples=num_samples,
                        num_points=2,
                        maximize=maximize,
                        optimizer=random_search_optimizer,
                        optimizer_kwargs=opt_kwargs,
                    )

            pareto_sets, pareto_fronts = sample_optimal_points(
                model=model,
                bounds=bounds,
                num_samples=num_samples,
                num_points=num_points,
                maximize=maximize,
                optimizer=random_search_optimizer,
                optimizer_kwargs=opt_kwargs,
            )

            # check shape
            ps_desired_shape = torch.Size([num_samples, num_points, input_dim])
            pf_desired_shape = torch.Size([num_samples, num_points, num_objectives])

            self.assertTrue(pareto_sets.shape == ps_desired_shape)
            self.assertTrue(pareto_fronts.shape == pf_desired_shape)
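# A short usage sketch under stated assumptions (not part of the tests): drawing
# approximate Pareto-optimal points from GP sample paths with
# `sample_optimal_points` and `random_search_optimizer`, as exercised above.
import torch
from botorch.acquisition.multi_objective.utils import (
    random_search_optimizer,
    sample_optimal_points,
)
from botorch.models.gp_regression import SingleTaskGP

torch.manual_seed(0)
train_X = torch.rand(5, 3, dtype=torch.double)
train_Y = torch.rand(5, 2, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y).eval()
bounds = torch.tensor([[0.0] * 3, [1.0] * 3], dtype=torch.double)
ps, pf = sample_optimal_points(
    model=model,
    bounds=bounds,
    num_samples=2,  # number of GP sample paths
    num_points=1,   # Pareto points per sample path
    optimizer=random_search_optimizer,
)
# ps has shape 2 x 1 x 3 (Pareto sets); pf has shape 2 x 1 x 2 (Pareto fronts)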
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import warnings
from unittest import mock

import torch
from botorch import settings
from botorch.acquisition.multi_objective.multi_fidelity import MOMF
from botorch.acquisition.multi_objective.objective import IdentityMCMultiOutputObjective
from botorch.exceptions.errors import BotorchError
from botorch.exceptions.warnings import BotorchWarning
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    FastNondominatedPartitioning,
    NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior


class TestMOMF(BotorchTestCase):
    def test_momf(self):
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            ref_point = [0.0, 0.0]
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            pareto_Y = torch.tensor(
                [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
            )
            partitioning = NondominatedPartitioning(ref_point=t_ref_point)
            # the event shape is `b x q x m` = 1 x 1 x 2
            samples = torch.zeros(1, 1, 2, **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            # test error if there is not pareto_Y initialized in partitioning
            with self.assertRaises(BotorchError):
                MOMF(model=mm, ref_point=ref_point, partitioning=partitioning)
            partitioning.update(Y=pareto_Y)
            # test error if ref point has wrong shape
            with self.assertRaises(ValueError):
                MOMF(model=mm, ref_point=ref_point[:1], partitioning=partitioning)

            X = torch.zeros(1, 1, **tkwargs)
            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            # check ref point
            self.assertTrue(
                torch.equal(acqf.ref_point, torch.tensor(ref_point, **tkwargs))
            )
            # check cached indices
            self.assertTrue(hasattr(acqf, "q_subset_indices"))
            self.assertIn("q_choose_1", acqf.q_subset_indices)
            self.assertTrue(
                torch.equal(
                    acqf.q_subset_indices["q_choose_1"],
                    torch.tensor([[0]], device=self.device),
                )
            )
            # test q=2
            X2 = torch.zeros(2, 1, **tkwargs)
            samples2 = torch.zeros(1, 2, 2, **tkwargs)
            mm2 = MockModel(MockPosterior(samples=samples2))
            acqf.model = mm2
            self.assertEqual(acqf.model, mm2)
            res = acqf(X2)
            self.assertEqual(res.item(), 0.0)
            # check cached indices
            self.assertTrue(hasattr(acqf, "q_subset_indices"))
            self.assertIn("q_choose_1", acqf.q_subset_indices)
            self.assertTrue(
                torch.equal(
                    acqf.q_subset_indices["q_choose_1"],
                    torch.tensor([[0], [1]], device=self.device),
                )
            )
            self.assertIn("q_choose_2", acqf.q_subset_indices)
            self.assertTrue(
                torch.equal(
                    acqf.q_subset_indices["q_choose_2"],
                    torch.tensor([[0, 1]], device=self.device),
                )
            )
            self.assertNotIn("q_choose_3", acqf.q_subset_indices)
            # now back to q=1 and make sure all caches were cleared
            acqf.model = mm
            res = acqf(X)
            self.assertNotIn("q_choose_2", acqf.q_subset_indices)
            self.assertIn("q_choose_1", acqf.q_subset_indices)
            self.assertTrue(
                torch.equal(
                    acqf.q_subset_indices["q_choose_1"],
                    torch.tensor([[0]], device=self.device),
                )
            )

            X = torch.zeros(1, 1, **tkwargs)
            samples = torch.zeros(1, 1, 2, **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            # basic test
            sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345)
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 2]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc
            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2]))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 2]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            # get mm sample shape to match shape of X + X_pending
            acqf.model._posterior._samples = torch.zeros(1, 2, 2, **tkwargs)
            res = acqf(X)
            X2 = torch.zeros(1, 1, 1, requires_grad=True, **tkwargs)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))

            # test objective
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
                objective=IdentityMCMultiOutputObjective(),
            )
            # get mm sample shape to match shape of X
            acqf.model._posterior._samples = torch.zeros(1, 1, 2, **tkwargs)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # Test that the hypervolume improvement is correct for given sample
            # test q = 1
            X = torch.zeros(1, 1, **tkwargs)
            # basic test
            samples = torch.tensor([[[6.5, 4.5]]], **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 1.5)
            # test q = 1, does not contribute
            samples = torch.tensor([0.0, 1.0], **tkwargs).view(1, 1, 2)
            sampler = IIDNormalSampler(sample_shape=torch.Size([1]))
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test q = 2, both points contribute
            X = torch.zeros(2, 1, **tkwargs)
            samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            # since q = 2, the fidelity cost is 0 but the fixed cost is 1 per point,
            # hence the total cost is 2. MOMF defaults to an affine cost model.
            self.assertEqual(res.item(), 1.75 / 2)
            # test q = 2, only 1 point contributes
            samples = torch.tensor([[6.5, 4.5], [6.0, 4.0]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 1.5 / 2)
            # test q = 2, neither contributes
            samples = torch.tensor([[2.0, 2.0], [0.0, 0.1]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test q = 2, test point better than current best second objective
            samples = torch.tensor([[6.5, 4.5], [6.0, 6.0]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 8.0 / 2)
            # test q = 2, test point better than current-best first objective
            samples = torch.tensor([[6.5, 4.5], [9.0, 2.0]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 2.0 / 2)
            # test q = 3, all contribute
            X = torch.zeros(3, 1, **tkwargs)
            samples = torch.tensor(
                [[6.5, 4.5], [9.0, 2.0], [7.0, 4.0]], **tkwargs
            ).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            # since q = 3, the fidelity cost is 0 but the fixed cost is 1 per point,
            # hence the total cost is 3.
            self.assertEqual(res.item(), 2.25 / 3)
            # test q = 3, not all contribute
            samples = torch.tensor(
                [[6.5, 4.5], [9.0, 2.0], [7.0, 5.0]], **tkwargs
            ).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertTrue(
                torch.allclose(res, torch.tensor(3.5 / 3, **tkwargs), atol=1e-15)
            )
            # test q = 3, none contribute
            samples = torch.tensor(
                [[0.0, 4.5], [1.0, 2.0], [3.0, 0.0]], **tkwargs
            ).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test m = 3, q = 1
            pareto_Y = torch.tensor(
                [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0], [1.0, 3.0, 4.0]],
                **tkwargs,
            )
            ref_point = [-1.0] * 3
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
            samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            X = torch.zeros(1, 2, **tkwargs)
            res = acqf(X)
            self.assertEqual(res.item(), 12.0)
            # test m = 3, q = 1; X is ones, so fidelity + fixed cost is 2
            pareto_Y = torch.tensor(
                [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0], [1.0, 3.0, 4.0]],
                **tkwargs,
            )
            ref_point = [-1.0] * 3
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
            samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            X = torch.ones(1, 2, **tkwargs)
            res = acqf(X)
            self.assertEqual(res.item(), 12.0 / 2)
            # test m = 3, q = 1, with a custom cost callable
            pareto_Y = torch.tensor(
                [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0], [1.0, 3.0, 4.0]],
                **tkwargs,
            )
            ref_point = [-1.0] * 3
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
            samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))

            def cost(x):
                return (6 * x[..., -1]).unsqueeze(-1)

            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
                cost_call=cost,
            )
            X = torch.ones(1, 2, **tkwargs)
            res = acqf(X)
            self.assertEqual(res.item(), 12.0 / 6)

            # change reference point
            ref_point = [0.0] * 3
            X = torch.zeros(1, 2, **tkwargs)
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 4.0)

            # test m = 3, no contribution
            ref_point = [1.0] * 3
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test m = 3, q = 2
            pareto_Y = torch.tensor(
                [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs
            )
            samples = torch.tensor(
                [[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]], **tkwargs
            ).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            ref_point = [-1.0] * 3
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            X = torch.zeros(2, 2, **tkwargs)
            res = acqf(X)
            self.assertEqual(res.item(), 22.0 / 2)

            # test batched model
            pareto_Y = torch.tensor(
                [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs
            )
            samples = torch.tensor(
                [[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]], **tkwargs
            ).unsqueeze(0)
            samples = torch.stack([samples, samples + 1], dim=1)
            mm = MockModel(MockPosterior(samples=samples))
            ref_point = [-1.0] * 3
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y)
            acqf = MOMF(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            X = torch.zeros(2, 2, **tkwargs)
            res = acqf(X)
            self.assertTrue(
                torch.equal(
                    res,
                    # batch_shape x model_batch_shape
                    torch.tensor([[22.0, 60.0]], **tkwargs) / 2,
                )
            )

            # test batched model with batched partitioning with multiple batch dims
            pareto_Y = torch.tensor(
                [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs
            )
            pareto_Y = torch.stack(
                [
                    pareto_Y,
                    pareto_Y + 0.5,
                ],
                dim=0,
            )
            samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]], **tkwargs).unsqueeze(0)
            samples = torch.stack([samples, samples + 1], dim=1)
            mm = MockModel(MockPosterior(samples=samples))
            ref_point = [-1.0] * 2
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            partitioning = FastNondominatedPartitioning(
                ref_point=t_ref_point, Y=pareto_Y
            )
            cell_bounds = partitioning.get_hypercell_bounds().unsqueeze(1)
            with mock.patch.object(
                partitioning, "get_hypercell_bounds", return_value=cell_bounds
            ):
                acqf = MOMF(
                    model=mm,
                    ref_point=ref_point,
                    partitioning=partitioning,
                    sampler=sampler,
                )
                # test multiple batch dims
                self.assertEqual(
                    acqf.cell_lower_bounds.shape, torch.Size([1, 2, 4, 2])
                )
                self.assertEqual(
                    acqf.cell_upper_bounds.shape, torch.Size([1, 2, 4, 2])
                )
            X = torch.zeros(2, 2, **tkwargs)
            res = acqf(X)
            self.assertTrue(
                torch.equal(
                    res,
                    # batch_shape x model_batch_shape
                    torch.tensor(
                        [[1.75, 3.5]], dtype=samples.dtype, device=samples.device
                    )
                    / 2,
                )
            )
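# A hedged MOMF usage sketch with a real model (the tests above use mocks). The
# last input column is assumed to be the fidelity; MOMF's default affine cost
# model charges roughly `1 + fidelity` per point, which is why the mock tests
# divide the hypervolume improvement by the total cost.
import torch
from botorch.acquisition.multi_objective.multi_fidelity import MOMF
from botorch.models.gp_regression import SingleTaskGP
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    FastNondominatedPartitioning,
)

train_X = torch.rand(8, 3, dtype=torch.double)  # last column: fidelity in [0, 1]
train_Y = torch.rand(8, 2, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)
ref_point = [0.0, 0.0]
partitioning = FastNondominatedPartitioning(
    ref_point=torch.tensor(ref_point, dtype=torch.double), Y=train_Y
)
momf = MOMF(model=model, ref_point=ref_point, partitioning=partitioning)
value = momf(torch.rand(1, 3, dtype=torch.double))  # cost-weighted qEHVI value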
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings from copy import deepcopy from itertools import product from math import pi from unittest import mock import torch from botorch import settings from botorch.acquisition.cached_cholesky import _get_cache_root_not_supported_message from botorch.acquisition.multi_objective.monte_carlo import ( MultiObjectiveMCAcquisitionFunction, qExpectedHypervolumeImprovement, qNoisyExpectedHypervolumeImprovement, ) from botorch.acquisition.multi_objective.multi_output_risk_measures import ( MultiOutputRiskMeasureMCObjective, ) from botorch.acquisition.multi_objective.objective import ( GenericMCMultiOutputObjective, IdentityMCMultiOutputObjective, MCMultiOutputObjective, ) from botorch.acquisition.objective import IdentityMCObjective from botorch.exceptions.errors import BotorchError, UnsupportedError from botorch.exceptions.warnings import BotorchWarning from botorch.models import ( GenericDeterministicModel, HigherOrderGP, KroneckerMultiTaskGP, MultiTaskGP, ) from botorch.models.gp_regression import SingleTaskGP from botorch.models.transforms.input import InputPerturbation from botorch.models.transforms.outcome import Standardize from botorch.posteriors.posterior_list import PosteriorList from botorch.posteriors.transformed import TransformedPosterior from botorch.sampling.list_sampler import ListSampler from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler from botorch.utils.low_rank import sample_cached_cholesky from botorch.utils.multi_objective.box_decompositions.dominated import ( DominatedPartitioning, ) from botorch.utils.multi_objective.box_decompositions.non_dominated import ( FastNondominatedPartitioning, NondominatedPartitioning, ) from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior from botorch.utils.transforms import match_batch_shape, standardize class DummyMultiObjectiveMCAcquisitionFunction(MultiObjectiveMCAcquisitionFunction): def forward(self, X): pass class DummyMCMultiOutputObjective(MCMultiOutputObjective): def forward(self, samples, X=None): if X is not None: return samples[..., : X.shape[-2], :] else: return samples class TestMultiObjectiveMCAcquisitionFunction(BotorchTestCase): def test_abstract_raises(self): with self.assertRaises(TypeError): MultiObjectiveMCAcquisitionFunction() def test_init(self): mm = MockModel(MockPosterior(mean=torch.rand(2, 1), samples=torch.rand(2, 1))) # test default init acqf = DummyMultiObjectiveMCAcquisitionFunction(model=mm) self.assertIsInstance(acqf.objective, IdentityMCMultiOutputObjective) self.assertIsNone(acqf.sampler) # Initialize the sampler. 
acqf.get_posterior_samples(mm.posterior(torch.ones(1, 1))) self.assertEqual(acqf.sampler.sample_shape, torch.Size([128])) self.assertIsNone(acqf.X_pending) # test custom init sampler = SobolQMCNormalSampler(sample_shape=torch.Size([64])) objective = DummyMCMultiOutputObjective() X_pending = torch.rand(2, 1) acqf = DummyMultiObjectiveMCAcquisitionFunction( model=mm, sampler=sampler, objective=objective, X_pending=X_pending ) self.assertEqual(acqf.objective, objective) self.assertEqual(acqf.sampler, sampler) self.assertTrue(torch.equal(acqf.X_pending, X_pending)) # test unsupported objective with self.assertRaises(UnsupportedError): DummyMultiObjectiveMCAcquisitionFunction( model=mm, objective=IdentityMCObjective() ) # test constraints with input perturbation. mm.input_transform = InputPerturbation(perturbation_set=torch.rand(2, 1)) with self.assertRaises(UnsupportedError): DummyMultiObjectiveMCAcquisitionFunction( model=mm, constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])] ) class TestQExpectedHypervolumeImprovement(BotorchTestCase): def test_q_expected_hypervolume_improvement(self): tkwargs = {"device": self.device} for dtype in (torch.float, torch.double): tkwargs["dtype"] = dtype ref_point = [0.0, 0.0] t_ref_point = torch.tensor(ref_point, **tkwargs) pareto_Y = torch.tensor( [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs ) partitioning = NondominatedPartitioning(ref_point=t_ref_point) # the event shape is `b x q x m` = 1 x 1 x 2 samples = torch.zeros(1, 1, 2, **tkwargs) mm = MockModel(MockPosterior(samples=samples)) # test error if there is not pareto_Y initialized in partitioning with self.assertRaises(BotorchError): qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning ) partitioning.update(Y=pareto_Y) # test error if ref point has wrong shape with self.assertRaises(ValueError): qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point[:1], partitioning=partitioning ) X = torch.zeros(1, 1, **tkwargs) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 0.0) # check ref point self.assertTrue( torch.equal(acqf.ref_point, torch.tensor(ref_point, **tkwargs)) ) # check cached indices self.assertTrue(hasattr(acqf, "q_subset_indices")) self.assertIn("q_choose_1", acqf.q_subset_indices) self.assertTrue( torch.equal( acqf.q_subset_indices["q_choose_1"], torch.tensor([[0]], device=self.device), ) ) # test q=2 X2 = torch.zeros(2, 1, **tkwargs) samples2 = torch.zeros(1, 2, 2, **tkwargs) mm2 = MockModel(MockPosterior(samples=samples2)) acqf.model = mm2 self.assertEqual(acqf.model, mm2) self.assertIn("model", acqf._modules) self.assertEqual(acqf._modules["model"], mm2) res = acqf(X2) self.assertEqual(res.item(), 0.0) # check cached indices self.assertTrue(hasattr(acqf, "q_subset_indices")) self.assertIn("q_choose_1", acqf.q_subset_indices) self.assertTrue( torch.equal( acqf.q_subset_indices["q_choose_1"], torch.tensor([[0], [1]], device=self.device), ) ) self.assertIn("q_choose_2", acqf.q_subset_indices) self.assertTrue( torch.equal( acqf.q_subset_indices["q_choose_2"], torch.tensor([[0, 1]], device=self.device), ) ) self.assertNotIn("q_choose_3", acqf.q_subset_indices) # now back to 1 and sure all caches were cleared acqf.model = mm res = acqf(X) self.assertNotIn("q_choose_2", acqf.q_subset_indices) self.assertIn("q_choose_1", acqf.q_subset_indices) 
self.assertTrue( torch.equal( acqf.q_subset_indices["q_choose_1"], torch.tensor([[0]], device=self.device), ) ) X = torch.zeros(1, 1, **tkwargs) samples = torch.zeros(1, 1, 2, **tkwargs) mm = MockModel(MockPosterior(samples=samples)) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([2]), seed=12345) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 0.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 2])) bs = acqf.sampler.base_samples.clone() res = acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # basic test, qmc sampler = SobolQMCNormalSampler(sample_shape=torch.Size([2])) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 0.0) self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 2])) bs = acqf.sampler.base_samples.clone() acqf(X) self.assertTrue(torch.equal(acqf.sampler.base_samples, bs)) # basic test for X_pending and warning acqf.set_X_pending() self.assertIsNone(acqf.X_pending) acqf.set_X_pending(None) self.assertIsNone(acqf.X_pending) acqf.set_X_pending(X) self.assertEqual(acqf.X_pending, X) # get mm sample shape to match shape of X + X_pending acqf.model._posterior._samples = torch.zeros(1, 2, 2, **tkwargs) res = acqf(X) X2 = torch.zeros(1, 1, 1, requires_grad=True, **tkwargs) with warnings.catch_warnings(record=True) as ws, settings.debug(True): acqf.set_X_pending(X2) self.assertEqual(acqf.X_pending, X2) self.assertEqual( sum(issubclass(w.category, BotorchWarning) for w in ws), 1 ) # test objective acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, objective=IdentityMCMultiOutputObjective(), ) # get mm sample shape to match shape of X acqf.model._posterior._samples = torch.zeros(1, 1, 2, **tkwargs) res = acqf(X) self.assertEqual(res.item(), 0.0) # Test that the hypervolume improvement is correct for given sample # test q = 1 X = torch.zeros(1, 1, **tkwargs) # basic test samples = torch.tensor([[[6.5, 4.5]]], **tkwargs) mm = MockModel(MockPosterior(samples=samples)) sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 1.5) # test q = 1, does not contribute samples = torch.tensor([0.0, 1.0], **tkwargs).view(1, 1, 2) sampler = IIDNormalSampler(sample_shape=torch.Size([1])) mm = MockModel(MockPosterior(samples=samples)) acqf.model = mm res = acqf(X) self.assertEqual(res.item(), 0.0) # test q = 2, both points contribute X = torch.zeros(2, 1, **tkwargs) samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]], **tkwargs).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf.model = mm res = acqf(X) self.assertEqual(res.item(), 1.75) # test q = 2, only 1 point contributes samples = torch.tensor([[6.5, 4.5], [6.0, 4.0]], **tkwargs).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf.model = mm res = acqf(X) self.assertEqual(res.item(), 1.5) # test q = 2, neither contributes samples = torch.tensor([[2.0, 2.0], [0.0, 0.1]], **tkwargs).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf.model = mm res = acqf(X) self.assertEqual(res.item(), 0.0) # test q = 2, test point better than current best second objective samples = torch.tensor([[6.5, 4.5], [6.0, 
6.0]], **tkwargs).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf.model = mm res = acqf(X) self.assertEqual(res.item(), 8.0) # test q = 2, test point better than current-best first objective samples = torch.tensor([[6.5, 4.5], [9.0, 2.0]], **tkwargs).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 2.0) # test q = 3, all contribute X = torch.zeros(3, 1, **tkwargs) samples = torch.tensor( [[6.5, 4.5], [9.0, 2.0], [7.0, 4.0]], **tkwargs ).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 2.25) # test q = 3, not all contribute samples = torch.tensor( [[6.5, 4.5], [9.0, 2.0], [7.0, 5.0]], **tkwargs ).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 3.5) # test q = 3, none contribute samples = torch.tensor( [[0.0, 4.5], [1.0, 2.0], [3.0, 0.0]], **tkwargs ).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 0.0) # test m = 3, q=1 pareto_Y = torch.tensor( [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0], [1.0, 3.0, 4.0]], **tkwargs, ) ref_point = [-1.0] * 3 t_ref_point = torch.tensor(ref_point, **tkwargs) partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y) samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) X = torch.zeros(1, 2, **tkwargs) res = acqf(X) self.assertEqual(res.item(), 12.0) # change reference point ref_point = [0.0] * 3 t_ref_point = torch.tensor(ref_point, **tkwargs) partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 4.0) # test m = 3, no contribution ref_point = [1.0] * 3 t_ref_point = torch.tensor(ref_point, **tkwargs) partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) res = acqf(X) self.assertEqual(res.item(), 0.0) # test m = 3, q = 2 pareto_Y = torch.tensor( [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs ) samples = torch.tensor( [[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]], **tkwargs ).unsqueeze(0) mm = MockModel(MockPosterior(samples=samples)) ref_point = [-1.0] * 3 t_ref_point = torch.tensor(ref_point, **tkwargs) partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) X = torch.zeros(2, 2, **tkwargs) res = acqf(X) self.assertEqual(res.item(), 22.0) # test batched model pareto_Y = torch.tensor( [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs ) samples = torch.tensor( [[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]], **tkwargs 
).unsqueeze(0) samples = torch.stack([samples, samples + 1], dim=1) mm = MockModel(MockPosterior(samples=samples)) ref_point = [-1.0] * 3 t_ref_point = torch.tensor(ref_point, **tkwargs) partitioning = NondominatedPartitioning(ref_point=t_ref_point, Y=pareto_Y) acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) X = torch.zeros(2, 2, **tkwargs) res = acqf(X) self.assertTrue( torch.equal( res, # batch_shape x model_batch_shape torch.tensor([[22.0, 60.0]], **tkwargs), ) ) # test batched model with batched partitioning with multiple batch dims pareto_Y = torch.tensor( [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs ) pareto_Y = torch.stack( [ pareto_Y, pareto_Y + 0.5, ], dim=0, ) samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]], **tkwargs).unsqueeze(0) samples = torch.stack([samples, samples + 1], dim=1) mm = MockModel(MockPosterior(samples=samples)) ref_point = [-1.0] * 2 t_ref_point = torch.tensor(ref_point, **tkwargs) partitioning = FastNondominatedPartitioning( ref_point=t_ref_point, Y=pareto_Y ) cell_bounds = partitioning.get_hypercell_bounds().unsqueeze(1) with mock.patch.object( partitioning, "get_hypercell_bounds", return_value=cell_bounds ): acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, ) # test multiple batch dims self.assertEqual(acqf.cell_lower_bounds.shape, torch.Size([1, 2, 4, 2])) self.assertEqual(acqf.cell_upper_bounds.shape, torch.Size([1, 2, 4, 2])) X = torch.zeros(2, 2, **tkwargs) res = acqf(X) self.assertTrue( torch.equal( res, # batch_shape x model_batch_shape torch.tensor( [[1.75, 3.5]], dtype=samples.dtype, device=samples.device ), ) ) def test_constrained_q_expected_hypervolume_improvement(self): for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} ref_point = [0.0, 0.0] t_ref_point = torch.tensor(ref_point, **tkwargs) pareto_Y = torch.tensor( [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs ) partitioning = NondominatedPartitioning(ref_point=t_ref_point) partitioning.update(Y=pareto_Y) # test q=1 # the event shape is `b x q x m` = 1 x 1 x 2 samples = torch.tensor([[[6.5, 4.5]]], **tkwargs) mm = MockModel(MockPosterior(samples=samples)) sampler = IIDNormalSampler(sample_shape=torch.Size([1])) X = torch.zeros(1, 1, **tkwargs) # test zero slack for eta in (1e-1, 1e-2): expected_values = [0.5 * 1.5, 0.5 * 0.5 * 1.5] for i, constraints in enumerate( [ [lambda Z: torch.zeros_like(Z[..., -1])], [ lambda Z: torch.zeros_like(Z[..., -1]), lambda Z: torch.zeros_like(Z[..., -1]), ], ] ): acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, constraints=constraints, eta=eta, ) res = acqf(X) self.assertAlmostEqual(res.item(), expected_values[i], places=4) # test multiple constraints one and multiple etas constraints = [ lambda Z: torch.ones_like(Z[..., -1]), lambda Z: torch.ones_like(Z[..., -1]), ] etas = [1, torch.tensor([1, 10])] expected_values = [ ( torch.sigmoid(torch.as_tensor(-1.0)) * torch.sigmoid(torch.as_tensor(-1.0)) * 1.5 ).item(), ( torch.sigmoid(torch.as_tensor(-1.0)) * torch.sigmoid(torch.as_tensor(-1.0 / 10.0)) * 1.5 ).item(), ] for eta, expected_value in zip(etas, expected_values): acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, constraints=constraints, eta=eta, ) res = acqf(X) self.assertAlmostEqual( res.item(), 
expected_value, places=4, ) # test feasible acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])], eta=1e-3, ) res = acqf(X) self.assertAlmostEqual(res.item(), 1.5, places=4) # test infeasible acqf = qExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning, sampler=sampler, constraints=[lambda Z: 100.0 * torch.ones_like(Z[..., -1])], eta=1e-3, ) res = acqf(X) self.assertAlmostEqual(res.item(), 0.0, places=4) # TODO: Test non-trivial constraint values, multiple constraints, and q > 1 class TestQNoisyExpectedHypervolumeImprovement(BotorchTestCase): def setUp(self): self.ref_point = [0.0, 0.0, 0.0] self.Y_raw = torch.tensor( [ [2.0, 0.5, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0], ], device=self.device, ) self.pareto_Y_raw = torch.tensor( [ [2.0, 0.5, 1.0], [1.0, 2.0, 1.0], ], device=self.device, ) super().setUp() def test_q_noisy_expected_hypervolume_improvement(self): tkwargs = {"device": self.device} for dtype, m in product( (torch.float, torch.double), (1, 2, 3), ): tkwargs["dtype"] = dtype ref_point = self.ref_point[:m] Y = self.Y_raw[:, :m].to(**tkwargs) pareto_Y = self.pareto_Y_raw[:, :m].to(**tkwargs) X_baseline = torch.rand(Y.shape[0], 1, **tkwargs) # the event shape is `b x q + r x m` = 1 x 1 x 2 baseline_samples = Y samples = torch.cat( [baseline_samples.unsqueeze(0), torch.zeros(1, 1, m, **tkwargs)], dim=1, ) mm = MockModel(MockPosterior(samples=baseline_samples)) X = torch.zeros(1, 1, **tkwargs) # basic test sampler = IIDNormalSampler(sample_shape=torch.Size([1])) # test error is raised if m == 1 if m == 1: with self.assertRaisesRegex( ValueError, "qNoisyExpectedHypervolumeImprovement supports m>=2 outcomes ", ): acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, cache_root=False, ) continue acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, cache_root=False, ) # set the MockPosterior to use samples over baseline points and new # candidates acqf.model._posterior._samples = samples res = acqf(X) self.assertEqual(res.item(), 0.0) # check ref point self.assertTrue( torch.equal(acqf.ref_point, torch.tensor(ref_point, **tkwargs)) ) # check cached indices self.assertTrue(hasattr(acqf, "q_subset_indices")) self.assertIn("q_choose_1", acqf.q_subset_indices) self.assertTrue( torch.equal( acqf.q_subset_indices["q_choose_1"], torch.tensor([[0]], device=self.device), ) ) # test q=2 X2 = torch.zeros(2, 1, **tkwargs) samples2 = torch.cat( [baseline_samples.unsqueeze(0), torch.zeros(1, 2, m, **tkwargs)], dim=1, ) mm2 = MockModel(MockPosterior(samples=baseline_samples)) sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm2, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, cache_root=False, ) # set the MockPosterior to use samples over baseline points and new # candidates acqf.model._posterior._samples = samples2 res = acqf(X2) self.assertEqual(res.item(), 0.0) # check cached indices self.assertTrue(hasattr(acqf, "q_subset_indices")) self.assertIn("q_choose_1", acqf.q_subset_indices) self.assertTrue( torch.equal( acqf.q_subset_indices["q_choose_1"], torch.tensor([[0], [1]], device=self.device), ) ) self.assertIn("q_choose_2", acqf.q_subset_indices) self.assertTrue( torch.equal( acqf.q_subset_indices["q_choose_2"], torch.tensor([[0, 1]], 
device=self.device), ) ) self.assertNotIn("q_choose_3", acqf.q_subset_indices) # now back to q=1 and make sure all caches were cleared acqf.model = mm res = acqf(X) self.assertNotIn("q_choose_2", acqf.q_subset_indices) self.assertIn("q_choose_1", acqf.q_subset_indices) self.assertTrue( torch.equal( acqf.q_subset_indices["q_choose_1"], torch.tensor([[0]], device=self.device), ) ) # test error is raised if X_baseline is batched sampler = IIDNormalSampler(sample_shape=torch.Size([1])) with self.assertRaises(UnsupportedError): qNoisyExpectedHypervolumeImprovement( model=mm2, ref_point=ref_point, X_baseline=X_baseline.unsqueeze(0), sampler=sampler, cache_root=False, ) # test objective # set the MockPosterior to use samples over baseline points mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), cache_root=False, ) # sample_shape x n x m original_base_samples = sampler.base_samples.detach().clone() # set the MockPosterior to use samples over baseline points and new # candidates mm._posterior._samples = samples res = acqf(X) self.assertEqual(res.item(), 0.0) # test that original base samples were retained self.assertTrue( torch.equal( # sample_shape x batch_shape x n x m sampler.base_samples[0, 0, : original_base_samples.shape[1], :], original_base_samples[0], ) ) # test that base_samples for X_baseline are fixed # set the MockPosterior to use samples over baseline points mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, cache_root=False, ) orig_base_sampler = deepcopy(acqf.base_sampler) # set the MockPosterior to use samples over baseline points and new # candidates mm._posterior._samples = samples with torch.no_grad(): acqf(X) self.assertTrue( torch.equal( orig_base_sampler.base_samples, acqf.base_sampler.base_samples ) ) self.assertTrue( torch.allclose( acqf.base_sampler.base_samples, acqf.sampler.base_samples[..., : X_baseline.shape[0], :], ) ) mm._posterior._samples = baseline_samples # test empty pareto set ref_point2 = [15.0, 14.0, 16.0][:m] sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point2, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), cache_root=False, ) self.assertTrue((acqf.cell_lower_bounds[..., 0] == 15).all()) self.assertTrue((acqf.cell_lower_bounds[..., 1] == 14).all()) if m == 3: self.assertTrue((acqf.cell_lower_bounds[..., 2] == 16).all()) self.assertTrue(torch.isinf(acqf.cell_upper_bounds).all()) for b in (acqf.cell_lower_bounds, acqf.cell_upper_bounds): self.assertEqual(list(b.shape), [1, 1, m]) # test no baseline points ref_point2 = [15.0, 14.0, 16.0][:m] sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point2, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), prune_baseline=True, cache_root=False, ) self.assertTrue((acqf.cell_lower_bounds[..., 0] == 15).all()) self.assertTrue((acqf.cell_lower_bounds[..., 1] == 14).all())
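# With no baseline point better than the elevated ref point, the box
# decomposition degenerates to a single cell spanning [ref_point, inf),
# which the bounds checks around this comment verify coordinate-wise.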
self.assertTrue(torch.isinf(acqf.cell_upper_bounds).all()) for b in (acqf.cell_lower_bounds, acqf.cell_upper_bounds): self.assertEqual(list(b.shape), [1, 1, m]) # test X_pending with CBD for incremental_nehvi in (False, True): mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), incremental_nehvi=incremental_nehvi, cache_root=False, ) original_base_samples = sampler.base_samples.detach().clone() # the box decomposition algorithm is faster on the CPU for m>2, # so NEHVI runs it on the CPU expected_pareto_Y = pareto_Y if m == 2 else pareto_Y.cpu() self.assertTrue( torch.equal(acqf.partitioning.pareto_Y[0], expected_pareto_Y) ) self.assertIsNone(acqf.X_pending) new_Y = torch.tensor( [[0.5, 3.0, 0.5][:m]], dtype=dtype, device=self.device ) mm._posterior._samples = torch.cat( [ baseline_samples, new_Y, ] ).unsqueeze(0) bd = DominatedPartitioning( ref_point=torch.tensor(ref_point).to(**tkwargs), Y=pareto_Y ) initial_hv = bd.compute_hypervolume() # test _initial_hvs if not incremental_nehvi: self.assertTrue(hasattr(acqf, "_initial_hvs")) self.assertTrue(torch.equal(acqf._initial_hvs, initial_hv.view(-1))) # test forward X_test = torch.rand(1, 1, dtype=dtype, device=self.device) with torch.no_grad(): val = acqf(X_test) bd.update(mm._posterior._samples[0, -1:]) expected_val = bd.compute_hypervolume() - initial_hv self.assertTrue(torch.equal(val, expected_val.view(-1))) # test that original base_samples were retained self.assertTrue( torch.equal( # sample_shape x batch_shape x n x m sampler.base_samples[0, 0, : original_base_samples.shape[1], :], original_base_samples[0], ) ) # test X_pending mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), incremental_nehvi=incremental_nehvi, cache_root=False, ) # sample_shape x n x m original_base_samples = sampler.base_samples.detach().clone() mm._posterior._samples = torch.cat( [ baseline_samples, new_Y, ], dim=0, ) X_pending = torch.rand(1, 1, dtype=dtype, device=self.device) acqf.set_X_pending(X_pending) if not incremental_nehvi: self.assertTrue(torch.equal(expected_val, acqf._prev_nehvi)) self.assertIsNone(acqf.X_pending) # check that X_baseline has been updated self.assertTrue(torch.equal(acqf.X_baseline[:-1], acqf._X_baseline)) self.assertTrue(torch.equal(acqf.X_baseline[-1:], X_pending)) # check that partitioning has been updated acqf_pareto_Y = acqf.partitioning.pareto_Y[0] # the box decomposition algorithm is faster on the CPU for m>2, # so NEHVI runs it on the CPU self.assertTrue(torch.equal(acqf_pareto_Y[:-1], expected_pareto_Y)) expected_new_Y = new_Y if m == 2 else new_Y.cpu() self.assertTrue(torch.equal(acqf_pareto_Y[-1:], expected_new_Y)) # test that base samples were retained self.assertTrue( torch.equal( # sample_shape x n x m sampler.base_samples[0, : original_base_samples.shape[1], :], original_base_samples[0], ) ) self.assertTrue( torch.equal( acqf.sampler.base_samples, acqf.base_sampler.base_samples, ) ) # test incremental nehvi in forward new_Y2 = torch.cat( [ new_Y, torch.tensor( [[0.25, 9.5, 1.5][:m]], dtype=dtype, device=self.device ), ], dim=0, ) mm._posterior._samples = torch.cat( [
baseline_samples, new_Y2, ] ).unsqueeze(0) X_test = torch.rand(1, 1, dtype=dtype, device=self.device) with torch.no_grad(): val = acqf(X_test) if incremental_nehvi: # set initial hv to include X_pending initial_hv = bd.compute_hypervolume() bd.update(mm._posterior._samples[0, -1:]) expected_val = bd.compute_hypervolume() - initial_hv self.assertTrue(torch.equal(val, expected_val.view(-1))) # add another point X_pending2 = torch.cat( [X_pending, torch.rand(1, 1, dtype=dtype, device=self.device)], dim=0 ) mm._posterior._samples = mm._posterior._samples.squeeze(0) acqf.set_X_pending(X_pending2) self.assertIsNone(acqf.X_pending) # check that X_baseline has been updated self.assertTrue(torch.equal(acqf.X_baseline[:-2], acqf._X_baseline)) self.assertTrue(torch.equal(acqf.X_baseline[-2:], X_pending2)) # check that partitioning has been updated acqf_pareto_Y = acqf.partitioning.pareto_Y[0] self.assertTrue(torch.equal(acqf_pareto_Y[:-2], expected_pareto_Y)) expected_new_Y2 = new_Y2 if m == 2 else new_Y2.cpu() self.assertTrue(torch.equal(acqf_pareto_Y[-2:], expected_new_Y2)) # test set X_pending with grad # Get posterior samples to agree with X_pending mm._posterior._samples = torch.zeros(1, 7, m, **tkwargs) with warnings.catch_warnings(record=True) as ws, settings.debug(True): acqf.set_X_pending( torch.cat([X_pending2, X_pending2], dim=0).requires_grad_(True) ) self.assertIsNone(acqf.X_pending) self.assertEqual( sum(issubclass(w.category, BotorchWarning) for w in ws), 1 ) # test max iep mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), incremental_nehvi=False, max_iep=1, cache_root=False, ) mm._posterior._samples = torch.cat( [ baseline_samples, new_Y, ] ) acqf.set_X_pending(X_pending) self.assertTrue(torch.equal(acqf.X_pending, X_pending)) acqf_pareto_Y = acqf.partitioning.pareto_Y[0] self.assertTrue(torch.equal(acqf_pareto_Y, expected_pareto_Y)) mm._posterior._samples = torch.cat( [ baseline_samples, new_Y2, ] ) # check that after second pending point is added, X_pending is set to None # and the pending points are included in the box decompositions acqf.set_X_pending(X_pending2) self.assertIsNone(acqf.X_pending) acqf_pareto_Y = acqf.partitioning.pareto_Y[0] self.assertTrue(torch.equal(acqf_pareto_Y[:-2], expected_pareto_Y)) self.assertTrue(torch.equal(acqf_pareto_Y[-2:], expected_new_Y2)) # test qNEHVI without CBD mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), cache_pending=False, cache_root=False, ) mm._posterior._samples = torch.cat( [ baseline_samples, new_Y, ] ).unsqueeze(0) X_pending10 = X_pending.expand(10, 1) acqf.set_X_pending(X_pending10) self.assertTrue(torch.equal(acqf.X_pending, X_pending10)) acqf_pareto_Y = acqf.partitioning.pareto_Y[0] self.assertTrue(torch.equal(acqf_pareto_Y, expected_pareto_Y)) acqf.set_X_pending(X_pending) mm._posterior._samples = torch.cat( [ baseline_samples, new_Y2, ] ).unsqueeze(0) with torch.no_grad(): val = acqf(X_test) bd = DominatedPartitioning( ref_point=torch.tensor(ref_point).to(**tkwargs), Y=pareto_Y ) initial_hv = bd.compute_hypervolume() bd.update(mm._posterior._samples.squeeze(0)) expected_val = bd.compute_hypervolume() - initial_hv 
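# Without pending-point caching (cache_pending=False), the qNEHVI value is
# the joint, non-incremental hypervolume improvement of all new points over
# the baseline Pareto front, which the DominatedPartitioning above recomputes
# by hand for comparison.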
self.assertTrue(torch.equal(expected_val.view(1), val)) # test alpha > 0 mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), cache_pending=False, alpha=1e-3, cache_root=False, ) if len(ref_point) == 2: partitioning = acqf.partitioning else: partitioning = acqf.partitioning.box_decompositions[0] self.assertIsInstance(partitioning, NondominatedPartitioning) self.assertEqual(partitioning.alpha, 1e-3) # test set_X_pending when X_pending = None acqf.set_X_pending(X_pending10) self.assertTrue(torch.equal(acqf.X_pending, X_pending10)) acqf.set_X_pending(None) self.assertIsNone(acqf.X_pending) # test X_pending is not None on __init__ mm._posterior._samples = torch.zeros(1, 5, m, **tkwargs) sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, objective=IdentityMCMultiOutputObjective(), alpha=1e-3, X_pending=X_pending2, cache_root=False, ) self.assertTrue(torch.equal(X_baseline, acqf._X_baseline)) self.assertTrue(torch.equal(acqf.X_baseline[:-2], acqf._X_baseline)) self.assertTrue(torch.equal(acqf.X_baseline[-2:], X_pending2)) def test_constrained_q_noisy_expected_hypervolume_improvement(self): # TODO: improve tests with constraints for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} ref_point = [0.0, 0.0] pareto_Y = torch.tensor( [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs ) X_baseline = torch.zeros(pareto_Y.shape[0], 1, **tkwargs) baseline_samples = pareto_Y # test q=1 # the event shape is `b x q x m` = 1 x 1 x 2 samples = torch.cat( [ baseline_samples.unsqueeze(0), torch.tensor([[[6.5, 4.5]]], **tkwargs), ], dim=1, ) mm = MockModel(MockPosterior(samples=baseline_samples)) X = torch.zeros(1, 1, **tkwargs) # test zero slack multiple constraints, multiple etas for eta in [1e-1, 1e-2, torch.tensor([1.0, 10.0])]: # set the MockPosterior to use samples over baseline points mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, constraints=[ lambda Z: torch.zeros_like(Z[..., -1]), lambda Z: torch.zeros_like(Z[..., -1]), ], eta=eta, cache_root=False, ) # set the MockPosterior to use samples over baseline points and new # candidates mm._posterior._samples = samples res = acqf(X) self.assertAlmostEqual(res.item(), 0.5 * 0.5 * 1.5, places=4) # test zero slack single constraint for eta in (1e-1, 1e-2): # set the MockPosterior to use samples over baseline points mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, constraints=[lambda Z: torch.zeros_like(Z[..., -1])], eta=eta, cache_root=False, ) # set the MockPosterior to use samples over baseline points and new # candidates mm._posterior._samples = samples res = acqf(X) self.assertAlmostEqual(res.item(), 0.5 * 1.5, places=4) # set X_pending X_pending = torch.rand(1, 1, **tkwargs) acqf.set_X_pending(X_pending) samples = torch.cat( [ samples, torch.tensor([[[10.0, 0.5]]], **tkwargs), ], dim=1, ) mm._posterior._samples = samples res = acqf(X) 
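# The appended outcome [10.0, 0.5] adds a hypervolume of 0.5 beyond the
# cached front, and the zero-slack constraint scales it by sigmoid(0) = 0.5.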
self.assertAlmostEqual(res.item(), 0.5 * 0.5, places=4) # test incremental nehvi=False mm._posterior._samples = baseline_samples acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, constraints=[lambda Z: torch.zeros_like(Z[..., -1])], eta=1e-3, incremental_nehvi=False, cache_root=False, ) samples = torch.cat( [ baseline_samples.unsqueeze(0), torch.tensor([[[6.5, 4.5]]], **tkwargs), ], dim=1, ) mm._posterior._samples = samples res = acqf(X) self.assertAlmostEqual(res.item(), 0.5 * 1.5, places=4) acqf.set_X_pending(X_pending) samples = torch.cat( [ samples, torch.tensor([[[10.0, 0.5]]], **tkwargs), ], dim=1, ) mm._posterior._samples = samples res = acqf(X) # test that HVI is not incremental # Note that the cached pending point uses strict constraint evaluation # so the HVI from the cached pending point is 1.5. # The new X contributes an HVI of 0.5, but with a constraint slack of 0, # the sigmoid soft-evaluation yields a constrained HVI of 0.25 self.assertAlmostEqual(res.item(), 1.75, places=4) # test feasible # set the MockPosterior to use samples over baseline points mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])], eta=1e-3, cache_root=False, ) samples = torch.cat( [ baseline_samples.unsqueeze(0), torch.tensor([[[6.5, 4.5]]], **tkwargs), ], dim=1, ) mm._posterior._samples = samples res = acqf(X) self.assertAlmostEqual(res.item(), 1.5, places=4) # test single and multiple constraints with matching etas # this crashes for large etas, and I do not know why # set the MockPosterior to use samples over baseline points etas = [torch.tensor([1.0]), torch.tensor([1.0, 10.0])] constraints = [ [lambda Z: torch.ones_like(Z[..., -1])], [ lambda Z: torch.ones_like(Z[..., -1]), lambda Z: torch.ones_like(Z[..., -1]), ], ] expected_values = [ (torch.sigmoid(torch.as_tensor(-1.0 / 1)) * 1.5).item(), ( torch.sigmoid(torch.as_tensor(-1.0 / 1)) * torch.sigmoid(torch.as_tensor(-1.0 / 10)) * 1.5 ).item(), ] for eta, constraint, expected_value in zip( etas, constraints, expected_values ): acqf.constraints = constraint acqf.eta = eta res = acqf(X) self.assertAlmostEqual( res.item(), expected_value, places=4, ) # test infeasible # set the MockPosterior to use samples over baseline points mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, constraints=[lambda Z: 100.0 * torch.ones_like(Z[..., -1])], eta=1e-3, cache_root=False, ) # set the MockPosterior to use samples over baseline points and new # candidates mm._posterior._samples = samples res = acqf(X) self.assertAlmostEqual(res.item(), 0.0, places=4) # test >2 objectives ref_point = [0.0, 0.0, 0.0] baseline_samples = torch.tensor( [ [4.0, 5.0, 1.0], [5.0, 5.0, 1.0], [8.5, 3.5, 1.0], [8.5, 3.0, 1.0], [9.0, 1.0, 1.0], ], **tkwargs, ) mm._posterior._samples = baseline_samples sampler = IIDNormalSampler(sample_shape=torch.Size([1])) acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])], eta=1e-3, cache_root=False, ) # set the MockPosterior to use samples over baseline points and new # candidates samples = torch.cat( [
baseline_samples.unsqueeze(0), torch.tensor([[[6.5, 4.5, 1.0]]], **tkwargs), ], dim=1, ) mm._posterior._samples = samples res = acqf(X) self.assertAlmostEqual(res.item(), 1.5, places=4) def test_prune_baseline(self): # test prune_baseline no = "botorch.utils.testing.MockModel.num_outputs" prune = ( "botorch.acquisition.multi_objective.monte_carlo." "prune_inferior_points_multi_objective" ) for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} ref_point = [0.0, 0.0] pareto_Y = torch.tensor( [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs ) X_baseline = torch.zeros(pareto_Y.shape[0], 1, **tkwargs) baseline_samples = pareto_Y X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype) sampler = IIDNormalSampler(sample_shape=torch.Size([1])) with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs: mock_num_outputs.return_value = 2 # Reduce samples to same shape as X_pruned. mm = MockModel(MockPosterior(samples=baseline_samples[:1])) with mock.patch(prune, return_value=X_pruned) as mock_prune: acqf = qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, prune_baseline=True, cache_root=False, ) mock_prune.assert_called_once() self.assertTrue(torch.equal(acqf.X_baseline, X_pruned)) def test_cache_root(self): sample_cached_path = ( "botorch.acquisition.cached_cholesky.sample_cached_cholesky" ) state_dict = { "likelihood.noise_covar.raw_noise": torch.tensor( [[0.0895], [0.2594]], dtype=torch.float64 ), "mean_module.raw_constant": torch.tensor( [-0.4545, -0.1285], dtype=torch.float64 ), "covar_module.raw_outputscale": torch.tensor( [1.4876, 1.4897], dtype=torch.float64 ), "covar_module.base_kernel.raw_lengthscale": torch.tensor( [[[-0.7202, -0.2868]], [[-0.8794, -1.2877]]], dtype=torch.float64 ), } # test batched models (e.g. 
for MCMC) for train_batch_shape in (torch.Size([]), torch.Size([3])): if len(train_batch_shape) > 0: for k, v in state_dict.items(): state_dict[k] = v.unsqueeze(0).expand(*train_batch_shape, *v.shape) for dtype, ref_point in product( (torch.float, torch.double), ([-5.0, -5.0], [10.0, 10.0]), ): tkwargs = {"device": self.device, "dtype": dtype} for k, v in state_dict.items(): state_dict[k] = v.to(**tkwargs) all_close_kwargs = ( {"atol": 1e-1, "rtol": 1e-2} if dtype == torch.float else {"atol": 1e-4, "rtol": 1e-6} ) torch.manual_seed(1234) train_X = torch.rand(*train_batch_shape, 3, 2, **tkwargs) train_Y = torch.sin(train_X * 2 * pi) + torch.randn( *train_batch_shape, 3, 2, **tkwargs ) train_Y = standardize(train_Y) model = SingleTaskGP(train_X, train_Y) if len(train_batch_shape) > 0: X_baseline = train_X[0] else: X_baseline = train_X model.load_state_dict(state_dict, strict=False) sampler = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0) torch.manual_seed(0) acqf = qNoisyExpectedHypervolumeImprovement( model=model, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler, prune_baseline=False, cache_root=True, ) sampler2 = IIDNormalSampler(sample_shape=torch.Size([5]), seed=0) torch.manual_seed(0) acqf_no_cache = qNoisyExpectedHypervolumeImprovement( model=model, ref_point=ref_point, X_baseline=X_baseline, sampler=sampler2, prune_baseline=False, cache_root=False, ) # load CBD acqf_no_cache.cell_lower_bounds = acqf.cell_lower_bounds.clone() acqf_no_cache.cell_upper_bounds = acqf.cell_upper_bounds.clone() for q, batch_shape in product( (1, 3), (torch.Size([]), torch.Size([3]), torch.Size([4, 3])) ): torch.manual_seed(0) acqf.q_in = -1 test_X = ( 0.3 + 0.05 * torch.randn(*batch_shape, q, 2, **tkwargs) ).requires_grad_(True) with mock.patch( sample_cached_path, wraps=sample_cached_cholesky ) as mock_sample_cached: torch.manual_seed(0) val = acqf(test_X) mock_sample_cached.assert_called_once() val.sum().backward() base_samples = acqf.sampler.base_samples.detach().clone() X_grad = test_X.grad.clone() test_X2 = test_X.detach().clone().requires_grad_(True) acqf_no_cache.sampler.base_samples = base_samples with mock.patch( sample_cached_path, wraps=sample_cached_cholesky ) as mock_sample_cached: torch.manual_seed(0) val2 = acqf_no_cache(test_X2) mock_sample_cached.assert_not_called() self.assertAllClose(val, val2, **all_close_kwargs) val2.sum().backward() if dtype == torch.double: # The gradient computation is very unstable in single precision # so we only check the gradient when using torch.double. self.assertTrue( torch.allclose(X_grad, test_X2.grad, **all_close_kwargs) ) if ref_point == [-5.0, -5.0]: self.assertTrue((X_grad != 0).any()) # test we fall back to standard sampling for # ill-conditioned covariances acqf._baseline_L = torch.zeros_like(acqf._baseline_L) with warnings.catch_warnings(record=True) as ws, settings.debug(True): with torch.no_grad(): acqf(test_X) self.assertEqual( sum(issubclass(w.category, BotorchWarning) for w in ws), 1 ) def test_cache_root_w_standardize(self): # Test caching with standardize transform. 
train_x = torch.rand(3, 2, dtype=torch.float64) train_y = torch.randn(3, 2, dtype=torch.float64) model = SingleTaskGP(train_x, train_y, outcome_transform=Standardize(m=2)) acqf = qNoisyExpectedHypervolumeImprovement( model=model, X_baseline=train_x, ref_point=torch.ones(2), sampler=IIDNormalSampler(sample_shape=torch.Size([1])), cache_root=True, ) self.assertIsNotNone(acqf._baseline_L) self.assertEqual(acqf(train_x[:1]).shape, torch.Size([1])) self.assertEqual(acqf(train_x.unsqueeze(-2)).shape, torch.Size([3])) def test_with_set_valued_objectives(self): for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} tx = torch.rand(5, 2, **tkwargs) ty = torch.randn(5, 2, **tkwargs) perturbation = InputPerturbation( perturbation_set=torch.randn(3, 2, **tkwargs) ).eval() baseline_samples = perturbation(ty) class DummyObjective(MultiOutputRiskMeasureMCObjective): r"""A dummy set valued objective.""" _verify_output_shape = False def forward(self, samples, X=None): samples = self._prepare_samples(samples) return samples[..., :2, :].reshape( *samples.shape[:-3], -1, samples.shape[-1] ) model = MockModel(MockPosterior(samples=baseline_samples)) acqf = qNoisyExpectedHypervolumeImprovement( model=model, ref_point=torch.tensor([0.0, 0.0], **tkwargs), X_baseline=tx, sampler=SobolQMCNormalSampler(sample_shape=torch.Size([2])), objective=DummyObjective(n_w=3), prune_baseline=False, cache_root=False, ) test_x = torch.rand(3, 2, 2, **tkwargs) samples = torch.cat( [baseline_samples.expand(3, -1, -1), torch.zeros(3, 6, 2, **tkwargs)], dim=1, ) acqf.model._posterior._samples = samples res = acqf(test_x) self.assertTrue(torch.equal(res, torch.zeros(3, **tkwargs))) self.assertEqual(acqf.q_in, 6) self.assertEqual(acqf.q_out, 4) self.assertEqual(len(acqf.q_subset_indices.keys()), 4) def test_deterministic(self): for dtype, prune in ((torch.float, False), (torch.double, True)): tkwargs = {"device": self.device, "dtype": dtype} model = GenericDeterministicModel(f=lambda x: x, num_outputs=2) with self.assertWarnsRegex( RuntimeWarning, _get_cache_root_not_supported_message(GenericDeterministicModel), ): acqf = qNoisyExpectedHypervolumeImprovement( model=model, ref_point=torch.tensor([0.0, 0.0], **tkwargs), X_baseline=torch.rand(5, 2, **tkwargs), prune_baseline=prune, cache_root=True, ) self.assertFalse(acqf._cache_root) self.assertEqual( acqf(torch.rand(3, 2, 2, **tkwargs)).shape, torch.Size([3]) ) def test_with_multitask(self): # Verify that _set_sampler works with MTGP, KroneckerMTGP and HOGP. 
torch.manual_seed(1234) tkwargs = {"device": self.device, "dtype": torch.double} train_x = torch.rand(6, 2, **tkwargs) train_y = torch.randn(6, 2, **tkwargs) mtgp_task = torch.cat( [torch.zeros(6, 1, **tkwargs), torch.ones(6, 1, **tkwargs)], dim=0 ) mtgp_x = torch.cat([train_x.repeat(2, 1), mtgp_task], dim=-1) mtgp = MultiTaskGP(mtgp_x, train_y.view(-1, 1), task_feature=2).eval() kmtgp = KroneckerMultiTaskGP(train_x, train_y).eval() hogp = HigherOrderGP(train_x, train_y.repeat(6, 1, 1)).eval() hogp_obj = GenericMCMultiOutputObjective(lambda Y, X: Y.mean(dim=-2)) test_x = torch.rand(2, 3, 2, **tkwargs) def get_acqf(model): return qNoisyExpectedHypervolumeImprovement( model=model, ref_point=torch.tensor([0.0, 0.0], **tkwargs), X_baseline=train_x, sampler=IIDNormalSampler(sample_shape=torch.Size([2])), objective=hogp_obj if isinstance(model, HigherOrderGP) else None, prune_baseline=True, cache_root=False, ) for model in [mtgp, kmtgp, hogp]: acqf = get_acqf(model) posterior = model.posterior(acqf.X_baseline) base_evals = acqf.base_sampler(posterior) base_samples = acqf.base_sampler.base_samples with mock.patch.object( qNoisyExpectedHypervolumeImprovement, "_compute_qehvi", wraps=acqf._compute_qehvi, ) as wrapped_compute: acqf(test_x) wrapped_compute.assert_called_once() expected_shape = ( torch.Size([2, 2, 3, 6, 2]) if isinstance(model, HigherOrderGP) else torch.Size([2, 2, 3, 2]) ) self.assertEqual( wrapped_compute.call_args[-1]["samples"].shape, expected_shape ) new_base_samples = acqf.sampler.base_samples # Check that the base samples are the same. if model is mtgp: expected = new_base_samples[..., :-3, :].squeeze(-3) else: n_train = base_samples.shape[-1] // 2 expected = torch.cat( [new_base_samples[..., :n_train], new_base_samples[..., -n_train:]], dim=-1, ).squeeze(-2) self.assertTrue(torch.equal(base_samples, expected)) # Check that they produce the same f_X for baseline points. X_full = torch.cat( [match_batch_shape(acqf.X_baseline, test_x), test_x], dim=-2 ) posterior = acqf.model.posterior(X_full) samples = acqf.sampler(posterior) expected = samples[:, :, :-3] repeat_shape = [1, 2, 1, 1] if model is hogp: repeat_shape.append(1) self.assertTrue( torch.allclose( base_evals.unsqueeze(1).repeat(*repeat_shape), expected, atol=1e-2, rtol=1e-4, ) ) def test_with_transformed(self): # Verify that _set_sampler works with transformed posteriors. mm = MockModel( posterior=PosteriorList( TransformedPosterior( MockPosterior(samples=torch.rand(2, 3, 1)), lambda X: X ), TransformedPosterior( MockPosterior(samples=torch.rand(2, 3, 1)), lambda X: X ), ) ) sampler = ListSampler( IIDNormalSampler(sample_shape=torch.Size([2])), IIDNormalSampler(sample_shape=torch.Size([2])), ) # This calls _set_sampler which used to error out in # NormalMCSampler._update_base_samples with TransformedPosterior # due to the missing batch_shape (fixed in #1625). qNoisyExpectedHypervolumeImprovement( model=mm, ref_point=torch.tensor([0.0, 0.0]), X_baseline=torch.rand(3, 2), sampler=sampler, cache_root=False, )
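# --------- Hand-check of the asserted hypervolume improvements -----
# A minimal standalone sketch, assuming only that botorch is installed; it is
# not part of the test suite above. It reproduces the feasible q=1 case from
# the constrained tests: a candidate outcome of [6.5, 4.5] against the Pareto
# front used throughout, with ref point (0, 0).
import torch
from botorch.utils.multi_objective.box_decompositions.dominated import (
    DominatedPartitioning,
)

pareto_Y = torch.tensor(
    [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]]
)
bd = DominatedPartitioning(ref_point=torch.zeros(2), Y=pareto_Y)
hv_before = bd.compute_hypervolume()
bd.update(torch.tensor([[6.5, 4.5]]))  # add the candidate outcome
# The newly dominated region is the box (5, 6.5] x (3.5, 4.5], i.e. 1.5 x 1.0.
print(bd.compute_hypervolume() - hv_before)  # -> 1.5, the asserted HVI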
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from itertools import product from unittest import mock import torch from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy from botorch.acquisition.multi_objective.max_value_entropy_search import ( qLowerBoundMultiObjectiveMaxValueEntropySearch, qMultiObjectiveMaxValueEntropy, ) from botorch.acquisition.multi_objective.utils import compute_sample_box_decomposition from botorch.models.gp_regression import SingleTaskGP from botorch.models.model_list_gp_regression import ModelListGP from botorch.models.transforms.outcome import Standardize from botorch.sampling.normal import SobolQMCNormalSampler from botorch.utils.testing import BotorchTestCase def get_model(train_X, train_Y, use_model_list, standardize_model): num_objectives = train_Y.shape[-1] if standardize_model: if use_model_list: outcome_transform = Standardize(m=1) else: outcome_transform = Standardize(m=num_objectives) else: outcome_transform = None if use_model_list: model = ModelListGP( *[ SingleTaskGP( train_X=train_X, train_Y=train_Y[:, i : i + 1], outcome_transform=outcome_transform, ) for i in range(num_objectives) ] ) else: model = SingleTaskGP( train_X=train_X, train_Y=train_Y, outcome_transform=outcome_transform, ) return model def dummy_sample_pareto_frontiers(model): m = model.models[0] if isinstance(model, ModelListGP) else model return torch.rand( 3, 4, model.num_outputs, dtype=m.train_inputs[0].dtype, device=m.train_inputs[0].device, ) class TestMultiObjectiveMaxValueEntropy(BotorchTestCase): def test_multi_objective_max_value_entropy(self): for dtype, m in product((torch.float, torch.double), (2, 3)): torch.manual_seed(7) # test batched model train_X = torch.rand(1, 1, 2, dtype=dtype, device=self.device) train_Y = torch.rand(1, 1, m, dtype=dtype, device=self.device) model = SingleTaskGP(train_X, train_Y) with self.assertRaises(NotImplementedError): qMultiObjectiveMaxValueEntropy(model, dummy_sample_pareto_frontiers) # test initialization train_X = torch.rand(4, 2, dtype=dtype, device=self.device) train_Y = torch.rand(4, m, dtype=dtype, device=self.device) # test batched MO model model = SingleTaskGP(train_X, train_Y) mesmo = qMultiObjectiveMaxValueEntropy(model, dummy_sample_pareto_frontiers) self.assertEqual(mesmo.num_fantasies, 16) # Initialize the sampler. dummy_post = model.posterior(train_X[:1]) mesmo.get_posterior_samples(dummy_post) self.assertIsInstance(mesmo.sampler, SobolQMCNormalSampler) self.assertEqual(mesmo.sampler.sample_shape, torch.Size([128])) self.assertIsInstance(mesmo.fantasies_sampler, SobolQMCNormalSampler) self.assertEqual(mesmo.posterior_max_values.shape, torch.Size([3, 1, m])) # test conversion to single-output model self.assertIs(mesmo.mo_model, model) self.assertEqual(mesmo.mo_model.num_outputs, m) self.assertIsInstance(mesmo.model, SingleTaskGP) self.assertEqual(mesmo.model.num_outputs, 1) self.assertEqual( mesmo.model._aug_batch_shape, mesmo.model._input_batch_shape ) # test ModelListGP model = ModelListGP( *[SingleTaskGP(train_X, train_Y[:, i : i + 1]) for i in range(m)] ) mock_sample_pfs = mock.Mock() mock_sample_pfs.return_value = dummy_sample_pareto_frontiers(model=model) mesmo = qMultiObjectiveMaxValueEntropy(model, mock_sample_pfs) self.assertEqual(mesmo.num_fantasies, 16) # Initialize the sampler. 
dummy_post = model.posterior(train_X[:1]) mesmo.get_posterior_samples(dummy_post) self.assertIsInstance(mesmo.sampler, SobolQMCNormalSampler) self.assertEqual(mesmo.sampler.sample_shape, torch.Size([128])) self.assertIsInstance(mesmo.fantasies_sampler, SobolQMCNormalSampler) self.assertEqual(mesmo.posterior_max_values.shape, torch.Size([3, 1, m])) # test conversion to batched MO model self.assertIsInstance(mesmo.mo_model, SingleTaskGP) self.assertEqual(mesmo.mo_model.num_outputs, m) self.assertIs(mesmo.mo_model, mesmo._init_model) # test conversion to single-output model self.assertIsInstance(mesmo.model, SingleTaskGP) self.assertEqual(mesmo.model.num_outputs, 1) self.assertEqual( mesmo.model._aug_batch_shape, mesmo.model._input_batch_shape ) # test that we call sample_pareto_frontiers with the multi-output model mock_sample_pfs.assert_called_once_with(mesmo.mo_model) # test basic evaluation X = torch.rand(1, 2, device=self.device, dtype=dtype) with torch.no_grad(): vals = mesmo(X) igs = qMaxValueEntropy.forward(mesmo, X=X.view(1, 1, 1, 2)) self.assertEqual(vals.shape, torch.Size([1])) self.assertTrue(torch.equal(vals, igs.sum(dim=-1))) # test batched evaluation X = torch.rand(4, 1, 2, device=self.device, dtype=dtype) with torch.no_grad(): vals = mesmo(X) igs = qMaxValueEntropy.forward(mesmo, X=X.view(4, 1, 1, 2)) self.assertEqual(vals.shape, torch.Size([4])) self.assertTrue(torch.equal(vals, igs.sum(dim=-1))) # test set X pending to None mesmo.set_X_pending(None) self.assertIs(mesmo.mo_model, mesmo._init_model) fant_X = torch.cat( [ train_X.expand(16, 4, 2), torch.rand(16, 1, 2, device=self.device, dtype=dtype), ], dim=1, ) fant_Y = torch.cat( [ train_Y.expand(16, 4, m), torch.rand(16, 1, m, device=self.device, dtype=dtype), ], dim=1, ) fantasy_model = SingleTaskGP(fant_X, fant_Y) # test with X_pending is not None with mock.patch.object( SingleTaskGP, "fantasize", return_value=fantasy_model ) as mock_fantasize: qMultiObjectiveMaxValueEntropy( model, dummy_sample_pareto_frontiers, X_pending=torch.rand(1, 2, device=self.device, dtype=dtype), ) mock_fantasize.assert_called_once() class TestQLowerBoundMultiObjectiveMaxValueEntropySearch(BotorchTestCase): def _base_test_lb_moo_max_value_entropy_search(self, estimation_type): torch.manual_seed(1) tkwargs = {"device": self.device} for (dtype, num_objectives, use_model_list, standardize_model) in product( (torch.float, torch.double), (1, 2, 3), (False, True), (False, True), ): tkwargs["dtype"] = dtype input_dim = 2 train_X = torch.rand(4, input_dim, **tkwargs) train_Y = torch.rand(4, num_objectives, **tkwargs) model = get_model(train_X, train_Y, use_model_list, standardize_model) pareto_fronts = dummy_sample_pareto_frontiers(model) hypercell_bounds = compute_sample_box_decomposition(pareto_fronts) # test acquisition X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)] for X_pending in X_pending_list: acq = qLowerBoundMultiObjectiveMaxValueEntropySearch( model=model, hypercell_bounds=hypercell_bounds, estimation_type=estimation_type, num_samples=64, X_pending=X_pending, ) self.assertIsInstance(acq.sampler, SobolQMCNormalSampler) test_Xs = [ torch.rand(4, 1, input_dim, **tkwargs), torch.rand(4, 3, input_dim, **tkwargs), torch.rand(4, 5, 1, input_dim, **tkwargs), torch.rand(4, 5, 3, input_dim, **tkwargs), ] for test_X in test_Xs: acq_X = acq(test_X) # assess shape self.assertTrue(acq_X.shape == test_X.shape[:-2]) def test_lb_moo_max_value_entropy_search_0(self): self._base_test_lb_moo_max_value_entropy_search(estimation_type="0") def 
test_lb_moo_max_value_entropy_search_LB(self): self._base_test_lb_moo_max_value_entropy_search(estimation_type="LB") def test_lb_moo_max_value_entropy_search_LB2(self): self._base_test_lb_moo_max_value_entropy_search(estimation_type="LB2") def test_lb_moo_max_value_entropy_search_MC(self): self._base_test_lb_moo_max_value_entropy_search(estimation_type="MC")
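# --------- Usage sketch for the lower-bound MES acquisition -----
# An illustrative sketch, not upstream code: if placed in the module above, it
# reuses `get_model` and `dummy_sample_pareto_frontiers` (defined earlier in
# this file) to evaluate the acquisition outside the test harness. The data
# shapes and the "LB" estimation type mirror the tests.
import torch
from botorch.acquisition.multi_objective.max_value_entropy_search import (
    qLowerBoundMultiObjectiveMaxValueEntropySearch,
)
from botorch.acquisition.multi_objective.utils import compute_sample_box_decomposition

train_X = torch.rand(4, 2)
train_Y = torch.rand(4, 2)
model = get_model(train_X, train_Y, use_model_list=False, standardize_model=False)
# Box decompositions of sampled Pareto frontiers drive the entropy estimate.
hypercell_bounds = compute_sample_box_decomposition(
    dummy_sample_pareto_frontiers(model)
)
acq = qLowerBoundMultiObjectiveMaxValueEntropySearch(
    model=model, hypercell_bounds=hypercell_bounds, estimation_type="LB"
)
print(acq(torch.rand(4, 1, 2)).shape)  # -> torch.Size([4]), one value per batch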
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings from typing import Optional import torch from botorch import settings from botorch.acquisition.multi_objective.multi_output_risk_measures import ( IndependentCVaR, IndependentVaR, MARS, MultiOutputExpectation, MultiOutputRiskMeasureMCObjective, MultiOutputWorstCase, MVaR, ) from botorch.acquisition.multi_objective.objective import ( IdentityMCMultiOutputObjective, WeightedMCMultiOutputObjective, ) from botorch.exceptions.errors import UnsupportedError from botorch.exceptions.warnings import BotorchWarning from botorch.models.deterministic import GenericDeterministicModel from botorch.models.transforms.input import InputPerturbation from botorch.utils.multi_objective.pareto import is_non_dominated from botorch.utils.testing import BotorchTestCase from torch import Tensor class NotSoAbstractMORiskMeasure(MultiOutputRiskMeasureMCObjective): def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor: prepared_samples = self._prepare_samples(samples) return prepared_samples.sum(dim=-2) class TestMultiOutputRiskMeasureMCObjective(BotorchTestCase): def test_multi_output_risk_measure_mc_objective(self): # abstract raises with self.assertRaises(TypeError): MultiOutputRiskMeasureMCObjective(n_w=3) for dtype in (torch.float, torch.double): samples = torch.tensor( [ [ [1.0, 1.2], [0.5, 0.7], [2.0, 2.2], [3.0, 3.4], [1.0, 1.2], [5.0, 5.6], ] ], device=self.device, dtype=dtype, ) obj = NotSoAbstractMORiskMeasure(n_w=3) # test _prepare_samples expected_samples = samples.view(1, 2, 3, 2) prepared_samples = obj._prepare_samples(samples) self.assertTrue(torch.equal(prepared_samples, expected_samples)) # test batches samples = torch.rand(5, 3, 6, 3, device=self.device, dtype=dtype) expected_samples = samples.view(5, 3, 2, 3, 3) prepared_samples = obj._prepare_samples(samples) self.assertTrue(torch.equal(prepared_samples, expected_samples)) # negating with preprocessing function obj = NotSoAbstractMORiskMeasure( n_w=3, preprocessing_function=WeightedMCMultiOutputObjective( weights=torch.tensor( [-1.0, -1.0, -1.0], device=self.device, dtype=dtype ) ), ) prepared_samples = obj._prepare_samples(samples) self.assertTrue(torch.equal(prepared_samples, -expected_samples)) class TestMultiOutputExpectation(BotorchTestCase): def test_mo_expectation(self): obj = MultiOutputExpectation(n_w=3) for dtype in (torch.float, torch.double): obj = MultiOutputExpectation(n_w=3) samples = torch.tensor( [ [ [1.0, 1.2], [0.5, 0.5], [1.5, 2.2], [3.0, 1.2], [1.0, 7.1], [5.0, 5.8], ] ], device=self.device, dtype=dtype, ) rm_samples = obj(samples) self.assertTrue( torch.allclose( rm_samples, torch.tensor( [[[1.0, 1.3], [3.0, 4.7]]], device=self.device, dtype=dtype ), ) ) # w/ first output negated obj.preprocessing_function = WeightedMCMultiOutputObjective( torch.tensor([-1.0, 1.0], device=self.device, dtype=dtype) ) rm_samples = obj(samples) self.assertTrue( torch.allclose( rm_samples, torch.tensor( [[[-1.0, 1.3], [-3.0, 4.7]]], device=self.device, dtype=dtype ), ) ) class TestIndependentCVaR(BotorchTestCase): def test_independent_cvar(self): obj = IndependentCVaR(alpha=0.5, n_w=3) self.assertEqual(obj.alpha_idx, 1) with self.assertRaises(ValueError): IndependentCVaR(alpha=3, n_w=3) for dtype in (torch.float, torch.double): obj = IndependentCVaR(alpha=0.5, n_w=3) samples = torch.tensor( [ [ [1.0, 1.2], [0.5, 
0.7], [2.0, 2.2], [3.0, 1.2], [1.0, 7.2], [5.0, 5.8], ] ], device=self.device, dtype=dtype, ) rm_samples = obj(samples) self.assertTrue( torch.allclose( rm_samples, torch.tensor( [[[0.75, 0.95], [2.0, 3.5]]], device=self.device, dtype=dtype ), ) ) # w/ first output negated obj.preprocessing_function = WeightedMCMultiOutputObjective( torch.tensor([-1.0, 1.0], device=self.device, dtype=dtype) ) rm_samples = obj(samples) self.assertTrue( torch.allclose( rm_samples, torch.tensor( [[[-1.5, 0.95], [-4.0, 3.5]]], device=self.device, dtype=dtype ), ) ) class TestIndependentVaR(BotorchTestCase): def test_independent_var(self): for dtype in (torch.float, torch.double): obj = IndependentVaR(alpha=0.5, n_w=3) samples = torch.tensor( [ [ [1.0, 3.2], [0.5, 0.7], [2.0, 2.2], [3.0, 1.2], [1.0, 7.2], [5.0, 5.8], ] ], device=self.device, dtype=dtype, ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor( [[[1.0, 2.2], [3.0, 5.8]]], device=self.device, dtype=dtype ), ) ) # w/ weights obj.preprocessing_function = WeightedMCMultiOutputObjective( torch.tensor([0.5, -1.0], device=self.device, dtype=dtype) ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor( [[[0.5, -2.2], [1.5, -5.8]]], device=self.device, dtype=dtype ), ) ) class TestMultiOutputWorstCase(BotorchTestCase): def test_multi_output_worst_case(self): for dtype in (torch.float, torch.double): obj = MultiOutputWorstCase(n_w=3) samples = torch.tensor( [ [ [1.0, 3.2], [5.5, 0.7], [2.0, 2.2], [3.0, 1.2], [5.0, 7.2], [5.0, 5.8], ] ], device=self.device, dtype=dtype, ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor( [[[1.0, 0.7], [3.0, 1.2]]], device=self.device, dtype=dtype ), ) ) # w/ weights obj.preprocessing_function = WeightedMCMultiOutputObjective( torch.tensor([-1.0, 2.0], device=self.device, dtype=dtype) ) rm_samples = obj(samples) self.assertTrue( torch.equal( rm_samples, torch.tensor( [[[-5.5, 1.4], [-5.0, 2.4]]], device=self.device, dtype=dtype ), ) ) class TestMVaR(BotorchTestCase): def test_mvar(self): with self.assertRaises(ValueError): MVaR(n_w=5, alpha=3.0) def set_equals(t1: Tensor, t2: Tensor) -> bool: r"""Check if two `k x m`-dim tensors are equivalent after possibly reordering the `k` dimension. Ignores duplicate entries. 
""" t1 = t1.unique(dim=0) t2 = t2.unique(dim=0) if t1.shape != t2.shape: return False equals_sum = (t1.unsqueeze(-2) == t2).all(dim=-1).sum(dim=-1) return torch.equal(equals_sum, torch.ones_like(equals_sum)) for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} mvar = MVaR(n_w=5, alpha=0.6) # a simple negatively correlated example Y = torch.stack( [torch.linspace(1, 5, 5), torch.linspace(5, 1, 5)], dim=-1, ).to(**tkwargs) expected_set = torch.stack( [torch.linspace(1, 3, 3), torch.linspace(3, 1, 3)], dim=-1, ).to(Y) # check that both versions produce the correct set cpu_mvar = mvar.get_mvar_set_cpu(Y) # For 2d input, returns k x m gpu_mvar = mvar.get_mvar_set_gpu(Y)[0] # returns a batch list of k x m self.assertTrue(set_equals(cpu_mvar, gpu_mvar)) self.assertTrue(set_equals(cpu_mvar, expected_set)) # check that the `filter_dominated` works correctly mvar = MVaR( n_w=5, alpha=0.4, filter_dominated=False, ) # negating the input to treat large values as undesirable Y = -torch.tensor( [ [1, 4], [2, 3], [3, 2], [4, 1], [3.5, 3.5], ], **tkwargs, ) cpu_mvar = mvar.get_mvar_set_cpu(Y) gpu_mvar = mvar.get_mvar_set_gpu(Y)[0] self.assertTrue(set_equals(cpu_mvar, gpu_mvar)) # negating here as well expected_w_dominated = -torch.tensor( [ [2, 4], [3, 3], [3.5, 3], [3, 3.5], [4, 2], ], **tkwargs, ) self.assertTrue(set_equals(cpu_mvar, expected_w_dominated)) expected_non_dominated = expected_w_dominated[ is_non_dominated(expected_w_dominated) ] mvar.filter_dominated = True cpu_mvar = mvar.get_mvar_set_cpu(Y) gpu_mvar = mvar.get_mvar_set_gpu(Y)[0] self.assertTrue(set_equals(cpu_mvar, gpu_mvar)) self.assertTrue(set_equals(cpu_mvar, expected_non_dominated)) # test batched w/ random input mvar = MVaR( n_w=10, alpha=0.5, filter_dominated=False, ) Y = torch.rand(4, 10, 2, **tkwargs) cpu_mvar = mvar.get_mvar_set_cpu(Y) gpu_mvar = mvar.get_mvar_set_gpu(Y) # check that the two agree self.assertTrue( all([set_equals(cpu_mvar[i], gpu_mvar[i]) for i in range(4)]) ) # check that the MVaR is dominated by `alpha` fraction (maximization). 
dominated_count = (Y[0].unsqueeze(-2) >= cpu_mvar[0]).all(dim=-1).sum(dim=0) expected_count = ( torch.ones(cpu_mvar[0].shape[0], device=self.device, dtype=torch.long) * 5 ) self.assertTrue(torch.equal(dominated_count, expected_count)) # test forward pass # with `expectation=True` mvar = MVaR( n_w=10, alpha=0.5, expectation=True, ) samples = torch.rand(2, 20, 2, **tkwargs) mvar_exp = mvar(samples) expected = [ mvar.get_mvar_set_cpu(Y).mean(dim=0) for Y in samples.view(4, 10, 2) ] self.assertTrue( torch.allclose(mvar_exp, torch.stack(expected).view(2, 2, 2)) ) # m > 2 samples = torch.rand(2, 20, 3, **tkwargs) mvar_exp = mvar(samples) expected = [ mvar.get_mvar_set_gpu(Y)[0].mean(dim=0) for Y in samples.view(4, 10, 3) ] self.assertTrue(torch.equal(mvar_exp, torch.stack(expected).view(2, 2, 3))) # with `expectation=False` mvar = MVaR( n_w=10, alpha=0.5, expectation=False, pad_to_n_w=True, ) samples = torch.rand(2, 20, 2, **tkwargs) mvar_vals = mvar(samples) self.assertTrue(mvar_vals.shape == samples.shape) expected = [mvar.get_mvar_set_cpu(Y) for Y in samples.view(4, 10, 2)] for i in range(4): batch_idx = i // 2 q_idx_start = 10 * (i % 2) expected_ = expected[i] # check that the actual values are there self.assertTrue( set_equals( mvar_vals[ batch_idx, q_idx_start : q_idx_start + expected_.shape[0] ], expected_, ) ) # check for correct padding self.assertTrue( torch.equal( mvar_vals[ batch_idx, q_idx_start + expected_.shape[0] : q_idx_start + 10, ], mvar_vals[ batch_idx, q_idx_start + expected_.shape[0] - 1 ].expand(10 - expected_.shape[0], -1), ) ) # Test the no-exact alpha level points case. # This happens when there are duplicates in the input. Y = torch.ones(10, 2, **tkwargs) cpu_mvar = mvar.get_mvar_set_cpu(Y) gpu_mvar = mvar.get_mvar_set_gpu(Y)[0] self.assertTrue(torch.equal(cpu_mvar, Y[:1])) self.assertTrue(torch.equal(gpu_mvar, Y[:1])) # Test grad warning with self.assertWarnsRegex(RuntimeWarning, "requires grad"): mvar(Y.requires_grad_()) # TODO: Test grad support once properly implemented. class TestMARS(BotorchTestCase): def test_init(self): # Init w/ defaults. mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], ) self.assertEqual(mars.alpha, 0.5) self.assertEqual(mars.n_w, 3) self.assertTrue(torch.equal(mars.chebyshev_weights, torch.tensor([0.5, 0.5]))) self.assertIsNone(mars.baseline_Y) self.assertIsNone(mars.ref_point) self.assertIsInstance( mars.preprocessing_function, IdentityMCMultiOutputObjective ) self.assertIsInstance(mars.mvar, MVaR) self.assertEqual(mars.mvar.alpha, 0.5) self.assertEqual(mars.mvar.n_w, 3) # Errors with Chebyshev weights. with self.assertRaisesRegex(UnsupportedError, "Negative"): MARS( alpha=0.5, n_w=3, chebyshev_weights=[-0.5, 0.5], ) with self.assertRaisesRegex(UnsupportedError, "Batched"): MARS( alpha=0.5, n_w=3, chebyshev_weights=[[0.5], [0.5]], ) # With optional arguments. 
baseline_Y = torch.rand(3, 2) ref_point = [3.0, 5.0] def dummy_func(Y): return Y mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], baseline_Y=baseline_Y, ref_point=ref_point, preprocessing_function=dummy_func, ) self.assertTrue(torch.equal(mars.baseline_Y, baseline_Y)) self.assertTrue(torch.equal(mars.ref_point, torch.tensor(ref_point))) self.assertIs(mars.preprocessing_function, dummy_func) def test_set_baseline_Y(self): mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], ) perturbation = InputPerturbation( perturbation_set=torch.tensor([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]) ) model = GenericDeterministicModel(f=lambda X: X, num_outputs=2) model.input_transform = perturbation X_baseline = torch.tensor([[0.0, 0.0], [1.0, 1.0]]) mars.set_baseline_Y(model=model, X_baseline=X_baseline) self.assertTrue(torch.equal(mars.baseline_Y, torch.tensor([[1.5, 1.5]]))) # With Y_samples. mars._baseline_Y = None Y_samples = model.posterior(X_baseline).mean with warnings.catch_warnings(record=True) as ws, settings.debug(True): mars.set_baseline_Y(model=model, X_baseline=X_baseline, Y_samples=Y_samples) self.assertTrue(torch.equal(mars.baseline_Y, torch.tensor([[1.5, 1.5]]))) self.assertTrue(any(w.category == BotorchWarning for w in ws)) # With pre-processing function. mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], preprocessing_function=lambda Y: -Y, ) mars.set_baseline_Y(model=model, X_baseline=X_baseline) self.assertTrue(torch.equal(mars.baseline_Y, torch.tensor([[-0.5, -0.5]]))) def test_get_Y_normalization_bounds(self): # Error if batched. with self.assertRaisesRegex(UnsupportedError, "Batched"): MARS._get_Y_normalization_bounds(Y=torch.rand(3, 5, 2)) for dtype in (torch.float, torch.double): tkwargs = {"dtype": dtype, "device": self.device} # Empty Y. bounds = MARS._get_Y_normalization_bounds(Y=torch.empty(0, 3, **tkwargs)) expected = torch.zeros(2, 3, **tkwargs) expected[1] = 1.0 self.assertAllClose(bounds, expected) # Single point in pareto_Y. bounds = MARS._get_Y_normalization_bounds(Y=torch.zeros(1, 3, **tkwargs)) self.assertAllClose(bounds, expected) # With reference point. bounds = MARS._get_Y_normalization_bounds( Y=torch.zeros(1, 3, **tkwargs), ref_point=-torch.ones(3) ) self.assertAllClose(bounds, expected - 1) # Check that dominated points are ignored. Y = torch.tensor([[0.0, 0.0], [0.5, 1.0], [1.0, 0.5]], **tkwargs) expected = expected[:, :2] expected[0] = 0.5 bounds = MARS._get_Y_normalization_bounds(Y=Y) self.assertAllClose(bounds, expected) # Multiple Pareto points with ref point. # Nothing better than ref. bounds = MARS._get_Y_normalization_bounds( Y=Y, ref_point=torch.ones(2) * 0.75 ) self.assertAllClose(bounds, expected) # W/ points better than ref. Y = torch.tensor( [[0.5, 1.0], [1.0, 0.5], [0.8, 0.8], [0.9, 0.7]], **tkwargs ) bounds = MARS._get_Y_normalization_bounds( Y=Y, ref_point=torch.ones(2) * 0.6 ) expected = torch.tensor([[0.6, 0.6], [0.9, 0.8]], **tkwargs) self.assertAllClose(bounds, expected) def test_chebyshev_objective(self): # Check that the objective is destroyed on setters. mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], baseline_Y=torch.empty(0, 2), ) self.assertIsNone(mars._chebyshev_objective) # Gets constructed on property access. self.assertIsNotNone(mars.chebyshev_objective) self.assertIsNotNone(mars._chebyshev_objective) # Destroyed on updating the weights. mars.chebyshev_weights = [0.5, 0.3] self.assertIsNone(mars._chebyshev_objective) # Destroyed on setting baseline_Y.
mars.chebyshev_objective mars.baseline_Y = None self.assertIsNone(mars._chebyshev_objective) # Error if baseline_Y is not set. with self.assertRaisesRegex(RuntimeError, "baseline_Y"): MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], ).chebyshev_objective for dtype in (torch.float, torch.double): tkwargs = {"dtype": dtype, "device": self.device} # Without ref point or pre-processing. mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], baseline_Y=torch.tensor([[0.0, 0.5], [0.5, 0.0]], **tkwargs), ) obj = mars.chebyshev_objective Y = torch.ones(2, 2, **tkwargs) self.assertAllClose(obj(Y), torch.ones(2, **tkwargs)) # With pre-processing. mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], baseline_Y=torch.tensor([[0.0, 0.5], [0.5, 0.0]], **tkwargs), preprocessing_function=lambda Y: -Y, ) obj = mars.chebyshev_objective Y = -torch.ones(2, 2, **tkwargs) self.assertAllClose(obj(Y), torch.ones(2, **tkwargs)) # With ref point. mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], baseline_Y=torch.tensor([[0.0, 0.5], [0.5, 0.0]], **tkwargs), ref_point=[1.0, 1.0], ) obj = mars.chebyshev_objective Y = torch.ones(2, 2, **tkwargs) self.assertAllClose(obj(Y), torch.zeros(2, **tkwargs)) def test_end_to_end(self): for dtype in (torch.float, torch.double): tkwargs = {"dtype": dtype, "device": self.device} mars = MARS( alpha=0.5, n_w=3, chebyshev_weights=[0.5, 0.5], ref_point=[1.0, 1.0], baseline_Y=torch.randn(5, 2, **tkwargs), ) samples = torch.randn(5, 9, 2, **tkwargs) mars_vals = mars(samples) self.assertEqual(mars_vals.shape, torch.Size([5, 3])) self.assertEqual(mars_vals.dtype, dtype) self.assertEqual(mars_vals.device.type, self.device.type)
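# --------- Hand-check of the simple MVaR example -----
# A small illustrative sketch, assuming botorch is installed; it reproduces
# the negatively correlated example from TestMVaR above.
import torch
from botorch.acquisition.multi_objective.multi_output_risk_measures import MVaR

mvar = MVaR(n_w=5, alpha=0.6)
Y = torch.stack([torch.linspace(1, 5, 5), torch.linspace(5, 1, 5)], dim=-1)
# Points dominated by at least alpha * n_w = 3 of the 5 scenarios in both
# outputs form the MVaR set: {(1, 3), (2, 2), (3, 1)}.
print(mvar.get_mvar_set_cpu(Y))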
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from botorch.acquisition.multi_objective.analytic import ( ExpectedHypervolumeImprovement, MultiObjectiveAnalyticAcquisitionFunction, ) from botorch.acquisition.multi_objective.objective import IdentityMCMultiOutputObjective from botorch.acquisition.objective import PosteriorTransform from botorch.exceptions.errors import BotorchError, UnsupportedError from botorch.posteriors import GPyTorchPosterior from botorch.utils.multi_objective.box_decompositions.non_dominated import ( NondominatedPartitioning, ) from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior from torch import Tensor class DummyMultiObjectiveAnalyticAcquisitionFunction( MultiObjectiveAnalyticAcquisitionFunction ): def forward(self, X): pass class DummyPosteriorTransform(PosteriorTransform): def evaluate(self, Y: Tensor) -> Tensor: pass def forward(self, posterior: GPyTorchPosterior) -> GPyTorchPosterior: pass class TestMultiObjectiveAnalyticAcquisitionFunction(BotorchTestCase): def test_abstract_raises(self): with self.assertRaises(TypeError): MultiObjectiveAnalyticAcquisitionFunction() def test_init(self): mm = MockModel(MockPosterior(mean=torch.rand(2, 1))) # test default init acqf = DummyMultiObjectiveAnalyticAcquisitionFunction(model=mm) self.assertTrue(acqf.posterior_transform is None) # is None by default # test custom init posterior_transform = DummyPosteriorTransform() acqf = DummyMultiObjectiveAnalyticAcquisitionFunction( model=mm, posterior_transform=posterior_transform ) self.assertEqual(acqf.posterior_transform, posterior_transform) # test unsupported objective with self.assertRaises(UnsupportedError): DummyMultiObjectiveAnalyticAcquisitionFunction( model=mm, posterior_transform=IdentityMCMultiOutputObjective() ) acqf = DummyMultiObjectiveAnalyticAcquisitionFunction(model=mm) # test set_X_pending with self.assertRaises(UnsupportedError): acqf.set_X_pending() class TestExpectedHypervolumeImprovement(BotorchTestCase): def test_expected_hypervolume_improvement(self): tkwargs = {"device": self.device} for dtype in (torch.float, torch.double): ref_point = [0.0, 0.0] tkwargs["dtype"] = dtype pareto_Y = torch.tensor( [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]], **tkwargs ) partitioning = NondominatedPartitioning( ref_point=torch.tensor(ref_point, **tkwargs) ) # the event shape is `b x q x m` = 1 x 1 x 2 mean = torch.zeros(1, 1, 2, **tkwargs) variance = torch.zeros(1, 1, 2, **tkwargs) mm = MockModel(MockPosterior(mean=mean, variance=variance)) # test error if no pareto_Y has been initialized in the partitioning with self.assertRaises(BotorchError): ExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning ) partitioning.update(Y=pareto_Y) # test error if ref point has wrong shape with self.assertRaises(ValueError): ExpectedHypervolumeImprovement( model=mm, ref_point=ref_point[:1], partitioning=partitioning ) with self.assertRaises(ValueError): # test error if no pareto_Y point is better than ref_point ExpectedHypervolumeImprovement( model=mm, ref_point=[10.0, 10.0], partitioning=partitioning ) X = torch.zeros(1, 1, **tkwargs) # basic test acqf = ExpectedHypervolumeImprovement( model=mm, ref_point=ref_point, partitioning=partitioning ) res = acqf(X) self.assertEqual(res.item(), 0.0) # check ref point self.assertTrue( torch.equal(acqf.ref_point,
torch.tensor(ref_point, **tkwargs)) ) # check bounds self.assertTrue(hasattr(acqf, "cell_lower_bounds")) self.assertTrue(hasattr(acqf, "cell_upper_bounds")) # check cached indices expected_indices = torch.tensor( [[0, 0], [0, 1], [1, 0], [1, 1]], dtype=torch.long, device=self.device ) self.assertTrue(torch.equal(acqf._cross_product_indices, expected_indices))
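# For orientation, a minimal standalone sketch of the workflow the test above
# exercises: build a (multi-output) model, decompose the observed outcomes into
# hypercells, and evaluate EHVI on candidates. The training data, dimensions,
# and candidate set below are illustrative assumptions, not part of the test.
import torch
from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement
from botorch.models import SingleTaskGP
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    NondominatedPartitioning,
)

train_X = torch.rand(8, 2, dtype=torch.double)  # 8 points in a 2-d design space
train_Y = torch.rand(8, 2, dtype=torch.double)  # 2 objectives (illustrative values)
model = SingleTaskGP(train_X, train_Y)  # multi-output model (hyperparameters unfitted)

ref_point = torch.zeros(2, dtype=torch.double)
partitioning = NondominatedPartitioning(ref_point=ref_point)
partitioning.update(Y=train_Y)  # build the non-dominated cell decomposition

acqf = ExpectedHypervolumeImprovement(
    model=model, ref_point=ref_point.tolist(), partitioning=partitioning
)
cand_X = torch.rand(5, 1, 2, dtype=torch.double)  # 5 candidate batches with q=1
print(acqf(cand_X).shape)  # torch.Size([5]) -- one EHVI value per candidate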
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import itertools
import warnings

import torch
from botorch.acquisition.multi_objective.multi_output_risk_measures import (
    MultiOutputExpectation,
)
from botorch.acquisition.multi_objective.objective import (
    FeasibilityWeightedMCMultiOutputObjective,
    IdentityMCMultiOutputObjective,
    MCMultiOutputObjective,
    UnstandardizeMCMultiOutputObjective,
    WeightedMCMultiOutputObjective,
)
from botorch.acquisition.objective import (
    IdentityMCObjective,
    UnstandardizePosteriorTransform,
)
from botorch.exceptions.errors import BotorchError, BotorchTensorDimensionError
from botorch.models.transforms.outcome import Standardize
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior


class TestMCMultiOutputObjective(BotorchTestCase):
    def test_abstract_raises(self):
        with self.assertRaises(TypeError):
            MCMultiOutputObjective()


class TestIdentityMCMultiOutputObjective(BotorchTestCase):
    def test_identity_mc_multi_output_objective(self):
        objective = IdentityMCMultiOutputObjective()
        with self.assertRaises(BotorchTensorDimensionError):
            IdentityMCMultiOutputObjective(outcomes=[0])
        # test negative outcome without specifying num_outcomes
        with self.assertRaises(BotorchError):
            IdentityMCMultiOutputObjective(outcomes=[0, -1])
        for batch_shape, m, dtype in itertools.product(
            ([], [3]), (2, 3), (torch.float, torch.double)
        ):
            samples = torch.rand(*batch_shape, 2, m, device=self.device, dtype=dtype)
            self.assertTrue(torch.equal(objective(samples), samples))


class TestWeightedMCMultiOutputObjective(BotorchTestCase):
    def test_weighted_mc_multi_output_objective(self):
        with self.assertRaises(BotorchTensorDimensionError):
            WeightedMCMultiOutputObjective(weights=torch.rand(3, 1))
        with self.assertRaises(BotorchTensorDimensionError):
            WeightedMCMultiOutputObjective(
                weights=torch.rand(3), outcomes=[0, 1], num_outcomes=3
            )
        for batch_shape, m, dtype in itertools.product(
            ([], [3]), (2, 3), (torch.float, torch.double)
        ):
            weights = torch.rand(m, device=self.device, dtype=dtype)
            objective = WeightedMCMultiOutputObjective(weights=weights)
            samples = torch.rand(*batch_shape, 2, m, device=self.device, dtype=dtype)
            self.assertTrue(torch.equal(objective(samples), samples * weights))


class TestFeasibilityWeightedMCMultiOutputObjective(BotorchTestCase):
    def test_feasibility_weighted_mc_multi_output_objective(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"dtype": dtype, "device": self.device}
            X = torch.zeros(5, 1, **tkwargs)
            # The infeasible cost will be 0.0.
            means = torch.tensor(
                [
                    [1.0, 0.5],
                    [2.0, -1.0],
                    [3.0, -0.5],
                    [4.0, 1.0],
                    [5.0, 1.0],
                ],
                **tkwargs,
            )
            variances = torch.zeros(5, 2, **tkwargs)
            mm = MockModel(MockPosterior(mean=means, variance=variances))
            feas_obj = FeasibilityWeightedMCMultiOutputObjective(
                model=mm,
                X_baseline=X,
                constraint_idcs=[-1],
                objective=None,
            )
            feas_samples = feas_obj(means)
            expected = torch.tensor([[1.0], [0.0], [0.0], [4.0], [5.0]], **tkwargs)
            self.assertTrue(torch.allclose(feas_samples, expected))
            self.assertTrue(feas_obj._verify_output_shape)

            # With an objective.
            preprocessing_function = WeightedMCMultiOutputObjective(
                weights=torch.tensor([2.0])
            )
            dummy_obj = MultiOutputExpectation(
                n_w=1, preprocessing_function=preprocessing_function
            )
            dummy_obj._verify_output_shape = False  # for testing
            feas_obj = FeasibilityWeightedMCMultiOutputObjective(
                model=mm,
                X_baseline=X,
                constraint_idcs=[1],
                objective=dummy_obj,
            )
            feas_samples = feas_obj(means)
            self.assertTrue(torch.allclose(feas_samples, expected * 2.0))
            self.assertFalse(feas_obj._verify_output_shape)

            # No constraints.
            feas_obj = FeasibilityWeightedMCMultiOutputObjective(
                model=mm,
                X_baseline=X,
                constraint_idcs=[],
                objective=None,
            )
            feas_samples = feas_obj(means)
            self.assertIs(feas_samples, means)

            # With a single-output objective.
            feas_obj = FeasibilityWeightedMCMultiOutputObjective(
                model=mm,
                X_baseline=X,
                constraint_idcs=[1],
                objective=IdentityMCObjective(),
            )
            feas_samples = feas_obj(means)
            self.assertTrue(torch.allclose(feas_samples, expected.squeeze(-1)))

            # Error with duplicate idcs.
            with self.assertRaisesRegex(ValueError, "duplicate"):
                FeasibilityWeightedMCMultiOutputObjective(
                    model=mm,
                    X_baseline=X,
                    constraint_idcs=[1, -1],
                )


class TestUnstandardizeMultiOutputObjective(BotorchTestCase):
    def test_unstandardize_mo_objective(self):
        warnings.filterwarnings(
            "ignore",
            message=(
                "UnstandardizeAnalyticMultiOutputObjective is deprecated. "
                "Use UnstandardizePosteriorTransform instead."
            ),
        )
        Y_mean = torch.ones(2)
        Y_std = torch.ones(2)
        with self.assertRaises(BotorchTensorDimensionError):
            UnstandardizeMCMultiOutputObjective(
                Y_mean=Y_mean, Y_std=Y_std, outcomes=[0, 1, 2]
            )
        for objective_class in (
            UnstandardizeMCMultiOutputObjective,
            UnstandardizePosteriorTransform,
        ):
            with self.assertRaises(BotorchTensorDimensionError):
                objective_class(Y_mean=Y_mean.unsqueeze(0), Y_std=Y_std)
            with self.assertRaises(BotorchTensorDimensionError):
                objective_class(Y_mean=Y_mean, Y_std=Y_std.unsqueeze(0))
            objective = objective_class(Y_mean=Y_mean, Y_std=Y_std)
            for batch_shape, m, outcomes, dtype in itertools.product(
                ([], [3]), (2, 3), (None, [-2, -1]), (torch.float, torch.double)
            ):
                Y_mean = torch.rand(m, dtype=dtype, device=self.device)
                Y_std = torch.rand(m, dtype=dtype, device=self.device).clamp_min(1e-3)
                kwargs = {}
                if objective_class == UnstandardizeMCMultiOutputObjective:
                    kwargs["outcomes"] = outcomes
                    objective = objective_class(Y_mean=Y_mean, Y_std=Y_std, **kwargs)
                if objective_class == UnstandardizePosteriorTransform:
                    objective = objective_class(Y_mean=Y_mean, Y_std=Y_std)
                    if outcomes is None:  # passing outcomes is not currently supported
                        mean = torch.rand(2, m, dtype=dtype, device=self.device)
                        variance = torch.rand(2, m, dtype=dtype, device=self.device)
                        mock_posterior = MockPosterior(mean=mean, variance=variance)
                        tf_posterior = objective(mock_posterior)
                        tf = Standardize(m=m)
                        tf.means = Y_mean
                        tf.stdvs = Y_std
                        tf._stdvs_sq = Y_std.pow(2)
                        tf._is_trained = torch.tensor(True)
                        tf.eval()
                        expected_posterior = tf.untransform_posterior(mock_posterior)
                        self.assertTrue(
                            torch.equal(tf_posterior.mean, expected_posterior.mean)
                        )
                        self.assertTrue(
                            torch.equal(
                                tf_posterior.variance, expected_posterior.variance
                            )
                        )
                # testing evaluate specifically
                if objective_class == UnstandardizePosteriorTransform:
                    Y = torch.randn_like(Y_mean) + Y_mean
                    val = objective.evaluate(Y)
                    val_expected = Y_mean + Y * Y_std
                    self.assertTrue(torch.allclose(val, val_expected))
                else:
                    samples = torch.rand(
                        *batch_shape, 2, m, dtype=dtype, device=self.device
                    )
                    obj_expected = samples * Y_std.to(dtype=dtype) + Y_mean.to(
                        dtype=dtype
                    )
                    if outcomes is not None:
                        obj_expected = obj_expected[..., outcomes]
                    self.assertTrue(torch.equal(objective(samples), obj_expected))
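# As a quick illustration of the weighting semantics checked above, a tiny
# standalone example; the sample values are made up for illustration.
import torch
from botorch.acquisition.multi_objective.objective import (
    WeightedMCMultiOutputObjective,
)

objective = WeightedMCMultiOutputObjective(weights=torch.tensor([1.0, -1.0]))
samples = torch.tensor([[[2.0, 3.0]]])  # shape: sample x q x m = 1 x 1 x 2
print(objective(samples))  # tensor([[[ 2., -3.]]]) -- elementwise outcome scaling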
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from itertools import product

import torch
from botorch.acquisition.multi_objective.joint_entropy_search import (
    LowerBoundMultiObjectiveEntropySearch,
    qLowerBoundMultiObjectiveJointEntropySearch,
)
from botorch.acquisition.multi_objective.utils import compute_sample_box_decomposition
from botorch.exceptions import UnsupportedError
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase


def get_model(train_X, train_Y, use_model_list, standardize_model):
    num_objectives = train_Y.shape[-1]
    if standardize_model:
        if use_model_list:
            outcome_transform = Standardize(m=1)
        else:
            outcome_transform = Standardize(m=num_objectives)
    else:
        outcome_transform = None
    if use_model_list:
        model = ModelListGP(
            *[
                SingleTaskGP(
                    train_X=train_X,
                    train_Y=train_Y[:, i : i + 1],
                    outcome_transform=outcome_transform,
                )
                for i in range(num_objectives)
            ]
        )
    else:
        model = SingleTaskGP(
            train_X=train_X,
            train_Y=train_Y,
            outcome_transform=outcome_transform,
        )
    return model


def dummy_sample_pareto_sets(model, num_pareto_samples, num_pareto_points):
    m = model.models[0] if isinstance(model, ModelListGP) else model
    input_dim = m.train_inputs[0].shape[-1]
    tkwargs = {"dtype": m.train_inputs[0].dtype, "device": m.train_inputs[0].device}
    return torch.rand(
        num_pareto_samples,
        num_pareto_points,
        input_dim,
        **tkwargs,
    )


def dummy_sample_pareto_fronts(model, num_pareto_samples, num_pareto_points):
    m = model.models[0] if isinstance(model, ModelListGP) else model
    num_objectives = model.num_outputs
    tkwargs = {"dtype": m.train_inputs[0].dtype, "device": m.train_inputs[0].device}
    return torch.rand(
        num_pareto_samples,
        num_pareto_points,
        num_objectives,
        **tkwargs,
    )


class DummyLowerBoundMultiObjectiveEntropySearch(
    LowerBoundMultiObjectiveEntropySearch
):
    def _compute_posterior_statistics(self, X):
        pass

    def _compute_monte_carlo_variables(self, posterior):
        pass

    def forward(self, X):
        pass


class TestLowerBoundMultiObjectiveEntropySearch(BotorchTestCase):
    def test_abstract_raises(self):
        torch.manual_seed(1)
        tkwargs = {"device": self.device}
        estimation_types = ("0", "LB", "LB2", "MC", "Dummy")
        for (
            dtype,
            num_objectives,
            estimation_type,
            use_model_list,
            standardize_model,
        ) in product(
            (torch.float, torch.double),
            (1, 2, 3),
            estimation_types,
            (False, True),
            (False, True),
        ):
            tkwargs["dtype"] = dtype
            # test batched model
            train_X = torch.rand(4, 3, 2, **tkwargs)
            train_Y = torch.rand(4, 3, num_objectives, **tkwargs)
            model = SingleTaskGP(train_X, train_Y)
            num_pareto_samples = 3
            num_pareto_points = 1 if num_objectives == 1 else 4
            pareto_sets = dummy_sample_pareto_sets(
                model, num_pareto_samples, num_pareto_points
            )
            pareto_fronts = dummy_sample_pareto_fronts(
                model, num_pareto_samples, num_pareto_points
            )
            hypercell_bounds = torch.rand(
                num_pareto_samples, 2, 4, num_objectives, **tkwargs
            )
            with self.assertRaises(NotImplementedError):
                DummyLowerBoundMultiObjectiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets,
                    pareto_fronts=pareto_fronts,
                    hypercell_bounds=hypercell_bounds,
                    estimation_type=estimation_type,
                    num_samples=64,
                )
            # test wrong Pareto shape and hypercell bounds
            train_X = torch.rand(1, 2, **tkwargs)
            train_Y = torch.rand(1, num_objectives, **tkwargs)
            model = get_model(train_X, train_Y, use_model_list, standardize_model)
            num_pareto_samples = 3
            num_pareto_points = 4
            pareto_sets = dummy_sample_pareto_sets(
                model, num_pareto_samples, num_pareto_points
            )
            pareto_fronts = dummy_sample_pareto_fronts(
                model, num_pareto_samples, num_pareto_points
            )
            hypercell_bounds = torch.rand(
                num_pareto_samples, 2, 4, num_objectives, **tkwargs
            )
            with self.assertRaises(UnsupportedError):
                DummyLowerBoundMultiObjectiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets.unsqueeze(0),
                    pareto_fronts=pareto_fronts,
                    hypercell_bounds=hypercell_bounds,
                    estimation_type=estimation_type,
                    num_samples=64,
                )
            with self.assertRaises(UnsupportedError):
                DummyLowerBoundMultiObjectiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets,
                    pareto_fronts=pareto_fronts.unsqueeze(0),
                    hypercell_bounds=hypercell_bounds,
                    estimation_type=estimation_type,
                    num_samples=64,
                )
            with self.assertRaises(UnsupportedError):
                DummyLowerBoundMultiObjectiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets,
                    pareto_fronts=pareto_fronts,
                    hypercell_bounds=hypercell_bounds.unsqueeze(0),
                    estimation_type=estimation_type,
                    num_samples=64,
                )
            if estimation_type == "Dummy":
                with self.assertRaises(NotImplementedError):
                    DummyLowerBoundMultiObjectiveEntropySearch(
                        model=model,
                        pareto_sets=pareto_sets,
                        pareto_fronts=pareto_fronts,
                        hypercell_bounds=hypercell_bounds,
                        estimation_type=estimation_type,
                        num_samples=64,
                    )
            else:
                DummyLowerBoundMultiObjectiveEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets,
                    pareto_fronts=pareto_fronts,
                    hypercell_bounds=hypercell_bounds,
                    estimation_type=estimation_type,
                    num_samples=64,
                )


class TestQLowerBoundMultiObjectiveJointEntropySearch(BotorchTestCase):
    def _base_test_lb_moo_joint_entropy_search(self, estimation_type):
        torch.manual_seed(1)
        tkwargs = {"device": self.device}
        for dtype, num_objectives, use_model_list, standardize_model in product(
            (torch.float, torch.double),
            (1, 2, 3),
            (False, True),
            (False, True),
        ):
            tkwargs["dtype"] = dtype
            input_dim = 2
            train_X = torch.rand(4, input_dim, **tkwargs)
            train_Y = torch.rand(4, num_objectives, **tkwargs)
            model = get_model(train_X, train_Y, use_model_list, standardize_model)
            num_pareto_samples = 3
            num_pareto_points = 4
            pareto_sets = dummy_sample_pareto_sets(
                model, num_pareto_samples, num_pareto_points
            )
            pareto_fronts = dummy_sample_pareto_fronts(
                model, num_pareto_samples, num_pareto_points
            )
            hypercell_bounds = compute_sample_box_decomposition(pareto_fronts)
            # test acquisition
            X_pending_list = [None, torch.rand(2, input_dim, **tkwargs)]
            for X_pending in X_pending_list:
                acq = qLowerBoundMultiObjectiveJointEntropySearch(
                    model=model,
                    pareto_sets=pareto_sets,
                    pareto_fronts=pareto_fronts,
                    hypercell_bounds=hypercell_bounds,
                    estimation_type=estimation_type,
                    num_samples=64,
                    X_pending=X_pending,
                )
                self.assertIsInstance(acq.sampler, SobolQMCNormalSampler)
                test_Xs = [
                    torch.rand(4, 1, input_dim, **tkwargs),
                    torch.rand(4, 3, input_dim, **tkwargs),
                    torch.rand(4, 5, 1, input_dim, **tkwargs),
                    torch.rand(4, 5, 3, input_dim, **tkwargs),
                ]
                for test_X in test_Xs:
                    acq_X = acq(test_X)
                    # assess shape
                    self.assertTrue(acq_X.shape == test_X.shape[:-2])

    def test_lb_moo_joint_entropy_search_0(self):
        self._base_test_lb_moo_joint_entropy_search(estimation_type="0")

    def test_lb_moo_joint_entropy_search_LB(self):
        self._base_test_lb_moo_joint_entropy_search(estimation_type="LB")

    def test_lb_moo_joint_entropy_search_LB2(self):
        self._base_test_lb_moo_joint_entropy_search(estimation_type="LB2")

    def test_lb_moo_joint_entropy_search_MC(self):
        self._base_test_lb_moo_joint_entropy_search(estimation_type="MC")
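# The compute_sample_box_decomposition helper used above can also be
# sanity-checked on its own; a small sketch in which random tensors stand in
# for sampled Pareto fronts (all shapes are illustrative assumptions).
import torch
from botorch.acquisition.multi_objective.utils import compute_sample_box_decomposition

num_pareto_samples, num_pareto_points, num_objectives = 3, 4, 2
pareto_fronts = torch.rand(num_pareto_samples, num_pareto_points, num_objectives)
hypercell_bounds = compute_sample_box_decomposition(pareto_fronts)
# One lower/upper bound pair per hypercell, per sampled front:
# num_pareto_samples x 2 x num_boxes x num_objectives.
print(hypercell_bounds.shape)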
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from botorch.sampling.deterministic import DeterministicSampler
from botorch.utils.testing import BotorchTestCase, MockPosterior


class TestDeterministicSampler(BotorchTestCase):
    def test_deterministic_sampler(self):
        # Basic usage.
        samples = torch.rand(1, 2)
        posterior = MockPosterior(samples=samples)
        sampler = DeterministicSampler(sample_shape=torch.Size([2]))
        self.assertTrue(torch.equal(samples.repeat(2, 1, 1), sampler(posterior)))

        # Test _update_base_samples.
        sampler._update_base_samples(
            posterior=posterior,
            base_sampler=sampler,
        )
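# Outside of mocks, DeterministicSampler pairs naturally with a deterministic
# posterior; a brief sketch (the values tensor is an illustrative assumption).
import torch
from botorch.posteriors.deterministic import DeterministicPosterior
from botorch.sampling.deterministic import DeterministicSampler

posterior = DeterministicPosterior(values=torch.tensor([[1.0, 2.0]]))
sampler = DeterministicSampler(sample_shape=torch.Size([3]))
# The fixed values are broadcast over the sample dimension; no noise is added.
print(sampler(posterior).shape)  # torch.Size([3, 1, 2])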
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from unittest import mock

import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.posteriors.posterior_list import PosteriorList
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.sampling.stochastic_samplers import StochasticSampler
from botorch.utils.testing import BotorchTestCase, MockPosterior


class TestListSampler(BotorchTestCase):
    def test_list_sampler(self):
        # Test initialization.
        sampler = ListSampler(
            IIDNormalSampler(sample_shape=torch.Size([2])),
            StochasticSampler(sample_shape=torch.Size([2])),
        )
        self.assertIsInstance(sampler.samplers[0], IIDNormalSampler)
        self.assertIsInstance(sampler.samplers[1], StochasticSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([2]))

        # Test validation.
        with self.assertRaisesRegex(UnsupportedError, "all samplers to have the "):
            ListSampler(
                StochasticSampler(sample_shape=torch.Size([2])),
                StochasticSampler(sample_shape=torch.Size([3])),
            )

        # Test basic usage.
        org_samples = torch.rand(1, 5)
        p1 = MockPosterior(samples=org_samples[:, :2])
        p2 = MockPosterior(samples=org_samples[:, 2:])
        p_list = PosteriorList(p1, p2)
        samples = sampler(p_list)
        self.assertAllClose(samples, org_samples.repeat(2, 1, 1))

        # Test _update_base_samples.
        sampler = ListSampler(
            IIDNormalSampler(sample_shape=torch.Size([2])),
            SobolQMCNormalSampler(sample_shape=torch.Size([2])),
        )
        sampler2 = ListSampler(
            IIDNormalSampler(sample_shape=torch.Size([2])),
            SobolQMCNormalSampler(sample_shape=torch.Size([2])),
        )
        with mock.patch.object(
            sampler.samplers[0], "_update_base_samples"
        ) as update_0, mock.patch.object(
            sampler.samplers[1], "_update_base_samples"
        ) as update_1:
            sampler._update_base_samples(posterior=p_list, base_sampler=sampler2)
            update_0.assert_called_once_with(
                posterior=p1, base_sampler=sampler2.samplers[0]
            )
            update_1.assert_called_once_with(
                posterior=p2, base_sampler=sampler2.samplers[1]
            )
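# A standalone sketch of the joining behavior this test verifies with mocks:
# a ListSampler draws from each posterior in a PosteriorList with its matching
# sampler and concatenates along the outcome dimension. The deterministic
# posteriors and values here are illustrative assumptions.
import torch
from botorch.posteriors.deterministic import DeterministicPosterior
from botorch.posteriors.posterior_list import PosteriorList
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.stochastic_samplers import StochasticSampler

p1 = DeterministicPosterior(values=torch.rand(1, 2))  # 2 outcomes
p2 = DeterministicPosterior(values=torch.rand(1, 3))  # 3 outcomes
sampler = ListSampler(
    StochasticSampler(sample_shape=torch.Size([2])),
    StochasticSampler(sample_shape=torch.Size([2])),
)
samples = sampler(PosteriorList(p1, p2))
print(samples.shape)  # torch.Size([2, 1, 5]) -- outcomes concatenated (2 + 3)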
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from botorch.posteriors.deterministic import DeterministicPosterior
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.posteriors.posterior_list import PosteriorList
from botorch.posteriors.torch import TorchPosterior
from botorch.posteriors.transformed import TransformedPosterior
from botorch.sampling.get_sampler import get_sampler
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
from botorch.sampling.stochastic_samplers import StochasticSampler
from botorch.utils.testing import BotorchTestCase
from gpytorch.distributions import MultivariateNormal
from torch.distributions.gamma import Gamma


class TestGetSampler(BotorchTestCase):
    def test_get_sampler(self):
        # Basic usage w/ gpytorch posterior.
        posterior = GPyTorchPosterior(
            distribution=MultivariateNormal(torch.rand(2), torch.eye(2))
        )
        sampler = get_sampler(
            posterior=posterior, sample_shape=torch.Size([10]), seed=2
        )
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.seed, 2)
        self.assertEqual(sampler.sample_shape, torch.Size([10]))

        # Fallback to IID sampler.
        posterior = GPyTorchPosterior(
            distribution=MultivariateNormal(torch.rand(22000), torch.eye(22000))
        )
        sampler = get_sampler(posterior=posterior, sample_shape=torch.Size([10]))
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([10]))

        # Transformed posterior.
        tf_post = TransformedPosterior(
            posterior=posterior, sample_transform=lambda X: X
        )
        sampler = get_sampler(posterior=tf_post, sample_shape=torch.Size([10]))
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([10]))

        # PosteriorList with transformed & deterministic.
        post_list = PosteriorList(
            tf_post, DeterministicPosterior(values=torch.rand(1, 2))
        )
        sampler = get_sampler(posterior=post_list, sample_shape=torch.Size([5]))
        self.assertIsInstance(sampler, ListSampler)
        self.assertIsInstance(sampler.samplers[0], IIDNormalSampler)
        self.assertIsInstance(sampler.samplers[1], StochasticSampler)
        for s in sampler.samplers:
            self.assertEqual(s.sample_shape, torch.Size([5]))

        # Unknown torch posterior.
        posterior = TorchPosterior(distribution=Gamma(torch.rand(2), torch.rand(2)))
        with self.assertRaisesRegex(NotImplementedError, "A registered `MCSampler`"):
            get_sampler(posterior=posterior, sample_shape=torch.Size([5]))
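# A short sketch of the dispatch this test covers: get_sampler inspects the
# posterior and returns an appropriate sampler type, so calling code need not
# hard-code one. The small MVN posterior below is an illustrative assumption.
import torch
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.sampling.get_sampler import get_sampler
from gpytorch.distributions import MultivariateNormal

posterior = GPyTorchPosterior(
    distribution=MultivariateNormal(torch.zeros(3), torch.eye(3))
)
sampler = get_sampler(posterior=posterior, sample_shape=torch.Size([16]))
samples = sampler(posterior)  # leading dims are the sample_shape
print(type(sampler).__name__, samples.shape)  # a Sobol sampler for a small MVN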
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from unittest import mock

import torch
from botorch.posteriors.torch import TorchPosterior
from botorch.sampling.stochastic_samplers import ForkedRNGSampler, StochasticSampler
from botorch.utils.testing import BotorchTestCase, MockPosterior
from torch.distributions.exponential import Exponential


class TestForkedRNGSampler(BotorchTestCase):
    def test_forked_rng_sampler(self):
        posterior = TorchPosterior(Exponential(rate=torch.rand(1, 2)))
        sampler = ForkedRNGSampler(sample_shape=torch.Size([2]), seed=0)
        with mock.patch.object(
            posterior.distribution, "rsample", wraps=posterior.distribution.rsample
        ) as mock_rsample:
            samples = sampler(posterior)
            mock_rsample.assert_called_once_with(sample_shape=torch.Size([2]))
        with torch.random.fork_rng():
            torch.manual_seed(0)
            expected = posterior.rsample(sample_shape=torch.Size([2]))
        self.assertAllClose(samples, expected)


class TestStochasticSampler(BotorchTestCase):
    def test_stochastic_sampler(self):
        # Basic usage.
        samples = torch.rand(1, 2)
        posterior = MockPosterior(samples=samples)
        sampler = StochasticSampler(sample_shape=torch.Size([2]))
        self.assertTrue(torch.equal(samples.repeat(2, 1, 1), sampler(posterior)))

        # Test _update_base_samples.
        with self.assertRaisesRegex(NotImplementedError, "_update_base_samples"):
            sampler._update_base_samples(posterior=posterior, base_sampler=sampler)
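# Finally, a sketch of the reproducibility property ForkedRNGSampler is built
# around: each call forks the global RNG, re-seeds, and draws, so repeated
# calls return identical samples without perturbing global RNG state. The seed
# and distribution below are illustrative assumptions.
import torch
from botorch.posteriors.torch import TorchPosterior
from botorch.sampling.stochastic_samplers import ForkedRNGSampler
from torch.distributions.exponential import Exponential

posterior = TorchPosterior(Exponential(rate=torch.ones(1, 2)))
sampler = ForkedRNGSampler(sample_shape=torch.Size([4]), seed=123)
first = sampler(posterior)
second = sampler(posterior)
print(torch.equal(first, second))  # True -- the forked RNG is re-seeded per call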