python_code (stringlengths 0–4.04M) | repo_name (stringlengths 8–58) | file_path (stringlengths 5–147)
---|---|---|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import itertools
import logging
import os
import os.path as osp
from os.path import join as osj
from time import time
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
from inferece_utils import calc_ndcg_per_category, calc_performance, get_dataloaders
from lit.eval_utils import generate_test_set_hot_labels, get_unique_asins
from lit.lit_pcomp import LitPcomp
logger = logging.getLogger(__name__)
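# Encode a batch of items by fusing image embeddings with the learned category and price-bin embeddings.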
def model_inference(model, category, price_bin, img_emb):
category_emb = model.category_embs(category)
price_emb = model.price_embs(price_bin)
emb = model.img_encoder(img_emb, category_emb, price_emb)
return emb
def evaluate_pcomp(candidate_loader, inference_loader, cfg):
# Load weights
checkpoint = osj(cfg.model_weight_dir, "checkpoint.ckpt")
lit_model = LitPcomp.load_from_checkpoint(checkpoint)
lit_model.eval()
# Iterate on candidates
candidates, asins, category_candidates = [], [], []
with torch.no_grad():
for batch in tqdm(candidate_loader):
(
img_emb_candidate,
price_bin_candidate,
category_candidate,
asin_candidate,
) = batch
candidate = model_inference(
lit_model, category_candidate, price_bin_candidate, img_emb_candidate
)
candidates.append(candidate.detach().cpu())
asins.append(asin_candidate)
category_candidates.append(category_candidate.detach().cpu())
candidates = torch.vstack(candidates)
asins = np.array(list(itertools.chain(*asins)))
category_candidates = torch.hstack(category_candidates)
# Get valid categories
hot_labels, dists, valid_categories_list = [], [], []
with torch.no_grad():
for batch in tqdm(inference_loader):
(
img_emb_test,
price_bin_test,
category_test,
_,
valid_categories,
asin_targets,
) = batch
src = model_inference(
lit_model, category_test, price_bin_test, img_emb_test
)
# Transform the test item to all valid categories (categories that appeared in the training set)
valid_categories_hstack = torch.hstack(valid_categories)
src_repeat = src.repeat(len(valid_categories), 1)
src_fbt = lit_model.fbt_categories(valid_categories_hstack) * src_repeat
dists_i = torch.cdist(src_fbt, candidates, p=2)
dists_i = dists_i.min(axis=0).values
# Create ground-truth labels
hot_labels_i = np.in1d(
asins, np.array([asin_target[0] for asin_target in asin_targets])
)
# Save
valid_categories_list.append(valid_categories_hstack)
dists.append(dists_i)
hot_labels.append(hot_labels_i)
assert hot_labels_i.sum() > 0
# Calculate probabilities
dists = torch.vstack(dists)
probs = torch.softmax(-dists, axis=-1)
hot_labels = np.vstack(hot_labels)
# Retrieval metrics
calc_performance(hot_labels, probs, cfg)
def evaluate_pcomp_category_aware(dataset, cfg, out_dir):
path = cfg.model_weight_dir
model_base_dir = osp.basename(path)
asin_src = torch.load(osj(path, "asin_src.pth"))
asin_pos = torch.load(osj(path, "asin_pos.pth"))
category_src = torch.from_numpy(torch.load(osj(path, "category_src.pth")))
category_pos = torch.from_numpy(torch.load(osj(path, "category_pos.pth")))
src_fbt = torch.load(osj(path, "src_fbt.pth"))
src = torch.load(osj(path, "src.pth"))
pos = torch.load(osj(path, "pos.pth"))
set_name = torch.load(osj(path, "set_name.pth"))
# Test sources: keep one entry per unique (source, target-category) pair
src_fbt_test = src_fbt[set_name == "test"]
asin_src_test = asin_src[set_name == "test"]
category_pos_test = category_pos[set_name == "test"]
locs = list(zip(asin_src_test, category_pos_test))
_, unique_idxs = np.unique(np.array(locs), axis=0, return_index=True)
src_fbt_test, asin_src_test, category_pos_test = (
src_fbt_test[unique_idxs],
asin_src_test[unique_idxs],
category_pos_test[unique_idxs],
)
# Candidate to compare with
asins = np.hstack([asin_src, asin_pos])
embs = torch.vstack([src, pos])
categories = torch.hstack([category_src, category_pos])
asins, embs, categories = get_unique_asins(asins, embs, categories)
# Build hot label
hot_labels, asin_src_test = generate_test_set_hot_labels(
asin_src_test=asin_src_test,
category_pos_test=category_pos_test.numpy(),
fbt_by_asin_src=dataset.fbt_by_asin_src,
asins=asins,
)
# Find distance of the candidates
dists = torch.cdist(src_fbt_test, embs, p=2)
probs = torch.softmax(-dists, axis=-1)
# Constrain to target
for n, cat in enumerate(category_pos_test):
probs[n, categories != cat] = 0
# Calculate retrieval metrics
calc_performance(hot_labels, probs, cfg)
calc_ndcg_per_category(
hot_labels, probs, asin_src_test, category_pos_test, model_base_dir, out_dir
)
@hydra.main(version_base="1.2", config_path="../configs/", config_name="inference")
def execute_pcomp_inference(cfg: DictConfig):
t0 = time()
out_dir = os.getcwd()
os.chdir(hydra.utils.get_original_cwd())
name = osp.basename(out_dir)
pl.seed_everything(cfg.seed)
wandb.init(
project=cfg.wandb.project,
dir=out_dir,
config=OmegaConf.to_container(cfg),
job_type="analysis",
name="analysis_" + name,
)
logger.info(f"out_dir={out_dir}")
logger.info(cfg)
logger.info(f"{torch.backends.mps.is_available()=}")
for category_name, model_pcomp_weight_dir in zip(
cfg.pcomp_category_names, cfg.model_pcomp_weight_dirs
):
t1 = time()
cfg.category_name = category_name
cfg.model_weight_dir = model_pcomp_weight_dir
candidate_loader, inference_loader, dataset = get_dataloaders(cfg)
evaluate_pcomp(candidate_loader, inference_loader, cfg)
evaluate_pcomp_category_aware(dataset, cfg, out_dir)
logger.info(f"Finish {category_name} in {time() - t1:.1f} s")
logger.info(f"Finish execute_pcomp_inference in {time() - t0:.1f} s")
if __name__ == "__main__":
execute_pcomp_inference()
| cycle_gan_for_complementary_item_recommendations-main | src/main_inference_pcomp.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import time
from os.path import join as osj
from time import time
from itertools import chain
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from sklearn.metrics import ndcg_score
from torch import nn, optim
from .eval_utils import (
calc_topk,
generate_test_set_hot_labels,
get_unique_asins,
save_run_results,
)
logger = logging.getLogger(__name__)
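# Learnable embedding table (max-norm 1.0), used for category and price-bin IDs.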
class LearnedEmbs(nn.Module):
def __init__(self, num_classes, emb_size: int = 16):
super().__init__()
self.num_classes, self.emb_size = num_classes, emb_size
self.embs = nn.Embedding(self.num_classes, self.emb_size, max_norm=1.0)
def forward(self, idx):
return self.embs(idx)
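# MLP that maps the concatenated image, category, and price embeddings to a single item embedding.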
class ImageEncoder(nn.Module):
def __init__(
self,
input_emb_dim: int,
input_category_dim: int,
input_price_dim: int,
output_dim: int,
dropout_rate: float = 0.2,
):
super().__init__()
self.input_emb_dim = input_emb_dim
self.input_category_dim = input_category_dim
self.input_price_dim = input_price_dim
self.input_dim = input_emb_dim + input_category_dim + input_price_dim
self.layers = nn.Sequential(
nn.Linear(in_features=self.input_dim, out_features=256),
nn.BatchNorm1d(256),
nn.Dropout(p=dropout_rate),
nn.LeakyReLU(),
nn.Linear(in_features=256, out_features=64),
nn.BatchNorm1d(64),
nn.Dropout(p=dropout_rate),
nn.LeakyReLU(),
nn.Linear(in_features=64, out_features=output_dim),
)
def forward(self, img_embs, category_embs, price_embs):
return self.layers(torch.hstack([img_embs, category_embs, price_embs]))
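# Small head that predicts an item's category from its embedding; also serves as the adversarial discriminator.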
class CategoryClassifier(nn.Module):
def __init__(
self,
emb_dim: int,
num_categories: int,
dropout_rate: float = 0.2,
):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(emb_dim, 8),
nn.Dropout(p=dropout_rate),
nn.LeakyReLU(),
nn.Linear(in_features=8, out_features=num_categories),
)
def forward(self, x):
return self.layers(x)
class FbtAutoEncoder(nn.Module):
def __init__(
self,
input_emb_dim: int,
input_category_dim: int,
dropout_rate: float = 0.1,
):
# Transformation: source embs + dst category -> dst embs
super().__init__()
self.input_emb_dim = input_emb_dim
self.input_category_dim = input_category_dim
self.input_dim = input_emb_dim + input_category_dim
self.layers = nn.Sequential(
nn.Linear(in_features=self.input_dim, out_features=8),
nn.BatchNorm1d(8),
nn.Dropout(p=dropout_rate),
nn.LeakyReLU(),
nn.Linear(in_features=8, out_features=self.input_emb_dim),
)
def forward(self, src_embs, dst_category):
return self.layers(torch.hstack([src_embs, dst_category]))
class LitFbt(pl.LightningModule):
def __init__(
self,
input_emb_dim: int,
emb_dim: int,
num_categories: int,
category_emb_size: int,
num_price_bins: int,
price_emb_size: int,
cfg,
out_dir: str = ".",
):
self.cfg = cfg
self.out_dir = out_dir
self.save_hyperparameters()
super().__init__()
# Architecture
self.num_categories = num_categories
self.category_embs = LearnedEmbs(num_categories, category_emb_size)
self.price_embs = LearnedEmbs(num_price_bins, price_emb_size)
self.img_encoder = ImageEncoder(
input_emb_dim, category_emb_size, price_emb_size, emb_dim, cfg.dropout_rate
)
self.fbt_ae = FbtAutoEncoder(emb_dim, cfg.category_emb_size, cfg.dropout_rate)
self.fbt_ae_return = FbtAutoEncoder(
emb_dim, cfg.category_emb_size, cfg.dropout_rate
)
self.clf = CategoryClassifier(emb_dim, num_categories, cfg.dropout_rate)
logger.info(self.category_embs)
logger.info(self.price_embs)
logger.info(self.img_encoder)
logger.info(self.fbt_ae)
logger.info(self.clf)
# Losses
self.criterion_triplet = nn.TripletMarginLoss(
margin=cfg.triplet_loss_margin, p=2
)
self.criterion_category = nn.CrossEntropyLoss()
self.criterion_cycle = nn.MSELoss()
# Performance
self.ndcg_val_best = 0.0
wandb.define_metric("retrieval_metrics/ndcg", summary="max")
def log(
self,
*args,
**kwargs,
) -> None:
kwargs["on_epoch"] = True
kwargs["on_step"] = False
return super().log(*args, **kwargs)
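# Fraction of triplets for which the transformed source is closer to the positive than to the negative.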
def calc_triplet_acc(self, src_fbt, pos, neg):
pos_dist = torch.norm(src_fbt - pos, dim=-1)
neg_dist = torch.norm(src_fbt - neg, dim=-1)
return (pos_dist < neg_dist).float().mean().item()
def _clf_helper(self, emb, category):
category_hat = self.clf(emb)
loss = self.criterion_category(category_hat, category)
category_pred = torch.argmax(category_hat, dim=-1)
acc = (category_pred == category).float().mean()
return loss, acc
def _cycle_helper(self, src, category_src_emb, category_dst):
category_dst_emb = self.category_embs(category_dst)
src_fbt = self.fbt_ae(src, category_dst_emb)
cycle = self.fbt_ae_return(src_fbt, category_src_emb)
loss_cycle = self.criterion_cycle(cycle, src.detach())
return loss_cycle
def _loss_helper(
self, batch, phase="train", batch_idx: int = 0, optimizer_idx: int = 0
):
(
img_emb_src,
img_emb_pos,
img_emb_neg,
price_bin_src,
price_bin_pos,
price_bin_neg,
category_src,
category_pos,
random_valid_category,
asin_src,
asin_pos,
set_name,
) = batch
category_src_emb = self.category_embs(category_src)
category_dst_emb = self.category_embs(category_pos)
price_src_emb = self.price_embs(price_bin_src)
price_pos_emb = self.price_embs(price_bin_pos)
price_neg_emb = self.price_embs(price_bin_neg)
src = self.img_encoder(img_emb_src, category_src_emb, price_src_emb)
pos = self.img_encoder(img_emb_pos, category_dst_emb, price_pos_emb)
neg = self.img_encoder(img_emb_neg, category_dst_emb, price_neg_emb)
if self.cfg.is_autoecoder_detach is True:
src_fbt = self.fbt_ae(src.detach(), category_dst_emb)
else:
src_fbt = self.fbt_ae(src, category_dst_emb)
# Train generator
if optimizer_idx == 0:
# Classifier
loss_clf_src, acc_clf_src = self._clf_helper(src, category_src)
loss_clf_pos, acc_clf_pos = self._clf_helper(pos, category_pos)
loss_clf_src_fbt, acc_clf_src_fbt = self._clf_helper(src_fbt, category_pos)
loss_clf = (1 / 3) * (loss_clf_src + loss_clf_src_fbt + loss_clf_pos)
# Triplet
loss_triplet = self.criterion_triplet(src_fbt, pos, neg)
acc_triplet = self.calc_triplet_acc(src_fbt, pos, neg)
# Cycle
loss_cycle = self._cycle_helper(
src, category_src_emb, random_valid_category
)
loss_cycle_labeled_pairs = self._cycle_helper(
src, category_src_emb, category_pos
)
# Loss
loss = (1 / 3) * (
self.cfg.triplet_weight * loss_triplet
+ self.cfg.cycle_weight * loss_cycle
+ self.cfg.cycle_weight_labeled_pairs * loss_cycle_labeled_pairs
+ self.cfg.clf_weight * loss_clf
)
acc_genuine = (1 / 2) * (acc_clf_src + acc_clf_pos)
# Logger
self.log(f"loss/{phase}", loss)
self.log(f"clf/{phase}/acc/acc_genuine", acc_genuine)
self.log(f"clf/{phase}/acc/acc_clf_src_fbt", acc_clf_src_fbt)
self.log(f"clf/{phase}/loss/loss_clf", loss_clf)
self.log(f"triplet/{phase}/acc_triplet", acc_triplet)
self.log(f"triplet/{phase}/loss_triplet", loss_triplet)
self.log(f"cycle/{phase}/loss_cycle", loss_cycle)
# Logger
src_fbt_std_mean = src_fbt.mean(axis=-1).mean()
src_fbt_std = src_fbt.std(axis=-1).mean()
src_fbt_avg_norm = torch.norm(src_fbt, dim=-1).mean()
category_emb_mean = category_src_emb.mean(axis=-1).mean()
category_emb_avg_norm = torch.norm(category_src_emb, dim=-1).mean()
category_emb_max_val = torch.max(category_src_emb)
epoch = float(self.trainer.current_epoch)
self.log(f"epoch/{phase}", epoch)
self.log(f"src_fbt/avg", src_fbt_std_mean)
self.log(f"src_fbt/std", src_fbt_std)
self.log(f"src_fbt/avg_norm", src_fbt_avg_norm)
self.log(f"category_emb/avg", category_emb_mean)
self.log(f"category_emb/avg_norm", category_emb_avg_norm)
self.log(f"category_emb/max_val", category_emb_max_val)
return {
"loss": loss,
"asin_src": asin_src,
"asin_pos": asin_pos,
"src": src.detach().cpu(),
"src_fbt": src_fbt.detach().cpu(),
"pos": pos.detach().cpu(),
"category_src": category_src.detach().cpu(),
"category_pos": category_pos.detach().cpu(),
"set_name": set_name,
}
# Train discriminator
if optimizer_idx == 1 and self.cfg.discriminator_weight > 0.0:
loss_clf_src, acc_clf_src = self._clf_helper(src.detach(), category_src)
loss_clf_src_fbt, acc_clf_src_fbt = self._clf_helper(
src_fbt.detach(), category_pos
)
loss_clf_src_fbt = -loss_clf_src_fbt
loss = (
self.cfg.discriminator_weight * 0.5 * (loss_clf_src + loss_clf_src_fbt)
)
self.log(f"loss/{phase}/optimizer_idx_1/loss_clf_src", loss_clf_src)
self.log(f"loss/{phase}/optimizer_idx_1/loss_clf_src_fbt", loss_clf_src_fbt)
return loss
def configure_optimizers_w_discriminator(self):
optimizer = optim.Adam(
chain(
self.category_embs.parameters(),
self.price_embs.parameters(),
self.img_encoder.parameters(),
self.fbt_ae.parameters(),
self.fbt_ae_return.parameters(),
),
lr=self.cfg.lr,
weight_decay=self.cfg.weight_decay,
)
optimizer_discriminator = optim.Adam(
self.clf.parameters(),
lr=self.cfg.lr,
weight_decay=self.cfg.weight_decay,
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=self.cfg.milestones
)
lr_scheduler_discriminator = torch.optim.lr_scheduler.MultiStepLR(
optimizer_discriminator, milestones=self.cfg.milestones
)
return [optimizer, optimizer_discriminator], [
lr_scheduler,
lr_scheduler_discriminator,
]
def configure_optimizers_vanilla(self):
optimizer = optim.Adam(
self.parameters(),
lr=self.cfg.lr,
weight_decay=self.cfg.weight_decay,
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=self.cfg.milestones
)
return [optimizer], [lr_scheduler]
def configure_optimizers(self):
# Use the generator + discriminator optimizer pair only when the adversarial loss is active
return (
self.configure_optimizers_w_discriminator()
if self.cfg.discriminator_weight > 0.0
else self.configure_optimizers_vanilla()
)
def training_step(self, batch, batch_idx, optimizer_idx: int = 0):
return self._loss_helper(
batch, phase="train", batch_idx=batch_idx, optimizer_idx=optimizer_idx
)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self._loss_helper(
batch, phase="test", batch_idx=batch_idx, optimizer_idx=0
)
def validation_epoch_end(self, outputs, phase: str = "test"):
self._calc_retrieval_metrics(outputs[-1], phase)
def _calc_retrieval_metrics(self, outputs, phase):
t1 = time()
# Get values from step
epoch = int(self.trainer.current_epoch)
src = torch.vstack([out["src"] for out in outputs])
src_fbt = torch.vstack([out["src_fbt"] for out in outputs])
pos = torch.vstack([out["pos"] for out in outputs])
asin_src = np.hstack([out["asin_src"] for out in outputs])
asin_pos = np.hstack([out["asin_pos"] for out in outputs])
category_src = torch.hstack([out["category_src"] for out in outputs]).numpy()
category_pos = torch.hstack([out["category_pos"] for out in outputs]).numpy()
set_name = np.hstack([out["set_name"] for out in outputs])
# Test sources: keep one entry per unique (source, target-category) pair
src_fbt_test = src_fbt[set_name == "test"]
asin_src_test = asin_src[set_name == "test"]
category_pos_test = category_pos[set_name == "test"]
locs = list(zip(asin_src_test, category_pos_test))
_, unique_idxs = np.unique(np.array(locs), axis=0, return_index=True)
src_fbt_test, asin_src_test, category_pos_test = (
src_fbt_test[unique_idxs],
asin_src_test[unique_idxs],
category_pos_test[unique_idxs],
)
# Candidate to compare with
asins = np.hstack([asin_src, asin_pos])
embs = torch.vstack([src, pos])
categories = np.hstack([category_src, category_pos])
asins, embs, categories = get_unique_asins(asins, embs, categories)
# Build hot label
t1 = time()
fbt_by_asin_src = self.trainer.val_dataloaders[
-1
].dataset.fbt_by_asin_src.copy()
hot_labels, _ = generate_test_set_hot_labels(
asin_src_test=asin_src_test,
category_pos_test=category_pos_test,
fbt_by_asin_src=fbt_by_asin_src,
asins=asins,
)
logger.info(f"hot_labels in {time()-t1:.1f} s. {hot_labels.shape=}")
# Find distance of the candidates
t2 = time()
dists = torch.cdist(src_fbt_test, embs, p=2)
probs = torch.softmax(-dists, axis=-1)
# Constrain to target category
for n, cat in enumerate(category_pos_test):
probs[n, categories != cat] = 0
# Calculate retrieval metrics
ndcg_val = ndcg_score(hot_labels, probs)
self.logger.log_metrics({"retrieval_metrics/ndcg": ndcg_val}, step=epoch)
logger.info(f" {epoch=} {ndcg_val=:.6f} {probs.shape=}. {time()-t2:.1f}s")
# TopK
t2 = time()
topk_d = calc_topk(probs, hot_labels, self.cfg.top_k)
self.logger.log_metrics(topk_d, step=epoch)
logger.info(f"{epoch=} _epoch_end_helper. {topk_d}. in {time()-t2:.1f} s")
# Save tensors
if self.ndcg_val_best < ndcg_val:
self.ndcg_val_best = ndcg_val
save_run_results(
{
"src": src,
"src_fbt": src_fbt,
"pos": pos,
"asin_src": asin_src,
"asin_pos": asin_pos,
"category_src": category_src,
"category_pos": category_pos,
"set_name": set_name,
},
self.out_dir,
)
self.trainer.save_checkpoint(osj(self.out_dir, "checkpoint.ckpt"))
logger.info(f" {epoch=} {self.ndcg_val_best=:.5f}")
logger.info(f"_epoch_end_helper. {time()-t1:.1f} s")
| cycle_gan_for_complementary_item_recommendations-main | src/lit/lit_utils.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import pytorch_lightning as pl
import torch
from torch import nn, optim
logger = logging.getLogger(__name__)
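# MLP classifier over pre-extracted image embeddings, trained with cross-entropy on one-hot category targets.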
class LitImgClassifier(pl.LightningModule):
def __init__(self, input_dim, num_classes, cfg):
super().__init__()
self.cfg = cfg
self.classifier = nn.Sequential(
nn.Linear(input_dim, 128),
nn.Dropout(0.2),
nn.LeakyReLU(0.1),
nn.Linear(128, 16),
nn.Dropout(0.2),
nn.LeakyReLU(0.1),
nn.Linear(16, num_classes),
)
self.criterion_category = nn.CrossEntropyLoss()
def training_step(self, batch, batch_idx, optimizer_idx: int = 0):
return self._loss_helper(batch, "train")
def validation_step(self, batch, batch_idx: int):
return self._loss_helper(batch, "val")
def test_step(self, batch, batch_idx):
return self._loss_helper(batch, "test")
def _loss_helper(self, batch, phase: str = "train"):
emb, category = batch
category_hat = self.classifier(emb)
# Performance
category_int = torch.argmax(category, dim=-1)
category_int_pred = torch.argmax(category_hat, dim=-1)
acc = (category_int_pred == category_int).float().mean()
loss = self.criterion_category(category_hat, category.float())
# Log
self.log(f"loss/{phase}", loss)
self.log(f"acc/{phase}", acc)
self.log(f"epoch/{phase}", float(self.trainer.current_epoch))
return {"loss": loss}
def configure_optimizers(self):
optimizer = optim.Adam(
self.parameters(), lr=self.cfg.lr, weight_decay=self.cfg.weight_decay
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=self.cfg.milestones
)
return [optimizer], [lr_scheduler]
| cycle_gan_for_complementary_item_recommendations-main | src/lit/lit_img_classifier.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import time
from os.path import join as osj
from time import time
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from sklearn.metrics import ndcg_score
from torch import nn, optim
from torch.nn.functional import mse_loss
from .eval_utils import (
calc_topk,
generate_test_set_hot_labels,
get_unique_asins,
save_run_results,
)
from .lit_utils import LearnedEmbs, ImageEncoder
logger = logging.getLogger(__name__)
class LitPcomp(pl.LightningModule):
def __init__(
self,
input_emb_dim: int,
emb_dim: int,
num_categories: int,
category_emb_size: int,
num_price_bins: int,
price_emb_size: int,
cfg,
out_dir: str = ".",
):
self.cfg = cfg
self.out_dir = out_dir
self.save_hyperparameters()
super().__init__()
# Architecture
self.num_categories = num_categories
self.category_embs = LearnedEmbs(num_categories, category_emb_size)
self.price_embs = LearnedEmbs(num_price_bins, price_emb_size)
self.img_encoder = ImageEncoder(
input_emb_dim, category_emb_size, price_emb_size, emb_dim, cfg.dropout_rate
)
self.fbt_categories = LearnedEmbs(num_categories, emb_dim)
logger.info(self.category_embs)
logger.info(self.price_embs)
logger.info(self.img_encoder)
# Performance
self.ndcg_val_best = 0.0
wandb.define_metric("retrieval_metrics/ndcg", summary="max")
def log(
self,
*args,
**kwargs,
) -> None:
kwargs["on_epoch"] = True
kwargs["on_step"] = False
return super().log(*args, **kwargs)
def _loss_helper(
self, batch, phase="train", batch_idx: int = 0, optimizer_idx: int = 0
):
(
img_emb_src,
img_emb_pos,
img_emb_neg,
price_bin_src,
price_bin_pos,
price_bin_neg,
category_src,
category_pos,
random_valid_category,
asin_src,
asin_pos,
set_name,
) = batch
category_src_emb = self.category_embs(category_src)
category_dst_emb = self.category_embs(category_pos)
price_src_emb = self.price_embs(price_bin_src)
price_pos_emb = self.price_embs(price_bin_pos)
price_neg_emb = self.price_embs(price_bin_neg)
src = self.img_encoder(img_emb_src, category_src_emb, price_src_emb)
pos = self.img_encoder(img_emb_pos, category_dst_emb, price_pos_emb)
neg = self.img_encoder(img_emb_neg, category_dst_emb, price_neg_emb)
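# Category-aware projection: scale the source embedding element-wise by (1 + learned target-category vector).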
src_fbt = (self.fbt_categories(category_pos) + 1) * src
# Triplet
zeros = torch.zeros_like(src_fbt)
loss_pos = torch.maximum(
zeros,
self.cfg.epsilon
- (self.cfg.lamb - mse_loss(src_fbt, pos, reduction="none")),
).mean()
loss_neg = torch.maximum(
zeros,
self.cfg.epsilon
+ (self.cfg.lamb - mse_loss(src_fbt, neg, reduction="none")),
).mean()
loss = 0.5 * (loss_pos + loss_neg)
# Logger
self.log(f"loss/{phase}/loss_pos", loss_pos)
self.log(f"loss/{phase}/loss_neg", loss_neg)
self.log(f"loss/{phase}", loss)
# Logger
src_fbt_std_mean = src_fbt.mean(axis=-1).mean()
src_fbt_std = src_fbt.std(axis=-1).mean()
src_fbt_avg_norm = torch.norm(src_fbt, dim=-1).mean()
category_emb_mean = category_src_emb.mean(axis=-1).mean()
category_emb_avg_norm = torch.norm(category_src_emb, dim=-1).mean()
category_emb_max_val = torch.max(category_src_emb)
epoch = float(self.trainer.current_epoch)
self.log(f"epoch/{phase}", epoch)
self.log(f"src_fbt/avg", src_fbt_std_mean)
self.log(f"src_fbt/std", src_fbt_std)
self.log(f"src_fbt/avg_norm", src_fbt_avg_norm)
self.log(f"category_emb/avg", category_emb_mean)
self.log(f"category_emb/avg_norm", category_emb_avg_norm)
self.log(f"category_emb/max_val", category_emb_max_val)
return {
"loss": loss,
"asin_src": asin_src,
"asin_pos": asin_pos,
"src": src.detach().cpu(),
"src_fbt": src_fbt.detach().cpu(),
"pos": pos.detach().cpu(),
"category_src": category_src.detach().cpu(),
"category_pos": category_pos.detach().cpu(),
"set_name": set_name,
}
def configure_optimizers(self):
optimizer = optim.Adam(
self.parameters(),
lr=self.cfg.lr,
weight_decay=self.cfg.weight_decay,
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=self.cfg.milestones
)
return [optimizer], [lr_scheduler]
def training_step(self, batch, batch_idx, optimizer_idx: int = 0):
return self._loss_helper(
batch, phase="train", batch_idx=batch_idx, optimizer_idx=optimizer_idx
)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self._loss_helper(
batch, phase="test", batch_idx=batch_idx, optimizer_idx=0
)
def validation_epoch_end(self, outputs, phase: str = "test"):
self._calc_retrieval_metrics(outputs[-1], phase)
def _calc_retrieval_metrics(self, outputs, phase):
t1 = time()
# Get values from step
epoch = int(self.trainer.current_epoch)
src = torch.vstack([out["src"] for out in outputs])
src_fbt = torch.vstack([out["src_fbt"] for out in outputs])
pos = torch.vstack([out["pos"] for out in outputs])
asin_src = np.hstack([out["asin_src"] for out in outputs])
asin_pos = np.hstack([out["asin_pos"] for out in outputs])
category_src = torch.hstack([out["category_src"] for out in outputs]).numpy()
category_pos = torch.hstack([out["category_pos"] for out in outputs]).numpy()
set_name = np.hstack([out["set_name"] for out in outputs])
# Test sources: keep one entry per unique (source, target-category) pair
src_fbt_test = src_fbt[set_name == "test"]
asin_src_test = asin_src[set_name == "test"]
category_pos_test = category_pos[set_name == "test"]
locs = list(zip(asin_src_test, category_pos_test))
_, unique_idxs = np.unique(np.array(locs), axis=0, return_index=True)
src_fbt_test, asin_src_test, category_pos_test = (
src_fbt_test[unique_idxs],
asin_src_test[unique_idxs],
category_pos_test[unique_idxs],
)
# Candidate to compare with
asins = np.hstack([asin_src, asin_pos])
embs = torch.vstack([src, pos])
categories = np.hstack([category_src, category_pos])
asins, embs, categories = get_unique_asins(asins, embs, categories)
# Build hot label
t1 = time()
fbt_by_asin_src = self.trainer.val_dataloaders[
-1
].dataset.fbt_by_asin_src.copy()
hot_labels, _ = generate_test_set_hot_labels(
asin_src_test=asin_src_test,
category_pos_test=category_pos_test,
fbt_by_asin_src=fbt_by_asin_src,
asins=asins,
)
logger.info(f"hot_labels in {time()-t1:.1f} s. {hot_labels.shape=}")
# Find distance of the candidates
t2 = time()
dists = torch.cdist(src_fbt_test, embs, p=2)
probs = torch.softmax(-dists, axis=-1)
# Constrain to target category
for n, cat in enumerate(category_pos_test):
probs[n, categories != cat] = 0
# Calculate retrieval metrics
ndcg_val = ndcg_score(hot_labels, probs)
self.logger.log_metrics({"retrieval_metrics/ndcg": ndcg_val}, step=epoch)
logger.info(f" {epoch=} {ndcg_val=:.6f} {probs.shape=}. {time()-t2:.1f}s")
# TopK
t2 = time()
topk_d = calc_topk(probs, hot_labels, self.cfg.top_k)
self.logger.log_metrics(topk_d, step=epoch)
logger.info(f"{epoch=} _epoch_end_helper. {topk_d}. in {time()-t2:.1f} s")
# Save tensors
if self.ndcg_val_best < ndcg_val:
self.ndcg_val_best = ndcg_val
save_run_results(
{
"src": src,
"src_fbt": src_fbt,
"pos": pos,
"asin_src": asin_src,
"asin_pos": asin_pos,
"category_src": category_src,
"category_pos": category_pos,
"set_name": set_name,
},
self.out_dir,
)
self.trainer.save_checkpoint(osj(self.out_dir, "checkpoint.ckpt"))
logger.info(f" {epoch=} {self.ndcg_val_best=:.5f}")
logger.info(f"_epoch_end_helper. {time()-t1:.1f} s")
| cycle_gan_for_complementary_item_recommendations-main | src/lit/lit_pcomp.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import numpy as np
import torch
import pandas as pd
from os.path import join as osj
logger = logging.getLogger(__name__)
def generate_test_set_hot_labels(
asin_src_test: np.ndarray,
category_pos_test: np.ndarray,
fbt_by_asin_src: pd.DataFrame,
asins: np.ndarray,
):
"""Build one-hot relevance labels for the test queries.
Args:
asin_src_test: source ASINs used as queries
category_pos_test: target category for each source ASIN
fbt_by_asin_src: also-bought ASIN lists indexed by (source ASIN, target category)
asins: candidate ASINs to be marked as relevant or not
Returns:
(hot_labels, asin_src_test)
"""
hot_label_series = fbt_by_asin_src.apply(lambda x: np.in1d(asins, x))
locs = list(zip(asin_src_test, category_pos_test))
hot_labels = torch.from_numpy(np.vstack(hot_label_series.loc[locs].values))
return hot_labels, asin_src_test
def vectorize_sort(x, permutation):
# Reorder each row of x according to the permutation indices
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
d1, d2 = x.size()
ret = x[
torch.arange(d1).unsqueeze(1).repeat((1, d2)).flatten(),
permutation.flatten(),
].view(d1, d2)
return ret
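# Deduplicate candidates by ASIN, keeping the first occurrence of each.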
def get_unique_asins(asins, embs, category_pos):
_, idx_of_unique = np.unique(asins, return_index=True)
idx_of_unique = np.sort(idx_of_unique)
asins_unique = asins[idx_of_unique]
embs_unique = embs[idx_of_unique]
category_pos_unique = category_pos[idx_of_unique]
return asins_unique, embs_unique, category_pos_unique
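# Hit rate at k: fraction of queries with at least one relevant candidate among the top-k scores.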
def calc_topk(probs, hot_labels, top_k_list):
_, sort_idxs = torch.sort(probs, dim=-1, descending=True)
is_true = vectorize_sort(hot_labels, sort_idxs)
topk_d = {}
for k in top_k_list:
topk = torch.any(is_true[:, :k], axis=-1).float().mean().item()
topk_d[f"topk/top{k}"] = topk
return topk_d
def create_pop_cat_aware_predictor(
fbt_df: pd.DataFrame, candidate_asins: np.ndarray
) -> dict:
# Initialize all ASIN probabilities to 0
pred_init = pd.DataFrame(
{"asins": candidate_asins, "freq": [0] * len(candidate_asins)}
).set_index("asins")
pred_dicts = {}
for category_int_target, df_gb in fbt_df.groupby(by=["category_int_target"]):
s = df_gb["asin_target"].value_counts(ascending=False)
pred = pred_init.copy()
# Set each ASIN's prediction probability according to its frequency in the target category
pred["freq"].loc[s.index] = s.values
pred = pred["freq"].to_numpy()
pred_dicts[category_int_target] = pred / pred.sum()
# pred_dicts = {category_int_target: [asin1_prob, asin2_prob, ... , asinsN_prob]}
return pred_dicts
def create_pop_predictor(fbt_df: pd.DataFrame, candidate_asins: np.ndarray) -> dict:
# Initialize all ASIN probabilities to 0
pred = pd.DataFrame(
{"asins": candidate_asins, "freq": [0] * len(candidate_asins)}
).set_index("asins")
# asins by popularity
s = fbt_df["asin_target"].value_counts(ascending=False)
pred["freq"].loc[s.index] = s.values
pred = pred["freq"].to_numpy()
return pred / pred.sum()
def save_run_results(tensor_dict: dict, out_dir: str):
for key, val in tensor_dict.items():
torch.save(val, osj(out_dir, f"{key}.pth"))
| cycle_gan_for_complementary_item_recommendations-main | src/lit/eval_utils.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import time
from os.path import join as osj
from time import time
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from sklearn.metrics import ndcg_score
from torch import nn, optim
from .eval_utils import (
calc_topk,
generate_test_set_hot_labels,
get_unique_asins,
save_run_results,
)
from .lit_utils import LearnedEmbs
logger = logging.getLogger(__name__)
class ImageEncoder(nn.Module):
def __init__(
self,
input_emb_dim: int,
input_category_dim: int,
input_price_dim: int,
output_dim: int,
dropout_rate: float = 0.2,
):
super().__init__()
self.input_emb_dim = input_emb_dim
self.input_category_dim = input_category_dim
self.input_price_dim = input_price_dim
self.input_dim = input_emb_dim + input_category_dim + input_price_dim
self.layers = nn.Sequential(
nn.Linear(in_features=self.input_dim, out_features=64),
nn.Dropout(p=dropout_rate),
nn.LeakyReLU(),
nn.Linear(in_features=64, out_features=output_dim),
nn.LeakyReLU(),
)
def forward(self, img_embs, category_embs, price_embs):
return self.layers(torch.hstack([img_embs, category_embs, price_embs]))
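# Scores a (source, candidate) pair with an MLP over the concatenated embeddings.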
class FusionModel(nn.Module):
def __init__(
self,
input_emb_dim: int,
dropout_rate: float = 0.2,
):
super().__init__()
self.input_emb_dim = input_emb_dim
self.layers = nn.Sequential(
nn.Linear(in_features=self.input_emb_dim * 2, out_features=16),
nn.Dropout(p=dropout_rate),
nn.LeakyReLU(),
nn.Linear(16, out_features=1),
)
def forward(self, src_embs, candidate_embs):
embs = torch.hstack([src_embs, candidate_embs])
return self.layers(embs)
class LitDCF(pl.LightningModule):
def __init__(
self,
input_emb_dim: int,
emb_dim: int,
num_categories: int,
category_emb_size: int,
num_price_bins: int,
price_emb_size: int,
cfg,
out_dir: str = ".",
):
self.cfg = cfg
self.out_dir = out_dir
self.save_hyperparameters()
super().__init__()
# Architecture
self.num_categories = num_categories
self.category_embs = LearnedEmbs(num_categories, category_emb_size)
self.price_embs = LearnedEmbs(num_price_bins, price_emb_size)
self.src_encoder = ImageEncoder(
input_emb_dim, category_emb_size, price_emb_size, emb_dim, 0.1
)
self.candidate_encoder = ImageEncoder(
input_emb_dim, category_emb_size, price_emb_size, emb_dim, 0.1
)
self.fusion_model = FusionModel(emb_dim, 0.1) # cfg.dropout_rate
self.criterion = nn.BCEWithLogitsLoss()
# Performance
self.ndcg_val_best = 0.0
wandb.define_metric("retrieval_metrics/ndcg", summary="max")
def _loss_helper(
self, batch, phase="train", batch_idx: int = 0, optimizer_idx: int = 0
):
(
img_emb_src,
img_emb_pos,
img_emb_neg,
price_bin_src,
price_bin_pos,
price_bin_neg,
category_src,
category_pos,
random_valid_category,
asin_src,
asin_pos,
set_name,
) = batch
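# Easy negatives: roll the source batch by one position so each source is paired with another sample's source item.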
img_emb_neg_easy = torch.roll(img_emb_src, 1)
category_neg_easy = torch.roll(category_src, 1)
price_bin_neg_easy = torch.roll(price_bin_src, 1)
category_src_emb = self.category_embs(category_src)
category_pos_emb = self.category_embs(category_pos)
category_neg_emb = self.category_embs(category_pos)
category_neg_emb_easy = self.category_embs(category_neg_easy)
price_src_emb = self.price_embs(price_bin_src)
price_pos_emb = self.price_embs(price_bin_pos)
price_neg_emb = self.price_embs(price_bin_neg)
price_neg_emb_easy = self.price_embs(price_bin_neg_easy)
src = self.src_encoder(img_emb_src, category_src_emb, price_src_emb)
pos = self.candidate_encoder(img_emb_pos, category_pos_emb, price_pos_emb)
neg = self.candidate_encoder(img_emb_neg, category_neg_emb, price_neg_emb)
neg_easy = self.candidate_encoder(
img_emb_neg_easy, category_neg_emb_easy, price_neg_emb_easy
)
pred_pos = self.fusion_model(src, pos)
pred_neg = self.fusion_model(src, neg)
pred_neg_easy = self.fusion_model(src, neg_easy)
target_pos = torch.ones_like(pred_pos)
loss_pos = self.criterion(pred_pos.squeeze(), target_pos.squeeze())
target_neg = torch.zeros_like(pred_neg)
loss_neg = self.criterion(pred_neg.squeeze(), target_neg.squeeze())
target_neg_easy = torch.zeros_like(pred_neg_easy)
loss_neg_easy = self.criterion(
pred_neg_easy.squeeze(), target_neg_easy.squeeze()
)
pred_pos = torch.sigmoid(pred_pos)
pred_neg = torch.sigmoid(pred_neg)
# Loss
if self.cfg.hard_negative:
loss = 0.5 * (loss_pos + loss_neg)
else:
loss = 0.5 * (loss_pos + loss_neg_easy)
# Logger
self.log(f"loss/{phase}", loss)
self.log(f"clf/{phase}/acc/pred_pos", pred_pos.mean())
self.log(f"clf/{phase}/acc/pred_neg", pred_neg.mean())
self.log(f"clf/{phase}/loss/loss_pos", loss_pos)
self.log(f"clf/{phase}/loss/loss_neg", loss_neg)
# Logger
category_emb_mean = category_src_emb.mean(axis=-1).mean()
category_emb_avg_norm = torch.norm(category_src_emb, dim=-1).mean()
category_emb_max_val = torch.max(category_src_emb)
epoch = float(self.trainer.current_epoch)
self.log(f"epoch/{phase}", epoch)
self.log(f"category_emb/avg", category_emb_mean)
self.log(f"category_emb/avg_norm", category_emb_avg_norm)
self.log(f"category_emb/max_val", category_emb_max_val)
return {
"loss": loss,
"asin_src": asin_src,
"asin_pos": asin_pos,
"src": src.detach().cpu(),
"pos": pos.detach().cpu(),
"category_src": category_src.detach().cpu(),
"category_pos": category_pos.detach().cpu(),
"category_neg": category_pos.detach().cpu(),
"set_name": set_name,
}
def configure_optimizers(self):
optimizer = optim.Adam(
self.parameters(),
lr=self.cfg.lr,
weight_decay=self.cfg.weight_decay,
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=self.cfg.milestones
)
return [optimizer], [lr_scheduler]
def training_step(self, batch, batch_idx, optimizer_idx: int = 0):
return self._loss_helper(
batch, phase="train", batch_idx=batch_idx, optimizer_idx=optimizer_idx
)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self._loss_helper(
batch, phase="test", batch_idx=batch_idx, optimizer_idx=0
)
def validation_epoch_end(self, outputs, phase: str = "test"):
self._calc_retrieval_metrics(outputs[-1], phase)
def _calc_retrieval_metrics(self, outputs, phase):
t1 = time()
# Get values from step
epoch = int(self.trainer.current_epoch)
src = torch.vstack([out["src"] for out in outputs])
pos = torch.vstack([out["pos"] for out in outputs])
asin_src = np.hstack([out["asin_src"] for out in outputs])
asin_pos = np.hstack([out["asin_pos"] for out in outputs])
category_src = torch.hstack([out["category_src"] for out in outputs]).numpy()
category_pos = torch.hstack([out["category_pos"] for out in outputs]).numpy()
set_name = np.hstack([out["set_name"] for out in outputs])
# Test sources: keep one entry per unique (source, target-category) pair
src_test = src[set_name == "test"]
asin_src_test = asin_src[set_name == "test"]
category_pos_test = category_pos[set_name == "test"]
locs = list(zip(asin_src_test, category_pos_test))
_, unique_idxs = np.unique(np.array(locs), axis=0, return_index=True)
src_test, asin_src_test, category_pos_test = (
src_test[unique_idxs],
asin_src_test[unique_idxs],
category_pos_test[unique_idxs],
)
# Candidate to compare with
asins = asin_pos
candidates = pos
categories = category_pos
asins, candidates, categories = get_unique_asins(asins, candidates, categories)
# Build hot label
t1 = time()
fbt_by_asin_src = self.trainer.val_dataloaders[
-1
].dataset.fbt_by_asin_src.copy()
hot_labels, _ = generate_test_set_hot_labels(
asin_src_test=asin_src_test,
category_pos_test=category_pos_test,
fbt_by_asin_src=fbt_by_asin_src,
asins=asins,
)
logger.info(f"hot_labels in {time()-t1:.1f} s. {hot_labels.shape=}")
# Score the candidates: run the fusion model on each test source paired with every candidate; each source produces one row of probs
t2 = time()
probs = torch.vstack(
[
torch.sigmoid(
self.fusion_model(
(src_i.repeat(len(candidates), 1)), candidates
).squeeze()
)
for src_i in src_test
]
)
# Constrain to target category
for n, cat in enumerate(category_pos_test):
probs[n, categories != cat] = 0
probs = probs / probs.sum(axis=1, keepdim=True)
# Calculate retrieval metrics
ndcg_val = ndcg_score(hot_labels, probs)
self.logger.log_metrics({"retrieval_metrics/ndcg": ndcg_val}, step=epoch)
logger.info(f" {epoch=} {ndcg_val=:.6f} {probs.shape=}. {time()-t2:.1f}s")
# TopK
t2 = time()
topk_d = calc_topk(probs, hot_labels, self.cfg.top_k)
self.logger.log_metrics(topk_d, step=epoch)
logger.info(f"{epoch=} _epoch_end_helper. {topk_d}. in {time()-t2:.1f} s")
# Save tensors
if self.ndcg_val_best < ndcg_val:
self.ndcg_val_best = ndcg_val
save_run_results(
{
"src": src,
"pos": pos,
"asin_src": asin_src,
"asin_pos": asin_pos,
"category_src": category_src,
"category_pos": category_pos,
"set_name": set_name,
},
self.out_dir,
)
self.trainer.save_checkpoint(osj(self.out_dir, "checkpoint.ckpt"))
logger.info(f" {epoch=} {self.ndcg_val_best=:.5f}")
logger.info(f"_epoch_end_helper. {time()-t1:.1f} s")
| cycle_gan_for_complementary_item_recommendations-main | src/lit/lit_dcf.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
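# Replicates the conv weights once per example so per-example gradients can be read off after backward;
# the backward pass sums the replicated weight gradients back into the shared parameter.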
class FastGradConv2dFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, weight_param, bias_param, batch_size=1):
ctx.save_for_backward(weight_param)
weight = weight_param.repeat(batch_size, 1, 1, 1)
if bias_param is not None:
bias = bias_param.repeat(batch_size)
return weight, bias
@staticmethod
def backward(ctx, weight_grad, bias_grad):
weight_param, = ctx.saved_tensors
batch_size = int(weight_grad.size(0) / weight_param.size(0))
weight_grad = weight_grad.view(batch_size, -1, weight_grad.size(1),
weight_grad.size(2), weight_grad.size(3)).sum(0)
if bias_grad is not None:
bias_grad = bias_grad.view(batch_size, -1).sum(0)
return weight_grad, bias_grad, None
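# Conv2d that, in training mode, applies the replicated weights as a grouped convolution over the flattened batch,
# so expanded_weight.grad holds one gradient slice per example.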
class FastGradConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True):
super(FastGradConv2d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=1, bias=bias)
self.expanded_weight = None
self.expanded_bias = None
def forward(self, x):
if self.training:
del self.expanded_weight
del self.expanded_bias
batch_size = x.size(0)
self.expanded_weight, self.expanded_bias = FastGradConv2dFunction.apply(self.weight, self.bias, batch_size)
self.expanded_weight.requires_grad_(True)
self.expanded_weight.retain_grad()
if self.expanded_bias is not None:
self.expanded_bias.requires_grad_(True)
self.expanded_bias.retain_grad()
output = F.conv2d(x.view(1, -1, x.size(2), x.size(3)), self.expanded_weight, bias=self.expanded_bias,
stride=self.stride, padding=self.padding, dilation=self.dilation,
groups=batch_size)
return output.view(x.size(0), -1, output.size(2), output.size(3))
else:
return F.conv2d(x, self.weight, self.bias, stride=self.stride,
padding=self.padding, dilation=self.dilation)
| certified-removal-main | fast_grad_conv.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import argparse
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import argparse
import os
from sklearn.linear_model import LogisticRegression
from utils import load_features
parser = argparse.ArgumentParser(description='Training a removal-enabled linear model and testing removal')
parser.add_argument('--data-dir', type=str, required=True, help='data directory')
parser.add_argument('--result-dir', type=str, default='result', help='directory for saving results')
parser.add_argument('--extractor', type=str, default='resnet50', help='extractor type')
parser.add_argument('--dataset', type=str, default='SVHN', help='dataset')
parser.add_argument('--lam', type=float, default=1e-6, help='L2 regularization')
parser.add_argument('--std', type=float, default=10.0, help='standard deviation for objective perturbation')
parser.add_argument('--num-removes', type=int, default=1000, help='number of data points to remove')
parser.add_argument('--train-splits', type=int, default=1, help='number of training data splits')
parser.add_argument('--subsample-ratio', type=float, default=1.0, help='negative example subsample ratio')
parser.add_argument('--num-steps', type=int, default=100, help='number of optimization steps')
parser.add_argument('--train-mode', type=str, default='ovr', help='train mode [ovr/binary]')
parser.add_argument('--train-sep', action='store_true', default=False, help='train binary classifiers separately')
parser.add_argument('--verbose', action='store_true', default=False, help='verbosity in optimizer')
args = parser.parse_args()
device = torch.device("cuda")
def lr_loss(w, X, y, lam):
return -F.logsigmoid(y * X.mv(w)).mean() + lam * w.pow(2).sum() / 2
def lr_eval(w, X, y):
return X.mv(w).sign().eq(y).float().mean()
def lr_grad(w, X, y, lam):
z = torch.sigmoid(y * X.mv(w))
return X.t().mv((z-1) * y) + lam * X.size(0) * w
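# Inverse of the Hessian X^T D X + lam * n * I (D = sigma * (1 - sigma)), accumulated in batches to limit memory.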
def lr_hessian_inv(w, X, y, lam, batch_size=50000):
z = torch.sigmoid(X.mv(w).mul_(y))
D = z * (1 - z)
H = None
num_batch = int(math.ceil(X.size(0) / batch_size))
for i in range(num_batch):
lower = i * batch_size
upper = min((i+1) * batch_size, X.size(0))
X_i = X[lower:upper]
if H is None:
H = X_i.t().mm(D[lower:upper].unsqueeze(1) * X_i)
else:
H += X_i.t().mm(D[lower:upper].unsqueeze(1) * X_i)
return (H + lam * X.size(0) * torch.eye(X.size(1)).float().to(device)).inverse()
def lr_optimize(X, y, lam, b=None, num_steps=100, tol=1e-10, verbose=False):
w = torch.autograd.Variable(torch.zeros(X.size(1)).float().to(device), requires_grad=True)
def closure():
if b is None:
return lr_loss(w, X, y, lam)
else:
return lr_loss(w, X, y, lam) + b.dot(w) / X.size(0)
optimizer = optim.LBFGS([w], tolerance_grad=tol, tolerance_change=1e-20)
for i in range(num_steps):
optimizer.zero_grad()
loss = lr_loss(w, X, y, lam)
if b is not None:
loss += b.dot(w) / X.size(0)
loss.backward()
if verbose:
print('Iteration %d: loss = %.6f, grad_norm = %.6f' % (i+1, loss.cpu(), w.grad.norm()))
optimizer.step(closure)
return w.data
def ovr_lr_loss(w, X, y, lam, weight=None):
z = batch_multiply(X, w).mul_(y)
if weight is None:
return -F.logsigmoid(z).mean(0).sum() + lam * w.pow(2).sum() / 2
else:
return -F.logsigmoid(z).mul_(weight).sum() + lam * w.pow(2).sum() / 2
def ovr_lr_optimize(X, y, lam, weight=None, b=None, num_steps=100, tol=1e-10, verbose=False):
w = torch.autograd.Variable(torch.zeros(X.size(1), y.size(1)).float().to(device), requires_grad=True)
def closure():
if b is None:
return ovr_lr_loss(w, X, y, lam, weight)
else:
return ovr_lr_loss(w, X, y, lam, weight) + (b * w).sum() / X.size(0)
optimizer = optim.LBFGS([w], tolerance_grad=tol, tolerance_change=1e-10)
for i in range(num_steps):
optimizer.zero_grad()
loss = ovr_lr_loss(w, X, y, lam, weight)
if b is not None:
if weight is None:
loss += (b * w).sum() / X.size(0)
else:
loss += ((b * w).sum(0) * weight.max(0)[0]).sum()
loss.backward()
if verbose:
print('Iteration %d: loss = %.6f, grad_norm = %.6f' % (i+1, loss.cpu(), w.grad.norm()))
optimizer.step(closure)
return w.data
def batch_multiply(A, B, batch_size=500000):
if A.is_cuda:
if len(B.size()) == 1:
return A.mv(B)
else:
return A.mm(B)
else:
out = []
num_batch = int(math.ceil(A.size(0) / float(batch_size)))
with torch.no_grad():
for i in range(num_batch):
lower = i * batch_size
upper = min((i+1) * batch_size, A.size(0))
A_sub = A[lower:upper]
A_sub = A_sub.to(device)
if len(B.size()) == 1:
out.append(A_sub.mv(B).cpu())
else:
out.append(A_sub.mm(B).cpu())
return torch.cat(out, dim=0).to(device)
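# Power iteration to estimate the largest eigenvalue of A; the square root equals the spectral norm of X when A = X^T X.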
def spectral_norm(A, num_iters=20):
x = torch.randn(A.size(0)).float().to(device)
norm = 1
for i in range(num_iters):
x = A.mv(x)
norm = x.norm()
x /= norm
return math.sqrt(norm)
# loads extracted features
X_train, X_test, y_train, y_train_onehot, y_test = load_features(args)
X_test = X_test.float().to(device)
y_test = y_test.to(device)
save_path = '%s/%s_%s_splits_%d_ratio_%.2f_std_%.1f_lam_%.0e.pth' % (
args.result_dir, args.extractor, args.dataset, args.train_splits, args.subsample_ratio, args.std, args.lam)
if os.path.exists(save_path):
# load trained models
checkpoint = torch.load(save_path)
w = checkpoint['w']
b = checkpoint['b']
weight = checkpoint['weight']
else:
# train removal-enabled linear model
start = time.time()
if args.subsample_ratio < 1.0:
# subsample negative examples
subsample_indices = torch.rand(y_train_onehot.size()).lt(args.subsample_ratio).float()
weight = (subsample_indices + y_train_onehot.gt(0).float()).gt(0).float()
weight = weight / weight.sum(0).unsqueeze(0)
weight = weight.to(device)
else:
weight = None
# sample objective perturbation vector
X_train = X_train.float().to(device)
y_train = y_train.float().to(device)
y_train_onehot = y_train_onehot.float().to(device)
if args.train_mode == 'ovr':
b = args.std * torch.randn(X_train.size(1), y_train_onehot.size(1)).float().to(device)
if args.train_sep:
# train K binary LR models separately
w = torch.zeros(b.size()).float().to(device)
for k in range(y_train_onehot.size(1)):
if weight is None:
w[:, k] = lr_optimize(X_train, y_train_onehot[:, k], args.lam, b=b[:, k], num_steps=args.num_steps, verbose=args.verbose)
else:
w[:, k] = lr_optimize(X_train[weight[:, k].gt(0)], y_train_onehot[:, k][weight[:, k].gt(0)], args.lam, b=b[:, k], num_steps=args.num_steps, verbose=args.verbose)
else:
# train K binary LR models jointly
w = ovr_lr_optimize(X_train, y_train_onehot, args.lam, weight, b=b, num_steps=args.num_steps, verbose=args.verbose)
else:
b = args.std * torch.randn(X_train.size(1)).float().to(device)
w = lr_optimize(X_train, y_train, args.lam, b=b, num_steps=args.num_steps, verbose=args.verbose)
print('Time elapsed: %.2fs' % (time.time() - start))
torch.save({'w': w, 'b': b, 'weight': weight}, save_path)
if args.train_mode == 'ovr':
pred = X_test.mm(w).max(1)[1]
print('Test accuracy = %.4f' % pred.eq(y_test).float().mean())
else:
pred = X_test.mv(w)
print('Test accuracy = %.4f' % pred.gt(0).squeeze().eq(y_test.gt(0)).float().mean())
grad_norm_approx = torch.zeros(args.num_removes).float()
times = torch.zeros(args.num_removes)
if args.train_mode == 'ovr':
y_train = y_train_onehot
w_approx = w.clone()
perm = torch.randperm(X_train.size(0)).to(y_train.device)
X_train = X_train.index_select(0, perm)
X_train = X_train.float().to(device)
y_train = y_train[perm].float().to(device)
# initialize K = X^T * X for fast computation of spectral norm
print('Preparing for removal')
if weight is None:
K = X_train.t().mm(X_train)
else:
weight = weight.index_select(0, perm.to(device))
Ks = []
for i in range(y_train_onehot.size(1)):
X_sub = X_train.cpu()[weight[:, i].gt(0).cpu()]
Ks.append(X_sub.t().mm(X_sub).to(device))
print('Testing removal')
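# For each removed point: apply the Newton-style update w += H^{-1} * grad_i on the remaining data and
# accumulate the data-dependent gradient-residual norm bound ||Delta|| * ||X_rem Delta|| * ||X_rem|| / 4.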
for i in range(args.num_removes):
start = time.time()
if args.train_mode == 'ovr':
# removal from all one-vs-rest models
for k in range(y_train_onehot.size(1)):
if weight is None or weight[i, k] > 0:
X_rem = X_train[(i+1):]
y_rem = y_train[(i+1):, k]
if weight is not None:
X_rem = X_rem[weight[(i+1):, k].gt(0)]
y_rem = y_rem[weight[(i+1):, k].gt(0)]
H_inv = lr_hessian_inv(w_approx[:, k], X_rem, y_rem, args.lam)
grad_i = lr_grad(w_approx[:, k], X_train[i].unsqueeze(0), y_train[i, k].unsqueeze(0), args.lam)
# apply rank-1 down-date to K
if weight is None:
K -= torch.ger(X_train[i], X_train[i])
spec_norm = spectral_norm(K)
else:
Ks[k] -= torch.ger(X_train[i], X_train[i])
spec_norm = spectral_norm(Ks[k])
Delta = H_inv.mv(grad_i)
Delta_p = X_rem.mv(Delta)
w_approx[:, k] += Delta
grad_norm_approx[i] += (Delta.norm() * Delta_p.norm() * spec_norm / 4).cpu()
else:
# removal from a single binary logistic regression model
X_rem = X_train[(i+1):]
y_rem = y_train[(i+1):]
H_inv = lr_hessian_inv(w_approx[:], X_rem, y_rem, args.lam)
grad_i = lr_grad(w_approx, X_train[i].unsqueeze(0), y_train[i].unsqueeze(0), args.lam)
K -= torch.ger(X_train[i], X_train[i])
spec_norm = spectral_norm(K)
Delta = H_inv.mv(grad_i)
Delta_p = X_rem.mv(Delta)
w_approx += Delta
grad_norm_approx[i] += (Delta.norm() * Delta_p.norm() * spec_norm / 4).cpu()
times[i] = time.time() - start
print('Iteration %d: Grad norm bound = %.6f, time = %.2fs' % (i+1, grad_norm_approx[i], times[i]))
if args.train_mode == 'ovr':
pred = X_test.mm(w_approx).max(1)[1]
print('Test accuracy = %.4f' % pred.eq(y_test).float().mean())
else:
pred = X_test.mv(w_approx)
print('Test accuracy = %.4f' % pred.gt(0).squeeze().eq(y_test.gt(0)).float().mean())
save_path = '%s/%s_%s_splits_%d_ratio_%.2f_std_%.1f_lam_%.0e_removal.pth' % (
args.result_dir, args.extractor, args.dataset, args.train_splits, args.subsample_ratio, args.std, args.lam)
torch.save({'grad_norm_approx': grad_norm_approx, 'times': times}, save_path)
| certified-removal-main | test_removal.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Extractor(nn.Module):
def __init__(self, num_channels, kernel_size, stride, pool_size, bias=True, normalize=False):
super(Extractor, self).__init__()
self.normalize = normalize
self.pool_size = pool_size
conv_layers = []
assert(len(num_channels) >= 2)
self.conv_layers = nn.ModuleList([nn.Conv2d(num_channels[i], num_channels[i+1],
kernel_size, stride, bias=bias) for i in range(len(num_channels)-1)])
def forward(self, x):
for _, conv in enumerate(self.conv_layers):
out = conv(x)
x = F.max_pool2d(F.relu(out), self.pool_size, self.pool_size)
out = x.view(x.size(0), -1)
if self.normalize:
out = F.normalize(out)
return out
class MLP(nn.Module):
def __init__(self, hidden_sizes):
super(MLP, self).__init__()
assert(len(hidden_sizes) >= 2)
self.input_size = hidden_sizes[0]
self.act = F.relu
if len(hidden_sizes) == 2:
self.hidden_layers = []
else:
self.hidden_layers = nn.ModuleList([nn.Linear(hidden_sizes[i], hidden_sizes[i+1]) for i in range(len(hidden_sizes) - 2)])
self.output_layer = nn.Linear(hidden_sizes[-2], hidden_sizes[-1])
def forward(self, x):
x = x.view(-1, self.input_size)
for layer in self.hidden_layers:
x = self.act(layer(x))
return self.output_layer(x)
| certified-removal-main | models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import math
import argparse
from models import Extractor, MLP
from fast_grad_models import FastGradExtractor, FastGradMLP
from train_func import train, train_private
from test_func import test, test_linear
import utils
import time
import os
from torchdp.privacy_analysis import compute_rdp, get_privacy_spent
def main():
parser = argparse.ArgumentParser(description='Training an SVHN model')
parser.add_argument('--data-dir', type=str, required=True, help='directory for SVHN data')
parser.add_argument('--save-dir', type=str, default='save', help='directory for saving trained model')
parser.add_argument('--batch-size', type=int, default=500, help='batch size for training')
parser.add_argument('--process-batch-size', type=int, default=500, help='batch size for processing')
parser.add_argument('--test-batch-size', type=int, default=1000, help='batch size for testing')
parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--lam', type=float, default=0, help='L2 regularization')
parser.add_argument('--std', type=float, default=6.0, help='noise multiplier for DP training')
parser.add_argument('--delta', type=float, default=1e-5, help='delta for DP training')
parser.add_argument('--num-filters', type=int, default=64, help='number of conv filters')
parser.add_argument('--seed', type=int, default=1, help='manual random seed')
parser.add_argument('--log-interval', type=int, default=10,
help='logging interval')
parser.add_argument('--train-mode', type=str, default='default', help='train mode [default/private/full_private]')
parser.add_argument('--test-mode', type=str, default='default', help='test mode [default/linear/extract]')
parser.add_argument('--save-suffix', type=str, default='', help='suffix for model name')
parser.add_argument('--normalize', action='store_true', default=False,
help='normalize extracted features')
parser.add_argument('--single-layer', action='store_true', default=False,
help='single convolutional layer')
parser.add_argument('--save-model', action='store_true', default=False,
help='for saving the trained model')
args = parser.parse_args()
torch.manual_seed(args.seed)
device = torch.device("cuda")
kwargs = {'num_workers': 1, 'pin_memory': True}
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (1.0, 1.0, 1.0)),
])
trainset = torchvision.datasets.SVHN(root=args.data_dir, split='train', download=True, transform=transform)
extraset = torchvision.datasets.SVHN(root=args.data_dir, split='extra', download=True, transform=transform)
trainset = torch.utils.data.ConcatDataset([trainset, extraset])
testset = torchvision.datasets.SVHN(root=args.data_dir, split='test', download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=True, **kwargs)
if args.single_layer:
extr = FastGradExtractor([3, args.num_filters], 9, 1, 2, normalize=args.normalize).to(device)
clf = FastGradMLP([12*12*args.num_filters, 10]).to(device)
else:
extr = FastGradExtractor([3, args.num_filters, args.num_filters], 5, 1, 2, normalize=args.normalize).to(device)
clf = FastGradMLP([5*5*args.num_filters, 10]).to(device)
loss_fn = lambda x, y: F.nll_loss(F.log_softmax(x, dim=1), y)
save_path = "%s/svhn_cnn_delta_%.2e_std_%.2f%s.pth" % (args.save_dir, args.delta, args.std, args.save_suffix)
if not os.path.exists(save_path):
optimizer = optim.Adam(list(extr.parameters()) + list(clf.parameters()), lr=args.lr, weight_decay=args.lam)
C = 4
n = len(train_loader.dataset)
q = float(args.batch_size) / float(n)
T = args.epochs * len(train_loader)
# compute privacy loss using RDP analysis
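        # q is the sampling rate (batch size over dataset size) and T the total number of
        # noisy gradient steps; both are fed to the RDP accountant below.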
orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5] +
list(range(5, 64)) + [128, 256, 512, 1024, 2048, 4096])
epsilon, _ = get_privacy_spent(orders, compute_rdp(q, args.std, T, orders), args.delta)
print('RDP computed privacy loss: epsilon = %.2f at delta = %.2e' % (epsilon, args.delta))
start = time.time()
for epoch in range(1, args.epochs + 1):
if args.train_mode == 'private' or args.train_mode == 'full_private':
include_linear = (args.train_mode == 'full_private')
train_private(args, extr, clf, loss_fn, device, train_loader, optimizer, epoch, C, args.std, include_linear=include_linear)
else:
train(args, extr, clf, loss_fn, device, train_loader, optimizer, epoch)
test(args, extr, clf, loss_fn, device, test_loader)
print(time.time() - start)
if args.save_model:
torch.save({'extr': extr.state_dict(), 'clf': clf.state_dict()}, save_path)
else:
checkpoint = torch.load(save_path)
extr.load_state_dict(checkpoint['extr'])
clf.load_state_dict(checkpoint['clf'])
if args.test_mode == 'linear':
test_linear(args, extr, device, train_loader, test_loader)
elif args.test_mode == 'extract':
# this option can be used to extract features for training the removal-enabled linear model
X_train, y_train = utils.extract_features(extr, device, train_loader)
X_test, y_test = utils.extract_features(extr, device, test_loader)
torch.save({'X_train': X_train, 'y_train': y_train, 'X_test': X_test, 'y_test': y_test},
'%s/dp_delta_%.2e_std_%.2f_SVHN_extracted.pth' % (args.data_dir, args.delta, args.std))
else:
test(args, extr, clf, loss_fn, device, test_loader)
if __name__ == '__main__':
main()
| certified-removal-main | train_svhn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fast_grad_conv import FastGradConv2d
class FastGradExtractor(nn.Module):
def __init__(self, num_channels, kernel_size, stride, pool_size, normalize=False):
super(FastGradExtractor, self).__init__()
self.normalize = normalize
self.pool_size = pool_size
conv_layers = []
assert(len(num_channels) >= 2)
self.conv_layers = nn.ModuleList([FastGradConv2d(num_channels[i], num_channels[i+1],
kernel_size, stride) for i in range(len(num_channels)-1)])
def forward(self, x):
for _, conv in enumerate(self.conv_layers):
x = F.max_pool2d(F.relu(conv(x)), self.pool_size, self.pool_size)
out = x.view(x.size(0), -1)
if self.normalize:
out = F.normalize(out)
return out
# Code for FastGradMLP is adapted from the following repository:
# https://github.com/fKunstner/fast-individual-gradients-with-autodiff/tree/master/pytorch
class FastGradMLP(nn.Module):
"""
"Standard" MLP with support with goodfellow's backprop trick
"""
def __init__(self, hidden_sizes):
super(type(self), self).__init__()
assert(len(hidden_sizes) >= 2)
self.input_size = hidden_sizes[0]
self.act = F.relu
if len(hidden_sizes) == 2:
self.hidden_layers = []
else:
self.hidden_layers = nn.ModuleList([nn.Linear(hidden_sizes[i], hidden_sizes[i+1]) for i in range(len(hidden_sizes) - 2)])
self.output_layer = nn.Linear(hidden_sizes[-2], hidden_sizes[-1])
def forward(self, x):
"""
        Forward pass that also returns
* the activations (H) and
* the linear combinations (Z)
of each layer, to be able to use the trick from [1].
Args:
- x : The inputs of the network
Returns:
- logits
- activations at each layer (including the inputs)
- linear combinations at each layer
> [1] EFFICIENT PER-EXAMPLE GRADIENT COMPUTATIONS
> by Ian Goodfellow
> https://arxiv.org/pdf/1510.01799.pdf
"""
x = x.view(-1, self.input_size)
out = x
# Save the model inputs, which are considered the activations of the 0'th layer.
activations = [out]
linearCombs = []
for layer in self.hidden_layers:
linearComb = layer(out)
out = self.act(linearComb)
# Save the activations and linear combinations from this layer.
activations.append(out)
linearComb.requires_grad_(True)
linearComb.retain_grad()
linearCombs.append(linearComb)
logits = self.output_layer(out)
logits.requires_grad_(True)
logits.retain_grad()
linearCombs.append(logits)
return (logits, activations, linearCombs)
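# Minimal usage sketch (illustrative, not part of the original training code): the three
# outputs of FastGradMLP.forward are exactly what the per-example gradient trick consumes.
# The sizes below are made up for the example.
if __name__ == "__main__":
    mlp = FastGradMLP([8, 16, 10])
    x = torch.randn(4, 8)
    y = torch.randint(0, 10, (4,))
    logits, activations, linear_combs = mlp(x)
    loss = F.cross_entropy(logits, y)
    # Gradients of the loss w.r.t. each layer's pre-activations, one tensor per layer,
    # each with a leading batch dimension; these are the inputs to goodfellow_backprop.
    linear_grads = torch.autograd.grad(loss, linear_combs)
    print([g.shape for g in linear_grads])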
| certified-removal-main | fast_grad_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import math
import os
import sys
from fast_grad.goodfellow_backprop import goodfellow_backprop
from torchvision import datasets, transforms
# extracts features into a tensor
def extract_features(extr, device, data_loader):
extr.eval()
features = None
labels = None
with torch.no_grad():
for batch_idx, (data, target) in enumerate(data_loader):
data, target = data.to(device), target.to(device)
output = extr(data).data.cpu()
if features is None:
features = output.squeeze()
labels = target
else:
features = torch.cat([features, output.squeeze()], dim=0)
labels = torch.cat([labels, target], dim=0)
return features, labels
# constructs one-hot representations of labels
def onehot(y):
y_onehot = -torch.ones(y.size(0), y.max() + 1).float()
y_onehot.scatter_(1, y.long().unsqueeze(1), 1)
return y_onehot
# loads features from a saved checkpoint or directly as raw features
def load_features(args):
ckpt_file = '%s/%s_%s_extracted.pth' % (args.data_dir, args.extractor, args.dataset)
if os.path.exists(ckpt_file):
checkpoint = torch.load(ckpt_file)
X_train = checkpoint['X_train'].cpu()
y_train = checkpoint['y_train'].cpu()
X_test = checkpoint['X_test'].cpu()
y_test = checkpoint['y_test'].cpu()
else:
print('Extracted features not found, loading raw features.')
if args.dataset == 'MNIST':
trainset = datasets.MNIST(args.data_dir, train=True, transform=transforms.ToTensor())
testset = datasets.MNIST(args.data_dir, train=False, transform=transforms.ToTensor())
X_train = torch.zeros(len(trainset), 784)
y_train = torch.zeros(len(trainset))
X_test = torch.zeros(len(testset), 784)
y_test = torch.zeros(len(testset))
for i in range(len(trainset)):
x, y = trainset[i]
X_train[i] = x.view(784) - 0.5
y_train[i] = y
for i in range(len(testset)):
x, y = testset[i]
X_test[i] = x.view(784) - 0.5
y_test[i] = y
# load classes 3 and 8
train_indices = (y_train.eq(3) + y_train.eq(8)).gt(0)
test_indices = (y_test.eq(3) + y_test.eq(8)).gt(0)
X_train = X_train[train_indices]
y_train = y_train[train_indices].eq(3).float()
X_test = X_test[test_indices]
y_test = y_test[test_indices].eq(3).float()
else:
print("Error: Unknown dataset %s. Aborting." % args.dataset)
sys.exit(1)
# L2 normalize features
X_train /= X_train.norm(2, 1).unsqueeze(1)
X_test /= X_test.norm(2, 1).unsqueeze(1)
# convert labels to +/-1 or one-hot vectors
if args.train_mode == 'binary':
y_train_onehot = y_train
y_train = (2 * y_train - 1)
else:
y_train_onehot = onehot(y_train)
if len(y_train_onehot.size()) == 1:
y_train_onehot = y_train_onehot.unsqueeze(1)
return X_train, X_test, y_train, y_train_onehot, y_test
# computes per-example gradient of the extractor and classifier models
# clf must be a FastGradMLP
def per_example_gradient(extr, clf, x, y, loss_fn, include_linear=False):
logits, activations, linearCombs = clf(extr(x))
loss = loss_fn(logits, y)
loss.backward(retain_graph=True)
gradients = []
for module in list(next(extr.children()).children()):
grad = module.expanded_weight.grad * x.size(0)
gradients.append(grad.view(x.size(0), -1, grad.size(1), grad.size(2), grad.size(3)))
if module.expanded_bias is not None:
gradients.append(module.expanded_bias.grad.view(x.size(0), -1) * x.size(0))
if include_linear:
linearGrads = torch.autograd.grad(loss, linearCombs)
linearGrads = goodfellow_backprop(activations, linearGrads)
gradients = gradients + linearGrads
return loss, gradients
# clips each gradient to norm C and sum
def clip_and_sum_gradients(gradients, C):
grad_vec = batch_grads_to_vec(gradients)
grad_norm = grad_vec.norm(2, 1)
multiplier = grad_norm.new(grad_norm.size()).fill_(1)
multiplier[grad_norm.gt(C)] = C / grad_norm[grad_norm.gt(C)]
grad_vec *= multiplier.unsqueeze(1)
return grad_vec.sum(0)
# adds noise to computed gradients
# grad_vec should be the average of the clipped per-example gradients
def add_noisy_gradient(extr, clf, device, grad_vec, C, std, include_linear=False):
noise = torch.randn(grad_vec.size()).to(device) * C * std
grad_perturbed = grad_vec + noise
extr.zero_grad()
for param in extr.parameters():
size = param.data.view(1, -1).size(1)
param.grad = grad_perturbed[:size].view_as(param.data).clone()
grad_perturbed = grad_perturbed[size:]
if include_linear:
clf.zero_grad()
for param in clf.parameters():
size = param.data.view(1, -1).size(1)
param.grad = grad_perturbed[:size].view_as(param.data).clone()
grad_perturbed = grad_perturbed[size:]
return noise
# computes L2 regularized loss
def loss_with_reg(model, data, target, loss_fn, lam):
model.zero_grad()
loss = loss_fn(model(data), target)
if lam > 0:
for param in model.parameters():
loss += lam * param.pow(2).sum() / 2
loss.backward()
return loss
# computes average gradient of the full dataset
def compute_full_grad(model, device, data_loader, loss_fn, lam=0):
full_grad = None
model.zero_grad()
for batch_idx, (data, target) in enumerate(data_loader):
data, target = data.to(device), target.to(device)
loss_with_reg(model, data, target, loss_fn, lam)
grad = params_to_vec(model.parameters(), grad=True)
if full_grad is None:
full_grad = grad * data.size(0) / len(data_loader.dataset)
else:
full_grad += grad * data.size(0) / len(data_loader.dataset)
model.zero_grad()
param_vec = params_to_vec(model.parameters())
return full_grad, param_vec
def params_to_vec(parameters, grad=False):
vec = []
for param in parameters:
if grad:
vec.append(param.grad.view(1, -1))
else:
vec.append(param.data.view(1, -1))
return torch.cat(vec, dim=1).squeeze()
def vec_to_params(vec, parameters):
param = []
for p in parameters:
size = p.view(1, -1).size(1)
param.append(vec[:size].view(p.size()))
vec = vec[size:]
return param
def batch_grads_to_vec(parameters):
N = parameters[0].shape[0]
vec = []
for param in parameters:
vec.append(param.view(N,-1))
return torch.cat(vec, dim=1)
def batch_vec_to_grads(vec, parameters):
grads = []
for param in parameters:
size = param.view(param.size(0), -1).size(1)
grads.append(vec[:, :size].view_as(param))
vec = vec[:, size:]
return grads
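# Minimal sketch (illustrative, not part of the original code): per-example gradient
# clipping as used by the private training loop. Each row of the toy tensor plays the
# role of one example's gradient for a single parameter; all values are made up.
if __name__ == "__main__":
    C = 1.0
    toy_grads = [torch.tensor([[3.0, 4.0], [0.3, 0.4]])]  # per-example norms 5.0 and 0.5
    clipped_sum = clip_and_sum_gradients(toy_grads, C)
    # The first row is rescaled to norm C, the second is left untouched,
    # so the result is [3/5 + 0.3, 4/5 + 0.4] = [0.9, 1.2].
    print(clipped_sum)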
| certified-removal-main | utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from train_func import train
from utils import extract_features
from sklearn.linear_model import LogisticRegression
class Linear(nn.Module):
def __init__(self, input_size):
super(Linear, self).__init__()
self.fc = nn.Linear(input_size, 10)
def forward(self, x):
x = self.fc(x)
return F.log_softmax(x, dim=1)
# computes test accuracy
def test(args, extr, clf, loss_fn, device, test_loader, verbose=True):
if extr is not None:
extr.eval()
clf.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
if extr is not None:
output = clf(extr(data))
if len(output) == 3:
output = output[0]
else:
output = clf(data)
test_loss += output.size(0) * loss_fn(output, target).item()
if output.size(1) > 1:
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
else:
pred = output.gt(0).long()
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_acc = float(correct) / len(test_loader.dataset)
if verbose:
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100 * test_acc))
return test_loss, test_acc
# computes test accuracy of a logistic regression model using features extracted from extr
def test_linear(args, extr, device, train_loader, test_loader, verbose=True):
X_train, y_train = extract_features(extr, device, train_loader)
X_test, y_test = extract_features(extr, device, test_loader)
clf = LogisticRegression(C=1/(X_train.size(0)*args.lam), solver='saga', multi_class='multinomial', verbose=int(verbose))
clf.fit(X_train.cpu().numpy(), y_train.cpu().numpy())
acc = clf.score(X_test.cpu().numpy(), y_test.cpu().numpy())
print('Test accuracy = %.4f' % acc)
return acc
| certified-removal-main | test_func.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
import math
from utils import per_example_gradient, clip_and_sum_gradients, add_noisy_gradient, batch_grads_to_vec, params_to_vec, vec_to_params, compute_full_grad, loss_with_reg
# trains a regular model for a single epoch
def train(args, extr, clf, loss_fn, device, train_loader, optimizer, epoch, verbose=True):
if extr is not None:
extr.train()
clf.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
if extr is not None:
output = clf(extr(data))
if len(output) == 3:
output = output[0]
else:
output = clf(data)
loss = loss_fn(output, target)
if args.lam > 0:
if extr is not None:
loss += args.lam * params_to_vec(extr.parameters()).pow(2).sum() / 2
loss += args.lam * params_to_vec(clf.parameters()).pow(2).sum() / 2
loss.backward()
optimizer.step()
if verbose and (batch_idx + 1) % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
100. * (batch_idx + 1) / len(train_loader), loss.item()))
# trains a private model for a single epoch using private SGD
# clf must be a FastGradMLP
def train_private(args, extr, clf, loss_fn, device, train_loader, optimizer, epoch, C, std, include_linear=False, verbose=True):
extr.train()
clf.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
# compute per-example gradients
num_batches = int(math.ceil(float(data.size(0)) / args.process_batch_size))
loss = 0
grad_vec = None
for i in range(num_batches):
start = i * args.process_batch_size
end = min((i+1) * args.process_batch_size, data.size(0))
data_batch = data[start:end]
target_batch = target[start:end]
loss_batch, gradients_batch = per_example_gradient(extr, clf, data_batch, target_batch, loss_fn, include_linear=include_linear)
loss += data_batch.size(0) * loss_batch.item()
if i == 0:
grad_vec = clip_and_sum_gradients(gradients_batch, C)
else:
grad_vec += clip_and_sum_gradients(gradients_batch, C)
loss /= data.size(0)
grad_vec /= data.size(0)
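        # grad_vec now holds the clipped, batch-averaged gradient; the noise multiplier is
        # divided by the batch size because add_noisy_gradient scales its Gaussian noise by
        # C * std, and the sensitivity of the averaged clipped gradients is C / batch_size.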
noise = add_noisy_gradient(extr, clf, device, grad_vec, C, std / data.size(0), include_linear=include_linear)
optimizer.step()
if verbose and (batch_idx + 1) % args.log_interval == 0:
print('Epoch %d [%d/%d]: loss = %.4f, grad_norm = %.4f, noise_norm = %.4f' % (
epoch, (batch_idx + 1) * len(data), len(train_loader.dataset), loss,
grad_vec.norm(), noise.norm()))
| certified-removal-main | train_func.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
import pdb #debugging
from goodfellow_backprop import goodfellow_backprop
def full(model, X, y):
"""
Computes the gradient of the complete objective function
"""
logits, _, _ = model.forward(X)
loss = F.binary_cross_entropy_with_logits(logits.view((-1,)), y)
grad = torch.autograd.grad(loss, model.parameters())
return grad
def naive(model, X, y):
"""
    Computes the predictions in a full-batch fashion,
    then calls backward on each individual loss
"""
grad_list = []
logits, _, _ = model.forward(X)
N = X.shape[0]
for n in range(N):
model.zero_grad()
loss = F.binary_cross_entropy_with_logits(logits[n], y[n].view(-1,))
loss.backward(retain_graph=True)
grad_list.append(list([p.grad.clone() for p in model.parameters()]))
grads = []
for p_id in range(len(list(model.parameters()))):
grads.append(torch.cat([grad_list[n][p_id].unsqueeze(0) for n in range(N)]))
return grads
def goodfellow(model, X, y):
"""
Use Goodfellow's trick to compute individual gradients.
Ref: Efficient per-example gradient computations
at: https://arxiv.org/abs/1510.01799
"""
model.zero_grad()
logits, activations, linearCombs = model.forward(X)
loss = F.binary_cross_entropy_with_logits(logits.view((-1,)), y)
linearGrads = torch.autograd.grad(loss, linearCombs)
gradients = goodfellow_backprop(activations, linearGrads)
return gradients
| certified-removal-main | fast_grad/gradient_funcs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import pdb #debugging
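# Shapes, for reference: for a linear layer whose input activations X have shape
# (N, in_features) and whose pre-activation gradients G have shape (N, out_features),
# the per-example weight gradients are the outer products G[n] X[n]^T, computed with a
# single batched matrix multiply giving shape (N, out_features, in_features); the
# per-example bias gradients are just G. G is rescaled by N beforehand because the loss
# is a batch average.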
def goodfellow_backprop(activations, linearGrads):
grads = []
for i in range(len(linearGrads)):
G, X = linearGrads[i], activations[i]
if len(G.shape) < 2:
G = G.unsqueeze(1)
G *= G.shape[0] # if the function is an average
grads.append(torch.bmm(G.unsqueeze(2), X.unsqueeze(1)))
grads.append(G)
return grads
| certified-removal-main | fast_grad/goodfellow_backprop.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pdb
import helpers
from gradient_funcs import full, goodfellow, naive
def runWith(N, D, L):
X, y, model = helpers.make_data_and_model(N, D, L)
names = ["Goodf", "Naive"]
methods = [goodfellow, naive]
helpers.check_correctness(full, names, methods, model, X, y)
helpers.simpleTiming(full, names, methods, model, X, y, REPEATS=1)
#helpers.profiling(full, names, methods, model, X, y)
setups = [
[2,3,1],
[10,100,10],
[100,100,10],
[100,300,3],
[32,300,50],
[1000,100,10]
]
print("README:")
print()
print("Parameters:")
print("- N: Number of samples")
print("- D: Dimensionality of the inputs and hidden layers - width of the network")
print("- L: Number of hidden layers - depth of the network")
print()
print("Functions:")
print("- Full : Computes the averaged gradient")
print("- Naive: Compute each individual gradient by repeatedly calling backward")
print("- Goodf: Compute the individual gradients using Goodfellow's Trick,")
print(" which is equivalent to redefining the backward pass to _not_ aggregate individual gradients")
print()
print("Checking correctness is done with torch.norm()")
print("- For the diff. to the Full gradient, we first average over the sample")
print("- For the difference between individual gradient methods,")
print(" we take the L2 norm between [N x ...] matrices")
for setup in setups:
print()
print("Setup [N, D, L] =", setup)
print("---")
runWith(*setup)
| certified-removal-main | fast_grad/main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from time import time
from torch.nn.utils import parameters_to_vector, vector_to_parameters
import cProfile, pstats
import pdb #debugging
def batch_grads_to_vec(parameters):
r"""Convert parameters to one vector
Arguments:
        parameters (Iterable[Tensor]): an iterable of Tensors, each of shape
            [N x shape of the corresponding model parameter].
Returns:
The parameters represented by a single Tensor of shape [N x number of parameters in the model]
"""
N = parameters[0].shape[0]
vec = []
for param in parameters:
vec.append(param.view(N,-1))
return torch.cat(vec, dim=1)
def check_correctness(full, names, approximations, model, X, y):
print()
print(" Checking correctness")
print(" ---")
true_value = parameters_to_vector(full(model, X, y))
approx_values = list()
for i in range(len(approximations)):
approx_value = batch_grads_to_vec(approximations[i](model, X, y))
approx_values.append(approx_value)
#pdb.set_trace()
print(" - Diff. to full batch for (%5s): %f" % (names[i], torch.norm(true_value - torch.mean(approx_value, dim=0))))
for i in range(len(approximations)):
for j in range(i):
if i != j:
print(" - Difference between (%5s) and (%5s): %f" % (names[i], names[j], torch.norm(approx_values[i] - approx_values[j])))
def simpleTiming(full, names, approximations, model, X, y, REPEATS=10):
print()
print(" Simple timing")
print(" ---")
def timeRun(method):
start = time()
for r in range(REPEATS):
method(model, X, y)
end = time()
return (end - start)/REPEATS
print(" - Full : %.3fs" % timeRun(full))
for i in range(len(approximations)):
print(" - (%5s) : %.3fs" % (names[i], timeRun(approximations[i])))
def profiling(full, names, approximations, model, X, y, REPEATS=1, Prec=20):
print("Profiling")
def profile(method):
pr = cProfile.Profile()
pr.enable()
for r in range(REPEATS):
method(model, X, y)
pr.disable()
pr.create_stats()
ps = pstats.Stats(pr).sort_stats("cumulative")
ps.print_stats(Prec)
print("Full:")
profile(full)
for i in range(len(approximations)):
print(names[i])
profile(approximations[i])
def make_data_and_model(N, D, L, seed=1):
"""
# N: Number of samples
# D: Dimension of input and of each Layer
# L: Number of hidden layers
"""
torch.manual_seed(seed)
hidden_sizes = list(D for l in range(L))
X = torch.Tensor(torch.randn(N, D))
y = torch.Tensor(torch.round(torch.rand(N))).view(-1,)
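    # NOTE: MLP is not imported in this file; it is assumed to be the plain MLP model
    # defined elsewhere in this repository (the fast-grad variant lives in
    # fast_grad_models.py at the repository root).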
model = MLP(input_size = D, hidden_sizes = hidden_sizes)
model.train(True)
return X, y, model
| certified-removal-main | fast_grad/helpers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import scip_solver as scip
import xpress_solver as xp
# Wraps the various MILP solvers, including SCIP and Xpress, under a unified API
# to ensure that we can easily try several solvers to check which one performs best
# on a given instance.
class ILPSolver:
def __init__(self, timeout_s=None, engine="xpress"):
if engine == "xpress":
self.solver = xp.XpressSolver(timeout_s)
else:
print(engine)
assert engine == "scip"
self.solver = scip.ScipSolver(timeout_s)
def create_integer_var(self, name, lower_bound=None, upper_bound=None):
assert name is not None
if type(name) != str:
name = str(name)
lb = -sys.maxsize if lower_bound is None else lower_bound
ub = sys.maxsize if upper_bound is None else upper_bound
return self.solver.create_integer_var(name, lb, ub)
def create_real_var(self, name, lower_bound=None, upper_bound=None):
assert name is not None
if type(name) != str:
name = str(name)
lb = -float("inf") if lower_bound is None else lower_bound
ub = float("inf") if upper_bound is None else upper_bound
return self.solver.create_real_var(name, lb, ub)
def create_binary_var(self, name):
assert name is not None
if type(name) != str:
name = str(name)
return self.solver.create_binary_var(name)
def set_objective_function(self, equation, maximize=True):
self.solver.set_objective_function(equation, maximize)
def add_constraint(self, cns):
self.solver.add_constraint(cns)
def disable_presolver(self):
self.solver.disable_presolver()
def disable_cuts(self):
self.solver.disable_cuts()
def disable_heuristics(self):
self.solver.disable_heuristics()
def solve(self):
return self.solver.solve()
# Returns the primal dual gap as the (upper bound, lower bound) tuple. This
# should only be called after the problem has been solved.
def primal_dual_gap(self):
return self.solver.primal_dual_gap()
# Returns the integral of the primal-dual gap over time. This
# should only be called after the problem has been solved.
def primal_dual_integral(self):
return self.solver.primal_dual_integral()
# Import the problem from the specified mps file
def load(self, mps_filename):
#self.solver.import(mps_filename)
return
# Export the problem in the specified mps file
def export(self, lp_output_filename):
return self.solver.export(lp_output_filename)
# Access the underlying scip.Model. Only valid if the engine is SCIP
def as_scip_model(self):
assert isinstance(self.solver, scip.ScipSolver)
return self.solver.as_scip_model()
# Access the underlying xpress.problem. Only valid if the engine is XPress
def as_xpress_problem(self):
assert isinstance(self.solver, xp.XpressSolver)
return self.solver.as_xp_problem()
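# Minimal usage sketch (illustrative; assumes the SCIP backend wrapped by scip_solver.py is
# installed and that its variables support the usual operator-overloaded expressions, as
# pyscipopt variables do). The toy model below is made up for the example.
if __name__ == "__main__":
    solver = ILPSolver(timeout_s=10, engine="scip")
    x = solver.create_integer_var("x", lower_bound=0, upper_bound=10)
    y = solver.create_integer_var("y", lower_bound=0, upper_bound=10)
    solver.add_constraint(x + y <= 12)
    solver.set_objective_function(3 * x + 2 * y, maximize=True)
    assignment = solver.solve()  # best assignment found (format depends on the backend wrapper)
    print(assignment)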
| CL-LNS-main | ilp_solver.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from numpy.lib.utils import byte_bounds
class Solution:
def __init__(self, model, scip_solution, obj_value):
self.solution = {}
for v in model.getVars():
self.solution[v.name] = scip_solution[v]
self.obj_value = obj_value
def value(self, var):
return self.solution[var.name]
class Model:
def __init__(self, ecole_model, deep_copy=False):
assert not deep_copy
self.model = ecole_model.as_pyscipopt()
self.initial_vals = {}
for var in self.model.getVars():
self.initial_vals[var.getIndex()] = (var.getLbGlobal(), var.getUbGlobal())
def find_initial_solution(self, initial_time_limit=1):
old_time_limit = self.model.getParam('limits/time')
found = False
time_limit = initial_time_limit
while not found:
self.model.setParam('limits/time', time_limit)
self.model.optimize()
num_solutions_found = self.model.getNSols()
found = (num_solutions_found > 0)
time_limit *= 2
solution = self.model.getBestSol()
obj_value = self.model.getSolObjVal(solution)
self.model.setParam('limits/time', old_time_limit)
return Solution(self.model, solution, obj_value)
def get_primal_dual_bounds(self):
# Must have attempted to optimize the model before querying for bounds
if self.model.getNSols() == 0:
raise RuntimeError("Must find a solution before calling get_primal_dual_bounds()")
return (self.model.getPrimalbound(), self.model.getDualbound())
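    # Destroy-and-repair step of large neighborhood search: the variables in vars_to_unassign
    # are released back to their original bounds, every other variable is fixed to its value
    # in `solution`, and SCIP re-optimizes the restricted problem.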
def improve_solution(self, solution, vars_to_unassign):
unassign_set = set()
for v in vars_to_unassign:
unassign_set.add(v.getIndex())
preserve_set = {}
for v in self.model.getVars():
preserve_set[v.getIndex()] = solution.value(v)
self.model.freeTransform()
self.model.freeReoptSolve()
for var in self.model.getVars():
if var.getIndex() in unassign_set:
#print("Unassigning " + str(var.getIndex()) + " with " + str(var.getLbGlobal()) + " / " + str(var.getUbGlobal()))
lb, ub = self.initial_vals[var.getIndex()]
self.model.chgVarLb(var, lb)
self.model.chgVarLbGlobal(var, lb)
self.model.chgVarUb(var, ub)
self.model.chgVarUbGlobal(var, ub)
else:
val = preserve_set[var.getIndex()]
self.model.chgVarLb(var, val)
self.model.chgVarLbGlobal(var, val)
self.model.chgVarUb(var, val)
self.model.chgVarUbGlobal(var, val)
self.model.optimize()
assert self.model.getNSols() > 0
solution = self.model.getBestSol()
obj_value = self.model.getObjVal()
return Solution(self.model, solution, obj_value)
| CL-LNS-main | ilp_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import xpress as xp
# Wraps the Xpress solver (https://pypi.org/project/xpress/, doc available at
# https://www.fico.com/fico-xpress-optimization/docs/latest/solver/optimizer/python/HTML/GUID-616C323F-05D8-3460-B0D7-80F77DA7D046.html)
class XpressSolver:
def __init__(self, timeout_s=None):
self.vars = []
self.constraints = []
self.maximize = True
self.timeout = timeout_s
self.pd_gap = (None, None)
self.pd_integral = None
def create_integer_var(self, name, lower_bound, upper_bound):
v = xp.var(name=name, lb=lower_bound, ub=upper_bound, vartype=xp.integer)
self.vars.append(v)
return v
def create_real_var(self, name, lower_bound, upper_bound):
v = xp.var(name=name, lb=lower_bound, ub=upper_bound, vartype=xp.continuous)
self.vars.append(v)
return v
def create_binary_var(self, name):
v = xp.var(name=name, vartype=xp.binary)
self.vars.append(v)
return v
def set_objective_function(self, equation, maximize):
self.of = equation
self.maximize = maximize
def add_constraint(self, cns):
self.constraints.append(cns)
def disable_presolver(self):
# TBD
pass
def disable_cuts(self):
# TBD
pass
def disable_heuristics(self):
# TBD
pass
def solve(self):
# Solve the problem. Return the result as a dictionary of values
# indexed by the corresponding variables or an empty dictionary if the
# problem is infeasible.
p = self.as_xpress_problem()
# Make sure the problem is feasible
if p.iisfirst(0) == 0:
raise RuntimeError("Problem is not feasible")
# Solve and return the values for all the variables.
if self.timeout:
p.controls.maxtime = self.timeout
p.solve()
result = {}
for v in self.vars:
result[v] = p.getSolution(v)
# Record the value of the primal dual gap.
self.pd_gap = (p.getAttrib("mipbestobjval"), p.getAttrib("bestbound"))
self.pd_integral = p.getAttrib("primaldualintegral")
return result
def primal_dual_gap(self):
return self.pd_gap
def primal_dual_integral(self):
return self.pd_integral
def load(self, mps_filename):
# Not supported yet.
assert False
def export(self, lp_output_filename):
p = xp.problem(self.vars, self.of, self.constraints)
if self.maximize:
p.chgobjsense(xp.maximize)
else:
p.chgobjsense(xp.minimize)
p.write(lp_output_filename, "lp")
def as_xpress_problem(self):
p = xp.problem(self.vars, self.of, self.constraints)
if self.maximize:
p.chgobjsense(xp.maximize)
else:
p.chgobjsense(xp.minimize)
return p
| CL-LNS-main | xpress_solver.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import os.path
import tarfile
import zipfile
import ecole
import geco
import geco.generator
import glob
import re
import json
import pyscipopt
import hashlib
import string
import random
import pyscipopt
class InstanceLoader:
LOCAL_INSTANCE = {
"INDSET_test": "instances/INDSET_ER_6000/instance_ER4_*.cip",
"INDSET_train": "instances/INDSET_ER_6000/train/train_instance_ER4_*.cip",
}
ECOLE = {
# The settings are taken from the Gasse paper
# (https://papers.nips.cc/paper/2019/file/d14c2267d848abeb81fd590f371d39bd-Paper.pdf)
"SET_COVER_EASY": ecole.instance.SetCoverGenerator(n_rows=500, n_cols=500, density=0.05),
"SET_COVER_MEDIUM": ecole.instance.SetCoverGenerator(n_rows=500, n_cols=1000, density=0.05),
"SET_COVER_HARD": ecole.instance.SetCoverGenerator(n_rows=500, n_cols=2000, density=0.05),
"INDEPENDENT_SET_EASY": ecole.instance.IndependentSetGenerator(n_nodes=500),
"INDEPENDENT_SET_MEDIUM": ecole.instance.IndependentSetGenerator(n_nodes=1000),
"INDEPENDENT_SET_HARD": ecole.instance.IndependentSetGenerator(n_nodes=1500),
"AUCTION_EASY": ecole.instance.CombinatorialAuctionGenerator(n_items=100, n_bids=500),
"AUCTION_MEDIUM": ecole.instance.CombinatorialAuctionGenerator(n_items=200, n_bids=1000),
"AUCTION_HARD": ecole.instance.CombinatorialAuctionGenerator(n_items=300, n_bids=1500),
"FACILITY_EASY": ecole.instance.CapacitatedFacilityLocationGenerator(n_facilities=100, n_customers=100),
"FACILITY_MEDIUM": ecole.instance.CapacitatedFacilityLocationGenerator(n_facilities=100, n_customers=200),
"FACILITY_HARD": ecole.instance.CapacitatedFacilityLocationGenerator(n_facilities=100, n_customers=400),
}
GECO = {
# Instances from the GeCO generator
"KNAPSACK_UC": lambda seed: geco.mips.knapsack.pisinger.uncorrelated(n=1974, c=2864, seed=seed),
"KNAPSACK_WC": lambda seed: geco.mips.knapsack.pisinger.weakly_correlated(n=1974, c=2864, seed=seed),
"KNAPSACK_SC": lambda seed: geco.mips.knapsack.pisinger.strongly_correlated(n=1974, c=2864, seed=seed),
"KNAPSACK_ISC": lambda seed: geco.mips.knapsack.pisinger.inverse_strongly_correlated(n=1974, c=2864, seed=seed),
"KNAPSACK_ASC": lambda seed: geco.mips.knapsack.pisinger.almost_strongly_correlated(n=1974, c=2864, seed=seed),
"KNAPSACK_SUBSET_SUM": lambda seed: geco.mips.knapsack.pisinger.subset_sum(n=1974, c=2864, seed=seed),
"KNAPSACK_UWSW": lambda seed: geco.mips.knapsack.pisinger.uncorrelated_with_similar_weights(n=1974, c=2864, seed=seed),
#"KNAPSACK_SPANNER": lambda seed: geco.mips.knapsack.pisinger.spanner(v=345, m=2, n=995, distribution=**uncorrelated_distribution(), capacity=1720, seed=seed),
"KNAPSACK_PROFIT_CEILING": lambda seed: geco.mips.knapsack.pisinger.profit_ceiling(n=2974, c=1864, d=1.5, seed=seed),
"KNAPSACK_CIRCLE": lambda seed: geco.mips.knapsack.pisinger.circle(n=1974, c=2864, seed=seed),
"KNAPSACK_MSC": lambda seed: geco.mips.knapsack.pisinger.multiple_strongly_correlated(n=1974, c=2864, k1=1, k2=2, d=3, seed=seed),
"KNAPSACK_YANG": lambda seed: geco.mips.knapsack.yang.yang_instance(n=2368, seed=seed),
"SCHEDULING_HEINZ": lambda seed: geco.mips.scheduling.heinz.heinz_instance(number_of_facilities=43, number_of_tasks=114, seed=seed),
"SCHEDULING_HOOKER": lambda seed: geco.mips.scheduling.hooker.hooker_instance(number_of_facilities=23, number_of_tasks=73, time_steps=25, seed=seed),
"SET_PACKING": lambda seed: geco.mips.set_packing.yang.yang_instance(m=734, seed=seed),
"SET_COVER_SUN": lambda seed: geco.mips.set_cover.sun.sun_instance(n=1927, m=1467, seed=seed),
"SET_COVER_YANG": lambda seed: geco.mips.set_cover.yang.yang_instance(m=1513, seed=seed),
#"PRODUCTION_PLANNING": lambda seed: geco.mips.production_planning.tang.tang_instance(T=5, seed=seed),
"MAX_INDEPENDENT_SET": lambda seed: geco.mips.independent_set.barabasi_albert.barabasi_albert_instance(m=10, n=100, seed=seed),
"MAX_CUT": lambda seed: geco.mips.max_cut.tang.tang_instance(n=593, m=684, seed=seed),
"PACKING": lambda seed: geco.mips.packing.tang.tang_instance(n=321, m=428, seed=seed),
#"GRAPH_COLORING": lambda seed: geco.mips.graph_coloring.generic.assigment(seed=seed),
#"FACILITY_CORNUEJOLS": lambda seed: geco.mips.facility_location.cornuejols.cornuejols_instance(n_customers=385, n_facilities=683, ratio=.95, seed=seed),
}
GECO_MIPLIB = {
"MIPLIB_BENCHMARK": geco.mips.loading.miplib.benchmark_instances(),
"MIPLIB_EASY": geco.mips.loading.miplib.easy_instances(),
"MIPLIB_HARD": geco.mips.loading.miplib.hard_instances(),
"MIPLIB_OPEN": geco.mips.loading.miplib.open_instances(),
#"ORLIB": geco.mips.loading.orlib_load_instance(),
}
DATASETS = {
"BCOL": "mip_BCOL-CLS.tar.gz",
"CORLAT": "mip_COR-LAT.tar.gz",
"MIPLIB": "collection.zip",
"MIPLIB_FILTERED": "collection.zip",
"RCW2": "mip_RCW2.tar.gz",
"Regions200": "mip_Regions200.tar.gz",
}
COMPETITION = {
"ANONYMOUS": "anonymous.tar.gz",
"ITEM_PLACEMENT": "item_placement.tar.gz",
"LOAD_BALANCING": "load_balancing.tar.gz",
}
#DFLT_TMP_FILE_LOC = "/tmp/" + str(os.geteuid()) + "/"
#DFLT_TMP_FILE_LOC = "/tmp/" + str(2575) + "/"
DFLT_TMP_FILE_LOC = ""
def __init__(self, dataset_loc = "", tmp_file_loc = DFLT_TMP_FILE_LOC, mode="*", repeat=False, presolve=True, competition_settings=True, load_metadata=False, shard=0, shard_count=0, pprocess = False):
dataset_loc = os.path.expanduser(dataset_loc)
#try:
# os.mkdir(tmp_file_loc)
#except FileExistsError:
# pass
self.dataset_loc = dataset_loc
self.tmp_file_loc = tmp_file_loc
self.mode = mode
self.repeat = repeat
self.presolve = presolve
self.competition_settings=competition_settings
self.load_metadata = load_metadata
self.shard = shard
self.shard_count = shard_count
self.filtered_instances = []
self.post_process = pprocess
assert shard >= 0
assert (shard < shard_count or shard_count == 0)
@staticmethod
def hash_model(model):
letters = string.ascii_letters
tmp_file = '/tmp/' + ''.join(random.choice(letters) for i in range(10)) + '.lp'
model.as_pyscipopt().writeProblem(tmp_file)
with open(tmp_file, 'r') as f:
problem = f.read()
problem = problem.encode()
key = hashlib.blake2s(problem, digest_size=4).hexdigest()
return key
def load(self, dataset_name):
if not self.repeat:
for m in self.load_datasets(dataset_name):
yield m
else:
while True:
for m in self.load_datasets(dataset_name):
yield m
def load_datasets(self, dataset_name):
datasets = dataset_name.split('+')
for d in datasets:
for m in self.load_once(d):
yield m
print(self.filtered_instances)
def load_once(self, dataset_name):
if dataset_name in self.ECOLE:
return self.load_ecole(dataset_name)
elif dataset_name in self.GECO:
return self.load_geco(dataset_name)
elif dataset_name in self.GECO_MIPLIB:
return self.load_geco_miplib(dataset_name)
elif dataset_name in self.LOCAL_INSTANCE:
return self.load_local_instance(dataset_name)
elif dataset_name in self.COMPETITION:
return self.load_competition(dataset_name)
filename = self.DATASETS[dataset_name]
local_version = os.path.join(self.dataset_loc, filename)
if zipfile.is_zipfile(local_version):
return self.load_zip(local_version)
elif tarfile.is_tarfile(local_version):
filter = re.compile(".+mps|.+lp")
return self.load_tar(local_version, filter=filter)
else:
assert False
def setup(self, ecole_model):
if self.competition_settings:
#print("disabling")
# disable SCIP heuristics and restarts
scip_model = ecole_model.as_pyscipopt()
scip_model.setHeuristics(pyscipopt.scip.PY_SCIP_PARAMSETTING.OFF)
ecole_model.set_params({
'estimation/restarts/restartpolicy': 'n',
})
def preprocess(self, ecole_model):
self.setup(ecole_model)
#print(self.presolve)
if self.presolve:
return ## NEVER presolve
print("presolving mip")
ecole_model.presolve()
def load_zip(self, local_version):
with zipfile.ZipFile(local_version) as z:
if self.shard_count:
files = z.namelist()
shard = files[slice(self.shard, None, self.shard_count)]
else:
shard = z.namelist()
for member in shard:
f = z.extract(member, path=self.tmp_file_loc)
instance = os.path.join(self.tmp_file_loc, member)
#yield instance #bad coding :( this is just for loading MIPLIB instance
continue
#ecole_model = ecole.scip.Model.from_file(instance)
temp_model = pyscipopt.Model()
print(instance)
temp_model.readProblem(instance)
if temp_model.getNVars() != temp_model.getNBinVars():
continue
#self.filtered_instances.append(member)
#print(self.filtered_instances)
ecole_model = ecole.scip.Model.from_pyscipopt(temp_model)
self.preprocess(ecole_model)
if not ecole_model.is_solved:
yield ecole_model
def load_tar(self, local_version, filter=None, presolved=False):
with tarfile.open(local_version) as t:
members = t.getmembers()
            if self.shard_count:
members = members[slice(self.shard, None, self.shard_count)]
for member in members:
if not member.isfile():
continue
if filter and not filter.match(member.name):
continue
f = t.extract(member, path=self.tmp_file_loc)
instance = os.path.join(self.tmp_file_loc, member.name)
#ecole_model = ecole.scip.Model.from_file(instance)
temp_model = pyscipopt.Model()
temp_model.readProblem(instance)
ecole_model = ecole.scip.Model.from_pyscipopt(temp_model)
self.setup(ecole_model)
if self.presolve and not presolved:
ecole_model.presolve()
if ecole_model.is_solved:
continue
if not self.load_metadata:
yield ecole_model
else:
metadata_loc = member.name.replace('mps', 'json')
f = t.extract(metadata_loc, path=self.tmp_file_loc)
raw_metadata = os.path.join(self.tmp_file_loc, metadata_loc)
with open(raw_metadata) as f:
metadata = json.load(f)
yield (ecole_model, metadata)
def load_ecole(self, instance_type):
instances = self.ECOLE[instance_type]
instances.seed(self.shard)
for ecole_model in instances:
self.preprocess(ecole_model)
if not ecole_model.is_solved:
yield ecole_model
def load_geco(self, instance_type):
generator = self.GECO[instance_type]
for m in geco.generator.generate(generator, seed=self.shard):
ecole_model = ecole.scip.Model.from_pyscipopt(m)
self.preprocess(ecole_model)
if not ecole_model.is_solved:
yield ecole_model
def load_geco_miplib(self, instance_type):
# Sharding not supported yet
assert self.shard_count == 0
instances = self.GECO_MIPLIB[instance_type]
for m in instances:
ecole_model = ecole.scip.Model.from_pyscipopt(m)
self.preprocess(ecole_model)
if not ecole_model.is_solved:
yield ecole_model
def load_local_instance(self, instance_type):
# Sharding not supported yet
assert self.shard_count == 0
dir = self.LOCAL_INSTANCE[instance_type]
for instance in glob.glob(dir):
print(instance)
temp_model = pyscipopt.Model()
if self.post_process:
yield temp_model
continue
temp_model.readProblem(instance)
ecole_model = ecole.scip.Model.from_pyscipopt(temp_model)
#ecole_model = ecole.scip.Model.from_file(instance)
self.preprocess(ecole_model)
#self.setup(ecole_model)
yield ecole_model
def load_competition(self, instance_type):
filename = self.COMPETITION[instance_type]
local_version = os.path.join(self.dataset_loc, filename)
filter = re.compile(".+mps")
return self.load_tar(local_version, filter=filter, presolved=True)
if __name__ == '__main__':
loader = InstanceLoader()
for m in loader.load("KNAPSACK_YANG"):
print(str(m))
break
| CL-LNS-main | instance_loader.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import multiprocessing
import os
import re
import subprocess
import sys
import sysconfig
from distutils.version import LooseVersion
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test
class CMakeExtension(Extension):
def __init__(self, name, src_dir=""):
super(CMakeExtension, self).__init__(name, sources=[])
self.src_dir = os.path.abspath(src_dir)
class CMakeBuild(build_ext):
def run(self):
try:
cmake_version = subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
cmake_version = LooseVersion(
re.search(r"version\s*([\d.]+)", cmake_version.decode()).group(1))
if cmake_version < "3.14":
raise RuntimeError("CMake >= 3.14 is required.")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
ext_dir = os.path.abspath(os.path.dirname(
self.get_ext_fullpath(ext.name)))
cmake_args = ["-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" +
ext_dir, "-DPYTHON_EXECUTABLE=" + sys.executable]
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", f"-j{multiprocessing.cpu_count()}"]
env = os.environ.copy()
env["CXXFLAGS"] = f'{env.get("CXXFLAGS", "")} \
-DVERSION_INFO="{self.distribution.get_version()}"'
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(["cmake", ext.src_dir] +
cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(["cmake", "--build", "."] +
build_args, cwd=self.build_temp)
print() # Add an empty line for cleaner output
setup(
name="ml4co",
version="0.1",
packages=["ml4co", "ml4co.ops"],
description="",
long_description="",
# add extension module
ext_modules=[CMakeExtension("ml4co", "./ml4co")],
# add custom build_ext command
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
url="",
)
| CL-LNS-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import submitit
import os
import argparse
from graph_datasets.bipartite_graph_loader import BipartiteGraphLoader
import torch
from torch import autograd
import glob
import torch.nn.functional as F
import torch_geometric
import time
from graph_datasets.bipartite_graph_dataset import BipartiteGraphDataset, BipartiteGraphDatasets
from neural_nets.gnn_policy import GNNPolicy
from neural_nets.losses import LogScoreLoss, LinearScoreLoss
from tensorboardX import SummaryWriter as SummaryWriter
import numpy as np
import math
from IPython import embed
from graph_datasets.bipartite_graph_observations import augment_variable_features_with_dynamic_ones
from torchmetrics.functional import auroc
from os.path import exists
import pickle
import sys
from pytorch_metric_learning import losses
from pytorch_metric_learning.distances import DotProductSimilarity
class Args:
pass
def multi_hot_encoding(input):
max_val = torch.max(input, -1, keepdim=True).values - 1.0e-10
multihot = input >= max_val
return multihot.float()
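# Worked example (illustrative): for input [[0.2, 0.9, 0.9]] the row maximum is 0.9, so every
# entry within 1e-10 of it is marked and the result is [[0., 1., 1.]].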
initial_solution = dict()
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#DEVICE = 'cpu'
log_score_loss_function = LogScoreLoss().to(DEVICE)
linear_score_loss_function = LinearScoreLoss().to(DEVICE)
bce_loss_function = torch.nn.BCEWithLogitsLoss(reduction="none").to(DEVICE)
infoNCE_loss_function = losses.NTXentLoss(temperature=0.07,distance=DotProductSimilarity()).to(DEVICE)
#data_loc = "training_data/"
def pad_tensor(input, pad_sizes, normalize, pad_value=-1e10):
"""
This utility function splits a tensor and pads each split to make them all the same size, then stacks them.
"""
max_pad_size = pad_sizes.max()
output = input.split(pad_sizes.cpu().numpy().tolist())
processed = []
for i in range(len(output)):
slice = output[i]
if normalize:
# Normalize the scores to ensure they fall in the [-1, 1] range
max_val = torch.max(abs(output[i]))
print(max_val)
slice /= max_val
processed.append(F.pad(slice, (0, max_pad_size-slice.size(0)), 'constant', pad_value))
output = torch.stack(processed, dim=0)
#output = torch.stack([F.pad(slice_, (0, max_pad_size-slice_.size(0)), 'constant', pad_value)
# for slice_ in output], dim=0)
return output
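# Worked example (illustrative): with input = [1, 2, 3, 4, 5, 6] and pad_sizes = [2, 4], the
# splits are [1, 2] and [3, 4, 5, 6]; the first is right-padded with pad_value to length 4 and
# the two are stacked into a (2, 4) tensor.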
def load_policy_from_checkpoint(args):
policy = GNNPolicy(args.gnn_type)
try:
ckpt = torch.load(args.warmstart, map_location=DEVICE)
try_again = False
except Exception as e:
print("Checkpoint " + args.checkpoint + " not found, bailing out: " + str(e))
sys.exit(1)
policy.load_state_dict(ckpt.state_dict())
#policy = policy.to(DEVICE)
#model_version = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print("Loaded checkpoint")
print(f"Will run evaluation on {DEVICE} device", flush=True)
#embed()
return policy
def process(args, policy, data_loader, optimizer=None):
"""
This function will process a whole epoch of training or validation, depending on whether an optimizer is provided.
"""
prefix = "Train" if optimizer else "Eval"
#embed()
if args.loss == "linear_score":
loss_function = linear_score_loss_function
elif args.loss == "log_score":
loss_function = log_score_loss_function
else:
loss_function = bce_loss_function
mean_loss = 0.0
mean_acc = 0.0
mean_auc = 0.0
mean_offby = 0.0
top_k = [1, 3, 5, 10]
k_acc = [0.0, 0.0, 0.0, 0.0]
n_iters = 0
n_samples_processed = 0
n_positive_samples = 0
n_negative_samples = 0
start = time.time()
n_samples_previously_processed = 0
history_window_size = 3
with torch.set_grad_enabled(optimizer is not None):
for batch in data_loader:
assert not torch.isnan(batch.constraint_features).any()
assert not torch.isnan(batch.edge_attr).any()
assert not torch.isnan(batch.variable_features).any()
assert not torch.isnan(batch.edge_index).any()
assert not torch.isinf(batch.constraint_features).any()
assert not torch.isinf(batch.edge_attr).any()
assert not torch.isinf(batch.variable_features).any()
assert not torch.isinf(batch.edge_index).any()
batch = batch.to(DEVICE)
# TO DO: Fix the dataset instead
if torch.isnan(batch.candidate_scores).any():
print("Skipping batch with NaN scores")
continue
global initial_solution
batch = augment_variable_features_with_dynamic_ones(batch, args, initial_solution)
# Compute the logits (i.e. pre-softmax activations) according to the policy on the concatenated graphs
try:
logits = policy(batch.constraint_features, batch.edge_index, batch.edge_attr, batch.variable_features)
except RuntimeError as e:
print("Skipping batch due to error: " + str(e))
continue
# Index the results by the candidates, and split and pad them
#pred_scores = pad_tensor(logits[batch.candidates], batch.nb_candidates, normalize=False)
pred_scores = pad_tensor(logits, batch.nb_candidates, normalize=False)
#pred_scores = torch.sigmoid(pred_scores)
true_scores = pad_tensor(batch.candidate_scores, batch.nb_candidates, normalize=False)
assert not torch.isnan(pred_scores).any()
assert not torch.isnan(true_scores).any()
#assert not torch.isnan(batch.candidate_choices).any()
if args.loss == "cross_entropy":
# Compute the usual cross-entropy classification loss
loss = F.cross_entropy(pred_scores, batch.candidate_choices)
elif args.loss == "bce":
multi_hot_labels = multi_hot_encoding(true_scores)
#print("lost function is bce")
raw_loss = bce_loss_function(pred_scores, multi_hot_labels)
batch_loss = torch.mean(raw_loss, 1)
loss_sum = torch.sum(torch.mul(batch_loss, batch.batch_weight))
loss = torch.div(loss_sum, torch.sum(batch.batch_weight))
elif args.loss == "nt_xent":
# # Try https://kevinmusgrave.github.io/pytorch-metric-learning/losses/#ntxentloss
# # Can also try https://kevinmusgrave.github.io/pytorch-metric-learning/losses/#supconloss.
# assert False # TBD
# loss = loss_function(pred_labels, true_labels)
#embed()
batch_size = pred_scores.shape[0]
multi_hot_labels = multi_hot_encoding(true_scores)
embeddings = torch.sigmoid(pred_scores)
anchor_positive = []
anchor_negative = []
positive_idx = []
negative_idx = []
total_sample = batch_size
#embed()
for i in range(batch_size):
if batch.batch_weight[i].item() == 1:
#embed()
#anchor.append(i)
if len(batch.info["positive_samples"][i]) == 0: #due to unknown bugs for SC
#embed()
continue
ground_truth_improvement = max(batch.info["positive_labels"][i])
for j in range(len(batch.info["positive_samples"][i])):
improvement_j = batch.info["positive_labels"][i][j]
if improvement_j >= ground_truth_improvement * 0.5:
anchor_positive.append(i)
positive_idx.append(total_sample)
embeddings = torch.cat([embeddings, torch.tensor([batch.info["positive_samples"][i][j]]).to(DEVICE)])
total_sample += 1
n_positive_samples += 1
for j in range(len(batch.info["negative_samples"][i])):
improvement_j = batch.info["negative_labels"][i][j]
if improvement_j <= ground_truth_improvement * 0.05:
anchor_negative.append(i)
negative_idx.append(total_sample)
embeddings = torch.cat([embeddings, torch.tensor([batch.info["negative_samples"][i][j]]).to(DEVICE)])
total_sample += 1
n_negative_samples += 1
triplets = (torch.tensor(anchor_positive).to(DEVICE), torch.tensor(positive_idx).to(DEVICE), torch.tensor(anchor_negative).to(DEVICE), torch.tensor(negative_idx).to(DEVICE))
loss = infoNCE_loss_function(embeddings, indices_tuple = triplets)
else:
# use the log or linear score loss
normalized_scores = normalize_tensor(batch.candidate_scores)
loss = loss_function(logits[batch.candidates], normalized_scores)
if math.isnan(loss.item()):
continue
assert not math.isnan(loss.item())
if not (loss.item() >= 0 or torch.sum(batch.batch_weight).item() == 0):
print("Error")
embed()
assert loss.item() >= 0 or torch.sum(batch.batch_weight).item() == 0, f"loss = {loss.item()}, #samples = {torch.sum(batch.batch_weight).item()}"
if optimizer is not None:
optimizer.zero_grad()
loss.backward()
optimizer.step()
#embed()
mean_loss += loss.item() * torch.sum(batch.batch_weight).item()
#mean_loss += loss_sum.item()
n_samples_processed += torch.sum(batch.batch_weight).item()# batch.num_graphs
n_iters += 1
#embed()
for i in range(multi_hot_labels.shape[0]):
if batch.batch_weight[i].item() == 0:
continue
mean_auc += auroc(torch.sigmoid(pred_scores)[i], multi_hot_labels.int()[i], pos_label = 1).item()
if n_iters % args.checkpoint_every == 0:
end = time.time()
speed = (n_samples_processed - n_samples_previously_processed) / (end - start)
start = time.time()
n_samples_previously_processed = n_samples_processed
print(f"{prefix} loss: {mean_loss/n_samples_processed:0.3f}, auc: {mean_auc/n_samples_processed:0.3f}, speed: {speed} samples/s")
if optimizer:
print("Checkpointing model")
torch.save(policy, args.checkpoint)
if n_samples_processed > 0:
mean_loss /= n_samples_processed
mean_acc /= n_samples_processed
mean_auc /= n_samples_processed
mean_offby /= n_samples_processed
for i in range(len(k_acc)):
k_acc[i] /= n_samples_processed
else:
mean_loss = float("inf")
mean_acc = 0
mean_offby = float("inf")
mean_auc = 0
for i in range(len(k_acc)):
k_acc[i] = 0
print("n_samples_processed", n_samples_processed)
return mean_loss, mean_auc #, mean_offby, k_acc
def train_model(args):
train_loader = BipartiteGraphLoader(args.train_db, shuffle=True, first_k=args.train_db_first_k)
valid_loader = BipartiteGraphLoader(args.valid_db, shuffle=False)
print(f"Training on {train_loader.num_examples()} examples")
print(f"Evaluating on {valid_loader.num_examples()} examples")
#from IPython import embed; embed()
print(F"Using DEVICE {DEVICE}")
tb_writer = SummaryWriter(log_dir=args.tensorboard, comment="neural_LNS")
policy = GNNPolicy(args.gnn_type).to(DEVICE)
if not (args.warmstart is None):
print("Warnstarting training, loading from checkpoint %s"%(args.warmstart))
policy = load_policy_from_checkpoint(args)
policy = policy.to(DEVICE)
print(f"Checkpoint will be saved to {args.checkpoint}")
num_of_parameters = sum(p.numel() for p in policy.parameters() if p.requires_grad)
print("number of parameters =", num_of_parameters)
learning_rate = args.lr
best_valid_loss = float("inf")
last_improved = 0
optimizer = torch.optim.AdamW(policy.parameters(), lr=learning_rate, weight_decay=args.weight_decay, amsgrad=True)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 5, eta_min=learning_rate/10, verbose=False)
for epoch in range(args.num_epochs):
start = time.time()
print(f"Starting epoch {epoch+1}", flush=True)
with autograd.set_detect_anomaly(args.detect_anomalies):
train_iterator = train_loader.load(batch_size=args.batch_size) #32
train_loss, train_auc = process(args, policy, train_iterator, optimizer)
print(f"Train loss: {train_loss:0.3f}, Train auc: {train_auc:0.3f}")
valid_iterator = valid_loader.load(batch_size=args.batch_size) #32
valid_loss, valid_auc = process(args, policy, valid_iterator, None)
print(f"Valid loss: {valid_loss:0.3f}, Valid auc: {valid_auc:0.3f}")
end = time.time()
tb_writer.add_scalar("Train/Loss", train_loss, global_step=epoch)
tb_writer.add_scalar("Train/Auc", train_auc, global_step=epoch)
tb_writer.add_scalar("Valid/Loss", valid_loss, global_step=epoch)
tb_writer.add_scalar("Valid/Auc", valid_auc, global_step=epoch)
# Done with one epoch, we can freeze the normalization
policy.freeze_normalization()
# Anneal the learning rate if requested
if args.anneal_lr:
scheduler.step()
# Save the trained model
print(f"Done with epoch {epoch+1} in {end-start:.1f}s, checkpointing model", flush=True)
torch.save(policy, args.checkpoint+"_epoch%d"%(epoch))
# Check if we need to abort, adjust the learning rate, or just give up
if math.isnan(train_loss) or math.isnan(valid_loss):
print("NaN detected in loss, aborting")
break
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
last_improved = epoch
print("Checkpointing new best model in " + args.checkpoint + "_best")
torch.save(policy, args.checkpoint + "_best")
elif epoch - last_improved > args.give_up_after:
print("Validation loss didn't improve for too many epochs, giving up")
break
elif epoch - last_improved > args.decay_lr_after:
learning_rate /= 2
print(f"Adjusting the learning rate to {learning_rate}")
optimizer = torch.optim.Adam(policy.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 5, eta_min=learning_rate/10, verbose=False)
# Give the model some time to improve with the new learning rate
last_improved = epoch
def train(problem, gnn_type = "gat", feature_set = "feat2", batch_size = 32, warmstart = None, loss = "bce", notes = '', data_loc = None):
print("Starting training model on " + problem, flush=True)
print("gnn_type = ", gnn_type, "feature_set=", feature_set)
assert not (data_loc is None), "no training data location provided"
save_to_folder = "model/model_%s_%s_%s_%s_%s/" % (problem, feature_set, "no" if warmstart is None else "warmstart", loss, notes)
try:
os.mkdir(save_to_folder)
except OSError as error:
print(error)
args = Args()
args.problem = problem
args.num_epochs=30
args.batch_size = batch_size
args.lr=0.001
args.anneal_lr = False
args.decay_lr_after=20
args.give_up_after=100
args.train_db_first_k=None
args.weight_decay=0.00005
args.window_size = 3
args.loss = loss
args.gnn_type = gnn_type
experiment = feature_set + "_" + args.gnn_type
args.experiment = experiment
args.warmstart = warmstart
args.tensorboard = save_to_folder + "neural_LNS_" + problem + "_" + experiment + ".tb"
args.checkpoint = save_to_folder + "neural_LNS_" + problem + "_" + experiment + ".pt"
args.checkpoint_every=40
train_dbs = []
valid_dbs = []
    db_pattern = data_loc + "/*.db"
    num_data_file = len(glob.glob(db_pattern))
    validation_cutoff = int(num_data_file * 0.125)
    for i, dataset in enumerate(glob.glob(db_pattern)):
        try:
            train_loader = BipartiteGraphLoader(dataset, shuffle=True)
        except Exception:
            # Skip databases that cannot be opened
            continue
if train_loader.num_examples() == 0:
continue
if i >= validation_cutoff:
train_dbs.append(dataset)
else:
valid_dbs.append(dataset)
args.train_db = "+".join(train_dbs)
args.valid_db = "+".join(valid_dbs)
args.detect_anomalies = False
train_model(args)
torch.cuda.empty_cache()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--problem-set", default="INDSET_train",
help="Problem set")
parser.add_argument("--gnn-type", default="gat", type=str,
help="GNN type: gasse or gat")
parser.add_argument("--feature-set", default="feat2", type=str,
help="feat1: Gasse's feature only; feat2: Gasse+Khalil features; feat3: feat2+LB RELAX features")
parser.add_argument("--loss", default="nt_xent", type=str,
help="nt_xent: contrastive loss; bce: bce loss")
parser.add_argument("--data-loc", default=None, type=str,
help="Provide the dataset folder location")
parser.add_argument("--wind-size", default=3, type = int,
help="window size = the number of past incumbent features in features")
input_args = parser.parse_args()
if input_args.data_loc is None:
input_args.data_loc = "training_data/" + input_args.problem_set
train(input_args.problem_set, gnn_type = input_args.gnn_type, feature_set = input_args.feature_set, loss = input_args.loss, data_loc = input_args.data_loc)
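
# Illustrative command line (flag names come from the argument parser above; the
# data location is an assumption and defaults to training_data/<problem-set>):
#   python train_neural_LNS.py --problem-set INDSET_train --gnn-type gat \
#       --feature-set feat2 --loss nt_xent --data-loc training_data/INDSET_train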
| CL-LNS-main | train_neural_LNS.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from graph_datasets.bipartite_graph import *
from graph_datasets.bipartite_graph_dataset import BipartiteGraphDataset
import graph_datasets.bipartite_graph_observations as bgo
from instance_loader import InstanceLoader
from ilp_model import Solution
import argparse
import copy
import random
import pyscipopt
from neural_nets.gnn_policy import GNNPolicy
from pyscipopt import quicksum
import time
import ecole
import networkx as nx
import pickle
import statistics
from graph_datasets.featurization_test import make_obs
import os
import sys
from IPython import embed
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
global WIND_SIZE
WIND_SIZE = 3
COLLECT_SOLVE_TIME_LIMIT = 60 * 60 * 1  # 1 hour
STEP_PER_COLLECT = 1
class MyEvent(pyscipopt.Eventhdlr):
def eventinit(self):
print("init event")
self._start_time = time.monotonic()
self.scip_log = []
self.start_time = time.monotonic()
self.model.catchEvent(pyscipopt.SCIP_EVENTTYPE.BESTSOLFOUND, self)
def eventexit(self):
print("exit event")
#self.model.dropEvent(pyscipopt.SCIP_EVENTTYPE.BESTSOLFOUND, self)
def eventexec(self, event):
print("exec event")
self.end_time = time.monotonic()
#obj = self.model.getPrimalbound()
#print(obj, self.end_time - self._start_time)
sol = self.model.getBestSol()
obj = self.model.getSolObjVal(sol)
Sol = Solution(self.model, sol, obj)
log_entry = dict()
log_entry['best_primal_sol'] = Sol
log_entry['best_primal_scip_sol'] = sol
log_entry['primal_bound'] = obj
log_entry['solving_time'] = self.end_time - self.start_time
log_entry['iteration_time'] = self.end_time - self.start_time
log_entry['selection_time'] = 0
var_index_to_value = dict()
for v in self.model.getVars():
v_name = v.name
v_value = Sol.value(v)
var_index_to_value[v_name] = v_value
log_entry['var_index_to_value'] = copy.deepcopy(var_index_to_value)
self.scip_log.append(log_entry)
self.start_time = self.end_time
#print(log_entry['primal_bound'], log_entry['solving_time'], self.end_time - self._start_time)
def run_vanilla_scip(model, args):
model = model.__repr__.__self__
event = MyEvent()
model.includeEventhdlr(
event,
"",
""
)
model.setParam("limits/time", args.time_limit)
if "AGGR" in args.destroy_heuristic:
print("Enabled aggressive mode for BnB with SCIP")
model.setHeuristics(pyscipopt.scip.PY_SCIP_PARAMSETTING.AGGRESSIVE)
model.optimize()
return event.scip_log
NUM_OF_EXPERT_SAMPLES = 50
def isInteger(x):
    return abs(x - round(x)) <= 1e-8
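# e.g. isInteger(2.000000001) -> True (within the 1e-8 tolerance), isInteger(2.1) -> False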
def add_scip_config_to_mip_model(model, scip_config):
    for param, value in scip_config.items():
        model.setRealParam(param, value)
    return model
def scip_solve(model, incumbent_solution = None, scip_config = None, timer = None, get_initial_solution = False, primal_bound = None, prev_LNS_log = None, get_num_solutions = 1, mute = False, isMIPLIB = False):
start_time = time.monotonic()
if primal_bound is not None:
objective_sense = model.getObjectiveSense()
if objective_sense == "minimize":
model.addCons(model.getObjective() <= primal_bound + 1e-8)
#if not mute:
#print("---added a new constraint using the primal bound for minimization")
else:
model.addCons(model.getObjective() >= primal_bound - 1e-8)
#if not mute:
#print("---added a new constraint using the primal bound for maximization")
#print("added a new constraint using the primal bound")
#model = add_scip_config_to_mip_model(model, scip_config)
if scip_config is not None:
for param, value in scip_config.items():
#print(param, value)
model.setParam(param, value)
found = True
init_time = None
if get_initial_solution == True:
found = False
#runtime_left = model.getParam('limits/time')
runtime_left = 900
#time_limit = 610 if isMIPLIB else 10
time_limit = model.getParam('limits/time')
while not found and time_limit <= runtime_left:
#if time_limit * 2 >= runtime_left:
# time_limit = runtime_left
#time_limit = min(time_limit, runtime_left)
model.setParam('limits/time', time_limit)
start_time = time.monotonic()
#embed()
model.optimize()
end_time = time.monotonic()
init_time = end_time - start_time
num_solutions_found = model.getNSols()
found = (num_solutions_found > 0)
#runtime_left -= time_limit
            if time_limit >= runtime_left - 1e-3:  # the time limit has reached the remaining budget; stop doubling
break
time_limit *= 2
time_limit = min(time_limit, runtime_left)
else:
model.optimize()
end_time = time.monotonic()
init_time = end_time - start_time
if not mute:
print("finished optimizing sub mip")
end_time = time.monotonic()
    status = model.getGap()  # note: despite the name, this holds the final MIP gap (was model.getStatus())
log_entry = None
if found == True:
if model.getNSols() == 0: # if no solution in a LNS iteration, then return the same copy of the previous log but change the runtime
if prev_LNS_log is None:
return -1, None
log_entry = dict()
for k, v in prev_LNS_log.items():
log_entry[k] = v
#log_entry = copy.deepcopy(prev_LNS_log)
log_entry['solving_time'] = init_time
return status, log_entry
sol = model.getBestSol()
obj = model.getSolObjVal(sol)
Sol = Solution(model, sol, obj)
log_entry = {}
log_entry['best_primal_sol'] = Sol
log_entry['best_primal_scip_sol'] = sol
log_entry['primal_bound'] = obj
if not (init_time is None):
log_entry['solving_time'] = init_time
log_entry['iteration_time'] = init_time
else:
log_entry['solving_time'] = end_time - start_time
log_entry['iteration_time'] = end_time - start_time
log_entry['selection_time'] = 0
var_index_to_value = dict()
for v in model.getVars():
v_name = v.name
v_value = Sol.value(v)
var_index_to_value[v_name] = v_value
log_entry['var_index_to_value'] = copy.deepcopy(var_index_to_value)
if get_num_solutions > 1:
var_index_to_values = dict()
for v in model.getVars():
var_index_to_values[v.name] = []
#embed()
sol_list = model.getSols()
obj_list = []
sol_list.reverse()
#if len(sol_list) > 30:
# sol_list= sol_list[:30]
for sol in sol_list:
                obj = model.getSolObjVal(sol)
                Sol = Solution(model, sol, obj)
if primal_bound is not None:
objective_sense = model.getObjectiveSense()
if objective_sense == "minimize":
if obj >= primal_bound - 1e-8: continue
#model.addCons(model.getObjective() <= primal_bound + 1e-8)
else:
if obj <= primal_bound + 1e-8: continue
#model.addCons(model.getObjective() >= primal_bound - 1e-8)
for v in model.getVars():
v_name = v.name
v_value = Sol.value(v)
v_incumbent_value = incumbent_solution.value(v)
var_index_to_values[v_name].append(0 if round(v_value) == round(v_incumbent_value) else 1)
obj_list.append((obj, primal_bound))
log_entry['var_index_to_values'] = copy.deepcopy(var_index_to_values)
log_entry['primal_bounds'] = copy.deepcopy(obj_list)
#embed()
else:
log_entry['var_index_to_values'] = None
log_entry['primal_bounds'] = None
#log_entry['solving_time_calibrated'] = timer.elapsed_calibrated_time
#sol_data.write(log_entry, force_save_sol=True)
#print(sol)
return status, log_entry
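
# Illustrative call pattern (mirrors the calls made later in this file; the
# 120-second sub-MIP limit is the value used in run_LNS):
#   status, log_entry = scip_solve(sub_mip,
#                                  incumbent_solution=LNS_log[-1]['best_primal_scip_sol'],
#                                  primal_bound=LNS_log[-1]['primal_bound'],
#                                  scip_config={'limits/time': 120},
#                                  prev_LNS_log=LNS_log[-1])
# `status` holds the final MIP gap of the sub-problem and `log_entry` the new
# incumbent solution plus timing information.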
def get_LP_relaxation_solution(model):
LP_relaxation = pyscipopt.Model(sourceModel = model, origcopy = True)
for var in LP_relaxation.getVars():
LP_relaxation.chgVarType(var, 'C')
scip_solve_LP_relaxation_config = {
'limits/time' : 300,
}
#status, log_entry = scip_solve(LP_relaxation, scip_config = scip_solve_LP_relaxation_config)
return scip_solve(LP_relaxation, scip_config = scip_solve_LP_relaxation_config)
def random_sample_variable_based(model, G, variables_to_nodes, neighborhood_size, pre_selected_pivot = None, pivot_num = 1):
all_int_variables = [v.name for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
pivot_node = []
for i in range(pivot_num):
sample_var = random.choice(all_int_variables)
while variables_to_nodes[sample_var] in pivot_node:
sample_var = random.choice(all_int_variables)
pivot_node.append(variables_to_nodes[sample_var])
if pre_selected_pivot is not None:
pivot_node = [variables_to_nodes[var] for var in pre_selected_pivot]
destroy_nodes = pivot_node
current_group = pivot_node
top = [v for v in G.nodes() if G.nodes[v]['bipartite'] == 0]
pos = nx.bipartite_layout(G, top)
for u, v in G.edges():
assert(G.nodes[u]['bipartite'] == 0)
assert(G.nodes[v]['bipartite'] == 1)
while len(destroy_nodes) < neighborhood_size:
new_group = []
for v in current_group:
for n in G.neighbors(v):
new_group.append(n)
#print(G.in_degree(n))
assert(G.nodes[n]['bipartite'] == 1)
new_group = list(set(new_group))
G_predecessors = []
for v in new_group:
for n in G.predecessors(v):
if not (G.nodes[n]["scip_variable"] in all_int_variables):
continue
G_predecessors.append(n)
assert(G.nodes[n]['bipartite'] == 0)
#new_group = [n for v in current_group for n in G.neighbors(v)]
#G_predecessors = [n for v in new_group for n in G.predecessors(v)]
G_predecessors = list(set(G_predecessors) - set(destroy_nodes))
if len(G_predecessors) == 0: break
for v in G_predecessors:
assert G.nodes[v]['bipartite'] == 0, str(v)
if len(G_predecessors) + len(destroy_nodes) <= neighborhood_size:
destroy_nodes = destroy_nodes + G_predecessors
else:
destroy_nodes = destroy_nodes + random.sample(G_predecessors, neighborhood_size - len(destroy_nodes))
current_group = copy.deepcopy(G_predecessors)
for v in destroy_nodes:
assert(G.nodes[v]["scip_variable"] in all_int_variables)
destroy_variables = [G.nodes[v]["scip_variable"] for v in destroy_nodes]
assert(len(destroy_variables) <= neighborhood_size)
return destroy_variables
def normalize_score(score, neighborhood_size):
l = 0
r = 100
while r - l > 1e-8:
m = (l + r) * 0.5
tp_score = torch.pow(score, m)
tp_sum = torch.sum(tp_score).item()
if tp_sum > neighborhood_size:
l = m
else:
r = m
return torch.pow(score, l)
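
# normalize_score binary-searches an exponent p (between 0 and 100) such that
# sum(score ** p) is approximately neighborhood_size, where score is expected to
# hold sigmoid outputs in (0, 1). Sampling each variable independently with
# probability score[i] ** p then destroys about neighborhood_size variables in
# expectation. Illustrative sketch (the numbers are made up):
#   score = torch.tensor([0.9, 0.8, 0.1])
#   calibrated = normalize_score(score, neighborhood_size=1)  # sums to roughly 1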
def normalize_score2(logit, neighborhood_size):
l = 0
r = 1
while r - l > 1e-8:
m = (l + r) * 0.5
tp_logit = torch.mul(logit, m)
tp_score = torch.sigmoid(tp_logit)
tp_sum = torch.sum(tp_score).item()
if tp_sum < neighborhood_size:
r = m
else:
l = m
tp_logit = torch.mul(logit, l)
tp_score = torch.sigmoid(tp_logit)
return tp_score
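
# normalize_score2 performs the same calibration in logit space: it binary-searches
# a temperature t in [0, 1] so that sum(sigmoid(t * logit)) is close to
# neighborhood_size. It is used as a fallback when normalize_score overshoots the
# target (see the "numerical issues" branch in create_neighborhood_with_heuristic).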
#ML_info = (policy, observation, incumbent_history, LB_relaxation_history)
def create_neighborhood_with_heuristic(model, LNS_log, neighborhood_size = 20, heuristic = "RANDOM", bipartite_graph = None, variables_to_nodes = None, improved = None, num_samples = 30, eps_clip = 0.05, ML_info = None, original_neighborhood_size = None, get_num_solutions = 1):
if original_neighborhood_size is None:
original_neighborhood_size = neighborhood_size
all_variables = model.getVars()
all_int_variables = [v.name for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]] # currently only considering binary variables
objective_sense = model.getObjectiveSense()
obj_sense = 1 if objective_sense == "minimize" else -1
if heuristic == "RANDOM":
if neighborhood_size >= len(all_int_variables):
return all_int_variables, None
else:
return random.sample(all_int_variables, neighborhood_size), None
elif heuristic == "VARIABLE":
assert(bipartite_graph is not None)
assert(variables_to_nodes is not None)
return random_sample_variable_based(model, bipartite_graph, variables_to_nodes, neighborhood_size), None
elif "ML" in heuristic:
#embed()
ML_inference_start_time = time.monotonic()
assert ML_info is not None
local_branching_mip = pyscipopt.Model(sourceModel = model, origcopy = True)
#print(model)
#print(local_branching_mip)
incumbent_solution = LNS_log[-1]['best_primal_sol']
variables_equal_one = []
variables_equal_zero = []
all_int_variables = [v.name for v in local_branching_mip.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
for v in local_branching_mip.getVars():
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
if round(v_value) == 1:
variables_equal_one.append(v)
else:
variables_equal_zero.append(v)
#need to decide whether to use original neighborhood size or adaptive one
if "ORINH" in heuristic:
local_branching_mip.addCons(quicksum(v for v in variables_equal_zero) + quicksum( (1-v) for v in variables_equal_one) <= original_neighborhood_size)
print("constructed mip for local branching with neighorhood size %d" % (original_neighborhood_size))
else:
local_branching_mip.addCons(quicksum(v for v in variables_equal_zero) + quicksum( (1-v) for v in variables_equal_one) <= neighborhood_size)
print("constructed mip for local branching with neighorhood size %d" % (neighborhood_size))
int_var = [v for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
LB_relaxation_solution = []
if "feat1" in args.mode or "feat2" in args.mode:
#print("No LP solving")
for var in int_var:
LB_relaxation_solution.append(0)
LB_LP_relaxation_solution = LNS_log[-1]['best_primal_sol']
else:
LB_LP_relaxation_status, LB_LP_relaxation_log_entry = get_LP_relaxation_solution(local_branching_mip)
LB_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
for var in int_var:
LB_relaxation_solution.append(LB_LP_relaxation_solution.value(var))
#embed()
policy, observation, incumbent_history, _LB_relaxation_history = ML_info
LB_relaxation_history = copy.deepcopy(_LB_relaxation_history)
LB_relaxation_history.append(LB_relaxation_solution)
dynamic_features = torch.zeros((observation.column_features.shape[0], WIND_SIZE * 3), dtype = torch.float32)
number_of_history_added = 0
assert(len(incumbent_history) == len(LB_relaxation_history))
for i in reversed(range(len(LB_relaxation_history))):
dynamic_features[:, number_of_history_added*3] = torch.FloatTensor([1]*len(int_var))
dynamic_features[:, number_of_history_added*3+1] = torch.FloatTensor(incumbent_history[i])
if not ("feat1" in args.mode or "feat2" in args.mode):
dynamic_features[:, number_of_history_added*3+2] = torch.FloatTensor(LB_relaxation_history[i])
else:
dynamic_features[:, number_of_history_added*3+2] = torch.zeros(len(LB_relaxation_history[i]))
#print("No relaxation features")
number_of_history_added += 1
if number_of_history_added == WIND_SIZE:
break
observation.column_features[:, -WIND_SIZE * 3:] = dynamic_features
with torch.no_grad():
obs = (observation.row_features.to(DEVICE),
observation.edge_features.indices.to(DEVICE),
observation.edge_features.values.to(DEVICE),
observation.column_features.to(DEVICE))
logits = policy(*obs)
score = torch.sigmoid(logits)
info = dict()
#info["LB_gap"] = status
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
distribution_destroy_variable = []
all_int_variables = [v.name for v in int_var]
for i, v in enumerate(model.getVars()):
if v.name in all_int_variables:
v_value = score[i].item()
v_logit = logits[i].item()
distribution_destroy_variable.append((v.name, v_value, v_logit))
distribution_destroy_variable.sort(key = lambda x: x[2])
#from IPython import embed; embed();
num_cand = len(distribution_destroy_variable)
info = dict()
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
destroy_variables = []
ML_inference_end_time = time.monotonic()
print("ML inference time=", ML_inference_end_time-ML_inference_start_time)
info["ML_time"] = ML_inference_end_time-ML_inference_start_time
#embed()
best_primal_bound = None
if "SAMPLE" in heuristic:
#embed()
normalized_score = normalize_score(score, neighborhood_size)
if torch.sum(normalized_score).item() > neighborhood_size * 1.5: #numerical issues
normalized_score = normalize_score2(logits, neighborhood_size)
#embed()
for i, v in enumerate(model.getVars()):
if v.name in all_int_variables:
v_value = normalized_score[i].item() #score[i].item()
coin_flip = random.uniform(0, 1)
if coin_flip <= v_value:
destroy_variables.append(v.name)
return destroy_variables, info
elif "GREEDY" in heuristic:
return [v_name for v_name, _, __ in distribution_destroy_variable[-min(neighborhood_size, num_cand):]], info
else:
assert False, "Unknown sampling methods for ML"
return destroy_variables, info
elif heuristic.startswith("LOCAL"):
local_branching_mip = pyscipopt.Model(sourceModel = model, origcopy = True)
#print(model)
#print(local_branching_mip)
incumbent_solution = LNS_log[-1]['best_primal_sol']
variables_equal_one = []
variables_equal_zero = []
#embed()
all_int_variables = [v.name for v in local_branching_mip.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
for v in local_branching_mip.getVars():
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
if round(v_value) == 1:
variables_equal_one.append(v)
else:
variables_equal_zero.append(v)
original_LP_relaxation_status, original_LP_relaxation_log_entry = None, None # get_LP_relaxation_solution(local_branching_mip)
local_branching_mip.addCons(quicksum(v for v in variables_equal_zero) + quicksum( (1-v) for v in variables_equal_one) <= neighborhood_size)
print("constructed mip for local branching")
scip_solve_local_branching_config = {
'limits/time' : 3600 if "LONG" in heuristic else 600,
}
if args.mode == "COLLECT" or args.collect_along_test == 1:
scip_solve_local_branching_config['limits/time'] = COLLECT_SOLVE_TIME_LIMIT
destroy_variables = []
if "RELAXATION" in heuristic:
LB_LP_relaxation_status, LB_LP_relaxation_log_entry = get_LP_relaxation_solution(local_branching_mip)
#original_LP_relaxation_solution = original_LP_relaxation_log_entry['best_primal_sol']
original_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
LB_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
both_integer = 0
LB_integer = 0
original_integer = 0
for v in all_variables:
if v.name in all_int_variables:
                    v_original_value = original_LP_relaxation_solution.value(v)
                    v_LB_value = LB_LP_relaxation_solution.value(v)
                    if isInteger(v_original_value) and isInteger(v_LB_value):
                        both_integer += 1
                    elif isInteger(v_original_value):
original_integer +=1
elif isInteger(v_LB_value):
LB_integer += 1
#print("---LB LP runtime", LB_LP_relaxation_log_entry['solving_time'])#, "original LP runtime", original_LP_relaxation_log_entry['solving_time'])
#print("---both integer", both_integer, "original integer", original_integer, "LB integer", LB_integer)
#print("---selecting using LP relaxation")
same_integer_value_inc_and_LB_LP = 0
same_integer_value_LB_and_LB_LP = 0
if "RS" in heuristic:
distribution_destroy_variable = []
for v in all_variables:
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
#v_LB_value = local_branching_solution.value(v)
v_LB_LP_value = LB_LP_relaxation_solution.value(v)
distribution_destroy_variable.append((v.name, abs(v_LB_LP_value - v_value), v_value))
best_destroy_variables = None
best_primal_bound = None
NUM_OF_EXPERT_SAMPLES = num_samples
for _ in range(NUM_OF_EXPERT_SAMPLES):
tmp_destroy_variables = []
for v_name, prob, t in distribution_destroy_variable:
coin_flip = random.uniform(0, 1)
#if coin_flip <= max(min(1 - eps_clip, prob), eps_clip):
if coin_flip <= (1 - 2 * eps_clip) * prob + eps_clip:
tmp_destroy_variables.append(v_name)
if NUM_OF_EXPERT_SAMPLES == 1:
info = dict()
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
return tmp_destroy_variables, info
sub_mip = create_sub_mip(model, tmp_destroy_variables, LNS_log[-1]['best_primal_sol'])
scip_solve_destroy_config = {
'limits/time' : 120,
}
status, log_entry = scip_solve(sub_mip, primal_bound = LNS_log[-1]['primal_bound'],
scip_config = scip_solve_destroy_config)
print("sample improvement", log_entry['primal_bound'])
if best_destroy_variables is None or log_entry['primal_bound'] * obj_sense < best_primal_bound * obj_sense:
best_primal_bound = log_entry['primal_bound']
best_destroy_variables = copy.deepcopy(tmp_destroy_variables)
print("best destroy variable chosen with obj =", best_primal_bound)
info = dict()
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
info["num_ori_relax_integer"] = original_integer
info["num_LB_relax_integer"] = LB_integer
info["num_both_integer"] = both_integer
return best_destroy_variables, info
elif "MI" in heuristic or "LI" in heuristic:
distribution_destroy_variable = []
for v in all_variables:
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
#v_LB_value = local_branching_solution.value(v)
v_LB_LP_value = LB_LP_relaxation_solution.value(v)
if True or abs(v_LB_LP_value - v_value) > 1e-8:
#distribution_destroy_variable.append((v.name, max(abs(v_LB_LP_value - v_value), 1 - abs(v_LB_LP_value - v_value)), v_value))
distribution_destroy_variable.append((v.name, abs(v_LB_LP_value - v_value), v_value))
distribution_destroy_variable.sort(key = lambda x: x[1])
#from IPython import embed; embed();
num_cand = len(distribution_destroy_variable)
info = dict()
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
info["num_ori_relax_integer"] = original_integer
info["num_LB_relax_integer"] = LB_integer
info["num_both_integer"] = both_integer
if "MI" in heuristic:
return [v_name for v_name, _, __ in distribution_destroy_variable[:min(num_cand, neighborhood_size)]], info
else:
return [v_name for v_name, _, __ in distribution_destroy_variable[-min(neighborhood_size, num_cand):]], info
#elif "LI" in heuristic:
# pass
else:
for v in all_variables:
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
#v_LB_value = local_branching_solution.value(v)
v_LB_LP_value = LB_LP_relaxation_solution.value(v)
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) == round(v_value):
same_integer_value_inc_and_LB_LP += 1
#print("---selecting using LP relaxation")
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) != round(v_value):
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
else:
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
#print("---num same integer values LB and LB LP", same_integer_value_LB_and_LB_LP)
#print("---num same integer values inc and LB LP", same_integer_value_inc_and_LB_LP)
#print("---num destroy variables", len(destroy_variables))
if len(destroy_variables) > neighborhood_size:
destroy_variables = random.sample(destroy_variables, neighborhood_size)
#print("num of variables selected by LB relaxation", len(destroy_variables), "with LP obj =", LB_LP_relaxation_log_entry['primal_bound'])
info = dict()
#"num_LB_relax_integer"
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
info["num_ori_relax_integer"] = original_integer
info["num_LB_relax_integer"] = LB_integer
info["num_both_integer"] = both_integer
return destroy_variables, info
else:
status, log_entry = scip_solve(local_branching_mip,
incumbent_solution = incumbent_solution,
primal_bound = LNS_log[-1]['primal_bound'],
prev_LNS_log = LNS_log[-1],
scip_config = scip_solve_local_branching_config,
get_num_solutions = get_num_solutions)
local_branching_solution = log_entry['best_primal_sol']
LB_LP_relaxation_status, LB_LP_relaxation_log_entry = get_LP_relaxation_solution(local_branching_mip)
#original_LP_relaxation_solution = original_LP_relaxation_log_entry['best_primal_sol']
if LB_LP_relaxation_log_entry is None:
original_LP_relaxation_solution = local_branching_solution
LB_LP_relaxation_solution = local_branching_solution
else:
original_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
LB_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
tmp_observation = dict()
tmp_observation["selected_by_LB"] = []
tmp_observation["selected_by_LB_relax"] = []
same_integer_value_inc_and_LB_LP = 0
same_integer_value_LB_and_LB_LP = 0
all_variables = local_branching_mip.getVars()
for v in all_variables:
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
v_LB_value = local_branching_solution.value(v)
v_LB_LP_value = LB_LP_relaxation_solution.value(v)
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) == round(v_LB_value):
same_integer_value_LB_and_LB_LP += 1
if round(v_LB_LP_value) == round(v_value):
same_integer_value_inc_and_LB_LP += 1
if heuristic.endswith("RELAXATION"):
print("---selecting using LP relaxation")
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) != round(v_value):
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
else:
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
#tmp_observation.append((v.name, v_value, v_LB_value, v_LB_LP_value))
else:
if round(v_LB_value) == round(v_value): continue
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
tmp_observation["selected_by_LB"].append((v.name, v_value, v_LB_value, v_LB_LP_value))
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) != round(v_value):
#destroy_variables.append(v.getIndex())
tmp_observation["selected_by_LB_relax"].append((v.name, v_value, v_LB_value, v_LB_LP_value))
else:
#destroy_variables.append(v.getIndex())
#destroy_variables.append(v.name)
tmp_observation["selected_by_LB_relax"].append((v.name, v_value, v_LB_value, v_LB_LP_value))
#print("---num same integer values LB and LB LP", same_integer_value_LB_and_LB_LP)
#print("---num same integer values inc and LB LP", same_integer_value_inc_and_LB_LP)
#print("num of variables selected by LB", len(destroy_variables), "with obj =", log_entry['primal_bound'], "runtime =", log_entry['solving_time'])
#print("selected by LB =", tmp_observation["selected_by_LB"])
#print("selected by LB relax=", tmp_observation["selected_by_LB_relax"])
assert(heuristic.endswith("RELAXATION") or len(destroy_variables) <= neighborhood_size)
info = dict()
info["LB_primal_solution"] = log_entry["primal_bound"]
info["LB_gap"] = status
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
if get_num_solutions > 1:
info["multiple_solutions"] = copy.deepcopy(log_entry['var_index_to_values'])
info["multiple_primal_bounds"] = copy.deepcopy(log_entry['primal_bounds'])
#return random.sample(all_int_variables, neighborhood_size)
return destroy_variables, info
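
# create_sub_mip builds the repair problem for one LNS iteration: every integer
# variable that is NOT in destroy_variables has both of its bounds fixed to the
# incumbent value, while the destroyed variables remain free. If
# local_branching_distance is given, an additional constraint limits how many of
# the destroyed variables may flip away from the incumbent.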
def create_sub_mip(model, destroy_variables, incumbent_solution, local_branching_distance = None, mute = False):
sub_mip = pyscipopt.Model(sourceModel = model, origcopy = True)
num_free_variables = 0
all_variables = sub_mip.getVars()
if len(destroy_variables) > 0:
        if isinstance(destroy_variables[0], str):
destroy_variables_name = copy.deepcopy(destroy_variables)
else:
destroy_variables_name = [v.name for v in model.getVars() if v.getIndex() in destroy_variables]
else:
destroy_variables_name = []
variables_equal_one = []
variables_equal_zero = []
for v in all_variables:
if not (v.name in destroy_variables_name):
if not (v.vtype() in ["BINARY", "INTEGER"]):
continue
fixed_value = incumbent_solution.value(v)
sub_mip.chgVarLb(v, fixed_value)
sub_mip.chgVarLbGlobal(v, fixed_value)
sub_mip.chgVarUb(v, fixed_value)
sub_mip.chgVarUbGlobal(v, fixed_value)
#sub_mip.addCons(v >= fixed_value)
else:
            assert v.vtype() in ["BINARY", "INTEGER"], "destroy variable %s is not binary/integer but %s" % (v.name, v.vtype())
v_value = incumbent_solution.value(v)
if round(v_value) == 1:
variables_equal_one.append(v)
else:
variables_equal_zero.append(v)
num_free_variables += 1
if not mute:
print("num_free_variables =", num_free_variables)
if not (local_branching_distance is None):
if not mute:
print("added local branching constraint in sub-mip")
sub_mip.addCons(quicksum(v for v in variables_equal_zero) + quicksum( (1-v) for v in variables_equal_one) <= local_branching_distance)
return sub_mip
def get_bipartite_graph_representation(m, model): #m is a ecole mip model
model = m.as_pyscipopt()
bg = nx.DiGraph()
#don't know why ecole.observation.NodeBipartite() won't work properly
#implementing my own get_bipartite_graph_representation()
var_name_to_index = dict()
for var in model.getVars():
var_name_to_index[var.name] = var.getIndex()
num_var = model.getNVars()
num_cons = model.getNConss()
for i in range(num_var):
bg.add_node(i)
bg.nodes[i]['bipartite'] = 0
for i in range(num_cons):
bg.add_node(i+num_var)
bg.nodes[i+num_var]['bipartite'] = 1
all_constraints = model.getConss()
for i, cons in enumerate(all_constraints):
var_in_cons = model.getValsLinear(cons)
for key, value in var_in_cons.items():
var_index = var_name_to_index[key]
bg.add_edge(var_index, i + num_var)
all_variables = list(model.getVars())
variables_to_nodes = dict()
for i, feat_dict in bg.nodes(data = True):
if i < len(all_variables):
#assert(i == all_variables[i].getIndex())
feat_dict.update({"scip_variable": all_variables[i].name})
variables_to_nodes.update({all_variables[i].name: i})
else:
break
for u, v in bg.edges():
assert(bg.nodes[u]['bipartite'] == 0)
assert(bg.nodes[v]['bipartite'] == 1)
return bg, variables_to_nodes
def print_log_entry_to_file(save_to_file, LNS_log):
with open(save_to_file, "wb") as f:
for log_entry in LNS_log:
log_entry.pop('best_primal_sol', None)
log_entry.pop('best_primal_scip_sol', None)
pickle.dump(LNS_log, f)
def extract_root_features(m, args, id):
m.disable_presolve()
observation, khalil = make_obs((bgo.BipartiteGraphObservations(), ecole.observation.Khalil2016(pseudo_candidates = True)), m, branching = False)
extract_end_time = time.monotonic()
branching_vars = np.array([i for i in range(observation.column_features.shape[0])])
observation.add_khalil_features(khalil, branching_vars)
return observation
def load_policy_from_checkpoint(args):
policy = GNNPolicy(args.gnn_type)
try:
ckpt = torch.load(args.model, map_location=DEVICE)
try_again = False
except Exception as e:
print("Checkpoint " + args.model + " not found, bailing out: " + str(e))
sys.exit(1)
policy.load_state_dict(ckpt.state_dict())
#policy = policy.to(DEVICE)
#model_version = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print("Loaded checkpoint")
print(f"Will run evaluation on {DEVICE} device", flush=True)
return policy
def get_perturbed_samples(args, model, destroy_variables, LNS_log, scip_solve_destroy_config, new_improvement, num_of_samples_to_generate, int_var):
var_name_to_index = dict()
fixed_variables = []
for i, var in enumerate(int_var):
var_name_to_index[var.name] = i
if not (var.name in destroy_variables):
fixed_variables.append(var.name)
primal_bound = LNS_log[-1]['primal_bound']
objective_sense = model.getObjectiveSense()
obj_sense = 1 if objective_sense == "minimize" else -1
collected_samples = []
primal_bounds = []
negative_labels = []
#embed()
for num_of_replaced_variables in range(5, len(destroy_variables)-1, 5):
no_negative_sample = 0
for t in range(90):
perturbed_destroy_variables = random.sample(destroy_variables, len(destroy_variables) - num_of_replaced_variables) + random.sample(fixed_variables, num_of_replaced_variables)
sub_mip = create_sub_mip(model, perturbed_destroy_variables, LNS_log[-1]['best_primal_sol'], mute = True)
scip_solve_destroy_config = {
'limits/time' : 240, # 240 for facilities 120 for the others
}
status, log_entry = scip_solve(sub_mip, incumbent_solution = LNS_log[-1]['best_primal_scip_sol'],
primal_bound = LNS_log[-1]['primal_bound'], scip_config = scip_solve_destroy_config, timer = None, prev_LNS_log = LNS_log[-1], mute = True)
improvement = abs(primal_bound - log_entry["primal_bound"])
improved = (obj_sense * (primal_bound - log_entry["primal_bound"]) > 1e-5)
new_primal_bound = log_entry["primal_bound"]
if (not improved) or (improvement < 0.05 * new_improvement):
print(f"Found negative samples with {num_of_replaced_variables} replaced, primal bound = {primal_bound}, new primal bound = {new_primal_bound}")
negative_sample = [0] * len(int_var)
for var_name in perturbed_destroy_variables:
negative_sample[var_name_to_index[var_name]] = 1
collected_samples.append(negative_sample)
primal_bounds.append((log_entry["primal_bound"], primal_bound))
negative_labels.append(improvement)
no_negative_sample = 0
else:
no_negative_sample += 1
if no_negative_sample >= 10:
print(f"No negative samples for 10 consecutive samples with {num_of_replaced_variables} variables replaced")
break
#print(f"This is not negative samples, primal bound = {primal_bound}, new primal bound = {new_primal_bound}")
if len(collected_samples) == num_of_samples_to_generate:
return collected_samples, primal_bounds, negative_labels
return collected_samples, primal_bounds, negative_labels
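
# run_LNS drives the full large-neighborhood-search loop for a single instance:
# it finds an initial solution with SCIP, then repeatedly (1) selects a destroy
# set with the configured heuristic, (2) builds and solves the repair sub-MIP via
# create_sub_mip/scip_solve, and (3) in COLLECT mode (or with --collect-along-test)
# stores training datapoints whose positive samples come from local-branching
# solutions and whose negative samples come from get_perturbed_samples above.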
def run_LNS(m, args, id):
# m: ecole.scip.model, a mip model from ecole
instance_id = m
    if isinstance(instance_id, int):
loader = InstanceLoader(presolve = args.presolve, competition_settings = False)
for i, _m in enumerate(loader.load(args.problem_set)):
if i == instance_id:
m = _m
break
observation = None
if (args.mode in ["COLLECT", "TEST_ML"]) or ("TEST_ML" in args.mode):
print("Initializing Ecole for feature extraction...This might take a few minutes")
observation = extract_root_features(m, args, id)
    if isinstance(instance_id, int):
loader = InstanceLoader(presolve = args.presolve, competition_settings = False)
for i, _m in enumerate(loader.load(args.problem_set)):
if i == instance_id:
m = _m
break
model = m.as_pyscipopt()
int_var = [v for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
num_int_var = len(int_var) # currently only considering binary variables
args_neighborhood_size = args.neighborhood_size
if args.neighborhood_size == 0:
args.neighborhood_size = int(num_int_var * 0.2)
collection_local_branching_runtime = COLLECT_SOLVE_TIME_LIMIT
neighborhood_size = args.neighborhood_size
destroy_heuristic = args.destroy_heuristic
objective_sense = model.getObjectiveSense()
obj_sense = 1 if objective_sense == "minimize" else -1
print("Problem:",args.problem_set, instance_id)
print("Using destroy heuristics:", destroy_heuristic)
print("Neighborhood size:", neighborhood_size)
print("Preprocessing...")
if "VANILLA" in args.destroy_heuristic:
scip_log = run_vanilla_scip(model, args)
if args.save_log == 1:
print_log_entry_to_file("tmp/log/%s_%s_nhsize%d.txt"%(id, destroy_heuristic, 0), scip_log)
return
bg ,variables_to_nodes = get_bipartite_graph_representation(m, model)
if args.mode == "COLLECT" or args.collect_along_test == 1:
db = BipartiteGraphDataset(args.data_loc + "%s_%d.db"%(args.problem_set, instance_id))
#LB_relaxation_history = []
#incumbent_history = []
# find initial solution with SCIP
scip_solve_init_config = {
'limits/solutions' :10000,
'limits/time' : 610 if "MIPLIB" in args.problem_set else args.init_time_limit,
}
# scip_solve_init_config['limits/time'] = 300
timer = None
status, log_entry = scip_solve(model, scip_config = scip_solve_init_config, timer = timer,
get_initial_solution = True, isMIPLIB = "MIPLIB" in args.problem_set)
if log_entry is None:
print('Did not find incumbent solution for MIP: skipping this instance; try a longer runtime')
return
else:
print("initial solution obj =", log_entry['primal_bound'], "found in time", log_entry['solving_time'])
log_entry['limits/time'] = scip_solve_init_config['limits/time']
LNS_log = [log_entry]
improved = True
runtime_used = log_entry['solving_time']
count_no_improve = 0
print("solving steps limit =", args.num_solve_steps)
# initialize incumbent_history with the initial solution
if args.mode == "COLLECT" or "TEST_ML" in args.mode:
incumbent_solution = []
incumbent_history = []
improvement_history = []
LB_relaxation_history = []
for var in int_var:
incumbent_solution.append(log_entry["var_index_to_value"][var.name])
incumbent_history.append(incumbent_solution)
if "TEST_ML" in args.mode:
policy = load_policy_from_checkpoint(args)
policy = policy.to(DEVICE)
if "feat1" in args.mode:
observation.column_features[:, 23:] = torch.zeros(observation.column_features.shape[0], observation.column_features.shape[1]-23)
observation.column_features = torch.hstack((observation.column_features, torch.zeros(observation.column_features.shape[0], args.wind_size*3)))
#embed()
not_switched = True
if args.ml_neighborhood_size == 0:
args.ml_neighborhood_size = args.neighborhood_size
for s in range(args.num_solve_steps):
iteration_start_time = time.monotonic()
incumbent_solution = LNS_log[-1]['best_primal_scip_sol']
primal_bound = LNS_log[-1]['primal_bound']
ML_info = None
if "TEST_ML" in args.mode:
ML_info = (policy, observation, incumbent_history, LB_relaxation_history)
destroy_variables, info_destroy_heuristic = create_neighborhood_with_heuristic(model, LNS_log,
neighborhood_size = neighborhood_size, bipartite_graph = bg, variables_to_nodes = variables_to_nodes,
heuristic = destroy_heuristic, num_samples = args.num_samples, eps_clip = args.eps_clip,
ML_info = ML_info, original_neighborhood_size = args.ml_neighborhood_size, ## alert!!!
get_num_solutions = 20 if args.mode == "COLLECT" else 1)
#print("destroy variables =", destroy_variables)
if "CONSTRAINTED_REPAIR" in args.destroy_heuristic:
sub_mip = create_sub_mip(model, destroy_variables, LNS_log[-1]['best_primal_sol'], local_branching_distance = args.neighborhood_size)
else:
sub_mip = create_sub_mip(model, destroy_variables, LNS_log[-1]['best_primal_sol'])
#print("sub mip created =", sub_mip)
scip_solve_destroy_config = {
'limits/time' : 120,
}
if args.mode == "COLLECT":
scip_solve_destroy_config['limits/time'] = collection_local_branching_runtime
status, log_entry = scip_solve(sub_mip, incumbent_solution = incumbent_solution,
primal_bound = LNS_log[-1]['primal_bound'], scip_config = scip_solve_destroy_config, timer = timer, prev_LNS_log = LNS_log[-1])
iteration_end_time = time.monotonic()
log_entry["iteration_time"] = iteration_end_time - iteration_start_time
log_entry["selection_time"] = log_entry["iteration_time"] - log_entry["solving_time"]
if "ML" in args.mode and "ML" in destroy_heuristic:
log_entry["ML_time"] = info_destroy_heuristic["ML_time"]
else:
log_entry["ML_time"] = 0
log_entry["destroy_variables"] = destroy_variables
log_entry["destroy_heuristic"] = destroy_heuristic
log_entry["neighborhood_size"] = neighborhood_size
if info_destroy_heuristic and "num_LB_relax_integer" in info_destroy_heuristic:
log_entry["num_LB_relax_integer"] = info_destroy_heuristic["num_LB_relax_integer"]
if info_destroy_heuristic and "num_ori_relax_integer" in info_destroy_heuristic:
log_entry["num_ori_relax_integer"] = info_destroy_heuristic["num_ori_relax_integer"]
if info_destroy_heuristic and "num_both_integer" in info_destroy_heuristic:
log_entry["num_both_integer"] = info_destroy_heuristic["num_both_integer"]
improvement = abs(primal_bound - log_entry["primal_bound"])
improved = (obj_sense * (primal_bound - log_entry["primal_bound"]) > 1e-5)
if improved == False:
if round(neighborhood_size * args.adaptive) < round(num_int_var * 0.5):
neighborhood_size = round(neighborhood_size * args.adaptive)
count_no_improve = 0
else:
neighborhood_size = round(num_int_var * 0.5)
count_no_improve += 1
if "GREEDY" in destroy_heuristic:
destroy_heuristic = destroy_heuristic.replace("GREEDY", "SAMPLE")
else:
count_no_improve = 0
LNS_log.append(log_entry)
if "TEST_ML" in args.mode and improved == True:
LB_relaxation_solution = []
incumbent_solution = []
relaxation_value = info_destroy_heuristic["LB_LP_relaxation_solution"]
for var in int_var:
LB_relaxation_solution.append(relaxation_value.value(var))
incumbent_solution.append(log_entry["var_index_to_value"][var.name])
LB_relaxation_history.append(LB_relaxation_solution)
incumbent_history.append(incumbent_solution)
improvement_history.append(improvement)
if (args.mode == "COLLECT" and improved == True) or (args.collect_along_test == 1 and s % STEP_PER_COLLECT == 0):
if args.collect_along_test == 1:
destroy_variables, info_destroy_heuristic = create_neighborhood_with_heuristic(model, LNS_log[:-1],
neighborhood_size = args.neighborhood_size if args.ml_neighborhood_size == 0 else args.ml_neighborhood_size,
bipartite_graph = bg, variables_to_nodes = variables_to_nodes,
heuristic = "LOCAL_BRANCHING", num_samples = args.num_samples, eps_clip = args.eps_clip,
ML_info = ML_info, original_neighborhood_size = args.neighborhood_size if args.ml_neighborhood_size == 0 else args.ml_neighborhood_size,
get_num_solutions = 20)
print("destroy variables =", destroy_variables)
assert info_destroy_heuristic is not None
relaxation_value = info_destroy_heuristic["LB_LP_relaxation_solution"]
assert relaxation_value is not None
#candidate_scores = [0] * num_int_var
candidate_scores = []
LB_relaxation_solution = []
incumbent_solution = []
for var in int_var:
if var.name in destroy_variables:
candidate_scores.append(1)
else:
candidate_scores.append(0)
LB_relaxation_solution.append(relaxation_value.value(var))
incumbent_solution.append(log_entry["var_index_to_value"][var.name])
new_improvement = abs(primal_bound - info_destroy_heuristic["LB_primal_solution"])
new_improved = (obj_sense * (primal_bound - info_destroy_heuristic["LB_primal_solution"]) > 1e-5)
if args.mode == "COLLECT" or (args.collect_along_test == 1 and improved == False and new_improved == True):
LB_relaxation_history.append(LB_relaxation_solution)
incumbent_history.append(incumbent_solution)
improvement_history.append(improvement)
negative_samples, negative_info, negative_labels = get_perturbed_samples(args, model, destroy_variables, LNS_log[:-1], scip_solve_destroy_config, new_improvement, 90, int_var)
candidates = [str(var.name) for var in int_var]
candidate_choice = None
info = dict()
positive_samples = []
positive_labels = []
for i in range(len(info_destroy_heuristic["multiple_primal_bounds"])):
positive_sample = [0] * len(int_var)
for j, var in enumerate(int_var):
positive_sample[j] = info_destroy_heuristic["multiple_solutions"][var.name][i]
positive_samples.append(positive_sample)
obj_info = info_destroy_heuristic["multiple_primal_bounds"][i]
positive_labels.append( abs(obj_info[0] - obj_info[1]))
info["num_positive_samples"] = len(positive_samples)
info["positive_samples"] = positive_samples
info["positive_labels"] = positive_labels
info["num_negative_samples"] = len(negative_samples)
info["negative_samples"] = negative_samples
info["negative_labels"] = negative_labels
info["#iteration"] = s
info["instance_id"] = id
info["incumbent_history"] = incumbent_history
info["LB_relaxation_history"] = LB_relaxation_history
info["neighborhood_size"] = args.neighborhood_size
info["LB_gap"] = info_destroy_heuristic["LB_gap"]
info["primal_bound"] = log_entry["primal_bound"] if args.mode == "COLLECT" else info_destroy_heuristic["LB_primal_solution"]
info["LB_runtime"] = log_entry["iteration_time"]
candidate_scores = torch.LongTensor(np.array(candidate_scores, dtype = np.int32))
graph = BipartiteGraph(observation.row_features, observation.edge_features.indices,
observation.edge_features.values, observation.column_features[:,:95],
candidates, candidate_choice, candidate_scores, info,
                                   iteration = s, instance_id = id, incumbent_history = incumbent_history, LB_relaxation_history = LB_relaxation_history, improvement_history = improvement_history,
neighborhood_size = neighborhood_size)
if args.mode == "COLLECT" or (args.collect_along_test == 1 and new_improved == True):
assert len(LB_relaxation_history) + 1 == len(incumbent_history)
assert len(LB_relaxation_history) > 0
rslt = db.add(graph)
if not rslt:
print("Skipping duplicate datapoint")
else:
print("Saving to database")
if (improved == False and args.collect_along_test == 1 and new_improved == True):
LB_relaxation_history.pop()
incumbent_history.pop()
improvement_history.pop()
runtime_used += log_entry['iteration_time']
print("Finished LNS iteration #%d: obj_val = %.2f with time %.2f (total time used %.2f)" % (s, log_entry['primal_bound'], log_entry['iteration_time'], runtime_used))# -log_entry["ML_time"]))
if runtime_used >= args.time_limit: break
if args.save_log == 1:
print_log_entry_to_file("tmp/log/%s_%s_nhsize%d.txt"%(id, args.destroy_heuristic, args_neighborhood_size), LNS_log)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seed", default=0, type = int,
help="random seed")
parser.add_argument("--problem-set", default="INDSET_test",
help="Problem set")
parser.add_argument("--adaptive", default=1, type = float,
help = "adaptive neighborhood size")
parser.add_argument("--num-solve-steps", default=100000, type=int,
help="Number of LNS iterations")
parser.add_argument("--neighborhood-size", default=100, type=int,
help="Upper bound on the neighborhood size")
parser.add_argument("--ml-neighborhood-size", default=0, type=int,
help="ML neighborhood size")
parser.add_argument("--eps-clip", default=0.05, type=float,
help="Clipping on LB_relax::RS probablity, will affect actual neighborhood size")
parser.add_argument("--time-limit", default=3600, type=int,
help="Time limit per instance")
parser.add_argument("--init-time-limit", default=10, type=int,
help="Initial solution time limit")
parser.add_argument("--destroy-heuristic", default="RANDOM", type=str,
help="Destroy heuristics: RANDOM, LOCAL_BRANCHING, LOCAL_BRANCHING::RELAXATION, VARIABLE")
parser.add_argument("--mode", default="TEST", type=str,
help="Solving mode: COLLECT, TEST, TEST_ML")
parser.add_argument("--gnn-type", default="gat", type=str,
help="GNN type: gasse or gat")
parser.add_argument("--model", default=None, type=str,
help="Path to the ML model")
parser.add_argument("--num-samples", default=30, type=int,
help="Number of samples with sample-and-select-best heuristics")
parser.add_argument("--save-log", default=0, type = int,
help="save log (1) or not (0)")
parser.add_argument("--collect-along-test", default=0, type=int,
help="collect data along the trajectory generated by this one")
parser.add_argument("--wind-size", default=3, type = int,
help="window size = the number of past incumbent features in features")
parser.add_argument("--presolve", default=False, type = bool,
help="presolve or not")
args = parser.parse_args()
WIND_SIZE = args.wind_size
if args.mode == "COLLECT" or args.collect_along_test == 1:
if args.mode == "COLLECT":
args.destroy_heuristic = "LOCAL_BRANCHING"
try:
os.mkdir("training_data")
except OSError as error:
print(error)
try:
os.mkdir("training_data/" + args.problem_set)
except OSError as error:
print(error)
args.data_loc = "training_data/" + args.problem_set + "/"
print(args)
random.seed(args.seed)
    loader = InstanceLoader(presolve = args.presolve, competition_settings = False)  # presolve defaults to False; if you change it here, also change the InstanceLoader calls in run_LNS
if args.destroy_heuristic == "VANILLA":
args.adaptive = 1
for i, m in enumerate(loader.load(args.problem_set)):
model = m.as_pyscipopt()
#all_int_variables = [v.getIndex() for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
name = args.problem_set + str(i)
if args.adaptive > 1:
name = args.problem_set + str(round(args.adaptive*100)) + "_" + str(i)
if args.mode == "COLLECT" or args.collect_along_test == 1:
name += "COLLECT"
run_LNS(i, args, id = name)
print("Finish LNS for MIP solving")
| CL-LNS-main | LNS.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pyscipopt as scip
# Wraps the SCIP solver under a common API
class ScipSolver:
def __init__(self, timeout_s=None):
self.constraints = []
self.maximize = True
self.timeout = timeout_s
self.model = scip.Model()
def create_integer_var(self, name, lower_bound, upper_bound):
v = self.model.addVar(name=name, lb=lower_bound, ub=upper_bound, vtype="I")
return v
def create_real_var(self, name, lower_bound, upper_bound):
v = self.model.addVar(name=name, lb=lower_bound, ub=upper_bound, vtype="C")
return v
def create_binary_var(self, name):
v = self.model.addVar(name=name, vtype="B")
return v
def set_objective_function(self, equation, maximize=True):
self.model.setObjective(equation)
if maximize:
self.model.setMaximize()
else:
self.model.setMinimize()
def add_constraint(self, cns):
self.model.addCons(cns)
def disable_presolver(self):
self.model.setPresolve(scip.SCIP_PARAMSETTING.OFF)
self.model.setBoolParam("lp/presolving", False)
def disable_cuts(self):
self.model.setSeparating(scip.SCIP_PARAMSETTING.OFF)
def disable_heuristics(self):
self.model.setHeuristics(scip.SCIP_PARAMSETTING.OFF)
def solve(self):
# Solve the problem. Return the result as a dictionary of values
# indexed by the corresponding variables or an empty dictionary if the
# problem is infeasible.
if self.timeout:
self.model.setParam('limits/time', self.timeout)
self.model.optimize()
sol = None
if self.model.getNSols() > 0:
sol = self.model.getBestSol()
return sol
def primal_dual_gap(self):
return (self.model.getObjVal(), self.model.getDualbound())
def primal_dual_integral(self):
# TBD
return None
def load(self, mps_filename):
self.model.readProblem(mps_filename)
def export(self, lp_output_filename):
assert False
def as_scip_model(self):
return self.model
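
# Illustrative usage sketch (not part of the original module; assumes SCIP and
# pyscipopt are installed):
#   solver = ScipSolver(timeout_s=60)
#   x = solver.create_binary_var("x")
#   y = solver.create_integer_var("y", 0, 10)
#   solver.add_constraint(x + y <= 5)
#   solver.set_objective_function(x + 2 * y, maximize=True)
#   sol = solver.solve()
#   if sol is not None:
#       print(sol[x], sol[y])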
| CL-LNS-main | scip_solver.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import torch
import torch.nn.init as init
#from neural_nets import prenorm
# GINConv network derived from https://arxiv.org/abs/1810.00826
# Added the ability to embed edge information as well.
class GINConv(torch_geometric.nn.MessagePassing):
def __init__(self, eps: float = 0.5, train_eps: bool = True):
#kwargs.setdefault('aggr', 'add')
#super(GINEConv, self).__init__(**kwargs)
super().__init__('add')
emb_size = 64
#self.final_norm = prenorm.Prenorm(emb_size, shift=False)
#self.feature_module_final = torch.nn.Sequential(
# self.final_norm,
# torch.nn.ReLU(),
# torch.nn.Linear(emb_size, emb_size)
#)
#self.feature_module_final = torch.nn.ReLU()
#self.post_conv_module = prenorm.Prenorm(emb_size, shift=False)
# output_layers
self.output_module = torch.nn.Sequential(
torch.nn.Linear(emb_size, emb_size * 2),
torch.nn.ReLU(),
torch.nn.Linear(emb_size * 2, emb_size * 4),
torch.nn.ReLU(),
torch.nn.Linear(emb_size * 4, emb_size * 2),
torch.nn.ReLU(),
torch.nn.Linear(emb_size * 2, emb_size),
)
#self.nn = nn
self.initial_eps = eps
if train_eps:
self.eps = torch.nn.Parameter(torch.Tensor([eps]))
else:
self.register_buffer('eps', torch.Tensor([eps]))
self.reset_parameters()
def reset_parameters(self):
for t in self.parameters():
if len(t.shape) == 2:
init.orthogonal_(t)
else:
init.normal_(t)
self.eps.data.fill_(self.initial_eps)
# def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
# edge_attr: OptTensor = None, size: Size = None) -> Tensor:
# """"""
# if isinstance(x, Tensor):
# x: OptPairTensor = (x, x)
# # Node and edge feature dimensionalites need to match.
# if isinstance(edge_index, Tensor):
# assert edge_attr is not None
# assert x[0].size(-1) == edge_attr.size(-1)
# elif isinstance(edge_index, SparseTensor):
# assert x[0].size(-1) == edge_index.size(-1)
# # propagate_type: (x: OptPairTensor, edge_attr: OptTensor)
# out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)
# x_r = x[1]
# if x_r is not None:
# out += (1 + self.eps) * x_r
# return self.nn(out)
#def message(self, x_j: Tensor, edge_attr: Tensor) -> Tensor:
#return F.relu(x_j + edge_attr)
def freeze_normalization(self):
pass
#self.final_norm.freeze_normalization()
#self.post_conv_module.freeze_normalization()
def forward(self, left_features, edge_indices, edge_features, right_features):
"""
This method sends the messages, computed in the message method.
"""
output = self.propagate(edge_indices, size=(left_features.shape[0], right_features.shape[0]),
node_features=(left_features, right_features), edge_features=edge_features)
output += (1 + self.eps) * right_features
return self.output_module(output)
def message(self, node_features_j, edge_features):
output = torch.nn.functional.relu(node_features_j + edge_features)
return output
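
# Shape convention for this half-convolution: left_features is (num_left, 64),
# right_features is (num_right, 64), edge_indices is a (2, num_edges) index
# tensor whose first row points into left_features and whose second row points
# into right_features, and edge_features is (num_edges, 64). The result has the
# same shape as right_features.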
| CL-LNS-main | neural_nets/gin_convolution.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import torch
import torch.nn.init as init
from neural_nets import prenorm
# GATConvolution network derived https://arxiv.org/abs/2105.14491
# Added edge embedding as well
class GATConvolution(torch_geometric.nn.MessagePassing):
"""
    Graph convolution layer. This is the heart of our GNNPolicy.
"""
def __init__(self,
negative_slope: float = 0.2, dropout: float = 0.,
**kwargs):
super().__init__('add')
emb_size = 64
self.heads = 8
self.in_channels = emb_size
self.out_channels = emb_size // self.heads
self.negative_slope = negative_slope
self.dropout = dropout
self.lin_l = torch.nn.Linear(self.in_channels, self.heads * self.out_channels, bias=True)
self.lin_r = torch.nn.Linear(self.in_channels, self.heads * self.out_channels, bias=True)
self.att = torch.nn.Parameter(torch.Tensor(1, self.heads, self.out_channels * 3))
# output_layer
self.output_module = torch.nn.Sequential(
torch.nn.Linear(2*emb_size, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size),
)
self.reset_parameters()
def reset_parameters(self):
init.orthogonal_(self.lin_l.weight)
init.orthogonal_(self.lin_r.weight)
init.orthogonal_(self.att)
def freeze_normalization(self):
pass
def reset_normalization(self):
pass
@property
def frozen(self):
return False
def forward(self, left_features, edge_indices, edge_features, right_features):
"""
This method sends the messages, computed in the message method.
"""
H, C = self.heads, self.out_channels
x_l = self.lin_l(left_features)
x_r = self.lin_r(right_features)
out = self.propagate(edge_indices, x=(x_l, x_r), size=(left_features.shape[0], right_features.shape[0]), edge_features=edge_features)
return self.output_module(torch.cat([out, right_features], dim=-1))
def message(self, x_j, x_i,
index,
edge_features):
x = torch.cat([x_i, x_j, edge_features], dim=-1)
x = torch.nn.functional.leaky_relu(x, self.negative_slope)
x = x.view(-1, self.heads, self.out_channels * 3)
alpha = (x * self.att).sum(dim=-1)
alpha = torch_geometric.utils.softmax(alpha, index)
alpha = torch.nn.functional.dropout(alpha, p=self.dropout, training=self.training)
x = x_j.view(-1, self.heads, self.out_channels) * alpha.unsqueeze(-1)
return x.view(-1, self.heads * self.out_channels)
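
# Attention scores are computed from the projected target, source and edge
# embeddings (concatenated, passed through a leaky ReLU and a learned per-head
# attention vector), softmax-normalised over each target node's incoming edges,
# and used to weight the projected source features. output_module then combines
# the aggregated messages with the target's own features.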
| CL-LNS-main | neural_nets/gat_convolution.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class LogScoreLoss(torch.nn.Module):
"""
Loss function to weight sample loss by confidence in the target value
"""
def __init__(self):
super().__init__()
self.register_buffer("eps", torch.tensor([1e-6]))
def weight(self, input, target):
max_tgt = torch.max(target, dim=-1, keepdim=True).values
return torch.maximum(input, target) / max_tgt
def forward(self, input, target):
# Avoid division by zero
target = torch.maximum(target, self.eps)
main_loss = torch.log(input / target).abs()
# Handle predictions smaller than eps
neg_domain = (input / target - self.eps).abs() + torch.log(self.eps).abs()
loss = torch.where(input / target < self.eps, neg_domain, main_loss)
assert not torch.isnan(loss).any()
weighted = loss * self.weight(input, target)
assert not torch.isnan(weighted).any()
return weighted.mean()
class LinearScoreLoss(torch.nn.Module):
"""
Loss function to weight sample loss by confidence in the target value
"""
def __init__(self):
super().__init__()
self.register_buffer("eps", torch.tensor([1e-6]))
def weight(self, input, target):
max_tgt = torch.max(target, dim=-1, keepdim=True).values
return torch.maximum(input, target) / max_tgt
def forward(self, input, target):
# Avoid division by zero
target = torch.maximum(target, self.eps)
loss = (input - target).abs() / target
weighted = loss * self.weight(input, target)
return weighted.mean()
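
# Both losses clamp the targets to eps to avoid division by zero and weight each
# element by max(input, target) divided by the row-wise maximum target, so
# high-scoring samples dominate the average. Illustrative sketch (the numbers are
# made up):
#   loss_fn = LogScoreLoss()
#   pred = torch.tensor([[0.5, 2.0]])
#   tgt = torch.tensor([[1.0, 1.0]])
#   loss = loss_fn(pred, tgt)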
| CL-LNS-main | neural_nets/losses.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn.init as init
from neural_nets import gat_convolution
from neural_nets import gin_convolution
from neural_nets import gasse_convolution
from neural_nets import prenorm
# Implements the branching policy described in
# https://papers.nips.cc/paper/2019/hash/d14c2267d848abeb81fd590f371d39bd-Abstract.html
class GNNPolicy(torch.nn.Module):
def __init__(self, gnn_type="gasse"):
super().__init__()
emb_size = 64
cons_nfeats = 10
edge_nfeats = 2
var_nfeats = 104 # hard-coded feature width; ideally this would be derived from the dataset
# Constraint embedding
self.cons_norm = prenorm.Prenorm(cons_nfeats)
self.cons_embedding = torch.nn.Sequential(
self.cons_norm,
torch.nn.Linear(cons_nfeats, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size),
torch.nn.ReLU(),
)
# Edge embedding
self.edge_norm = prenorm.Prenorm(edge_nfeats)
self.edge_embedding = torch.nn.Sequential(
self.edge_norm,
torch.nn.Linear(edge_nfeats, emb_size),
)
#self.edge_embedding = torch.nn.Linear(edge_nfeats, emb_size)
# Variable embedding
self.var_norm = prenorm.Prenorm(var_nfeats, preserve_features=[2])
self.var_embedding = torch.nn.Sequential(
self.var_norm,
torch.nn.Linear(var_nfeats, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size),
torch.nn.ReLU(),
)
if gnn_type == "gasse":
self.conv_v_to_c = gasse_convolution.GasseConvolution()
self.conv_c_to_v = gasse_convolution.GasseConvolution()
elif gnn_type == "gin":
self.conv_v_to_c = gin_convolution.GINConv()
self.conv_c_to_v = gin_convolution.GINConv()
else:
self.conv_v_to_c = gat_convolution.GATConvolution()
self.conv_c_to_v = gat_convolution.GATConvolution()
self.output_module = torch.nn.Sequential(
torch.nn.Linear(emb_size, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, 1, bias=False),
)
self.reset_parameters()
def reset_parameters(self):
for t in self.parameters():
if len(t.shape) == 2:
init.orthogonal_(t)
else:
init.normal_(t)
def freeze_normalization(self):
if not self.cons_norm.frozen:
self.cons_norm.freeze_normalization()
self.edge_norm.freeze_normalization()
self.var_norm.freeze_normalization()
self.conv_v_to_c.reset_normalization()
self.conv_c_to_v.reset_normalization()
return False
if not self.conv_v_to_c.frozen:
self.conv_v_to_c.freeze_normalization()
self.conv_c_to_v.reset_normalization()
return False
if not self.conv_c_to_v.frozen:
self.conv_c_to_v.freeze_normalization()
return False
return True
def forward(self, constraint_features, edge_indices, edge_features, variable_features):
reversed_edge_indices = torch.stack([edge_indices[1], edge_indices[0]], dim=0)
# First step: linear embedding layers to a common dimension (64)
constraint_features = self.cons_embedding(constraint_features)
edge_features = self.edge_embedding(edge_features)
variable_features = self.var_embedding(variable_features)
# Two half convolutions
constraint_features = self.conv_v_to_c(variable_features, reversed_edge_indices, edge_features, constraint_features)
variable_features = self.conv_c_to_v(constraint_features, edge_indices, edge_features, variable_features)
# A final MLP on the variable features
output = self.output_module(variable_features).squeeze(-1)
return output
| CL-LNS-main | neural_nets/gnn_policy.py |
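A minimal smoke test of the policy on a random bipartite graph, assuming torch_geometric is installed and this repository's neural_nets package is importable. The graph itself is synthetic, and the feature widths (10 constraint features, 2 edge features, 104 variable features) follow the constants hard-coded in the constructor above.

import torch
from neural_nets.gnn_policy import GNNPolicy

torch.manual_seed(0)
n_cons, n_vars, n_edges = 5, 8, 20
constraint_features = torch.randn(n_cons, 10)
variable_features = torch.randn(n_vars, 104)
# Row 0 indexes constraints, row 1 indexes variables, as expected by forward().
edge_indices = torch.stack([
    torch.randint(0, n_cons, (n_edges,)),
    torch.randint(0, n_vars, (n_edges,)),
])
edge_features = torch.randn(n_edges, 2)

policy = GNNPolicy(gnn_type="gasse")
policy.eval()  # keep the Prenorm running statistics untouched for this smoke test
with torch.no_grad():
    scores = policy(constraint_features, edge_indices, edge_features, variable_features)
print(scores.shape)  # one score per variable: torch.Size([8])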
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class PrenormOld(torch.nn.Module):
def __init__(self, num_features, shift=True, scale=True, eps=1e-5):
super().__init__()
self.num_features = num_features
self.register_buffer("eps", torch.tensor([eps], requires_grad=False))
self.reset_normalization()
def _check_input_dim(self, input):
if input.dim() != 2:
raise ValueError("expected a 2D input (got {}D input)".format(input.dim()))
def freeze_normalization(self):
self.frozen = torch.tensor([True], dtype=torch.bool)
def reset_normalization(self):
self.register_buffer("running_mean", torch.zeros([self.num_features]))
self.register_buffer("running_var", torch.ones([self.num_features]))
self.register_buffer("num_batches_tracked", torch.tensor(0, dtype=torch.long, requires_grad=False))
self.register_buffer("frozen", torch.tensor([False], dtype=torch.bool, requires_grad=False))
# The input format should be [batch, features]
def forward(self, input):
self._check_input_dim(input)
running_mean = self.running_mean
if self.training and not self.frozen:
batch_mean = torch.mean(input, dim=[0])
batch_var = torch.mean((input - batch_mean).pow(2), dim=[0])
if self.num_batches_tracked < 10000:
momentum = (
float(self.num_batches_tracked) / (self.num_batches_tracked + 1)
)
else:
momentum = 0.9999
self.num_batches_tracked += 1
running_mean = (
momentum * self.running_mean + (1.0 - momentum) * batch_mean
).detach()
self.running_mean = running_mean.detach()
self.running_var = (
momentum * self.running_var + (1.0 - momentum) * batch_var
).detach()
return (input - self.running_mean) / torch.sqrt(
torch.max(self.eps, self.running_var)
)
class Prenorm(torch.nn.Module):
def __init__(self, num_features, shift=True, scale=True, preserve_features=[]):
super().__init__()
self.num_features = num_features
self.preserve_features = preserve_features
self.register_buffer("avg", torch.zeros([num_features], dtype=torch.double))
self.register_buffer("var", torch.zeros([num_features], dtype=torch.double))
self.register_buffer("count", torch.zeros([1]))
self.register_buffer("frozen", torch.tensor([False], dtype=torch.bool, requires_grad=False))
if shift:
self.register_buffer("shift", torch.zeros([num_features]))
else:
self.shift = None
if scale:
self.register_buffer("scale", torch.ones([num_features]))
else:
self.scale = None
def freeze_normalization(self):
self.frozen = torch.tensor([True], dtype=torch.bool).detach()
def reset_normalization(self):
self.avg.zero_()
self.var.zero_()
self.count.zero_()
self.count += 1
self.frozen.zero_()
def forward(self, input):
if self.training and not self.frozen:
# Online mean and variance estimation from Chan et al
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
assert len(input.shape) == 2
assert self.num_features == input.shape[-1], f"Expected input dimension of size {self.num_features}, got {input.shape[-1]}."
with torch.no_grad():
assert not torch.isnan(input).any()
assert not torch.isnan(self.var).any()
assert not torch.isnan(self.scale).any()
assert not torch.isnan(self.count).any()
sample_count = float(input.shape[0])
sample_var, sample_avg = torch.var_mean(input.to(torch.float64), dim=0)
assert not torch.isnan(sample_avg).any()
assert not torch.isnan(sample_var).any()
delta = sample_avg - self.avg
assert self.count + sample_count > 0
m2 = (self.var * self.count + sample_var * sample_count + torch.square(delta) * self.count * sample_count / (
self.count + sample_count))
assert not torch.isnan(m2).any()
self.avg = (self.avg * self.count + sample_avg * sample_count) / (self.count + sample_count)
assert not torch.isnan(self.avg).any()
self.count += sample_count
self.var = m2 / self.count
if self.shift is not None:
self.shift = -self.avg.to(torch.float32)
assert not torch.isnan(self.shift).any()
if self.scale is not None:
var = torch.where(torch.eq(self.var, 0), self.var.new_ones([self.num_features]), self.var)
assert not torch.isnan(var).any()
#assert not torch.isinf(var).any()
assert (var > 0).all()
self.scale = torch.rsqrt(var).to(torch.float32)
assert not torch.isnan(self.scale).any()
for f in self.preserve_features:
self.shift[f] = 0.0
self.scale[f] = 1.0
output = input
if self.shift is not None:
output = output + self.shift
if self.scale is not None:
output = output * self.scale
assert not torch.any(torch.isnan(output))
return output
| CL-LNS-main | neural_nets/prenorm.py |
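A quick sketch of using Prenorm as a standalone module, assuming this file is importable as neural_nets.prenorm. The batches below are random and only serve to show that statistics are accumulated in training mode and can then be frozen.

import torch
from neural_nets.prenorm import Prenorm

norm = Prenorm(num_features=4)
norm.train()                  # running statistics are only updated in training mode
x = torch.randn(32, 4)
y = norm(x)                   # shifted by the running mean, scaled by 1/std
norm.freeze_normalization()   # later batches reuse the accumulated shift and scale
z = norm(torch.randn(8, 4))
print(y.shape, z.shape, bool(norm.frozen))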
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import torch
import torch.nn.init as init
from neural_nets import prenorm
# Implements the graph convolution described in
# https://papers.nips.cc/paper/2019/hash/d14c2267d848abeb81fd590f371d39bd-Abstract.html
class GasseConvolution(torch_geometric.nn.MessagePassing):
"""
Graph convolution layer. This is the heart of our GNNPolicy.
"""
def __init__(self):
super().__init__('add')
emb_size = 64
self.feature_module_left = torch.nn.Linear(emb_size, emb_size)
self.feature_module_edge = torch.nn.Linear(emb_size, emb_size, bias=False)
self.feature_module_right = torch.nn.Linear(emb_size, emb_size, bias=False)
self.final_norm = prenorm.Prenorm(emb_size, shift=False)
self.feature_module_final = torch.nn.Sequential(
self.final_norm,
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size)
)
self.post_conv_module = prenorm.Prenorm(emb_size, shift=False)
# output_layers
self.output_module = torch.nn.Sequential(
torch.nn.Linear(2*emb_size, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size),
)
self.reset_parameters()
def reset_parameters(self):
for t in self.parameters():
if len(t.shape) == 2:
init.orthogonal_(t)
else:
init.normal_(t)
def reset_normalization(self):
self.final_norm.reset_normalization()
self.post_conv_module.reset_normalization()
@property
def frozen(self):
return self.final_norm.frozen and self.post_conv_module.frozen
def freeze_normalization(self):
if not self.final_norm.frozen:
self.final_norm.freeze_normalization()
self.post_conv_module.reset_normalization()
return False
if not self.post_conv_module.frozen:
self.post_conv_module.freeze_normalization()
return False
return True
def forward(self, left_features, edge_indices, edge_features, right_features):
"""
This method sends the messages, computed in the message method.
"""
output = self.propagate(edge_indices, size=(left_features.shape[0], right_features.shape[0]),
node_features=(left_features, right_features), edge_features=edge_features)
return self.output_module(torch.cat([self.post_conv_module(output), right_features], dim=-1))
def message(self, node_features_i, node_features_j, edge_features):
output = self.feature_module_final(self.feature_module_left(node_features_i)
+ self.feature_module_edge(edge_features)
+ self.feature_module_right(node_features_j))
return output
| CL-LNS-main | neural_nets/gasse_convolution.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| CL-LNS-main | ml4co/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import hypothesis
import hypothesis.strategies as st
import unittest
import torch
import torch.nn.functional as F
from ml4co.ops.split_and_pad import SplitAndPadFunction
def split_and_pad_ref(input, sizes, feature_size=0, padding_value=0):
feature_size = max(feature_size, sizes.max().item())
inputs = input.split(sizes.detach().cpu().tolist())
outputs = [
F.pad(x, (0, feature_size - x.size(0)), "constant", padding_value)
for x in inputs
]
return torch.stack(outputs, dim=0)
class SplitAndPadFunctionTest(unittest.TestCase):
def setUp(self):
self.split_and_pad = SplitAndPadFunction.apply
@hypothesis.given(
batch_size=st.integers(1, 200),
inner_size=st.integers(10, 500),
feature_size=st.sampled_from([0, 500]),
padding_value=st.floats(min_value=-10.0, max_value=10.0),
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]))
@hypothesis.settings(deadline=None)
def test_forward(self, batch_size, inner_size, feature_size, padding_value,
device):
sizes = torch.randint(low=1,
high=inner_size,
size=(batch_size, ),
device=device)
input_size = sizes.sum().item()
x = torch.randn(input_size, device=device)
y = self.split_and_pad(x, sizes, feature_size, padding_value)
y_ref = split_and_pad_ref(x, sizes, feature_size, padding_value)
torch.testing.assert_allclose(y, y_ref)
@hypothesis.given(
batch_size=st.integers(1, 100),
inner_size=st.integers(10, 500),
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]))
@hypothesis.settings(deadline=None)
def test_backward(self, batch_size, inner_size, device):
sizes = torch.randint(low=1,
high=inner_size,
size=(batch_size, ),
device=device)
input_size = sizes.sum().item()
x = torch.randn(input_size, device=device)
x_ref = x.detach().clone()
x.requires_grad_(True)
x_ref.requires_grad_(True)
y = self.split_and_pad(x, sizes)
y_ref = split_and_pad_ref(x_ref, sizes)
dy = torch.randn_like(y)
y.backward(dy)
y_ref.backward(dy)
torch.testing.assert_allclose(x.grad, x_ref.grad)
if __name__ == "__main__":
unittest.main()
| CL-LNS-main | ml4co/ops/split_and_pad_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import hypothesis
import hypothesis.strategies as st
import unittest
import torch
from ml4co.ops.prenorm import PrenormFunction
def prenorm_ref(input, running_m0, running_m1, running_m2, eps):
m0 = input.size(0)
m2, m1 = torch.var_mean(input, dim=0, unbiased=False)
n = m0 + running_m0
c = 0 if n == 0 else running_m0 / n
delta = running_m1 - m1
m1 += c * delta
m2 = m2 * m0 + running_m2 + delta.square() * c * m0
scale = (m2 / n + eps).rsqrt()
bias = -scale * m1
return m1, m2, scale, bias
class PrenormFunctionTest(unittest.TestCase):
def setUp(self):
self.prenorm = PrenormFunction.apply
@hypothesis.given(
outer_size=st.integers(2, 100),
inner_size=st.integers(1, 200),
running_m0=st.integers(0, 10),
eps=st.floats(min_value=0, max_value=1e-3),
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]))
@hypothesis.settings(deadline=None)
def test_prenorm(self, outer_size, inner_size, running_m0, eps, device):
x = torch.randn(outer_size, inner_size, device=device)
if running_m0 == 0:
running_m1 = torch.zeros((inner_size, ), device=device)
running_m2 = torch.zeros((inner_size, ), device=device)
else:
running_m1 = torch.randn((inner_size, ), device=device)
running_m2 = torch.randn((inner_size, ), device=device)
m1, m2, scale, bias = self.prenorm(x, running_m0, running_m1,
running_m2, eps)
m1_ref, m2_ref, scale_ref, bias_ref = prenorm_ref(
x, running_m0, running_m1, running_m2, eps)
torch.testing.assert_allclose(m1, m1_ref)
torch.testing.assert_allclose(m2, m2_ref)
torch.testing.assert_allclose(scale, scale_ref)
torch.testing.assert_allclose(bias, bias_ref)
if __name__ == "__main__":
unittest.main()
| CL-LNS-main | ml4co/ops/prenorm_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| CL-LNS-main | ml4co/ops/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
import ml4co
torch.ops.load_library(
os.path.join(os.path.dirname(os.path.dirname(ml4co.__file__)),
"libml4co_ops.so"))
class SplitAndPadFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, sizes, feature_size=0, padding_value=0.0):
ctx.save_for_backward(sizes)
return torch.ops.ml4co_ops.split_and_pad(input, sizes, feature_size,
padding_value)
@staticmethod
def backward(ctx, grad_output):
sizes, = ctx.saved_tensors
grad_input = torch.ops.ml4co_ops.split_and_pad_backward(
grad_output, sizes)
return grad_input, None, None, None
| CL-LNS-main | ml4co/ops/split_and_pad.py |
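The autograd function above dispatches to a compiled kernel that is only available once libml4co_ops.so has been built next to the ml4co package. As a rough illustration of the intended semantics only (not of the compiled operator itself), the pure-PyTorch equivalent below mirrors the reference implementation used in the unit test; the input tensors are made up for the example.

import torch
import torch.nn.functional as F

def split_and_pad_reference(values, sizes, padding_value=0.0):
    # Split a flat tensor into variable-length rows and right-pad them
    # into a dense [batch, max_size] matrix.
    max_size = int(sizes.max())
    chunks = values.split(sizes.tolist())
    return torch.stack(
        [F.pad(c, (0, max_size - c.numel()), value=padding_value) for c in chunks]
    )

values = torch.tensor([1., 2., 3., 4., 5., 6.])
sizes = torch.tensor([2, 1, 3])
print(split_and_pad_reference(values, sizes))
# tensor([[1., 2., 0.],
#         [3., 0., 0.],
#         [4., 5., 6.]])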
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
import ml4co
torch.ops.load_library(
os.path.join(os.path.dirname(os.path.dirname(ml4co.__file__)),
"libml4co_ops.so"))
class PrenormFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, running_m0, running_m1, running_m2, eps=0.0):
return torch.ops.ml4co_ops.prenorm(input, running_m0, running_m1,
running_m2, eps)
@staticmethod
def backward(ctx, grad_m1, grad_m2, grad_scale, grad_bias):
raise NotImplementedError
| CL-LNS-main | ml4co/ops/prenorm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ecole
import ilp_model
import numpy as np
import torch
from typing import Any, Callable, Optional, Tuple
import rloptim.utils.data_utils as data_utils
from rloptim.core.types import Tensor, NestedTensor
from rloptim.envs.env import Env
from rloptim.envs.wrappers import TimeLimitWrapper
import competition.common.environments as competition_env
import competition.common.rewards as competition_rewards
from instance_loader import InstanceLoader
class EcoleWrapper(Env):
def __init__(self,
dataset: str,
problem_set: str,
observation_function: ecole.observation.NodeBipartite,
timeout: int = 900):
super(EcoleWrapper, self).__init__()
self._env = None
self._dataset = dataset
self._problem_set = problem_set
self._observation_function = observation_function
self._timeout = timeout
self._instance_loader = InstanceLoader(dataset_loc=self._dataset,
load_metadata=True)
@property
def env(self) -> Optional[ecole.environment.Environment]:
return self._env
@property
def dataset(self) -> str:
return self._dataset
@property
def problem_set(self) -> str:
return self._problem_set
@property
def observation_function(self) -> ecole.observation.NodeBipartite:
return self._observation_function
def reset(self, **kwargs) -> NestedTensor:
instance_data = self._instance_loader(self._problem_set)
if isinstance(instance_data, ecole.core.scip.Model):
instance = instance_data
model = ilp_model.Model(instance)
model.find_initial_solution()
bounds = model.get_primal_dual_bounds()
initial_primal_bound = bounds[0]
initial_dual_bound = bounds[1]
else:
instance, metadata = instance_data
initial_primal_bound = metadata["primal_bound"]
initial_dual_bound = metadata["dual_bound"]
reward_function = competition_rewards.TimeLimitDualIntegral()
reward_function.set_parameters(
initial_primal_bound=initial_primal_bound,
initial_dual_bound=initial_dual_bound,
objective_offset=0)
self._env = competition_env.Branching(
time_limit=self._timeout,
observation_function=(self._observation_function,
ecole.observation.Khalil2016(
pseudo_candidates=True)),
reward_function=-reward_function,
information_function={
"nb_nodes": ecole.reward.NNodes(),
"time": ecole.reward.SolvingTime()
})
obs, action_set, reward, done, _ = self._env.reset(
instance, objective_limit=initial_primal_bound)
obs = self._parse_obs(obs, action_set)
action_set = torch.from_numpy(action_set.astype(np.int64))
reward = torch.tensor([reward])
done = torch.tensor([done])
return {
"obs": obs,
"action_set": action_set,
"reward": reward,
"done": done
}
def step(self, action: NestedTensor) -> NestedTensor:
if isinstance(action, dict):
action = action["action"]
action = data_utils.to_numpy(action)
obs, action_set, reward, done, _ = self._env.step(action)
obs = self._parse_obs(obs, action_set)
action_set = torch.from_numpy(action_set.astype(np.int64))
reward = torch.tensor([reward], dtype=torch.float32)
done = torch.tensor([done])
return {
"obs": obs,
"action_set": action_set,
"reward": reward,
"done": done
}
def _parse_obs(self, obs: Any, action_set: Any) -> Tuple[torch.Tensor]:
bgo, khalil = obs
bgo.add_khalil_features(khalil, action_set)
bgo.check_features()
obs = (bgo.row_features, bgo.edge_features.indices,
bgo.edge_features.values, bgo.column_features)
return obs
def close(self) -> None:
if self._env is not None:
self._env.close()
def seed(self, seed: Optional[int] = None) -> None:
if self._env is not None:
self._env.seed(seed)
| CL-LNS-main | ml4co/rl/env/ecole_wrapper.py |
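A rough sketch of driving the wrapper with a trivial policy, assuming ecole, rloptim, the competition helpers and an InstanceLoader-compatible dataset are all available. The dataset path and problem-set name below are placeholders, not values used by this repository.

import ecole
from ml4co.rl.env.ecole_wrapper import EcoleWrapper

env = EcoleWrapper(
    dataset="path/to/instances",   # placeholder dataset location
    problem_set="train",           # placeholder problem-set name
    observation_function=ecole.observation.NodeBipartite(),
    timeout=900,
)
transition = env.reset()
while not transition["done"].item():
    # Always branch on the first candidate, purely for illustration.
    action = transition["action_set"][0]
    transition = env.step({"action": action})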
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import numpy as np
import string
import random
import os
import sys
import graph_datasets.evaluation_data as ed
import ilp_solver
class EvaluationDataMiningTest(unittest.TestCase):
def setUp(self):
solver = ilp_solver.ILPSolver(engine="scip")
x1 = solver.create_integer_var("x1")
x2 = solver.create_integer_var("x2")
solver.add_constraint(10 * x1 + 15 * x2 >= 100)
solver.add_constraint(20 * x1 + 16 * x2 >= 160)
solver.add_constraint(17 * x1 + 11 * x2 >= 130)
# Minimize the objective
solver.set_objective_function(80 * x1 + 95 * x2, maximize=False)
self.instance1 = solver.as_scip_model()
solver = ilp_solver.ILPSolver(engine="scip")
x1 = solver.create_integer_var("x1")
x2 = solver.create_integer_var("x2")
x3 = solver.create_integer_var("x3")
solver.add_constraint(10 * x1 + 15 * x2 >= 100)
solver.add_constraint(20 * x1 + 16 * x2 >= 160)
solver.add_constraint(17 * x3 + 11 * x2 >= 130)
# Minimize the objective
solver.set_objective_function(80 * x1 + 95 * x2 + 17 * x3, maximize=False)
self.instance2 = solver.as_scip_model()
letters = string.ascii_letters
self.db = []
for i in range(3):
self.db.append('/tmp/' + ''.join(random.choice(letters) for i in range(10)))
def tearDown(self):
for db in self.db:
try:
os.remove(db)
except:
pass
def testSingleVersion(self):
data = ed.EvaluationData(self.db[0])
# Format is instance, model_version, step_id, primal, dual, nb_nodes, timestamp
data.add(self.instance1, "v1", 1, 0, 123, 2, 1.0)
data.add(self.instance1, "v1", 2, 0, 125, 4, 1.5)
data.commit()
miner = ed.EvaluationDataMining(self.db[0], ["v1"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5]})
def testMultipleVersions(self):
data = ed.EvaluationData(self.db[1])
# Format is instance, model_version, step_id, primal, dual, nb_nodes, timestamp
data.add(self.instance1, "v1", 1, 0, 123, 2, 1.0)
data.add(self.instance1, "v1", 2, 0, 125, 4, 1.5)
data.add(self.instance1, "v2", 4, 0, 321, 3, 2.0)
data.add(self.instance1, "v2", 5, 0, 432, 7, 2.7)
data.commit()
miner = ed.EvaluationDataMining(self.db[1], ["v1"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5]})
miner = ed.EvaluationDataMining(self.db[1], ["v2"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v2": [7]})
self.assertEqual(integrals_over_nodes, {"v2": [1284.0]})
self.assertEqual(integrals_over_time, {"v2": [224.70000000000005]})
miner = ed.EvaluationDataMining(self.db[1], ["v1", "v2"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4], "v2": [4]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0], "v2": [321.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5], "v2": [0.0]})
def testMultipleVersionsMultipleInstances(self):
data = ed.EvaluationData(self.db[2])
# Format is instance, model_version, step_id, primal, dual, nb_nodes, timestamp
data.add(self.instance1, "v1", 1, 0, 123, 2, 1.0)
data.add(self.instance1, "v1", 2, 0, 125, 4, 1.5)
data.add(self.instance1, "v2", 4, 0, 321, 3, 2.0)
data.add(self.instance1, "v2", 5, 0, 432, 7, 2.7)
data.add(self.instance2, "v1", 11, 0, 1123, 12, 11.0)
data.add(self.instance2, "v1", 12, 0, 1125, 14, 11.5)
data.add(self.instance2, "v2", 14, 0, 1321, 13, 12.0)
data.add(self.instance2, "v2", 15, 0, 1432, 17, 12.7)
data.commit()
miner = ed.EvaluationDataMining(self.db[2], ["v1"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4, 14]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0, 2246.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5, 561.5]})
miner = ed.EvaluationDataMining(self.db[2], ["v2"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v2": [7, 17]})
self.assertEqual(integrals_over_nodes, {"v2": [1284.0, 5284.0]})
self.assertEqual(integrals_over_time, {"v2": [224.70000000000005, 924.699999999999]})
miner = ed.EvaluationDataMining(self.db[2], ["v1", "v2"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4, 14], "v2": [4, 14]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0, 2246.0], "v2": [321.0, 1321.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5, 561.5], "v2": [0, 0]})
def _testRealResults(self):
miner = ed.EvaluationDataMining("/data/home/benoitsteiner/ml4co-dev/ml4co/results.db", ["SCIP", "09/09/2021 17:26:14"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
print(str(nb_nodes))
print(str(integrals_over_nodes))
print(str(integrals_over_time))
self.assertEqual(1, 2)
 | CL-LNS-main | graph_datasets/evaluation_data_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sqlite3
import pickle
from pathlib import Path
import hashlib
import string
import random
import base64
import functools
class SolvedMilpDataset():
"""
This class stores the best solution found for a collection of milp instances.
"""
def __init__(self, sample_db, read_only=False, best_solution_only=True):
self.best_solution_only = best_solution_only
if best_solution_only:
self.sql_insert = "REPLACE"
else:
self.sql_insert = "INSERT"
p = Path(sample_db)
if not p.parent.exists():
p.parent.mkdir(exist_ok=True)
already_created = p.exists()
assert already_created or not read_only
uri = "file:" + sample_db
if read_only:
uri += "?mode=ro"
self.db = sqlite3.connect(uri, uri=True)
self.cur = self.db.cursor()
# Create table if needed
if not already_created:
if best_solution_only:
self.cur.execute('''CREATE TABLE milp (id text primary key, problem text, solution text, objective_sense text, objective_value float, gap float)''')
else:
self.cur.execute('''CREATE TABLE milp (id text key, problem text, solution text, objective_sense text, objective_value float, gap float)''')
self.cur.execute('''CREATE INDEX id_index ON milp (id)''')
def __del__(self):
self.db.close()
@functools.lru_cache(maxsize=16)
def _model_to_key_pb(self, model):
letters = string.ascii_letters
tmp_file = '/tmp/' + ''.join(random.choice(letters) for i in range(10)) + '.lp'
model.writeProblem(tmp_file)
with open(tmp_file, 'r') as f:
problem = f.read()
problem = problem.encode()
key = hashlib.sha256(problem).hexdigest()
return key, problem
def _better_solution_exists(self, key, obj_sense, obj_value):
try:
query = f"SELECT objective_value FROM milp WHERE id = \'{key}\'"
self.cur.execute(query)
rslt = self.cur.fetchone()
old_value = rslt[0]
found = True
except:
found = False
if found and ((obj_sense == "minimize" and old_value < obj_value) or (obj_sense == "maximize" and old_value > obj_value)):
return True
else:
return False
def get_one(self, model):
"""
Load the solution(s) and variable assignment(s) for the specified model.
Encodes the solutions as the ({key, value}, obj_value) tuple, where key is the
index of a variable in the array returned by model.getVars(transformed=True),
value is the value of this variable in the solution, and obj_value is the
objective value of the solution.
"""
key, _ = self._model_to_key_pb(model)
query = f"SELECT solution, objective_value FROM milp WHERE id = \'{key}\'"
self.cur.execute(query)
rslt = self.cur.fetchone()
solution = base64.b64decode(rslt[0].encode())
solution = pickle.loads(solution)
obj_value = rslt[1]
return (solution, obj_value)
def get_all(self, model):
"""
Load the solution(s) and variable assignment(s) for the specified model.
Encodes the solutions as the ({key, value}, obj_value) tuple, where key is the
index of a variable in the array returned by model.getVars(transformed=True),
value is the value of this variable in the solution, and obj_value is the
objective value of the solution.
"""
key, _ = self._model_to_key_pb(model)
query = f"SELECT solution, objective_value FROM milp WHERE id = \'{key}\'"
self.cur.execute(query)
rslts = self.cur.fetchmany()
while len(rslts) > 0:
for rslt in rslts:
solution = base64.b64decode(rslt[0].encode())
solution = pickle.loads(solution)
obj_value = rslt[1]
yield (solution, obj_value)
rslts = self.cur.fetchmany()
def add(self, model, solution, objective_value, gap):
"""
Stores the solution and variable assignment for the specified model.
"""
# Insert a row of data or replace it if a better solution is found
key, problem = self._model_to_key_pb(model)
obj_sense = model.getObjectiveSense()
if self.best_solution_only and self._better_solution_exists(key, obj_sense, objective_value):
return
sol = {}
vars = model.getVars(transformed=True)
for i in range(len(vars)):
v = vars[i]
val = solution[v]
sol[i] = val
sol = pickle.dumps(sol)
problem = base64.b64encode(problem).decode()
sol = base64.b64encode(sol).decode()
query = f"{self.sql_insert} INTO milp VALUES (\'{key}\', \'{problem}\', \'{sol}\', \'{obj_sense}\', {objective_value}, {gap})"
self.cur.execute(query)
self.db.commit()
def merge(self, dataset):
"""
Add another dataset to the current one
"""
query = "SELECT id, problem, solution, objective_sense, objective_value, gap FROM milp"
sample_cnt = 0
for milp in dataset.cur.execute(query):
key = milp[0]
obj_sense = milp[3]
obj_value = milp[4]
if self.best_solution_only and self._better_solution_exists(key, obj_sense, obj_value):
continue
insert = f"{self.sql_insert} INTO milp VALUES (\'{milp[0]}\', \'{milp[1]}\', \'{milp[2]}\', \'{milp[3]}\', {milp[4]}, {milp[5]})"
self.cur.execute(insert)
sample_cnt += 1
if sample_cnt % 1000 == 0:
self.db.commit()
self.db.commit()
| CL-LNS-main | graph_datasets/solved_milp_dataset.py |
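A small end-to-end sketch of storing and retrieving a solution, assuming pyscipopt is installed; the toy model and the /tmp path are made up for illustration.

import pyscipopt
from graph_datasets.solved_milp_dataset import SolvedMilpDataset

model = pyscipopt.Model()
x = model.addVar(vtype="I", name="x")
y = model.addVar(vtype="I", name="y")
model.addCons(2 * x + 3 * y >= 12)
model.setObjective(5 * x + 4 * y, sense="minimize")
model.optimize()

dataset = SolvedMilpDataset("/tmp/solved_milps.db")
dataset.add(model, model.getBestSol(), model.getObjVal(), model.getGap())

# Retrieve the stored variable assignment and its objective value.
assignment, objective = dataset.get_one(model)
print(objective, assignment)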
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ecole
import torch
import numpy as np
import math
import time
def augment_variable_features_with_dynamic_ones(batch, args, initial_solution=None):
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#DEVICE = 'cpu'
nh_size_threshold = dict() # filter out training data below certain neighborhood size threshold
window_size = args.window_size
# to add features of the last $window_size improving solutions in LNS
# each window contains 1. whether we have the solution 2. incumbent values 3. LB relax values
dynamic_feature_size = window_size * 3
static_feature_size = batch.variable_features.shape[-1]
dynamic_features = torch.zeros((batch.variable_features.shape[0], window_size * 3), dtype = torch.float32)
if "feat1" in args.experiment: #feat1: no Khalil's feature and no LB relax feature
batch.variable_features[:,23:] = torch.zeros(batch.variable_features.shape[0], batch.variable_features.shape[1] - 23)
assert len(batch.incumbent_history) == len(batch.LB_relaxation_history)
tot_variables = 0
batch_weight = []
batch_n_candidates = []
#embed()
for i in range(len(batch.LB_relaxation_history)):
#pop the incumbent solution
batch.incumbent_history[i].pop()
assert len(batch.incumbent_history[i]) == len(batch.LB_relaxation_history[i])
number_of_history_added = 0
number_of_variables = len(batch.LB_relaxation_history[i][0])
total_candidates = torch.sum(batch.candidate_scores[tot_variables:tot_variables+number_of_variables])
batch_n_candidates.append(total_candidates)
#print(total_candidates)
if args.problem in nh_size_threshold and total_candidates<nh_size_threshold[args.problem]:
batch_weight.append(0)
#print("============No positive labels=============")
else:
batch_weight.append(1)
for j in reversed(range(len(batch.LB_relaxation_history[i]))):
assert number_of_variables == len(batch.incumbent_history[i][j])
assert number_of_variables == len(batch.LB_relaxation_history[i][j])
dynamic_features[tot_variables:tot_variables+number_of_variables, number_of_history_added*3] = torch.FloatTensor([1]*number_of_variables)
dynamic_features[tot_variables:tot_variables+number_of_variables, number_of_history_added*3+1] = torch.FloatTensor(batch.incumbent_history[i][j])
if "feat1" in args.experiment or "feat2" in args.experiment:
dynamic_features[tot_variables:tot_variables+number_of_variables, number_of_history_added*3+2] = torch.zeros(len(batch.LB_relaxation_history[i][j]))
else:
dynamic_features[tot_variables:tot_variables+number_of_variables, number_of_history_added*3+2] = torch.FloatTensor(batch.LB_relaxation_history[i][j])
number_of_history_added += 1
if number_of_history_added == window_size:
break
#print(number_of_history_added)
tot_variables += number_of_variables
#embed()
assert tot_variables == batch.variable_features.shape[0]
dynamic_features = dynamic_features.to(DEVICE)
# Workaround for a data-collection bug: when the stored variable features already include the dynamic columns (104 features), overwrite them in place instead of appending.
if batch.variable_features.shape[-1] == 104:
batch.variable_features[:,-9:] = dynamic_features
else:
all_features = torch.hstack((batch.variable_features, dynamic_features))
batch.variable_features = all_features
#print("batch valid sample %d / %d"% (sum(batch_weight), len(batch_weight)))
batch_weight = torch.tensor(batch_weight)
#embed()
batch.batch_weight = batch_weight.to(DEVICE)
return batch
class MilpEdgeFeatures():
def __init__(self, indices, values):
self.indices = indices
self.values = values
class MilpProblemObservation():
def __init__(self, column_features, row_features, edge_features):
self.column_features = column_features
self.row_features = row_features
self.edge_features = edge_features
def add_LB_relaxation_value(self, LB_relaxation_value):
pass
def add_khalil_features(self, khalil, action_set):
# Validate and cleanup the Khalil features
assert khalil.features.shape[0] == len(action_set)
khalil_features = np.nan_to_num(khalil.features.astype(np.float32),
posinf=1e6,
neginf=-1e6)
# Concatenate the khalil features with the existing features
column_feature_size = self.column_features.shape[-1]
khalil_feature_size = khalil_features.shape[-1]
total_feature_size = column_feature_size + khalil_feature_size
col_features = torch.zeros(
(self.column_features.shape[0], total_feature_size),
dtype=torch.float32)
col_features[:, :column_feature_size] = self.column_features
col_features[action_set.astype(np.int32),
column_feature_size:] = torch.from_numpy(khalil_features)
self.column_features = col_features
def check_features(self):
assert not torch.any(torch.isinf(self.row_features))
assert not torch.any(torch.isinf(self.column_features))
assert not torch.any(torch.isinf(self.edge_features.indices))
assert not torch.any(torch.isinf(self.edge_features.values))
assert not torch.any(torch.isnan(self.row_features))
assert not torch.any(torch.isnan(self.column_features))
assert not torch.any(torch.isnan(self.edge_features.indices))
assert not torch.any(torch.isnan(self.edge_features.values))
# Completement the basic Gasse features with some of our own:
# Lower and upper bound for each variable
# Coefficients associated with each variable in the objective function
# Lower and upper bound for each constraint
class BipartiteGraphObservations(ecole.observation.NodeBipartite):
def __init__(self, check_for_nans=True, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_for_nans = check_for_nans
self.num_calls = 0
self.feature_extraction_time = 0
self.feature_cleanup_time = 0
self.extra_col_feature_extraction_time = 0
self.extra_row_feature_extraction_time = 0
self.feature_normalization_time = 0
self.feature_merge_time = 0
self.total_time = 0
def before_reset(self, model):
super().before_reset(model)
#model.write_problem("/tmp/pb.lp")
def extract(self, model, done):
if done:
return None
start = time.monotonic()
# Extract the Gasse features
base_obs = super().extract(model, done)
stop = time.monotonic()
self.feature_extraction_time += stop - start
scip_model = model.as_pyscipopt()
#sense = scip_model.getObjectiveSense()
#assert(sense == "minimize")
# Delete the incumbent column features. They are always NaN when the SCIP heuristics are turned off.
print(base_obs.variable_features.shape)
base_obs.variable_features = np.delete(base_obs.variable_features, 14, axis=1)
base_obs.variable_features = np.delete(base_obs.variable_features, 13, axis=1)
stop = time.monotonic()
self.feature_cleanup_time += stop - start
assert not np.isinf(base_obs.variable_features.astype(np.float32)).any()
#total_col_features = 3 + base_obs.column_features.shape[-1]
extra_col_features = np.empty((base_obs.variable_features.shape[0], 6), dtype=np.float32)
cols = scip_model.getLPColsData()
assert(len(cols) == base_obs.variable_features.shape[0])
vars = scip_model.getVars(transformed=True)
assert(len(vars) == base_obs.variable_features.shape[0])
for i in range(base_obs.variable_features.shape[0]):
col = cols[i]
assert i == col.getLPPos()
#print("BASIS = " + str(col.getBasisStatus()))
#print("POS = " + str(col.getLPPos()))
#print("POVArS = " + str(col.getVar()))
#print(str(base_obs.column_features[i]), flush=True)
#print(str(base_obs.column_features[i][6]))
#print("LB = " + str(col.getLb()))
#print("UB = " + str(col.getUb()))
extra_col_features[i, 0] = col.getLb()
extra_col_features[i, 1] = col.getUb()
var = vars[i]
assert i == var.getCol().getLPPos()
assert var.ptr() == col.getVar().ptr()
extra_col_features[i, 2] = var.getLbGlobal()
extra_col_features[i, 3] = var.getUbGlobal()
extra_col_features[i, 4] = var.getObj()
assert var.getLPSol() == col.getPrimsol()
extra_col_features[i, 5] = var.getLPSol()
#print("OBJ = " + str(var.getObj()))
#print("LP SOL = " + str(var.getLPSol()))
assert col.getLb() == var.getLbLocal()
assert col.getUb() == var.getUbLocal()
#var_map[var.getIndex()] = var
stop = time.monotonic()
self.extra_col_feature_extraction_time += stop - start
assert not np.isinf(extra_col_features).any()
#extra_col_features[:, 3:] = base_obs.column_features
#base_obs.column_features = torch.from_numpy(extra_col_features)
#total_row_features = 3 + base_obs.row_features.shape[-1]
extra_row_features = np.empty((base_obs.row_features.shape[0], 5), dtype=np.float32)
rows = scip_model.getLPRowsData()
assert len(rows) <= base_obs.row_features.shape[0]
ecole_cns_id = 0
for i in range(len(rows)):
row = rows[i]
assert i == row.getLPPos()
# If a constraint has both a lhs and a rhs, ecole will create 2 constraints under the hood
lhs_set = not scip_model.isInfinity(abs(row.getLhs()))
rhs_set = not scip_model.isInfinity(abs(row.getRhs()))
assert lhs_set or rhs_set
if lhs_set:
cns = -row.getLhs()
extra_row_features[ecole_cns_id, 0] = cns
extra_row_features[ecole_cns_id, 1] = math.copysign(1, cns)
extra_row_features[ecole_cns_id, 2] = row.getConstant()
extra_row_features[ecole_cns_id, 3] = row.getOrigintype()
extra_row_features[ecole_cns_id, 4] = row.isIntegral()
ecole_cns_id += 1
if rhs_set:
cns = row.getRhs()
extra_row_features[ecole_cns_id, 0] = cns
extra_row_features[ecole_cns_id, 1] = math.copysign(1, cns)
extra_row_features[ecole_cns_id, 2] = row.getConstant()
extra_row_features[ecole_cns_id, 3] = row.getOrigintype()
extra_row_features[ecole_cns_id, 4] = row.isIntegral()
ecole_cns_id += 1
#extra_row_features[i, 0] = -row.getLhs()
#extra_row_features[i, 1] = row.getRhs()
#extra_row_features[i, 1] = row.getConstant()
#lhs = row.getLhs()
#print("- LHS = " + str(lhs))
#rhs = row.getRhs()
#print("- RHS = " + str(rhs))
#cons = row.getConstant()
#print("- CONS = " + str(cons))
#print("- POS: " + str(pos))
#val = row.getVals()
#print("- VALS = " + str(val))
#for col in row.getCols():
# print("- COLS: " + str(cols))
#row = scip_model.getTransformedCons(row)
#lhs = row.getLhs()
#print("- LHS = " + str(lhs))
#rhs = row.getRhs()
#print("- RHS = " + str(rhs))
#cons = row.getConstant()
#print("- CONS = " + str(cons))
#pos = row.getLPPos()
#print("- POS: " + str(pos))
#val = row.getVals()
#print("- VALS = " + str(val))
#node_id += 1
assert ecole_cns_id == base_obs.row_features.shape[0]
stop = time.monotonic()
self.extra_row_feature_extraction_time += stop - start
#extra_row_features[:, 3:] = base_obs.row_features
#base_obs.row_features = torch.from_numpy(extra_row_features)
#vars = scip_model.getVars(transformed=False)
#for var in vars:
# print("VAR = " + str(var) + ": " + str(var.getCol()) + " " + str(var.getObj()))
#vars = scip_model.getVars(transformed=True)
#i = 0
#for var in vars:
# print("TRANSFORMED VAR = " + str(var) + ": " + str(var.getCol()) + " " + str(var.getObj()))
# assert i == var.getCol().getLPPos()
# i += 1
# #print("LB = " + str(var.getLbOriginal()) + "/" + str(var.getLbLocal()) + "/" + str(var.getLbGlobal()))
# #print("UB = " + str(var.getUbOriginal()) + "/" + str(var.getUbLocal()) + "/" + str(var.getUbGlobal()))
#conss = scip_model.getConss()
#assert(len(conss) == base_obs.row_features.shape[0])
#for cons in conss:
# print(str(cons))
#obj = scip_model.getObjective()
#print("OBJ = " + str(obj))
#params = model.get_params()
#print("PARAMS: " + str(params))
#lp_columns = model.lp_columns()
#print("LP_COLUMNS " + str(lp_columns))
#lp_rows = model.lp_rows()
#print("LP_ROWS " + str(lp_rows))
#constraints = scip_model.getConss()
#print("CNS: " + str(constraints))
#constraints = scip_model.getNConss()
#print("NCNS: " + str(len(cols)) + " vs " + str(base_obs.column_features.shape[0]), flush=True)
#print("NROWS: " + str(len(rows)) + " vs " + str(base_obs.row_features.shape[0]), flush=True)
#print("CNS: " + str(base_obs.row_features))
#print("EDGES: " + str(base_obs.edge_features.indices))
#print("EDG VALS: " + str(base_obs.edge_features.values))
#print("VARS: " + str(base_obs.column_features), flush=True)
#print("WHOLE FEATURIZATION" + str(base_obs))
##############
# MORE STUFF
#scip_model.getRowLPActivity()
# Normalize the objective features
factor = 1.0 / np.max(np.absolute(extra_col_features[:, 4]))
extra_col_features[:, 4] *= factor
# Store both normalized and unormalized constraints
new_edge_values = np.tile(base_obs.edge_features.values.astype(np.float32).reshape(-1, 1), (1, 2))
#assert not np.any(np.isnan(new_edge_values))
cns_id = base_obs.edge_features.indices[0, :]
cns = extra_row_features[cns_id, 0]
div = np.maximum(1e-6, np.abs(cns))
new_edge_values[:, 1] /= div
#assert not np.any(np.isnan(new_edge_values))
stop = time.monotonic()
self.feature_normalization_time += stop - start
column_features = torch.from_numpy(np.concatenate([extra_col_features, base_obs.variable_features.astype(np.float32)], axis=1))
row_features = torch.from_numpy(np.concatenate([extra_row_features, base_obs.row_features.astype(np.float32)], axis=1))
edge_features = MilpEdgeFeatures(torch.from_numpy(base_obs.edge_features.indices.astype(np.int64)), torch.from_numpy(new_edge_values))
obs = MilpProblemObservation(column_features, row_features, edge_features)
stop = time.monotonic()
self.feature_merge_time += stop - start
if self.check_for_nans:
assert not torch.any(torch.isnan(obs.row_features))
assert not torch.any(torch.isnan(obs.column_features))
assert not torch.any(torch.isnan(obs.edge_features.indices))
assert not torch.any(torch.isnan(obs.edge_features.values))
assert not torch.any(torch.isinf(obs.row_features))
assert not torch.any(torch.isinf(obs.column_features))
assert not torch.any(torch.isinf(obs.edge_features.indices))
assert not torch.any(torch.isinf(obs.edge_features.values))
stop = time.monotonic()
self.total_time += stop - start
self.num_calls += 1
'''
print("feature_extraction_time", self.feature_extraction_time)
print("feature_cleanup_time", self.feature_cleanup_time)
print("extra_col_feature_extraction_time", self.extra_col_feature_extraction_time)
print("extra_row_feature_extraction_time", self.extra_row_feature_extraction_time)
print("feature_normalization_time", self.feature_normalization_time)
print("feature_merge_time", self.feature_merge_time)
print("total_time", self.total_time)
'''
return obs
def timings(self):
if self.num_calls == 0:
return ""
timing = f"observation time = {self.feature_extraction_time/self.num_calls: >.4f} {self.feature_cleanup_time/self.num_calls: >.4f} {self.extra_col_feature_extraction_time/self.num_calls: >.4f} {self.extra_row_feature_extraction_time/self.num_calls: >.4f} {self.feature_normalization_time/self.num_calls: >.4f} {self.feature_merge_time/self.num_calls: >.4f} {self.total_time/self.num_calls: >.4f}"
return timing
| CL-LNS-main | graph_datasets/bipartite_graph_observations.py |
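A minimal sketch of plugging this observation function into an ecole branching environment, assuming ecole is installed; the set-cover generator is an arbitrary instance source chosen for the example.

import ecole
from graph_datasets.bipartite_graph_observations import BipartiteGraphObservations

obs_function = BipartiteGraphObservations()
env = ecole.environment.Branching(observation_function=obs_function)
instances = ecole.instance.SetCoverGenerator(n_rows=100, n_cols=200)

obs, action_set, reward, done, info = env.reset(next(instances))
if not done:
    obs.check_features()
    print(obs.column_features.shape, obs.row_features.shape,
          obs.edge_features.indices.shape)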
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ecole.typing
class DualBound(ecole.typing.InformationFunction):
def __init__(self):
super().__init__()
def before_reset(self, model):
super().before_reset(model)
def extract(self, model, done):
m = model.as_pyscipopt()
dual_bound = m.getDualbound()
return dual_bound
class Gap(ecole.typing.InformationFunction):
def __init__(self):
super().__init__()
def before_reset(self, model):
super().before_reset(model)
def extract(self, model, done):
m = model.as_pyscipopt()
gap = m.getGap()
return gap
| CL-LNS-main | graph_datasets/informations.py |
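A rough sketch of using these information functions inside an ecole branching loop, assuming ecole is installed; the instance generator and the first-candidate branching rule are arbitrary choices for the example.

import ecole
from graph_datasets.informations import DualBound, Gap

env = ecole.environment.Branching(
    information_function={"dual_bound": DualBound(), "gap": Gap()},
)
instances = ecole.instance.SetCoverGenerator(n_rows=100, n_cols=200)
obs, action_set, reward, done, info = env.reset(next(instances))
while not done:
    # Branch on the first candidate; the information dict is refreshed each step.
    obs, action_set, reward, done, info = env.step(action_set[0])
print("dual bound:", info["dual_bound"], "gap:", info["gap"])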
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import torch
import random
import string
import os
import graph_datasets.bipartite_graph as bg
import graph_datasets.bipartite_graph_dataset as bgd
import graph_datasets.bipartite_graph_loader as bgl
class BipartiteGraphLoaderTest(unittest.TestCase):
def build_db(self, seed=None):
random.seed(seed)
letters = string.ascii_letters
name = '/tmp/' + ''.join(random.choice(letters) for i in range(10))
if os.path.exists(name):
os.remove(name)
db = bgd.BipartiteGraphDataset(name)
# create a graph with 2 variables and 1 constraint
db.add(bg.BipartiteGraph(torch.FloatTensor([123]), torch.IntTensor([[0, 1], [0, 0]]), torch.FloatTensor([32, 21]), torch.FloatTensor([78, 910]), torch.LongTensor([0]), [0], torch.FloatTensor([0.65]), [0]))
# create a graph with 3 variables and 2 constraints
db.add(bg.BipartiteGraph(torch.FloatTensor([456, 567]), torch.IntTensor([[0, 1, 1, 2], [0, 0, 1, 1]]), torch.FloatTensor([654, 765, 876, 987]), torch.FloatTensor([987, 109, 111]), torch.LongTensor([1, 2]), [0], torch.FloatTensor([0.56, 0.12]), [0]))
db.commit()
return name
def testLoadAsPTGeom(self):
name = self.build_db(seed="pt_geom")
loader = bgl.BipartiteGraphLoader(name, shuffle=False)
gen = loader.load(batch_size=1, format="pt_geom")
g1 = next(gen)
self.assertEqual(g1.constraint_features, torch.FloatTensor([123]))
self.assertTrue(torch.equal(g1.variable_features, torch.FloatTensor([78, 910])))
self.assertTrue(torch.equal(g1.edge_attr, torch.FloatTensor([32, 21])))
g2 = next(gen)
self.assertTrue(torch.equal(g2.constraint_features, torch.FloatTensor([456, 567])))
self.assertTrue(torch.equal(g2.variable_features, torch.FloatTensor([987, 109, 111])))
self.assertTrue(torch.equal(g2.edge_attr, torch.FloatTensor([654, 765, 876, 987])))
def testLoadAsDGL(self):
name = self.build_db(seed="dgl")
loader = bgl.BipartiteGraphLoader(name, shuffle=False)
gen = loader.load(batch_size=1, format="dgl")
g1 = next(gen)
self.assertTrue(torch.equal(g1.nodes['variables'].data['variable_features'], torch.FloatTensor([78, 910])))
self.assertTrue(torch.equal(g1.nodes['variables'].data['fsb_scores'], torch.FloatTensor([0.65, -1.0e10])))
self.assertEqual(g1.nodes['constraints'].data['constraint_features'], torch.FloatTensor([123]))
self.assertTrue(torch.equal(g1.edges['edges'].data['edge_attr'], torch.FloatTensor([32, 21])))
self.assertTrue(g1.has_edges_between(0, 0, ("variables", "edges", "constraints")))
self.assertTrue(g1.has_edges_between(1, 0, ("variables", "edges", "constraints")))
g2 = next(gen)
self.assertTrue(torch.equal(g2.nodes['variables'].data['variable_features'], torch.FloatTensor([987, 109, 111])))
self.assertTrue(torch.equal(g2.nodes['variables'].data['fsb_scores'], torch.FloatTensor([-1.0e10, 0.56, 0.12])))
self.assertTrue(torch.equal(g2.nodes['constraints'].data['constraint_features'], torch.FloatTensor([456, 567])))
self.assertTrue(torch.equal(g2.edges['edges'].data['edge_attr'], torch.FloatTensor([654, 765, 876, 987])))
self.assertTrue(g2.has_edges_between(0, 0, ("variables", "edges", "constraints")))
self.assertTrue(g2.has_edges_between(1, 0, ("variables", "edges", "constraints")))
self.assertTrue(g2.has_edges_between(1, 1, ("variables", "edges", "constraints")))
self.assertTrue(g2.has_edges_between(2, 1, ("variables", "edges", "constraints")))
def testLoadAsNTX(self):
name = self.build_db(seed="ntx")
loader = bgl.BipartiteGraphLoader(name, shuffle=False)
gen = loader.load(batch_size=1, format="ntx")
g1 = next(gen)
# TODO: figure out how to check the graph
#nx.write_gpickle(g1, "/tmp/g1.pickle")
#with open('/tmp/g1.txt', mode='w') as f:
# print(str(g1), file=f)
g2 = next(gen)
#nx.write_gpickle(g2, "/tmp/g2.pickle")
#with open('/tmp/g2.txt', mode='w') as f:
# print(str(g2), file=f)
reached_end = False
try:
_ = next(gen)
except:
reached_end = True
self.assertTrue(reached_end)
| CL-LNS-main | graph_datasets/bipartite_graph_loader_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pyscipopt import Eventhdlr
from pyscipopt import SCIP_EVENTTYPE
class DualBoundEventHandler(Eventhdlr):
def __init__(self, initial_bound=None):
super().__init__()
self.initial_bound = initial_bound
if initial_bound:
self.events = [(initial_bound, 0, 0)]
self.last_dual = initial_bound
else:
self.events = []
self.last_dual = float("NaN")
def eventinit(self):
self.model.catchEvent(SCIP_EVENTTYPE.LPEVENT, self)
def eventexit(self):
self.model.dropEvent(SCIP_EVENTTYPE.LPEVENT, self)
def eventexec(self, event):
dual = self.model.getDualbound()
if dual != self.last_dual:
if self.initial_bound:
if self.model.getObjectiveSense() == "minimize":
dual = max(dual, self.initial_bound)
else:
dual = min(dual, self.initial_bound)
self.last_dual = dual
time = self.model.getSolvingTime()
nb_nodes = self.model.getNNodes()
self.events.append((dual, time, nb_nodes))
#print(f"CAUGHT EVENT {dual} at t={time} nb_nodes={nb_nodes}", flush=True)
return {}
 | CL-LNS-main | graph_datasets/event_handlers.py |
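A brief sketch of attaching the handler to a pyscipopt model, assuming pyscipopt is installed; the instance file name is hypothetical.

import pyscipopt
from graph_datasets.event_handlers import DualBoundEventHandler

model = pyscipopt.Model()
model.readProblem("instance.mps")   # hypothetical instance file
handler = DualBoundEventHandler(initial_bound=0.0)
model.includeEventhdlr(handler, "DualBoundTracker", "records dual bound improvements")
model.optimize()

# Each entry is (dual_bound, solving_time, nb_nodes) recorded when the bound changed.
for dual, solve_time, nb_nodes in handler.events:
    print(dual, solve_time, nb_nodes)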
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sqlite3
from pathlib import Path
import hashlib
import string
import random
import functools
from collections import defaultdict
class EvaluationData():
def __init__(self, db, read_only=False):
p = Path(db)
if not p.parent.exists():
p.parent.mkdir(exist_ok=True)
already_created = p.exists()
assert already_created or not read_only
uri = "file:" + db
if read_only:
uri += "?mode=ro"
self.db = sqlite3.connect(uri, uri=True)
self.cur = self.db.cursor()
# Create table if needed
if not already_created:
self.cur.execute('''CREATE TABLE eval_data (instance_id string not null, model_version string not null, step_id integer not null, nb_nodes integer not null, timestamp float, primal float, dual float)''')
self.cur.execute('''CREATE INDEX per_instance_id ON eval_data(instance_id)''')
self.cur.execute('''CREATE INDEX per_model_version ON eval_data(model_version)''')
self.db.commit()
self.added_rows = 0
def __del__(self):
self.db.commit()
self.db.close()
@functools.lru_cache(maxsize=16)
def _instance_to_key(self, model):
letters = string.ascii_letters
tmp_file = '/tmp/' + ''.join(random.choice(letters) for i in range(10)) + '.lp'
model.writeProblem(tmp_file)
with open(tmp_file, 'r') as f:
problem = f.read()
problem = problem.encode()
key = hashlib.sha256(problem).hexdigest()
return key
def add(self, instance, model_version, step_id, primal, dual, nb_nodes, timestamp):
instance_id = self._instance_to_key(instance)
self.cur.execute(f"INSERT INTO eval_data VALUES (\'{instance_id}\', \'{model_version}\', {step_id}, {nb_nodes}, {timestamp}, {primal}, {dual})")
self.added_rows += 1
if self.added_rows % 1000 == 0:
self.db.commit()
def commit(self):
self.db.commit()
class EvaluationDataMining():
def __init__(self, db, models):
self.db = EvaluationData(db, read_only=True)
self.models = models
def compute_metrics(self):
model_filter = f"model_version == '{self.models[0]}' "
for m in self.models[1:]:
model_filter += f"OR model_version == '{m}' "
query = f"SELECT DISTINCT instance_id FROM eval_data WHERE {model_filter}"
#print(query)
self.db.cur.execute(query)
instances = self.db.cur.fetchall()
#print(str(instances))
integrals_over_time = defaultdict(lambda: [])
integrals_over_nodes = defaultdict(lambda: [])
nb_nodes = defaultdict(lambda: [])
for instance in instances:
instance_id = instance[0]
max_nb_nodes = 1e100
for version in self.models:
query = f"SELECT MAX(nb_nodes) FROM eval_data WHERE instance_id == '{instance_id}' AND model_version == '{version}'"
#print(query)
self.db.cur.execute(query)
num_nodes = self.db.cur.fetchone()
#print(str(num_nodes))
max_nb_nodes = min(max_nb_nodes, int(num_nodes[0]))
for version in self.models:
#print(version)
nb_nodes[version].append(max_nb_nodes)
integral_over_time = 0
integral_over_nodes = 0
query = f"SELECT nb_nodes, dual, timestamp FROM eval_data WHERE instance_id == '{instance_id}' AND model_version == '{version}' AND nb_nodes <= {max_nb_nodes} ORDER BY nb_nodes ASC"
#print(query)
first = True
last_dual = 0
last_nb_nodes = 0
last_timestamp = 0
for rslt in self.db.cur.execute(query):
#print("ORDERED RSLT:" + str(rslt))
if not first:
integral_over_time += last_dual * (float(rslt[2]) - last_timestamp)
integral_over_nodes += last_dual * (int(rslt[0]) - last_nb_nodes)
first = False
last_dual = float(rslt[1])
last_nb_nodes = int(rslt[0])
last_timestamp = float(rslt[2])
if last_nb_nodes < max_nb_nodes:
integral_over_nodes += last_dual * (max_nb_nodes - last_nb_nodes)
integrals_over_time[version].append(integral_over_time)
integrals_over_nodes[version].append(integral_over_nodes)
return (nb_nodes, integrals_over_nodes, integrals_over_time)
def draw_in_tensorboard(self):
pass
| CL-LNS-main | graph_datasets/evaluation_data.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import sqlite3
import pickle
import base64
import random
from pathlib import Path
from graph_datasets.bipartite_graph import BipartiteGraph
import intervaltree
import zlib
import torch
class BipartiteGraphDataset(torch_geometric.data.Dataset):
"""
This class encodes a collection of graphs, as well as a method to load such graphs from the disk.
It can be used in turn by the data loaders provided by pytorch geometric.
"""
def __init__(self, sample_db, query_opt=False, read_only=False, first_k=None):
super().__init__(root=None, transform=None, pre_transform=None)
self.query_opt = query_opt
p = Path(sample_db)
if not p.parent.exists():
p.parent.mkdir(exist_ok=True)
already_created = p.exists()
assert already_created or not read_only
uri = "file:" + sample_db
if read_only:
uri += "?mode=ro"
self.db = sqlite3.connect(uri, uri=True)
self.cur = self.db.cursor()
# Create table if needed
if not already_created:
self.cur.execute('''CREATE TABLE samples (id integer primary key asc, features text not null unique)''')
#self.cur.execute('''CREATE UNIQUE INDEX per_id ON samples(id)''')
self.cur.execute('''INSERT INTO samples VALUES (-1, \'0\')''')
self.db.commit()
self.sample_cnt = 0
else:
self.cur.execute("SELECT features FROM samples WHERE id = -1")
rslt = self.cur.fetchone()
self.sample_cnt = int(rslt[0])
if first_k is not None:
self.sample_cnt = min(self.sample_cnt, first_k)
print(f"Use first_k = {first_k}. Dataset size = {self.sample_cnt}")
def __del__(self):
self.db.close()
def len(self):
return self.sample_cnt
def get(self, index):
"""
Load a bipartite graph observation as saved on the disk during data collection.
"""
#print("here: get")
#assert False
#from IPython import embed; embed()
if self.query_opt:
# Ignore the requested index, so we can stream data
rslt = self.cur.fetchone()
if rslt is None:
query = "SELECT features FROM samples WHERE id >= 0"
self.cur.execute(query)
rslt = self.cur.fetchone()
assert rslt is not None
else:
# Fetch the data at the requested index. This is much slower
query = f"SELECT features FROM samples WHERE id = {index}"
self.cur.execute(query)
rslt = self.cur.fetchone()
entry = base64.b64decode(rslt[0].encode())
try:
raw = zlib.decompress(entry)
except:
# Old uncompressed dataset
raw = entry
graph = pickle.loads(raw)
#from IPython import embed; embed()
#if torch.sum(graph.candidate_scores).item() < 25:
# return None
#if index % 2 ==0 :
# return None
return graph
def add(self, graph):
"""
Add a bipartite graph observation to the dataset. Only adds the observation if it wasn't
        already present in the dataset.
"""
# Insert a row of data
raw = pickle.dumps(graph)
compressed = zlib.compress(raw, level=9)
sample = base64.b64encode(compressed).decode()
query = f"INSERT INTO samples VALUES ({self.sample_cnt}, \'{sample}\')"
try:
self.cur.execute(query)
self.sample_cnt += 1
self.commit()
return True
except sqlite3.IntegrityError:
return False
def merge(self, dataset):
"""
Add another dataset to the current one
"""
query = "SELECT features FROM samples WHERE id >= 0"
for sample in dataset.cur.execute(query):
insert = f"INSERT INTO samples VALUES ({self.sample_cnt}, \'{sample[0]}\')"
try:
self.cur.execute(insert)
self.sample_cnt += 1
except sqlite3.IntegrityError:
continue
if self.sample_cnt % 1000 == 0:
self.commit()
self.commit()
def merge_multiple(self, datasets):
"""
Add several other datasets to the current one
"""
query = "SELECT features FROM samples WHERE id >= 0"
for dataset in datasets:
dataset.cur.execute(query)
done = False
while not done:
idx = random.randint(0, len(datasets)-1)
dataset = datasets[idx]
sample = dataset.cur.fetchone()
if sample is None:
datasets.pop(idx)
if len(datasets) == 0:
done = True
else:
insert = f"INSERT INTO samples VALUES ({self.sample_cnt}, \'{sample[0]}\')"
try:
self.cur.execute(insert)
self.sample_cnt += 1
except sqlite3.IntegrityError:
continue
if self.sample_cnt % 1000 == 0:
self.commit()
self.commit()
def commit(self):
query = f"INSERT OR REPLACE INTO samples VALUES (-1, \'{self.sample_cnt}\')"
self.cur.execute(query)
self.db.commit()
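# Minimal usage sketch (not executed anywhere in the module): how a dataset file is
# typically written during data collection and read back afterwards. The "/tmp/samples.db"
# path is a placeholder and `graph` can be any picklable BipartiteGraph instance.
def _example_write_and_read(graph, db_path="/tmp/samples.db"):
    writer = BipartiteGraphDataset(db_path)       # creates the samples table on first use
    writer.add(graph)                             # duplicates are rejected via the UNIQUE constraint
    reader = BipartiteGraphDataset(db_path, read_only=True)
    assert reader.len() >= 1
    return reader.get(0)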
class BipartiteGraphDatasets(torch_geometric.data.Dataset):
"""
Allows training on the data from multiple datasets.
"""
def __init__(self, databases, query_opt=False, first_k=None):
super().__init__(root=None, transform=None, pre_transform=None)
if first_k:
first_k = max(1,first_k // len(databases))
self.dbs = intervaltree.IntervalTree()
self.sample_cnt = 0
for db in databases:
p = Path(db)
assert p.exists()
dataset = BipartiteGraphDataset(db, query_opt, read_only=True, first_k=first_k)
new_samples = dataset.len()
self.dbs[self.sample_cnt:self.sample_cnt+new_samples] = dataset
self.sample_cnt += new_samples
def len(self):
return self.sample_cnt
def get(self, index):
"""
Load a bipartite graph observation as saved on the disk during data collection.
"""
rslt = None
#while rslt is None:
d = self.dbs[index].pop()
db = d.data
index -= d.begin
rslt = db.get(index)
return rslt
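# Sketch of reading from several database files at once (the paths are illustrative): the
# interval tree built in __init__ maps a global index to the dataset that owns it.
def _example_multi_db(paths=("/tmp/train_a.db", "/tmp/train_b.db")):
    combined = BipartiteGraphDatasets(list(paths), query_opt=False)
    print(combined.len())   # total number of samples across all databases
    return combined.get(0)  # served by whichever database owns global index 0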
| CL-LNS-main | graph_datasets/bipartite_graph_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import ecole
import torch
import torch_geometric
import numpy as np
import pyscipopt
import graph_datasets.bipartite_graph as bg
import graph_datasets.bipartite_graph_dataset as bgd
import graph_datasets.bipartite_graph_observations as bgo
import ilp_solver
import os
import time
def advance_to_root_node(model, branching):
"""Utility to advance a model to the root node."""
if branching:
dyn = ecole.dynamics.BranchingDynamics()
#print("BranchingDynamics")
else:
dyn = ecole.dynamics.PrimalSearchDynamics()
#print("PrimalSearchDynamics")
model = dyn.reset_dynamics(model)
return model
def make_obs(obs_func, model, branching=True):
"""Utility function to extract observation on root node."""
start = time.monotonic()
if isinstance(obs_func, tuple):
for f in obs_func:
f.before_reset(model)
else:
obs_func.before_reset(model)
stop = time.monotonic()
advance_to_root_node(model, branching)
stop = time.monotonic()
if isinstance(obs_func, tuple):
rslt = []
for f in obs_func:
rslt.append(f.extract(model, False))
return rslt
else:
return obs_func.extract(model, False)
def disable_all(solver):
solver.disable_presolver()
solver.disable_cuts()
solver.disable_heuristics()
class FeaturizationTest(unittest.TestCase):
def setUp(self):
# Create a small ILP problem
solver1 = ilp_solver.ILPSolver(engine="scip")
x1 = solver1.create_integer_var("x1")
x2 = solver1.create_integer_var("x2")
solver1.add_constraint(10 * x1 + 15 * x2 >= 100.23)
solver1.add_constraint(20 * x1 + 16 * x2 >= 161.8)
solver1.add_constraint(17 * x1 + 11 * x2 >= 129.42)
# Minimize the objective
solver1.set_objective_function(80 * x1 + 95 * x2, maximize=False)
disable_all(solver1)
scip_model = solver1.as_scip_model()
self.model1 = ecole.scip.Model.from_pyscipopt(scip_model)
#self.model1.transform_prob()
solver2 = ilp_solver.ILPSolver(engine="scip")
x1 = solver2.create_integer_var("x1")
x2 = solver2.create_integer_var("x2")
solver2.add_constraint(20 * x1 + 30 * x2 <= 200)
solver2.add_constraint(40 * x1 + 32 * x2 <= 320)
solver2.add_constraint(34 * x1 + 22 * x2 <= 260)
# Minimize the objective
solver2.set_objective_function(80 * x1 + 95 * x2, maximize=True)
disable_all(solver2)
scip_model = solver2.as_scip_model()
self.model2 = ecole.scip.Model.from_pyscipopt(scip_model)
#self.model2.transform_prob()
solver3 = ilp_solver.ILPSolver(engine="scip")
x0 = solver3.create_integer_var("x0")
x1 = solver3.create_integer_var("x1")
x2 = solver3.create_integer_var("x2")
solver3.add_constraint(20 * x1 + 30 * x2 >= 200)
solver3.add_constraint(40 * x1 + 32 * x2 >= 320)
solver3.add_constraint(34 * x1 + 22 * x2 >= 260)
solver3.add_constraint(2 * x0 + 3 * x1 == 12)
# Minimize the objective
solver3.set_objective_function(87.3 * x1 + 93.2 * x2, maximize=False)
disable_all(solver3)
scip_model = solver3.as_scip_model()
self.model3 = ecole.scip.Model.from_pyscipopt(scip_model)
#self.model3.transform_prob()
def testBranchingFeaturization(self):
observation = make_obs(bgo.BipartiteGraphObservations(), self.model1)
#print("VARS1: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 8.4211e-01,
5.8809e+00, 6.4414e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.8809e+00,
8.8086e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 1.0000e+00,
2.7614e+00, 7.6491e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.7614e+00,
7.6143e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00]])""")
#print("CNS1: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[-1.0023e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-5.5598e+00, -9.9375e-01, 1.0000e+00, -1.9779e-03, 0.0000e+00],
[-1.6180e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.3172e+00, -9.8082e-01, 1.0000e+00, -5.6137e-04, 0.0000e+00],
[-1.2942e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.3916e+00, -9.5634e-01, 0.0000e+00, 0.0000e+00, 1.6667e-01]])""")
#print("EDGES1: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])""")
#print("EDGE VALS1: " + str(observation.edge_features.values))
self.assertEqual(str(observation.edge_features.values),
"""tensor([[-10.0000, -0.0998],
[-15.0000, -0.1497],
[-20.0000, -0.1236],
[-16.0000, -0.0989],
[-17.0000, -0.1314],
[-11.0000, -0.0850]])""")
observation = make_obs(bgo.BipartiteGraphObservations(), self.model2)
#print("VARS2: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, -8.4211e-01,
5.7143e+00, -6.4414e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.7143e+00,
7.1429e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, -1.0000e+00,
2.8571e+00, -7.6491e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.8571e+00,
8.5714e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00]])""")
#print("CNS2: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[ 2.0000e+02, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
5.5470e+00, -9.9375e-01, 1.0000e+00, -4.9448e-04, 0.0000e+00],
[ 3.2000e+02, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
6.2470e+00, -9.8082e-01, 1.0000e+00, -1.4034e-04, 0.0000e+00],
[ 2.6000e+02, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
6.4202e+00, -9.5634e-01, 0.0000e+00, -0.0000e+00, 1.6667e-01]])""")
#print("EDGES2: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])""")
#print("EDGE VALS2: " + str(observation.edge_features.values), flush=True)
self.assertEqual(str(observation.edge_features.values),
"""tensor([[20.0000, 0.1000],
[30.0000, 0.1500],
[40.0000, 0.1250],
[32.0000, 0.1000],
[34.0000, 0.1308],
[22.0000, 0.0846]])""")
observation = make_obs(bgo.BipartiteGraphObservations(), self.model3)
#print("VARS3: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 0.0000e+00,
-2.7931e+00, 0.0000e+00, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, -2.7931e+00,
2.0690e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 9.3670e-01,
5.8621e+00, 6.8363e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.8621e+00,
8.6207e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 1.0000e+00,
2.7586e+00, 7.2983e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.7586e+00,
7.5862e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00]])""")
#print("CNS3: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[-2.0000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-5.5470e+00, -9.8646e-01, 1.0000e+00, -4.6740e-04, 0.0000e+00],
[-3.2000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.2470e+00, -9.8975e-01, 0.0000e+00, 0.0000e+00, 1.6667e-01],
[-2.6000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.4202e+00, -9.7044e-01, 1.0000e+00, -2.5171e-04, 0.0000e+00],
[-1.2000e+01, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-3.3282e+00, -5.6881e-01, 1.0000e+00, 0.0000e+00, 1.6667e-01],
[ 1.2000e+01, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
3.3282e+00, 5.6881e-01, 1.0000e+00, -0.0000e+00, 1.6667e-01]])""")
#print("EDGES3: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
[1, 2, 1, 2, 1, 2, 0, 1, 0, 1]])""")
#print("EDGE VALS3: " + str(observation.edge_features.values))
self.assertEqual(str(observation.edge_features.values),
"""tensor([[-20.0000, -0.1000],
[-30.0000, -0.1500],
[-40.0000, -0.1250],
[-32.0000, -0.1000],
[-34.0000, -0.1308],
[-22.0000, -0.0846],
[ -2.0000, -0.1667],
[ -3.0000, -0.2500],
[ 2.0000, 0.1667],
[ 3.0000, 0.2500]])""")
def testPrimalSearchFeatures(self):
observation = make_obs(bgo.BipartiteGraphObservations(), self.model3, branching=False)
#print("VARS: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 0.0000e+00,
-2.7931e+00, 0.0000e+00, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, -2.7931e+00,
2.0690e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 9.3670e-01,
5.8621e+00, 6.8363e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.8621e+00,
8.6207e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 1.0000e+00,
2.7586e+00, 7.2983e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.7586e+00,
7.5862e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00]])""")
#print("CNS: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[-2.0000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-5.5470e+00, -9.8646e-01, 1.0000e+00, -4.6740e-04, 0.0000e+00],
[-3.2000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.2470e+00, -9.8975e-01, 0.0000e+00, 0.0000e+00, 1.6667e-01],
[-2.6000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.4202e+00, -9.7044e-01, 1.0000e+00, -2.5171e-04, 0.0000e+00],
[-1.2000e+01, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-3.3282e+00, -5.6881e-01, 1.0000e+00, 0.0000e+00, 1.6667e-01],
[ 1.2000e+01, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
3.3282e+00, 5.6881e-01, 1.0000e+00, -0.0000e+00, 1.6667e-01]])""")
#print("EDGES: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
[1, 2, 1, 2, 1, 2, 0, 1, 0, 1]])""")
#print("EDGE VALS: " + str(observation.edge_features.values))
self.assertEqual(str(observation.edge_features.values),
"""tensor([[-20.0000, -0.1000],
[-30.0000, -0.1500],
[-40.0000, -0.1250],
[-32.0000, -0.1000],
[-34.0000, -0.1308],
[-22.0000, -0.0846],
[ -2.0000, -0.1667],
[ -3.0000, -0.2500],
[ 2.0000, 0.1667],
[ 3.0000, 0.2500]])""")
def testKhalilFeaturization(self):
observation, khalil = make_obs((bgo.BipartiteGraphObservations(), ecole.observation.Khalil2016()), self.model1)
branching_vars = np.array([0, 1])
observation.add_khalil_features(khalil, branching_vars)
print("VARS: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 8.4211e-01,
5.8809e+00, 6.4414e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.8809e+00,
8.8086e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00, 1.6000e+01, 1.6000e+01,
0.0000e+00, 3.0000e+00, 2.0000e+00, 0.0000e+00, 2.0000e+00,
2.0000e+00, 3.0000e+00, 1.5667e+01, 4.1899e+00, 1.0000e+01,
2.0000e+01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 1.1914e-01, 1.1914e-01, 1.1914e-01, 8.8086e-01,
1.3526e-01, 1.0000e+00, 1.0495e-01, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 2.0000e+00, 0.0000e+00, 2.0000e+00,
2.0000e+00, 5.0000e-01, 5.0000e-01, 5.0000e-01, 1.0000e+00,
-1.0000e+00, -1.1610e-01, -9.0719e-02, 4.0000e-01, 6.0714e-01,
1.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 1.0000e+00,
0.0000e+00, 2.0000e+00, 3.0000e+01, 1.5000e+01, 3.5355e+00,
1.0000e+01, 2.0000e+01, 6.7778e-02, 9.5556e-01, 4.7778e-01,
5.4997e-02, 4.0000e-01, 5.5556e-01, 6.7778e-02, 9.5556e-01,
4.7778e-01, 5.4997e-02, 4.0000e-01, 5.5556e-01, 1.2429e+00,
1.6000e+01, 8.0000e+00, 6.0609e-01, 7.1429e+00, 8.8571e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 1.0000e+00,
2.7614e+00, 7.6491e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.7614e+00,
7.6143e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00, 1.9000e+01, 1.9000e+01,
0.0000e+00, 3.0000e+00, 2.0000e+00, 0.0000e+00, 2.0000e+00,
2.0000e+00, 3.0000e+00, 1.4000e+01, 2.1602e+00, 1.1000e+01,
1.6000e+01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 2.3857e-01, 2.3857e-01, 2.3857e-01, 7.6143e-01,
3.1332e-01, 1.0000e+00, 1.8166e-01, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 2.0000e+00, 0.0000e+00, 2.0000e+00,
2.0000e+00, 5.0000e-01, 5.0000e-01, 5.0000e-01, 1.0000e+00,
-1.0000e+00, -1.3017e-01, -7.8336e-02, 3.9286e-01, 6.0000e-01,
1.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 1.0000e+00,
0.0000e+00, 2.0000e+00, 3.1000e+01, 1.5500e+01, 3.5355e-01,
1.5000e+01, 1.6000e+01, 6.7778e-02, 1.0444e+00, 5.2222e-01,
5.4997e-02, 4.4444e-01, 6.0000e-01, 6.7778e-02, 1.0444e+00,
5.2222e-01, 5.4997e-02, 4.4444e-01, 6.0000e-01, 1.2429e+00,
1.9000e+01, 9.5000e+00, 2.6769e+00, 5.7143e+00, 1.3286e+01]])""")
print("CNS: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[-1.0023e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-5.5598e+00, -9.9375e-01, 1.0000e+00, -1.9779e-03, 0.0000e+00],
[-1.6180e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.3172e+00, -9.8082e-01, 1.0000e+00, -5.6137e-04, 0.0000e+00],
[-1.2942e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.3916e+00, -9.5634e-01, 0.0000e+00, 0.0000e+00, 1.6667e-01]])""")
print("EDGES: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])""")
print("EDGE VALS: " + str(observation.edge_features.values))
self.assertEqual(str(observation.edge_features.values),
"""tensor([[-10.0000, -0.0998],
[-15.0000, -0.1497],
[-20.0000, -0.1236],
[-16.0000, -0.0989],
[-17.0000, -0.1314],
[-11.0000, -0.0850]])""")
if __name__ == "__main__":
unittest.main()
| CL-LNS-main | graph_datasets/featurization_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| CL-LNS-main | graph_datasets/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import graph_datasets.bipartite_graph_dataset as bgd
import torch_geometric
#import dgl
import random
import torch
class BipartiteGraphLoader:
def __init__(self, db, shuffle=True, first_k=None):
self.shuffle = shuffle
dbs = db.split('+')
if len(dbs) == 1:
self.data = bgd.BipartiteGraphDataset(db, query_opt=not shuffle, read_only=True, first_k=first_k)
else:
self.data = bgd.BipartiteGraphDatasets(dbs, query_opt=not shuffle, first_k=first_k)
def num_examples(self):
return self.data.sample_cnt
def load(self, batch_size=32, format="pt_geom"):
#from IPython import embed;embed()
if format == "pt_geom":
#print("here")
def my_collate(batch):
#embed()
#print(len(batch))
#batch = list(filter(lambda x: torch.sum(x.candidate_scores) > 0.5 * x.info["neighborhood_size"], batch))
#return None
#from IPython import embed; embed()
                batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
loader = torch_geometric.loader.DataLoader(self.data, batch_size, shuffle=self.shuffle)#, collate_fn=my_collate)
for ptg in loader:
#from IPython import embed;embed()
yield ptg
return
        elif format == 'dgl':
            import dgl  # imported lazily so that dgl is only required when this format is requested
            k = self.data.len()
permutation = random.sample(range(k), k)
graphs = []
for loc in permutation:
ptg = self.data.get(loc)
ntx = ptg.to_networkx()
#print("here")
#from IPython import embed;embed()
dgl_graph = dgl.bipartite_from_networkx(ntx, utype='variables', etype='edges', vtype='constraints',
u_attrs=['variable_features'], e_attrs=['edge_attr'], v_attrs=['constraint_features'])
# Annotate the variables with other information
num_variables = dgl_graph.nodes("variables").size(0)
                fsb_scores = torch.full((num_variables,), -1.0e10) #, dtype=torch.float)
candidate_scores = ntx.graph["candidate_scores"]
branching_candidates = ntx.graph["candidates"]
num_candidates = branching_candidates.size(0)
for i in range(num_candidates):
candidate_id = branching_candidates[i]
candidate_score = candidate_scores[i]
assert candidate_score >= 0
fsb_scores[candidate_id] = candidate_score
dgl_graph.nodes['variables'].data['fsb_scores'] = fsb_scores
graphs.append(dgl_graph)
if len(graphs) == batch_size:
yield dgl.batch(graphs)
graphs = []
return
assert format == 'ntx'
k = self.data.len()
permutation = random.sample(range(k), k)
batch = []
for loc in permutation:
ptg = self.data.get(loc)
ntx = ptg.to_networkx()
batch.append(ntx)
if len(batch) == batch_size:
yield batch
batch = []
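# Usage sketch (the database paths and batch size are illustrative): stream minibatches
# of torch_geometric graphs from one or more sample databases joined with '+'.
def _example_iterate(db_path="/tmp/train_a.db+/tmp/train_b.db"):
    loader = BipartiteGraphLoader(db_path, shuffle=True)
    for minibatch in loader.load(batch_size=32, format="pt_geom"):
        # each minibatch is a torch_geometric Batch collating BipartiteGraph samples
        return minibatch  # inspect only the first minibatch in this sketch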
| CL-LNS-main | graph_datasets/bipartite_graph_loader.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import torch
import numpy as np
import networkx as nx
class BipartiteGraph(torch_geometric.data.Data):
"""
    This class encodes a node bipartite graph observation as returned by the `ecole.observation.NodeBipartite`
observation function in a format understood by the pytorch geometric data handlers.
"""
def __init__(self, constraint_features, edge_indices, edge_features, variable_features,
candidates, candidate_choice, candidate_scores, info,
iteration = None, instance_id = None, incumbent_history = None, LB_relaxation_history = None, improvement_history = None, neighborhood_size = None):
super().__init__()
self.constraint_features = constraint_features
self.edge_index = edge_indices
self.edge_attr = edge_features
self.variable_features = variable_features
#print("Variable features shape", variable_features.shape)
self.candidates = candidates
self.nb_candidates = len(candidates) if candidates is not None else 0
self.candidate_choices = candidate_choice
self.candidate_scores = candidate_scores
self.info = info
# We must tell pytorch geometric how many nodes there are, for indexing purposes
self.num_nodes = constraint_features.shape[0] if constraint_features is not None else 0
self.num_nodes += variable_features.shape[0] if variable_features is not None else 0
self.iteration = iteration
self.instance_id = instance_id
self.incumbent_history = incumbent_history
self.LB_relaxation_history = LB_relaxation_history
self.improvement_history = improvement_history
self.neighborhood_size = neighborhood_size
def __inc__(self, key, value, *args, **kwargs):
"""
We overload the pytorch geometric method that tells how to increment indices when concatenating graphs
for those entries (edge index, candidates) for which this is not obvious.
"""
if key == 'edge_index':
return torch.tensor([[self.constraint_features.shape[0]], [self.variable_features.shape[0]]])
elif key == 'candidates':
return self.variable_features.shape[0]
else:
            return super().__inc__(key, value, *args, **kwargs)
def to_networkx(self):
G = nx.DiGraph(candidates=self.candidates, candidate_scores=self.candidate_scores,
nb_candidates=self.nb_candidates, candidate_choice=self.candidate_choices,
info=self.info)
G.add_nodes_from(range(self.num_nodes))
num_vars = self.variable_features.shape[0]
#print(num_vars)
for i, (v, u) in enumerate(self.edge_index.T.tolist()):
G.add_edge(u, v+num_vars)
#print(u, v)
assert 0 <= u and u < num_vars, str(u)
assert v >= 0, str(v)
G[u][v+num_vars]["edge_attr"] = self.edge_attr[i]
for i, feat_dict in G.nodes(data=True):
if i < num_vars:
feat_dict.update({"variable_features": self.variable_features[i].squeeze()})
feat_dict.update({"bipartite": 0})
else:
feat_dict.update({"constraint_features": self.constraint_features[i-num_vars].squeeze()})
feat_dict.update({"bipartite": 1})
for u, v in G.edges():
#print(u, v, G.nodes[u]['bipartite'], G.nodes[v]['bipartite'], num_vars)
assert(G.nodes[u]['bipartite'] == 0)
assert(G.nodes[v]['bipartite'] == 1)
return G
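# Batching sketch with toy tensors (not part of the library): __inc__ above makes pytorch
# geometric offset the constraint row and the variable row of edge_index by the number of
# constraints and variables respectively, and offset `candidates` by the number of
# variables, when several graphs are collated into one batch.
def _example_batching():
    def toy(n_cons, n_vars):
        return BipartiteGraph(
            constraint_features=torch.zeros(n_cons, 1),
            edge_indices=torch.tensor([[0], [0]]),
            edge_features=torch.zeros(1, 1),
            variable_features=torch.zeros(n_vars, 1),
            candidates=torch.tensor([0]),
            candidate_choice=torch.tensor([0]),
            candidate_scores=torch.tensor([1.0]),
            info={},
        )
    loader = torch_geometric.loader.DataLoader([toy(2, 3), toy(2, 3)], batch_size=2)
    batch = next(iter(loader))
    # In `batch`, the second graph's edge_index rows are shifted by 2 (constraints) and
    # 3 (variables), and its candidate index by 3, so per-graph indices remain valid.
    return batch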
| CL-LNS-main | graph_datasets/bipartite_graph.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ecole.typing
import competition.common.rewards as competition_rewards
# Returns the relative improvement in dual bound since the last step
class Dual(ecole.typing.RewardFunction):
def __init__(self):
self.parameters = competition_rewards.IntegralParameters()
super().__init__(wall=True, bound_function=lambda model: (
self.parameters.offset,
self.parameters.initial_primal_bound))
def set_parameters(self, objective_offset=None, initial_primal_bound=None, initial_dual_bound=None):
self.parameters = competition_rewards.IntegralParameters(
offset=objective_offset,
initial_primal_bound=initial_primal_bound,
initial_dual_bound=initial_dual_bound)
def before_reset(self, model):
self.parameters.fetch_values(model)
super().before_reset(model)
self.last_dual_bound = self.parameters.initial_dual_bound
def extract(self, model, done):
if done:
return 0
m = model.as_pyscipopt()
dual_bound = m.getDualbound()
reward = abs(dual_bound - self.last_dual_bound) / abs(self.last_dual_bound)
self.last_dual_bound = dual_bound
return reward
# Returns the relative improvement in the primal/dual gap since the last step
class PrimalDualGrap(ecole.typing.RewardFunction):
def __init__(self):
self.parameters = competition_rewards.IntegralParameters()
super().__init__(wall=True, bound_function=lambda model: (
self.parameters.offset,
self.parameters.initial_primal_bound))
def set_parameters(self, objective_offset=None, initial_primal_bound=None, initial_dual_bound=None):
self.parameters = competition_rewards.IntegralParameters(
offset=objective_offset,
initial_primal_bound=initial_primal_bound,
initial_dual_bound=initial_dual_bound)
def before_reset(self, model):
self.parameters.fetch_values(model)
super().before_reset(model)
self.last_gap = abs(self.parameters.initial_dual_bound - self.parameters.initial_primal_bound) / min(abs(self.parameters.initial_dual_bound), abs(self.parameters.initial_primal_bound))
def extract(self, model, done):
if done:
return 0
m = model.as_pyscipopt()
gap = m.getGap()
reward = (self.last_gap - gap) / self.last_gap
self.last_gap = gap
return reward
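# Worked example (numbers are illustrative): if the previous dual bound was 100.0 and the
# new dual bound is 105.0, Dual.extract returns |105 - 100| / |100| = 0.05. Likewise, if
# the previous primal/dual gap was 0.50 and the new gap is 0.40, PrimalDualGrap.extract
# returns (0.50 - 0.40) / 0.50 = 0.2, i.e. the fraction of the remaining gap closed in
# the last step.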
| CL-LNS-main | graph_datasets/step_rewards.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import ilp_solver
import random
import string
from graph_datasets.solved_milp_dataset import SolvedMilpDataset
class SolvedMilpDatasetTest(unittest.TestCase):
def setUp(self):
# Create a small ILP problem
solver = ilp_solver.ILPSolver(engine="scip")
x1 = solver.create_integer_var("x1")
x2 = solver.create_integer_var("x2")
solver.add_constraint(10 * x1 + 15 * x2 >= 100.23)
solver.add_constraint(20 * x1 + 16 * x2 >= 161.8)
solver.add_constraint(17 * x1 + 11 * x2 >= 129.42)
solver.set_objective_function(80 * x1 + 95 * x2, maximize=False)
self.model = solver.as_scip_model()
self.model.optimize()
self.solution = self.model.getBestSol()
self.obj_value = self.model.getObjVal()
self.gap = self.model.getGap()
letters = string.ascii_letters
self.db_name = '/tmp/' + ''.join(random.choice(letters) for i in range(10))
self.db_name2 = '/tmp/' + ''.join(random.choice(letters) for i in range(10))
def test_read_write(self):
dataset = SolvedMilpDataset(self.db_name)
dataset.add(self.model, self.solution, self.obj_value, self.gap)
a, b = dataset.get_one(self.model)
sol = {}
for v in self.model.getVars():
val = self.solution[v]
sol[v.getIndex()] = val
self.assertEqual(a, sol)
self.assertEqual(b, self.obj_value)
def test_missing_entry(self):
dataset = SolvedMilpDataset(self.db_name)
try:
a, b = dataset.get_one(self.model)
found = True
except:
found = False
self.assertFalse(found)
def test_overwrite(self):
dataset = SolvedMilpDataset(self.db_name)
dataset.add(self.model, self.solution, 10, 23)
dataset.add(self.model, self.solution, 1.0, 21)
a, b = dataset.get_one(self.model)
sol = {}
for v in self.model.getVars():
val = self.solution[v]
sol[v.getIndex()] = val
self.assertEqual(a, sol)
self.assertEqual(b, 1.0)
dataset.add(self.model, self.solution, 2.0, 22)
a, b = dataset.get_one(self.model)
self.assertEqual(b, 1.0)
def test_multiple_entries(self):
dataset = SolvedMilpDataset(self.db_name2, best_solution_only=False)
dataset.add(self.model, self.solution, 50.0, 23)
dataset.add(self.model, self.solution, 10.0, 21)
dataset.add(self.model, self.solution, 2.0, 22)
expected_obj_value = 50.0
for a, b in dataset.get_all(self.model):
sol = {}
for v in self.model.getVars():
val = self.solution[v]
sol[v.getIndex()] = val
self.assertEqual(a, sol)
self.assertEqual(b, expected_obj_value)
expected_obj_value /= 5
def test_aggregate(self):
dataset1 = SolvedMilpDataset(self.db_name)
dataset1.add(self.model, self.solution, 10, 25)
_, b = dataset1.get_one(self.model)
self.assertEqual(b, 10)
dataset2 = SolvedMilpDataset(self.db_name)
dataset2.add(self.model, self.solution, 1, 11)
dataset1.merge(dataset2)
_, b = dataset1.get_one(self.model)
self.assertEqual(b, 1)
dataset3 = SolvedMilpDataset(self.db_name)
dataset3.add(self.model, self.solution, 5, 17)
dataset1.merge(dataset3)
_, b = dataset1.get_one(self.model)
self.assertEqual(b, 1) | CL-LNS-main | graph_datasets/solved_milp_dataset_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import ecole
import torch
import torch_geometric
import numpy as np
import string
import random
import os
import sys
import graph_datasets.bipartite_graph as bg
import graph_datasets.bipartite_graph_dataset as bgd
import ilp_solver
def advance_to_root_node(model):
"""Utility to advance a model to the root node."""
dyn = ecole.dynamics.BranchingDynamics()
model = dyn.reset_dynamics(model)
return model
def make_obs(obs_func, model):
"""Utility function to extract observation on root node."""
obs_func.before_reset(model)
advance_to_root_node(model)
return obs_func.extract(model, False)
class BipartiteGraphDatasetTest(unittest.TestCase):
def setUp(self):
# Create a small ILP problem
solver = ilp_solver.ILPSolver(engine="scip")
x1 = solver.create_integer_var("x1")
x2 = solver.create_integer_var("x2")
solver.add_constraint(10 * x1 + 15 * x2 >= 100)
solver.add_constraint(20 * x1 + 16 * x2 >= 160)
solver.add_constraint(17 * x1 + 11 * x2 >= 130)
# Minimize the objective
solver.set_objective_function(80 * x1 + 95 * x2, maximize=False)
scip_model = solver.as_scip_model()
self.model = ecole.scip.Model.from_pyscipopt(scip_model)
self.model.disable_presolve()
self.model.disable_cuts()
letters = string.ascii_letters
self.db = []
for i in range(6):
self.db.append('/tmp/' + ''.join(random.choice(letters) for i in range(10)))
def tearDown(self):
for db in self.db:
try:
os.remove(db)
except:
pass
def testBipartiteGraphQueries(self):
db = bgd.BipartiteGraphDataset(self.db[0], query_opt=False)
g0 = bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0])
db.add(g0)
g1 = bg.BipartiteGraph(np.array([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0])
db.add(g1)
g2 = bg.BipartiteGraph(np.array([2]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0])
db.add(g2)
t0 = db.get(0)
t1 = db.get(1)
t2 = db.get(2)
self.assertEqual(t0.constraint_features, g0.constraint_features)
self.assertEqual(t1.constraint_features, g1.constraint_features)
self.assertEqual(t2.constraint_features, g2.constraint_features)
def testBipartiteGraphIterationNoOpt(self):
db = bgd.BipartiteGraphDataset(self.db[1], query_opt=False)
db.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
db.add(bg.BipartiteGraph(np.array([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(2, db.len())
for i in range(5):
_ = db.get(i % 2)
def testBipartiteGraphIterationOpt(self):
db = bgd.BipartiteGraphDataset(self.db[2], query_opt=True)
db.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
db.add(bg.BipartiteGraph(np.array([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(2, db.len())
for i in range(5):
_ = db.get(i % 2)
def _testDuplicateEntries(self):
db = bgd.BipartiteGraphDataset(self.db[3], query_opt=True)
rslt1 = db.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
rslt2 = db.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(True, rslt1)
self.assertEqual(False, rslt2)
self.assertEqual(1, db.len())
def _testMerge(self):
db1 = bgd.BipartiteGraphDataset(self.db[4], query_opt=True)
rslt1 = db1.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(True, rslt1)
db2 = bgd.BipartiteGraphDataset(self.db[5], query_opt=True)
rslt2 = db2.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(True, rslt2)
rslt2 = db2.add(bg.BipartiteGraph(np.array([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(True, rslt2)
db1.merge(db2)
self.assertEqual(2, db1.len())
class BipartiteGraphDatasetTests(unittest.TestCase):
def setUp(self) -> None:
letters = string.ascii_letters
self.db = []
for i in range(6):
self.db.append('/tmp/' + ''.join(random.choice(letters) for i in range(10)))
def tearDown(self):
for db in self.db:
try:
os.remove(db)
except:
pass
def _testBipartiteGraphExtraction(self):
db1 = bgd.BipartiteGraphDataset(self.db[0])
db1.add(bg.BipartiteGraph(torch.tensor([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
db1.add(bg.BipartiteGraph(torch.tensor([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(db1.len(), 2)
db2 = bgd.BipartiteGraphDataset(self.db[1])
db2.add(bg.BipartiteGraph(torch.tensor([2]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
db2.add(bg.BipartiteGraph(torch.tensor([3]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(db2.len(), 2)
db = bgd.BipartiteGraphDatasets([self.db[0], self.db[1]])
self.assertEqual(db.get(0).constraint_features, torch.tensor([0]))
self.assertEqual(db.get(1).constraint_features, torch.tensor([1]))
self.assertEqual(db.get(2).constraint_features, torch.tensor([2]))
self.assertEqual(db.get(3).constraint_features, torch.tensor([3]))
for i in range(4):
t = db.get(i)
self.assertEqual(t.constraint_features, torch.tensor([i]))
if __name__ == "__main__":
unittest.main()
| CL-LNS-main | graph_datasets/bipartite_graph_dataset_test.py |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
LineByLineTextDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
eval_data_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate=False):
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
else:
return TextDataset(
tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
tokenizer.add_special_tokens({'additional_special_tokens': ['<|belief|>', '<|endofbelief|>', '<|action|>', '<|endofaction|>', \
'<|response|>', '<|endofresponse|>', '<|context|>', '<|endofcontext|>', '<|user|>', '<|system|>', \
'<|task|>', '<|endoftask|>', '<|chitchat|>', '<|endofchitchat|>']})
if model_args.model_name_or_path:
model = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelWithLMHead.from_config(config)
model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
train_dataset = get_dataset(data_args, tokenizer=tokenizer) if training_args.do_train else None
eval_dataset = get_dataset(data_args, tokenizer=tokenizer, evaluate=True) if training_args.do_eval else None
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
prediction_loss_only=True,
)
# Training
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
result = {"perplexity": perplexity}
output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
results.update(result)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
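# Example invocation (file paths and hyper-parameters are illustrative, not prescriptive):
#
#   python run_language_modeling.py \
#     --output_dir=output/gpt2_finetuned \
#     --model_type=gpt2 \
#     --model_name_or_path=gpt2 \
#     --do_train --train_data_file=lm.input.train.txt \
#     --do_eval --eval_data_file=lm.input.eval.txt \
#     --line_by_line \
#     --per_device_train_batch_size=2 \
#     --num_train_epochs=10 \
#     --overwrite_output_dir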
| accentor-main | run_language_modeling.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import random
import argparse
import os
def clean(x):
return x.replace("\n", "").replace("\r", "").replace("\t", " ").strip()
parser = argparse.ArgumentParser()
parser.add_argument("--data", default="./accentor-sgd/", type=str, required=False, help="path to SGD")
args = parser.parse_args()
random.seed(42)
pairs = {}
for s in ["train", "dev", "test"]:
pairs[s] = []
fns = os.listdir(args.data + s)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue") or not fn.endswith(".json"):
continue
with open(args.data + s + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
for i in range(len(data)):
t = ''
for j in range(len(data[i]["turns"])):
for ps in ["beginning", "end"]:
if ps in data[i]["turns"][j]:
for k in range(len(data[i]["turns"][j][ps])):
if data[i]["turns"][j][ps][k]["label"] == "good":
pair = [t, clean(data[i]["turns"][j][ps][k]["candidate"])]
pairs[s] += [pair]
if t != '':
t += ' '
if j % 2 == 0:
t += 'user: '
else:
t += 'system: '
t += clean(data[i]["turns"][j]["utterance"])
for s in pairs:
print(s, len(pairs[s]))
random.shuffle(pairs["train"])
for s in ["train", "dev", "test"]:
with open("parlai_"+(s if s != "dev" else "valid")+".txt", "w", encoding='utf8') as f:
for i in range(len(pairs[s])):
f.write("text:" + pairs[s][i][0] + "\t" + "labels:" + pairs[s][i][1] + "\tepisode_done:True\n")
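# Each output line uses ParlAI's tab-separated single-episode format, e.g. (content made
# up; "\t" stands for a literal tab character):
#   text:user: I need a hotel in Seattle. system: Sure, for which dates?\tlabels:Seattle is a great choice!\tepisode_done:True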
| accentor-main | gen_parlai_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from utils import bleuscorer
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--inference", default="dev.inference.gpt2_10epoch_1e-3_fp16.json", type=str, required=False, help='inference file')
parser.add_argument("--datafolder", default="./simpletod/", type=str, required=False, help='data folder')
parser.add_argument("--predictionfolder", default="./prediction/", type=str, required=False, help='prediction folder')
parser.add_argument("--split", default="dev", type=str, required=False, help="[dev,test]")
args = parser.parse_args()
inference = args.inference
datafolder = args.datafolder
predictionfolder = args.predictionfolder
folder = args.split + "/"
if inference.endswith(".txt"):
with open(inference, "r") as f:
predict = f.read().strip().split("\n")
predict = [a.strip() for a in predict]
else:
with open(inference, "r") as f:
predict = json.load(f)
idx = 0
cnt = 0
seen_services = set()
with open(datafolder + "train/" + "schema.json", "r") as f:
schema = json.load(f)
for i in range(len(schema)):
seen_services.add(schema[i]["service_name"])
domain_slots = set()
with open(datafolder + folder + "schema.json", "r") as f:
schema = json.load(f)
for i in range(len(schema)):
for j in range(len(schema[i]["slots"])):
            assert " " not in schema[i]["slots"][j]["name"]  # slot names are later split on whitespace
domain_slots.add(schema[i]["service_name"].split("_")[0].lower() + " " + schema[i]["slots"][j]["name"].lower())
fns = os.listdir(datafolder + folder)
fns.sort()
act_precision = []
act_recall = []
seen_act_precision = []
seen_act_recall = []
unseen_act_precision = []
unseen_act_recall = []
bleu = []
bleua = []
bleub = []
seenbleu = []
seenbleua = []
seenbleub = []
unseenbleu = []
unseenbleua = []
unseenbleub = []
for fn in fns:
if not fn.startswith("dialogue"):
continue
if fn.startswith("dialogues_and_metrics.json"):
continue
with open(datafolder + folder + fn, "r") as f:
data = json.load(f)
for i in range(len(data)):
for j in range(1, len(data[i]["turns"]), 2):
cnt += 1
if idx >= len(predict):
continue
belief = predict[idx].split("<|belief|>")
if len(belief) >= 2 and "<|endofbelief|>" in belief[1]:
belief = belief[1].split("<|endofbelief|>")[0].strip()
else:
belief = ""
action = predict[idx].split("<|action|>")
if len(action) >= 2 and "<|endofaction|>" in action[1]:
action = action[1].split("<|endofaction|>")[0].strip()
else:
action = ""
response = predict[idx].split("<|response|>")
if len(response) >= 2:
response = response[1].split("<|")[0].strip()
else:
response = ""
data[i]["turns"][j]["response"] = response
seen = True
for k in range(len(data[i]["turns"][j-1]["frames"])):
if data[i]["turns"][j-1]["frames"][k]["service"] not in seen_services:
seen = False
parsedbelief = belief.split(", ")
for k in range(len(parsedbelief)):
parsed = False
for ds in domain_slots:
if parsedbelief[k].startswith(ds):
parsedbelief[k] = [ds, parsedbelief[k][len(ds):].strip()]
parsed = True
break
if not parsed:
parsedbelief[k] = [parsedbelief[k]]
k = 1
while k < len(parsedbelief):
if len(parsedbelief[k]) == 1:
parsedbelief[k-1] += parsedbelief[k]
del parsedbelief[k]
else:
k += 1
if len(parsedbelief) >= 1:
if parsedbelief[0][0] not in domain_slots:
del parsedbelief[0]
parsedbelief = {x[0]:x[1:] for x in parsedbelief}
parsedaction = action.split(", ")
for k in range(len(parsedaction)):
parsedaction[k] = parsedaction[k].strip().split()
k = 0
while k < len(parsedaction):
if len(parsedaction[k]) <= 1 or len(parsedaction[k]) > 3:
del parsedaction[k]
else:
k += 1
act_gt = set()
for k in range(len(data[i]["turns"][j]["frames"][0]["actions"])):
act_gt.add((data[i]["turns"][j]["frames"][0]["actions"][k]["act"].lower() + " " + data[i]["turns"][j]["frames"][0]["actions"][k]["slot"]).strip())
act_p = set()
for k in range(len(parsedaction)):
act_p.add(' '.join(parsedaction[k][1:]))
act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
if seen:
seen_act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
seen_act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
else:
unseen_act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
unseen_act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
bleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
bleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
bleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
if seen:
seenbleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
seenbleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
seenbleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
else:
unseenbleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
unseenbleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
unseenbleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
for k in range(len(data[i]["turns"][j-1]["frames"])):
data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"] = {}
for ds in parsedbelief:
if ds.split()[0].lower() == data[i]["turns"][j-1]["frames"][k]["service"].split("_")[0].lower():
data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"][ds.split()[1]] = parsedbelief[ds]
idx += 1
if not os.path.exists(predictionfolder + folder):
os.makedirs(predictionfolder + folder)
with open(predictionfolder + folder + fn, "w") as f:
json.dump(data, f, indent=1)
act_precision = sum(act_precision) / len(act_precision)
act_recall = sum(act_recall) / len(act_recall)
print("act", act_precision, act_recall, 2*act_precision*act_recall/(act_precision+act_recall))
print('bleu:', sum(bleu)/len(bleu)) #BLEU-4_{orig}
print('bleua:', sum(bleua)/len(bleua)) #BLEU-4_{aug}
#print('bleub:', sum(bleub)/len(bleub))
seen_act_precision = sum(seen_act_precision) / len(seen_act_precision)
seen_act_recall = sum(seen_act_recall) / len(seen_act_recall)
print("act (seen):", seen_act_precision, seen_act_recall, 2*seen_act_precision*seen_act_recall/(seen_act_precision+seen_act_recall))
unseen_act_precision = sum(unseen_act_precision) / len(unseen_act_precision)
unseen_act_recall = sum(unseen_act_recall) / len(unseen_act_recall)
print("act (unseen):", unseen_act_precision, unseen_act_recall, 2*unseen_act_precision*unseen_act_recall/(unseen_act_precision+unseen_act_recall))
print('bleu (seen):', sum(seenbleu)/len(seenbleu))
print('bleua (seen):', sum(seenbleua)/len(seenbleua))
#print('bleub (seen):', sum(seenbleub)/len(seenbleub))
print('bleu (unseen):', sum(unseenbleu)/len(unseenbleu))
print('bleua (unseen):', sum(unseenbleua)/len(unseenbleua))
#print('bleub (unseen):', sum(unseenbleub)/len(unseenbleub))
if __name__ == '__main__':
main()
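# Expected shape of one predicted entry in the inference file (content is made up and
# wrapped across lines for readability; the special tokens match the SimpleTOD-style
# data preparation script in this repository):
#   <|belief|> restaurants city san jose, restaurants cuisine italian <|endofbelief|>
#   <|action|> restaurants offer restaurant_name, restaurants offer city <|endofaction|>
#   <|response|> I found [restaurants_restaurant_name] in [restaurants_city]. <|endofresponse|>
# The code above extracts the belief span, the action triples, and the delexicalized
# response from each predicted string before computing act precision/recall and BLEU.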
| accentor-main | gen_predict.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import copy
import random
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--all", default=False, type=bool, required=False, help="use all dialogues rather than only augmented dialogues")
parser.add_argument("--delexlevel", default=2, type=int, required=False, help="0: no delex; 1: delex values in \"slots\"; 2: delex values in both \"slots\" and \"actions\"")
parser.add_argument("--data", default="./accentor-sgd/", type=str, required=False, help="path to SGD")
parser.add_argument("--target", default="./simpletod/", type=str, required=False, help="path to output")
args = parser.parse_args()
datafolder = args.data
targetfolder = args.target
for folder in ["train", "dev", "test"]:
if not os.path.exists(targetfolder + folder):
os.makedirs(targetfolder + folder)
inlm = []
inlme = []
inlma = []
inlmb = []
incc = []
inlmf = []
fns = os.listdir(datafolder + folder)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue"):
with open(datafolder + folder + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
with open(targetfolder + folder + "/" + fn, "w", encoding='utf8') as f:
json.dump(data, f, indent=1)
continue
with open(datafolder + folder + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
i = 0
while i < len(data):
dbs = []
slots = {}
canmap = {}
vmap = {}
for j in range(len(data[i]["turns"])):
if data[i]["turns"][j]["speaker"] != "SYSTEM":
continue
if "service_results" in data[i]["turns"][j]["frames"][0]:
dbs += data[i]["turns"][j]["frames"][0]["service_results"]
if len(data[i]["turns"][j]["frames"][0]["slots"]) != 0:
slots = {}
for k in range(len(data[i]["turns"][j]["frames"][0]["actions"])):
assert(len(data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"]) == len(data[i]["turns"][j]["frames"][0]["actions"][k]["values"]))
for l in range(len(data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"])):
canmap[data[i]["turns"][j]["frames"][0]["actions"][k]["values"][l]] = data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"][l]
vmap[data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"][l]] = data[i]["turns"][j]["frames"][0]["actions"][k]["values"][l]
for k in range(len(data[i]["turns"][j]["frames"][0]["slots"])):
s = data[i]["turns"][j]["frames"][0]["slots"][k]["slot"]
slots[s] = data[i]["turns"][j]["utterance"][data[i]["turns"][j]["frames"][0]["slots"][k]["start"]:data[i]["turns"][j]["frames"][0]["slots"][k]["exclusive_end"]]
db = {}
for k in range(len(dbs)):
matched = True
for s in slots:
if s not in dbs[k]:
matched = False
break
if dbs[k][s] != canmap[slots[s]]:
matched = False
break
if matched:
db = copy.deepcopy(dbs[k])
for s in db:
if db[s] in vmap:
db[s] = vmap[db[s]]
break
data[i]["turns"][j]["frames"][0]["selecteddbslots"] = slots
data[i]["turns"][j]["frames"][0]["selecteddb"] = db
for j in range(1, len(data[i]["turns"]), 2):
domain = data[i]["turns"][j]["frames"][0]["service"].split("_")[0].lower()
assert(data[i]["turns"][j]["speaker"] == "SYSTEM")
assert(len(data[i]["turns"][j]["frames"]) == 1)
slots = copy.deepcopy(data[i]["turns"][j]["frames"][0]["slots"])
slots.sort(key = lambda x : -x["start"])
delex = data[i]["turns"][j]["utterance"]
delexed = set()
if args.delexlevel >= 1:
for k in range(1, len(slots)):
assert(slots[k-1]["start"] >= slots[k]["exclusive_end"])
for k in range(len(slots)):
domain_slot = domain + "_" + slots[k]["slot"]
delex = delex[:slots[k]["start"]] + "[" + domain_slot + "]" + delex[slots[k]["exclusive_end"]:]
delexed.add(domain_slot)
if args.delexlevel >= 2:
slots2 = copy.deepcopy(data[i]["turns"][j]["frames"][0]["actions"])
slots2 = [x for x in slots2 if len(x["values"]) > 0]
slots2.sort(key = lambda x : -len(x["values"][0]))
for k in range(len(slots2)):
domain_slot = domain + "_" + slots2[k]["slot"]
if domain_slot in delexed:
continue
for l in range(len(slots2[k]["values"])):
delex = delex.replace(slots2[k]["values"][l], "[" + domain_slot + "]")
delexed.add(domain_slot)
data[i]["turns"][j]["delex"] = delex
target = ''
belief = []
for k in range(len(data[i]["turns"][j-1]["frames"])):
for slot in data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"]:
belief += [[data[i]["turns"][j-1]["frames"][k]["service"].split("_")[0].lower(), slot, data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"][slot]]]
belief.sort(key = lambda x : x[0] + " " + x[1])
for k in range(len(belief)):
belief[k][2].sort()
belief[k][2] = belief[k][2][0]
belief = [x[0] + " " + x[1] + " " + x[2] for x in belief]
target += '<|belief|> ' + ", ".join(belief) + ' <|endofbelief|> '
action = copy.deepcopy(data[i]["turns"][j]["frames"][0]["actions"])
action.sort(key = lambda x : x["act"])
action = [domain + " " + x["act"].lower() + " " + x["slot"] for x in action]
targetaug = []
delexaug = []
tcpos = []
tcneg = []
for k in range(len(data[i]["turns"][j]["beginning"])):
if "social" in data[i]["turns"][j]["beginning"][k]["justification"] or "useful" in data[i]["turns"][j]["beginning"][k]["justification"]:
delexaug += [data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' ' + delex]
targetaug += [target + '<|action|> ' + "chitchat, " + ", ".join(action) + ' <|endofaction|> ' + '<|response|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' ' + delex + ' <|endofresponse|>']
tcpos += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' <|endofchitchat|> ']
else:
tcneg += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' <|endofchitchat|> ']
for k in range(len(data[i]["turns"][j]["end"])):
if "social" in data[i]["turns"][j]["end"][k]["justification"] or "useful" in data[i]["turns"][j]["end"][k]["justification"]:
delexaug += [delex + ' ' + data[i]["turns"][j]["end"][k]["candidate"].strip()]
targetaug += [target + '<|action|> ' + ", ".join(action) + ", chitchat" + ' <|endofaction|> ' + '<|response|> ' + delex + ' ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofresponse|>']
tcpos += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofchitchat|> ']
else:
tcneg += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofchitchat|> ']
target += '<|action|> ' + ", ".join(action) + ' <|endofaction|> '
target += '<|response|> ' + delex + ' <|endofresponse|>'
data[i]["turns"][j]["target"] = target
data[i]["turns"][j]["targetaug"] = targetaug
data[i]["turns"][j]["delexaug"] = delexaug
context = '<|context|> '
for k in range(j):
if k % 2 == 0:
context += '<|user|> '
else:
context += '<|system|> '
context += data[i]["turns"][k]["utterance"] + " "
context += '<|endofcontext|>'
data[i]["turns"][j]["context"] = context
inlm += [(context + target).replace("\n", " ").replace("\r", "")]
assert("\n" not in inlm[-1])
inlme += [(context).replace("\n", " ").replace("\r", "")]
if len(targetaug) != 0:
for k in range(len(targetaug)):
inlma += [(context + targetaug[k]).replace("\n", " ").replace("\r", "")]
inlmb += [(context + targetaug[k]).replace("\n", " ").replace("\r", "")]
inlmf += [(context + tcpos[k] + targetaug[k]).replace("\n", " ").replace("\r", "")]
for l in range(len(tcneg)):
inlmf += [(context + tcneg[l] + targetaug[k]).replace("\n", " ").replace("\r", "")]
else:
inlmb += [(context + target).replace("\n", " ").replace("\r", "")]
for k in range(len(tcneg)):
inlmf += [(context + tcneg[k] + target).replace("\n", " ").replace("\r", "")]
incc += [context.replace('<|context|>', '').replace('<|endofcontext|>', '').replace('<|user|>', 'user:').replace('<|system|>', 'system:').replace('\t', ' ').strip(), '[DONE]']
i += 1
with open(targetfolder + folder + "/" + fn, "w") as f:
json.dump(data, f, indent=1)
random.shuffle(inlm)
with open("lm.input."+folder+".txt", "w", encoding='utf8') as f: #SimpleTOD
f.write('\n'.join(inlm))
with open("lm.input."+folder+".eval.txt", "w", encoding='utf8') as f: #used as the input during evaluation of SimpleTOD and SimpleTOD extension
f.write('\n'.join(inlme))
with open("lm.input."+folder+".aug.txt", "w", encoding='utf8') as f: #SimpleTOD extension (augmented responses only)
f.write('\n'.join(inlma))
with open("lm.input."+folder+".both.txt", "w", encoding='utf8') as f: #SimpleTOD extension (all responses)
f.write('\n'.join(inlmb))
with open("lm.input."+folder+".cc.txt", "w", encoding='utf8') as f: #cc: chitchat
f.write('\n'.join(incc+['[EXIT]']))
with open("lm.input."+folder+".ff.txt", "w", encoding='utf8') as f: #ff: free-form
f.write('\n'.join(inlmf))
if __name__ == '__main__':
random.seed(42)
main()
| accentor-main | gen_delex.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
import argparse
import numpy as np
import json
from tqdm import tqdm
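# Batch greedy decoding with a fine-tuned GPT-2 model: each line of --input is used as a
# prompt, tokens are generated one step at a time up to a total length of 1024, and the
# decoded strings are dumped to --output as a JSON list. --jobid/--jobnum are intended to
# split the prompt file across several independent jobs.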
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
parser = argparse.ArgumentParser()
parser.add_argument("--no_cuda", action="store_true", help="avoid using CUDA when available")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--model_name_or_path", type=str, default="output", help="path to pre-trained model or shortcut name")
parser.add_argument("--input", type=str, help="input text file, each line corresponding to one instance")
parser.add_argument("--output", type=str, help="output file")
parser.add_argument("--eos_token_id", type=int, default=None, help="eos token id")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--jobid", type=int, default=0, help="jobid")
parser.add_argument("--jobnum", type=int, default=1, help="jobnum")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
set_seed(args)
model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path, pad_token='<PAD>')
model.to(args.device)
with open(args.input, "r") as f:
prompts = f.read().strip().split("\n")
batch_size = args.batch_size
ret = []
for batch in tqdm(range(args.jobid, len(prompts), batch_size * args.jobnum)):
prompt_text = prompts[batch: batch+batch_size]
encodings_dict = tokenizer.batch_encode_plus(prompt_text, max_length=None, pad_to_max_length=True)
input_ids = torch.tensor(encodings_dict['input_ids'])
attn_mask = torch.tensor(encodings_dict['attention_mask'])
seq_len = len(input_ids[0])
num_tokens_to_produce = 1024 - seq_len
pad_token_id = tokenizer.pad_token_id
eos_token_id = args.eos_token_id
if eos_token_id is None:
eos_token_id = tokenizer.eos_token_id
eos_not_in_sents = torch.ones(input_ids.shape[0]).long()
last_non_masked_idx = torch.sum(attn_mask, dim=1) - 1
start_idx = inp_idx = (last_non_masked_idx).view(-1, 1).repeat(1, tokenizer.vocab_size + len(tokenizer.additional_special_tokens)).unsqueeze(1)
past = None
position_ids = torch.tensor([list(range(seq_len)) for i in range(input_ids.shape[0])])
for i, position_ids_slice in enumerate(position_ids):
position_ids_slice[last_non_masked_idx[i]:] = position_ids_slice[last_non_masked_idx[i]]
input_ids = input_ids.to(args.device)
attn_mask = attn_mask.to(args.device)
eos_not_in_sents = eos_not_in_sents.to(args.device)
start_idx = start_idx.to(args.device)
position_ids = position_ids.to(args.device)
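    # Greedy decoding over a right-padded batch: at step 0 the next-token logits are read at each
    # sequence's last real (non-padded) position via start_idx; afterwards the final position is
    # used. eos_not_in_sents tracks unfinished sequences so that finished ones keep receiving pad
    # tokens, and decoding stops early once every sequence has emitted EOS.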
for step in range(num_tokens_to_produce):
outputs = model(input_ids, attention_mask=attn_mask, position_ids=position_ids)
if step == 0:
next_token_logits = outputs[0].gather(1, start_idx).squeeze(1)
else:
next_token_logits = outputs[0][:, -1, :]
next_tokens = torch.argmax(next_token_logits, dim=-1)
eos_not_in_sents.mul_(next_tokens.ne(eos_token_id).long())
tokens_to_add = next_tokens * (eos_not_in_sents) + pad_token_id * (1 - eos_not_in_sents)
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
attn_mask = torch.cat([attn_mask, torch.ones((attn_mask.shape[0], 1)).long().to(args.device)], dim=1)
position_ids = torch.cat([position_ids, (position_ids[:, -1] + 1).unsqueeze(-1)], dim=1)
if torch.max(eos_not_in_sents) == 0:
break
ret += [tokenizer.decode(output, skip_special_tokens=False, clean_up_tokenization_spaces=True).replace("<|endoftext|>", "") for output in input_ids]
with open(args.output, "w") as f:
json.dump(ret, f, indent=1)
| accentor-main | run_generation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import random
import argparse
import os
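# Builds training data for the "arranger" classifier from the delexicalized & augmented SGD
# produced by gen_delex.py. Each example is a tuple of (dialogue history, delexicalized system
# response, chit-chat candidate, label), where the label is 0 for a rejected candidate, 1 for a
# good candidate placed at the beginning of the response, and 2 for one placed at the end.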
parser = argparse.ArgumentParser()
parser.add_argument("--data", default="./simpletod/", type=str, required=False, help="path to delexed & augmented SGD")
args = parser.parse_args()
def clean(x):
return x.replace("\n", "").replace("\r", "").replace("\t", " ").strip()
random.seed(42)
pairs = {}
pos = {}
tot = {}
for s in ["train", "dev", "test"]:
pairs[s] = []
pos[s] = 0
tot[s] = 0
fns = os.listdir(args.data + s)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue") or not fn.endswith(".json"):
continue
with open(args.data + s + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
for i in range(len(data)):
t = ''
for j in range(len(data[i]["turns"])):
for ps in ["beginning", "end"]:
if ps in data[i]["turns"][j]:
for k in range(len(data[i]["turns"][j][ps])):
tot[s] += 1
if data[i]["turns"][j][ps][k]["label"] == "good":
pair = [t, data[i]["turns"][j]["delex"], clean(data[i]["turns"][j][ps][k]["candidate"]), 1 if ps == "beginning" else 2]
pairs[s] += [pair]
pos[s] += 1
else:
pair = [t, data[i]["turns"][j]["delex"], clean(data[i]["turns"][j][ps][k]["candidate"]), 0]
pairs[s] += [pair]
if t != '':
t += ' '
if j % 2 == 0:
t += 'user: '
else:
t += 'system: '
t += clean(data[i]["turns"][j]["utterance"])
for s in pos:
print(s, pos[s], tot[s], pos[s]/tot[s])
for s in pairs:
print(s, len(pairs[s]))
random.shuffle(pairs["train"])
with open("arranger_input.json", "w", encoding='utf8') as f:
json.dump(pairs, f, indent=1)
| accentor-main | gen_arranger_input.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import nltk
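# Corpus-averaged sentence-level BLEU: `hyps` is a list of hypothesis strings and `refs` a list of
# reference lists (one list of reference strings per hypothesis); both are whitespace-tokenized.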
def bleuscorer(hyps, refs):
#print(hyps, refs)
bleu = []
for hyp, ref in zip(hyps, refs):
hyp = hyp.split()
ref = [a.split() for a in ref]
#hyp = nltk.word_tokenize(hyp)
#ref = [nltk.word_tokenize(a) for a in ref]
bleu += [nltk.translate.bleu_score.sentence_bleu(ref, hyp)]
return sum(bleu) / len(bleu)
if __name__ == '__main__':
print(bleuscorer(['the the the the the the the', 'there is a cat', 'it is'], [["the cat is on the mat", "there is a cat on the mat"], ["there is a cat on the mat"], ["it is true"]]))
| accentor-main | utils.py |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForMultipleChoice, BertTokenizer,
RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer)
from transformers import AdamW, get_linear_schedule_with_warmup
import torch.nn as nn
from utils_multiple_choice import (convert_examples_to_features, processors)
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, RobertaConfig)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForMultipleChoice, BertTokenizer),
'roberta': (RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer),
}
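# Fine-tunes a BERT/RoBERTa multiple-choice head (adapted from the HuggingFace multiple-choice
# example) on the 3-way arranger task defined in utils_multiple_choice.py: given a dialogue
# context, choose between the plain task response and the two chit-chat placements.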
def select_field(features, field):
return [
[
choice[field]
for choice in feature.choices_features
]
for feature in features
]
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
best_dev_acc, best_dev_loss = 0.0, 99999999999.0
best_steps = 0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args) # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert'] else None,
'labels': batch[3]}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
if results["eval_acc"] > best_dev_acc:
best_dev_acc = results["eval_acc"]
best_dev_loss = results["eval_loss"]
best_steps = global_step
if args.do_test:
results_test = evaluate(args, model, tokenizer, test=True)
for key, value in results_test.items():
tb_writer.add_scalar('test_{}'.format(key), value, global_step)
logger.info("test acc: %s, loss: %s, global steps: %s", str(results_test['eval_acc']), str(results_test['eval_loss']), str(global_step))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
logger.info("Average loss: %s at global step: %s", str((tr_loss - logging_loss)/args.logging_steps), str(global_step))
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_vocabulary(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step, best_steps
def evaluate(args, model, tokenizer, prefix="", test=False):
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=not test, test=test)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert'] else None,
'labels': batch[3]}
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
output_logits_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_logits.txt")
with open(output_logits_file, "w") as writer:
logits_list = list(preds)
for i in range(len(logits_list)):
for j in range(len(logits_list[i])):
writer.write(str(logits_list[i][j]))
if j == len(logits_list[i]) - 1:
writer.write("\n")
else:
writer.write(" ")
preds = np.argmax(preds, axis=1)
acc = simple_accuracy(preds, out_label_ids)
result = {"eval_acc": acc, "eval_loss": eval_loss}
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(str(prefix) + " is test:" + str(test)))
writer.write("model =%s\n" % str(args.model_name_or_path))
writer.write("total batch size=%d\n" % (args.per_gpu_train_batch_size * args.gradient_accumulation_steps *
(torch.distributed.get_world_size() if args.local_rank != -1 else 1)))
writer.write("train num epochs=%d\n" % args.num_train_epochs)
writer.write("fp16 =%s\n" % args.fp16)
writer.write("max seq length =%d\n" % args.max_seq_length)
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False, test=False):
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
processor = processors[task]()
# Load data features from cache or dataset file
if evaluate:
cached_mode = 'dev'
elif test:
cached_mode = 'test'
else:
cached_mode = 'train'
assert (evaluate == True and test == True) == False
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
cached_mode,
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if evaluate:
examples = processor.get_dev_examples(args.data_dir)
elif test:
examples = processor.get_test_examples(args.data_dir)
else:
examples = processor.get_train_examples(args.data_dir)
logger.info("Training number: %s", str(len(examples)))
features = convert_examples_to_features(
examples,
label_list,
args.max_seq_length,
tokenizer,
pad_on_left=False,
pad_token_segment_id=0
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long)
all_label_ids = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true', help='Whether to run test on the test set')
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
best_steps = 0
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss, best_steps = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
if not args.do_train:
args.output_dir = args.model_name_or_path
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
if args.do_test and args.local_rank in [-1, 0]:
if not args.do_train:
args.output_dir = args.model_name_or_path
checkpoints = [args.output_dir]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix, test=True)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
if best_steps:
logger.info("best steps of eval acc is the following checkpoints: %s", best_steps)
return results
if __name__ == "__main__":
main()
| accentor-main | run_multiple_choice.py |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import os
import sys
from io import open
import json
import csv
import glob
import tqdm
from typing import List
from transformers import PreTrainedTokenizer
import random
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for multiple choice"""
def __init__(self, example_id, question, contexts, endings, label=None):
"""Constructs a InputExample.
Args:
example_id: Unique id for the example.
contexts: list of str. The untokenized text of the first sequence (context of corresponding question).
question: string. The untokenized text of the second sequence (question).
endings: list of str. multiple choice's options. Its length must be equal to contexts' length.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.example_id = example_id
self.question = question
self.contexts = contexts
self.endings = endings
self.label = label
class InputFeatures(object):
def __init__(self,
example_id,
choices_features,
label
):
self.example_id = example_id
self.choices_features = [
{
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
for input_ids, input_mask, segment_ids in choices_features
]
self.label = label
class DataProcessor(object):
"""Base class for data converters for multiple choice data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
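# ACCProcessor builds the 3-way multiple-choice examples used by run_multiple_choice.py: the three
# "endings" are (1) the task response alone, (2) the chit-chat candidate prepended to the response,
# and (3) the candidate appended to the response. Train/dev examples come from arranger_input.json;
# the "test" split concatenates the evaluation contexts (lm.input.*.cc.txt), the chit-chat model
# outputs (lm.output.*.cc.txt) and the task-oriented GPT-2 inference files, with a placeholder
# label of 0.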
class ACCProcessor(DataProcessor):
def __init__(self):
self.D = [[], [], []]
datasetfile = "arranger_input.json"
with open(datasetfile, "r") as f:
data = json.load(f)
for sid in range(2):
dt = ["train", "dev"][sid]
for i in range(len(data[dt])):
d = [data[dt][i][0].lower(), data[dt][i][1].lower(), data[dt][i][2].lower(), data[dt][i][3]]
self.D[sid] += [d]
sid = 2
for fns in [["lm.input.dev.cc.txt", "lm.output.dev.cc.txt", "dev.inference.gpt2_10epoch_1e-3_fp16.json"],
["lm.input.test.cc.txt", "lm.output.test.cc.txt", "test.inference.gpt2_10epoch_1e-3_fp16.json"]]:
with open(fns[0], "r") as f:
data = f.read().split("\n")[0:-1:2]
data_d = data
with open(fns[1], "r") as f:
data = f.read()
data = data.split("[TransformerGenerator]:")[1:]
for i in range(len(data)):
data[i] = data[i].split("\n")[0].strip()
data_cc = data
with open(fns[2], "r") as f:
data = json.load(f)
for i in range(len(data)):
data[i] = data[i].split("<|response|>")
if len(data[i]) == 1:
data[i] += ['']
elif len(data[i]) > 2:
data[i] = ["<|response|>".join(data[i][:-2]), data[i][-1]]
self.D[2] += [[data_d[i].strip(), data[i][1], data_cc[i].strip(), 0]]
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return ["0", "1", "2"]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
acc_id = "%s-%d" % (set_type, i)
examples.append(
InputExample(
example_id=acc_id,
question="",
contexts=[data[i][0], data[i][0], data[i][0]],
endings=[data[i][1], data[i][2] + " " + data[i][1], data[i][1] + " " + data[i][2]],
label=str(data[i][3])))
return examples
def convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
pad_token_segment_id=0,
pad_on_left=False,
pad_token=0,
mask_padding_with_zero=True,
) -> List[InputFeatures]:
"""
Loads a data file into a list of `InputFeatures`
"""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
choices_features = []
for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):
text_a = context
if example.question.find("_") != -1:
text_b = example.question.replace("_", ending)
else:
text_b = example.question + " " + ending
inputs = tokenizer.encode_plus(
text_a,
text_b,
add_special_tokens=True,
max_length=max_length,
)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length
assert len(attention_mask) == max_length
assert len(token_type_ids) == max_length
choices_features.append((input_ids, attention_mask, token_type_ids))
label = label_map[example.label]
if ex_index < 2:
logger.info("*** Example ***")
logger.info("race_id: {}".format(example.example_id))
for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features):
logger.info("choice: {}".format(choice_idx))
logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
logger.info("attention_mask: {}".format(' '.join(map(str, attention_mask))))
logger.info("token_type_ids: {}".format(' '.join(map(str, token_type_ids))))
logger.info("label: {}".format(label))
features.append(
InputFeatures(
example_id=example.example_id,
choices_features=choices_features,
label=label,
)
)
return features
processors = {
"acc": ACCProcessor,
}
MULTIPLE_CHOICE_TASKS_NUM_LABELS = {
"acc", 3
}
| accentor-main | utils_multiple_choice.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
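# Builds inputs for the free-form "rewriter" model: for each evaluation context it combines the
# task-oriented GPT-2 output (response and belief state) with the corresponding chit-chat model
# output into a single sequence delimited by <|task|>, <|chitchat|> and <|belief|> special tokens.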
for fns in [["./lm.input.dev.eval.txt", "./lm.output.dev.cc.txt", "./dev.inference.gpt2_10epoch_1e-3_fp16.json", "lm.input.dev.eval.ff.txt"],
["./lm.input.test.eval.txt", "./lm.output.test.cc.txt", "./test.inference.gpt2_10epoch_1e-3_fp16.json", "lm.input.test.eval.ff.txt"]]:
with open(fns[0], "r", encoding='utf8') as f:
context = f.read().strip().split("\n")
with open(fns[1], "r", encoding='utf8') as f:
cc = f.read().strip()
cc = cc.split("[TransformerGenerator]:")[1:]
for i in range(len(cc)):
cc[i] = cc[i].split("\n")[0].strip()
with open(fns[2], "r", encoding='utf8') as f:
task = json.load(f)
print(len(context), len(cc), len(task))
assert(len(context) == len(cc))
assert(len(cc) == len(task))
with open(fns[3], "w", encoding='utf8') as f:
for i in range(len(cc)):
t = task[i].split("<|response|>")
if len(t) >= 2:
t = t[-1].strip()
else:
t = ""
b = task[i].split("<|belief|>")
if len(b) >= 2:
b = b[1].split("<|endofbelief|>")
if len(b) == 2:
b = b[0]
else:
b = ""
else:
b = ""
f.write(context[i] + " <|task|> " + t + " <|endoftask|> <|chitchat|> " + cc[i] + ' <|endofchitchat|> <|belief|>' + b + "<|endofbelief|>\n")
| accentor-main | gen_rewriter_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
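# Applies the trained arranger to the model outputs: the 3-way logits produced by the RoBERTa
# arranger (is_test_true_eval_logits.txt) decide, for each example, whether to keep the task
# response as is, prepend the chit-chat candidate, or append it; the merged responses are written
# to *.inference.arranger_3epoch.json.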
with open("./acc_arranger_roberta_base_3epoch/is_test_true_eval_logits.txt", "r") as f:
model_outputs = f.read().strip().split("\n")
for i in range(len(model_outputs)):
model_outputs[i] = model_outputs[i].split()
for j in range(len(model_outputs[i])):
model_outputs[i][j] = float(model_outputs[i][j])
assert(len(model_outputs[i]) == 3)
print(len(model_outputs))
for fns in [["./lm.input.dev.cc.txt", "./lm.output.dev.cc.txt", "./dev.inference.gpt2_10epoch_1e-3_fp16.json", "./dev.inference.arranger_3epoch.json"],
["./lm.input.test.cc.txt", "./lm.output.test.cc.txt", "./test.inference.gpt2_10epoch_1e-3_fp16.json", "./test.inference.arranger_3epoch.json"]]:
with open(fns[0], "r") as f:
data = f.read().split("\n")[0:-1:2]
print(len(data))
data_d = data
with open(fns[1], "r") as f:
data = f.read()
data = data.split("[TransformerGenerator]:")[1:]
for i in range(len(data)):
data[i] = data[i].split("\n")[0].strip()
print(len(data))
data_cc = data
with open(fns[2], "r") as f:
data = json.load(f)
print(len(data))
eval_data = []
for i in range(len(data)):
data[i] = data[i].split("<|response|>")
if len(data[i]) == 1:
data[i] += ['']
elif len(data[i]) > 2:
data[i] = ["<|response|>".join(data[i][:-2]), data[i][-1]]
eval_data += [[data_d[i].strip(), data[i][1], data_cc[i].strip(), 0]]
print(len(eval_data))
stats = {0:0, 1:0, 2:0}
for i in range(len(data)):
assert(len(model_outputs[i]) == 3)
o = 0
for j in range(1, 3):
if model_outputs[i][j] > model_outputs[i][o]:
o = j
stats[o] += 1
if o == 0:
data[i] = "<|response|>".join(data[i])
elif o == 1:
data[i] = data[i][0] + "<|response|> " + data_cc[i].strip() + " " + data[i][1].strip()
else:
data[i] = data[i][0] + "<|response|> " + data[i][1].strip() + " " + data_cc[i].strip()
print(len(data), len(model_outputs))
print(stats)
model_outputs = model_outputs[len(data):]
with open(fns[3], "w", encoding='utf8') as f:
json.dump(data, f, indent=1)
| accentor-main | gen_arranger_output.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import argparse
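# Merges the annotated chit-chat candidates in candidates-multiwoz.json into the MultiWOZ 2.1
# dialogues they were collected for, attaching each candidate (with its label and justification)
# to the "beginning" or "end" of the corresponding system turn, and writes accentor-multiwoz-1k.json.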
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--source", default="./MultiWOZ_2.1/data.json", type=str, required=False, help="Path to the MultiWOZ dataset.")
args = parser.parse_args()
with open("candidates-multiwoz.json", "r", encoding='utf8') as f:
augmentation = json.load(f)
with open(args.source, "r", encoding='utf8') as f:
data = json.load(f)
data = {x:data[x] for x in data if x in augmentation}
for x in data:
for i in range(1, len(data[x]["log"]), 2):
data[x]["log"][i]["beginning"] = []
data[x]["log"][i]["end"] = []
for cc in augmentation[x]:
data[x]["log"][cc[0]][cc[1]] += [{"candidate": cc[2], "label": cc[3], "justification": cc[4]}]
with open("accentor-multiwoz-1k.json", "w", encoding='utf8') as f:
json.dump(data, f, indent=1, ensure_ascii=False)
| accentor-main | v1.0/accentor-multiwoz.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import argparse
import os
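# Merges the annotated chit-chat candidates in candidates-sgd.json into the SGD dataset, attaching
# each candidate (with its label and justification) to the "beginning" or "end" of the
# corresponding system turn, and writes the resulting ACCENTOR-SGD splits to --target.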
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--source", default="./dstc8-schema-guided-dialogue", type=str, required=False, help="Path to the SGD dataset.")
parser.add_argument("--target", default="./accentor-sgd", type=str, required=False, help="The target directory to store ACCENTOR-SGD.")
args = parser.parse_args()
with open("candidates-sgd.json", "r", encoding='utf8') as f:
augmentation = json.load(f)
for subdir in ["train", "dev", "test"]:
targetdir = os.path.join(args.target, subdir)
sourcedir = os.path.join(args.source, subdir)
os.makedirs(targetdir, exist_ok=True)
fns = os.listdir(sourcedir)
for fn in fns:
if not fn.endswith(".json"):
continue
with open(os.path.join(sourcedir, fn), "r", encoding='utf8') as f:
data = json.load(f)
if fn.startswith("dialogue"):
for i in range(len(data)):
for j in range(1, len(data[i]["turns"]), 2):
data[i]["turns"][j]["beginning"] = []
data[i]["turns"][j]["end"] = []
for cc in augmentation[subdir + data[i]["dialogue_id"]]:
data[i]["turns"][cc[0]][cc[1]] += [{"candidate": cc[2], "label": cc[3], "justification": cc[4]}]
with open(os.path.join(targetdir, fn), "w", encoding='utf8') as f:
json.dump(data, f, indent=1, ensure_ascii=False)
| accentor-main | v1.0/accentor-sgd.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from Slimmable - https://github.com/JiahuiYu/slimmable_networks
import torch
class CrossEntropyLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification """
def forward(self, output, target):
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
cross_entropy_loss = -torch.bmm(target, output_log_prob)
return cross_entropy_loss.mean()
class KLLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification
    output: logits of the student network
    soft_logits: logits of the teacher network
    target: (optional) ground-truth labels, used for an additional cross-entropy term
    temperature: softmax temperature
    KL(p||q) = E_p[log p] - E_p[log q]
"""
def forward(self, output, soft_logits, target=None, temperature=1., alpha=0.9):
output, soft_logits = output / temperature, soft_logits / temperature
soft_target_prob = torch.nn.functional.softmax(soft_logits, dim=1)
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
kd_loss = -torch.sum(soft_target_prob * output_log_prob, dim=1)
if target is not None:
n_class = output.size(1)
target = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
ce_loss = -torch.bmm(target, output_log_prob).squeeze()
loss = alpha*temperature* temperature*kd_loss + (1.0-alpha)*ce_loss
else:
loss = kd_loss
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
class CrossEntropyLossSmooth(torch.nn.modules.loss._Loss):
def __init__(self, label_smoothing=0.1):
super(CrossEntropyLossSmooth, self).__init__()
self.eps = label_smoothing
""" label smooth """
def forward(self, output, target):
n_class = output.size(1)
one_hot = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = one_hot * (1 - self.eps) + self.eps / n_class
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
loss = -torch.bmm(target, output_log_prob)
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
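# f_divergence computes an alpha-divergence between the student distribution q = softmax(q_logits)
# and the teacher distribution p = softmax(p_logits) using the importance ratio p/q (clipped to
# iw_clip). For general alpha the per-sample value is E_q[((p/q)^alpha - 1) / (alpha * (alpha - 1))];
# alpha -> 0 and alpha -> 1 are handled as special cases. Since p and q are detached, the returned
# grad_loss is the surrogate term whose gradient flows back through q_log_prob (the student logits).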
def f_divergence(q_logits, p_logits, alpha, iw_clip=1e3):
assert isinstance(alpha, float)
q_prob = torch.nn.functional.softmax(q_logits, dim=1).detach()
p_prob = torch.nn.functional.softmax(p_logits, dim=1).detach()
q_log_prob = torch.nn.functional.log_softmax(q_logits, dim=1) #gradient is only backpropagated here
importance_ratio = p_prob / q_prob
if abs(alpha) < 1e-3:
importance_ratio = importance_ratio.clamp(0, iw_clip)
f = -importance_ratio.log()
f_base = 0
rho_f = importance_ratio.log() - 1.0
elif abs(alpha - 1.0) < 1e-3:
f = importance_ratio * importance_ratio.log()
f_base = 0
rho_f = importance_ratio
else:
iw_alpha = torch.pow(importance_ratio, alpha)
iw_alpha = iw_alpha.clamp(0, iw_clip)
f = iw_alpha / alpha / (alpha - 1.0)
f_base = 1.0 / alpha / (alpha - 1.0)
rho_f = iw_alpha / alpha + f_base
loss = torch.sum(q_prob * (f - f_base), dim=1)
grad_loss = -torch.sum(q_prob * rho_f * q_log_prob, dim=1)
return loss, grad_loss
"""
It's often necessary to clip the maximum
gradient value (e.g., 1.0) when using this adaptive KD loss
"""
class AdaptiveLossSoft(torch.nn.modules.loss._Loss):
def __init__(self, alpha_min=-1.0, alpha_max=1.0, iw_clip=5.0):
super(AdaptiveLossSoft, self).__init__()
self.alpha_min = alpha_min
self.alpha_max = alpha_max
self.iw_clip = iw_clip
def forward(self, output, target, alpha_min=None, alpha_max=None):
alpha_min = alpha_min or self.alpha_min
alpha_max = alpha_max or self.alpha_max
loss_left, grad_loss_left = f_divergence(output, target, alpha_min, iw_clip=self.iw_clip)
loss_right, grad_loss_right = f_divergence(output, target, alpha_max, iw_clip=self.iw_clip)
ind = torch.gt(loss_left, loss_right).float()
loss = ind * grad_loss_left + (1.0 - ind) * grad_loss_right
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
| AlphaNet-main | loss_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import random
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
import models
from utils.config import setup
import utils.comm as comm
import utils.saver as saver
from data.data_loader import build_data_loader
from evaluate import attentive_nas_eval as attentive_nas_eval
import utils.logging as logging
import argparse
"""
using multiple nodes to run evolutionary search:
1) each GPU will evaluate its own sub-networks
2) all evaluation results will be aggregated on GPU 0
"""
parser = argparse.ArgumentParser(description='Test AlphaNet Models')
parser.add_argument('--config-file', default='./configs/parallel_supernet_evo_search.yml')
parser.add_argument('--machine-rank', default=0, type=int,
help='machine rank, distributed setting')
parser.add_argument('--num-machines', default=1, type=int,
help='number of nodes, distributed setting')
parser.add_argument('--dist-url', default="tcp://127.0.0.1:10001", type=str,
help='init method, distributed setting')
parser.add_argument('--seed', default=1, type=int,
help='default random seed')
run_args = parser.parse_args()
logger = logging.get_logger(__name__)
def eval_worker(gpu, ngpus_per_node, args):
args.gpu = gpu # local rank, local machine cuda id
args.local_rank = args.gpu
args.batch_size = args.batch_size_per_gpu
global_rank = args.gpu + args.machine_rank * ngpus_per_node
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=global_rank
)
# Setup logging format.
logging.setup_logging("stdout.log", 'w')
# synchronize is needed here to prevent a possible timeout after calling
# init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
args.rank = comm.get_rank() # global rank
torch.cuda.set_device(args.gpu)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# build the supernet
logger.info("=> creating model '{}'".format(args.arch))
model = models.model_factory.create_model(args)
model.cuda(args.gpu)
model = comm.get_parallel_model(model, args.gpu) #local rank
# define loss function (criterion)
criterion = nn.CrossEntropyLoss().cuda()
## load dataset, train_sampler: distributed
train_loader, val_loader, train_sampler = build_data_loader(args)
assert args.resume
#reloading model
model.module.load_weights_from_pretrained_models(args.resume)
if train_sampler:
train_sampler.set_epoch(0)
targeted_min_flops = args.evo_search.targeted_min_flops
targeted_max_flops = args.evo_search.targeted_max_flops
# run evolutionary search
parent_popu = []
for idx in range(args.evo_search.parent_popu_size):
if idx == 0:
cfg = model.module.sample_min_subnet()
else:
cfg = model.module.sample_active_subnet_within_range(
targeted_min_flops, targeted_max_flops
)
cfg['net_id'] = f'net_{idx % args.world_size}_evo_0_{idx}'
parent_popu.append(cfg)
pareto_global = {}
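    # pareto_global maps a FLOPs bucket (rounded to args.evo_search.step) to the best-accuracy
    # sub-network config found so far; it is refined over args.evo_search.evo_iter rounds of
    # parallel evaluation, mutation and crossover.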
for evo in range(args.evo_search.evo_iter):
# partition the set of candidate sub-networks
# and send them to each GPU for parallel evaluation
# sub-networks to be evaluated on GPU {args.rank}
my_subnets_to_be_evaluated = {}
n_evaluated = len(parent_popu) // args.world_size * args.world_size
for cfg in parent_popu[:n_evaluated]:
if cfg['net_id'].startswith(f'net_{args.rank}_'):
my_subnets_to_be_evaluated[cfg['net_id']] = cfg
# aggregating all evaluation results
eval_results = attentive_nas_eval.validate(
my_subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
)
# update the Pareto frontier
# in this case, we search the best FLOPs vs. accuracy trade-offs
for cfg in eval_results:
f = round(cfg['flops'] / args.evo_search.step) * args.evo_search.step
if f not in pareto_global or pareto_global[f]['acc1'] < cfg['acc1']:
pareto_global[f] = cfg
# next batch of sub-networks to be evaluated
parent_popu = []
# mutate
for idx in range(args.evo_search.mutate_size):
while True:
old_cfg = random.choice(list(pareto_global.values()))
cfg = model.module.mutate_and_reset(old_cfg, prob=args.evo_search.mutate_prob)
flops = model.module.compute_active_subnet_flops()
if flops >= targeted_min_flops and flops <= targeted_max_flops:
break
cfg['net_id'] = f'net_{idx % args.world_size}_evo_{evo}_mutate_{idx}'
parent_popu.append(cfg)
# cross over
for idx in range(args.evo_search.crossover_size):
while True:
cfg1 = random.choice(list(pareto_global.values()))
cfg2 = random.choice(list(pareto_global.values()))
cfg = model.module.crossover_and_reset(cfg1, cfg2)
flops = model.module.compute_active_subnet_flops()
if flops >= targeted_min_flops and flops <= targeted_max_flops:
break
cfg['net_id'] = f'net_{idx % args.world_size}_evo_{evo}_crossover_{idx}'
parent_popu.append(cfg)
if __name__ == '__main__':
    # set up environment
args = setup(run_args.config_file)
args.dist_url = run_args.dist_url
args.machine_rank = run_args.machine_rank
args.num_nodes = run_args.num_machines
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.num_nodes
assert args.world_size > 1, "only support DDP settings"
# Use torch.multiprocessing.spawn to launch distributed processes: the
# eval_worker process function
mp.spawn(eval_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
raise NotImplementedError
| AlphaNet-main | parallel_supernet_evo_search.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from AttentiveNAS (https://github.com/facebookresearch/AttentiveNAS)
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
import operator
from datetime import date
import torch
import torch.nn as nn
#from torch.utils.tensorboard import SummaryWriter
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from data.data_loader import build_data_loader
from utils.config import setup
import utils.saver as saver
from utils.progress import AverageMeter, ProgressMeter, accuracy
import utils.comm as comm
import utils.logging as logging
from evaluate import attentive_nas_eval as attentive_nas_eval
from solver import build_optimizer, build_lr_scheduler
import models
from copy import deepcopy
import numpy as np
import loss_ops as loss_ops
parser = argparse.ArgumentParser(description='AlphaNet Training')
parser.add_argument('--config-file', default=None, type=str,
help='training configuration')
parser.add_argument('--machine-rank', default=0, type=int,
help='machine rank, distributed setting')
parser.add_argument('--num-machines', default=1, type=int,
help='number of nodes, distributed setting')
parser.add_argument('--dist-url', default="tcp://127.0.0.1:10001", type=str,
help='init method, distributed setting')
logger = logging.get_logger(__name__)
def build_args_and_env(run_args):
assert run_args.config_file and os.path.isfile(run_args.config_file), 'cannot locate config file'
args = setup(run_args.config_file)
args.config_file = run_args.config_file
#load config
assert args.distributed and args.multiprocessing_distributed, 'only support DDP training'
args.distributed = True
args.machine_rank = run_args.machine_rank
args.num_nodes = run_args.num_machines
args.dist_url = run_args.dist_url
args.models_save_dir = os.path.join(args.models_save_dir, args.exp_name)
if not os.path.exists(args.models_save_dir):
os.makedirs(args.models_save_dir)
#backup config file
saver.copy_file(args.config_file, '{}/{}'.format(args.models_save_dir, os.path.basename(args.config_file)))
args.checkpoint_save_path = os.path.join(
args.models_save_dir, 'alphanet.pth.tar'
)
args.logging_save_path = os.path.join(
args.models_save_dir, f'stdout.log'
)
return args
def main():
run_args = parser.parse_args()
args = build_args_and_env(run_args)
random.seed(args.seed)
torch.manual_seed(args.seed)
#cudnn.deterministic = True
#warnings.warn('You have chosen to seed training. '
# 'This will turn on the CUDNN deterministic setting, '
# 'which can slow down your training considerably! '
# 'You may see unexpected behavior when restarting '
# 'from checkpoints.')
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.num_nodes
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
raise NotImplementedError
assert args.world_size > 1, 'only support ddp training'
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu # local rank, local machine cuda id
args.local_rank = args.gpu
args.batch_size = args.batch_size_per_gpu
args.batch_size_total = args.batch_size * args.world_size
#rescale base lr
args.lr_scheduler.base_lr = args.lr_scheduler.base_lr * (max(1, args.batch_size_total // 256))
# set random seed, make sure all random subgraph generated would be the same
random.seed(args.seed)
torch.manual_seed(args.seed)
if args.gpu:
torch.cuda.manual_seed(args.seed)
global_rank = args.gpu + args.machine_rank * ngpus_per_node
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=global_rank
)
# Setup logging format.
logging.setup_logging(args.logging_save_path, 'w')
logger.info(f"Use GPU: {args.gpu}, machine rank {args.machine_rank}, num_nodes {args.num_nodes}, \
gpu per node {ngpus_per_node}, world size {args.world_size}")
# synchronize is needed here to prevent a possible timeout after calling
# init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
args.rank = comm.get_rank() # global rank
args.local_rank = args.gpu
torch.cuda.set_device(args.gpu)
# build model
logger.info("=> creating model '{}'".format(args.arch))
model = models.model_factory.create_model(args)
model.cuda(args.gpu)
# use sync batchnorm
if getattr(args, 'sync_bn', False):
model.apply(
lambda m: setattr(m, 'need_sync', True))
model = comm.get_parallel_model(model, args.gpu) #local rank
logger.info(model)
criterion = loss_ops.CrossEntropyLossSmooth(args.label_smoothing).cuda(args.gpu)
soft_criterion = loss_ops.AdaptiveLossSoft(args.alpha_min, args.alpha_max, args.iw_clip).cuda(args.gpu)
if not getattr(args, 'inplace_distill', True):
soft_criterion = None
## load dataset, train_sampler: distributed
train_loader, val_loader, train_sampler = build_data_loader(args)
args.n_iters_per_epoch = len(train_loader)
logger.info( f'building optimizer and lr scheduler, \
local rank {args.gpu}, global rank {args.rank}, world_size {args.world_size}')
optimizer = build_optimizer(args, model)
lr_scheduler = build_lr_scheduler(args, optimizer)
# optionally resume from a checkpoint
if args.resume:
saver.load_checkpoints(args, model, optimizer, lr_scheduler, logger)
logger.info(args)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
args.curr_epoch = epoch
logger.info('Training lr {}'.format(lr_scheduler.get_lr()[0]))
# train for one epoch
acc1, acc5 = train_epoch(epoch, model, train_loader, optimizer, criterion, args, \
soft_criterion=soft_criterion, lr_scheduler=lr_scheduler)
if comm.is_master_process() or args.distributed:
# validate supernet model
validate(
train_loader, val_loader, model, criterion, args
)
if comm.is_master_process():
# save checkpoints
saver.save_checkpoint(
args.checkpoint_save_path,
model,
optimizer,
lr_scheduler,
args,
epoch,
)
def train_epoch(
epoch,
model,
train_loader,
optimizer,
criterion,
args,
soft_criterion=None,
lr_scheduler=None,
):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
model.train()
end = time.time()
num_updates = epoch * len(train_loader)
for batch_idx, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# total subnets to be sampled
num_subnet_training = max(2, getattr(args, 'num_arch_training', 2))
optimizer.zero_grad()
### compute gradients using sandwich rule ###
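        # (Descriptive note) A single optimizer step accumulates gradients
        # from `num_subnet_training` sub-networks: the largest sub-network is
        # trained on the (smoothed) hard labels, then randomly sampled
        # sub-networks plus the smallest one are trained against the largest
        # network's logits (`soft_logits`) when inplace distillation is
        # enabled.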
# step 1 sample the largest network, apply regularization to only the largest network
drop_connect_only_last_two_stages = getattr(args, 'drop_connect_only_last_two_stages', True)
model.module.sample_max_subnet()
model.module.set_dropout_rate(args.dropout, args.drop_connect, drop_connect_only_last_two_stages) #dropout for supernet
output = model(images)
loss = criterion(output, target)
loss.backward()
with torch.no_grad():
soft_logits = output.clone().detach()
#step 2. sample the smallest network and several random networks
sandwich_rule = getattr(args, 'sandwich_rule', True)
model.module.set_dropout_rate(0, 0, drop_connect_only_last_two_stages) #reset dropout rate
for arch_id in range(1, num_subnet_training):
if arch_id == num_subnet_training-1 and sandwich_rule:
model.module.sample_min_subnet()
else:
model.module.sample_active_subnet()
            # calculating loss
output = model(images)
if soft_criterion:
loss = soft_criterion(output, soft_logits)
else:
assert not args.inplace_distill
loss = criterion(output, target)
loss.backward()
        # clip gradients if specified
if getattr(args, 'grad_clip_value', None):
torch.nn.utils.clip_grad_value_(model.parameters(), args.grad_clip_value)
optimizer.step()
#accuracy measured on the local batch
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
corr1, corr5, loss = acc1*args.batch_size, acc5*args.batch_size, loss.item()*args.batch_size #just in case the batch size is different on different nodes
stats = torch.tensor([corr1, corr5, loss, args.batch_size], device=args.gpu)
dist.barrier() # synchronizes all processes
dist.all_reduce(stats, op=torch.distributed.ReduceOp.SUM)
corr1, corr5, loss, batch_size = stats.tolist()
acc1, acc5, loss = corr1/batch_size, corr5/batch_size, loss/batch_size
losses.update(loss, batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
else:
losses.update(loss.item(), images.size(0))
top1.update(acc1, images.size(0))
top5.update(acc5, images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
num_updates += 1
if lr_scheduler is not None:
lr_scheduler.step()
if batch_idx % args.print_freq == 0:
progress.display(batch_idx, logger)
return top1.avg, top5.avg
def validate(
train_loader,
val_loader,
model,
criterion,
args,
distributed = True,
):
subnets_to_be_evaluated = {
'attentive_nas_min_net': {},
'attentive_nas_max_net': {},
}
acc1_list, acc5_list = attentive_nas_eval.validate(
subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
bn_calibration = True,
)
if __name__ == '__main__':
main()
| AlphaNet-main | train_alphanet.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from AttentiveNAS (https://github.com/facebookresearch/AttentiveNAS)
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
from datetime import date
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import models
from utils.config import setup
from utils.flops_counter import count_net_flops_and_params
import utils.comm as comm
import utils.saver as saver
from data.data_loader import build_data_loader
from utils.progress import AverageMeter, ProgressMeter, accuracy
import argparse
parser = argparse.ArgumentParser(description='Test AlphaNet Models')
parser.add_argument('--config-file', default='./configs/eval_alphanet_models.yml')
parser.add_argument('--model', default='a0', type=str, choices=['a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a5_1', 'a6'])
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
run_args = parser.parse_args()
if __name__ == '__main__':
args = setup(run_args.config_file)
args.model = run_args.model
args.gpu = run_args.gpu
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
args.__dict__['active_subnet'] = args.__dict__['pareto_models'][args.model]
print(args.active_subnet)
train_loader, val_loader, train_sampler = build_data_loader(args)
## init static attentivenas model with weights inherited from the supernet
model = models.model_factory.create_model(args)
model.to(args.gpu)
model.eval()
# bn running stats calibration following Slimmable (https://arxiv.org/abs/1903.05134)
# please consider trying a different random seed if you see a small accuracy drop
with torch.no_grad():
model.reset_running_stats_for_calibration()
for batch_idx, (images, _) in enumerate(train_loader):
if batch_idx >= args.post_bn_calibration_batch_num:
break
images = images.cuda(args.gpu, non_blocking=True)
model(images) #forward only
model.eval()
with torch.no_grad():
criterion = nn.CrossEntropyLoss().cuda()
from evaluate.imagenet_eval import validate_one_subnet
acc1, acc5, loss, flops, params = validate_one_subnet(val_loader, model, criterion, args)
print(acc1, acc5, flops, params)
| AlphaNet-main | test_alphanet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
setuptools.setup(
name="ctrl-benchmark",
version="0.0.3",
author="Tom Veniat, Ludovic Denoyer & Marc'Aurelio Ranzato",
license="MIT License",
description="Continual Transfer Learning Benchmark",
packages=setuptools.find_packages(),
install_requires=[
'pyyaml',
'torch>=1.3,<2',
'torchvision<1',
'networkx>2,<3',
'plotly',
'pydot',
'tqdm',
'sklearn',
'bs4'
],
include_package_data=True,
) | CTrLBenchmark-master | setup.py |
from .streams import get_stream | CTrLBenchmark-master | ctrl/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
from collections import defaultdict
import torch
import torchvision
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from torch.utils.data import TensorDataset
logger = logging.getLogger(__name__)
class Task(object):
def __init__(self, name, samples, loss_fn, transformation=None,
split_names=None, creator=None, source_concepts=None,
attributes=None, dim_red='PCA', generator=None,
n_samples_per_class=None, save_path=None, id=None):
"""
:param samples: Iterable containing the data and labels for each split. The length corresponds to the number of
splits. Each split i should be composed of two Tensors:
            - a `(N_i x ...)` tensor containing the features of the N_i samples for this split
            - a `(N_i x n_labels)` tensor containing the labels for each attribute that we want to classify. The attributes in different splits are not forced to overlap, allowing ZSL tasks to be generated.
:param transformation:
:param creator:
"""
self._infos = {
'src_concepts': [] if source_concepts is None else source_concepts,
'transformation': transformation,
'attributes': attributes
}
self.name = name
self.save_path = None
self.loss_fn = loss_fn
self.id = id
self.split_names = split_names
self.datasets = [TensorDataset(s_samples, labels.long()) for
s_samples, labels in samples]
self.n_classes = [dataset.tensors[1].max(dim=0).values + 1 for dataset
in self.datasets]
self.x_dim = list(self.datasets[0].tensors[0].size()[1:])
assert all(list(split.tensors[0].size()[1:]) == self.x_dim for split in
self.datasets)
self.n_samples = [dataset.tensors[0].size(0) for dataset in
self.datasets]
self.n_samples_per_class = n_samples_per_class
assert all([torch.equal(self.n_classes[0], t) for t in self.n_classes])
self.n_classes = self.n_classes[0]
self._dim_reduction = PCA(n_components=3) \
if dim_red == 'PCA' else TSNE(n_components=3)
self.creator = creator
# self.generator = generator
self.statistics = self.compute_statistics()
if save_path:
self.save_path = self.save(save_path)
def compute_statistics(self):
train_split = self.datasets[0].tensors[0]
if train_split[0].dim() == 3:
# Images
# assert train_split.size(1) == 3
n_channels = train_split.size(1)
mean = [train_split[:, i, :, :].mean() for i in range(n_channels)]
std = [train_split[:, i, :, :].std() for i in range(n_channels)]
else:
# Vectors
mean = train_split.mean()
std = train_split.std()
        # Prevent division by 0 if we have a constant channel
        if isinstance(std, list):
            std = [1 if itm == 0 else itm for itm in std]
        elif std == 0:
            std = 1
return {'mean': mean, 'std': std}
@property
def concepts(self):
return [concept for cat_concepts in self.src_concepts for concept in
cat_concepts]
@property
def transformation(self):
return self._infos['transformation']
@property
def src_concepts(self):
"""
:return: A copy of the concepts list of this task
"""
return self._infos['src_concepts'].copy()
@property
def attributes(self):
return self._infos['attributes']
def get_data(self, split:str):
"""
:param split:
:type split:
:return:
:rtype:
"""
return self.datasets[split].tensors[0]
def get_labels(self, split, prop):
return self.datasets[split].tensors[1][:, prop]
def plot_task(self, viz, name):
legend = [str(c) for c in self.src_concepts]
selected_means = []
cat_ids = []
for cat_id, cat in enumerate(self.src_concepts):
for c in cat:
if hasattr(c, 'mean'):
selected_means.append(c.mean)
cat_ids.append(cat_id + 1)
if len(selected_means) > 2:
data = torch.stack(selected_means)
title = '{} selected concepts'.format(name)
if selected_means[0].numel() > 3:
title = '{} of {}'.format(
self._dim_reduction.__class__.__name__, title)
data = self._dim_reduction.fit_transform(data)
viz.scatter(data, Y=cat_ids,
opts={'title': title, 'markersize': 3,
'legend': legend})
plot_data = self.get_data(split=0)
title = '{} features'.format(name)
if plot_data[0].ndimension() == 3 and plot_data[0].size(0) in [1, 3]:
# We have an image
imgs_per_label = defaultdict(list)
for ds in self.datasets:
x, y = ds.tensors
y = y.squeeze()
for y_val in y.unique():
x_sample = random.choice(x[y == y_val])
imgs_per_label[y_val.item()].append(x_sample)
for y, images in imgs_per_label.items():
grid = torchvision.utils.make_grid(images)
viz.image(grid, opts={
'title': '{} ({})'.format(self.src_concepts[y], y),
'width': grid.size(2) * 3,
'height': grid.size(1) * 3.2})
else:
# Vectorial data
if plot_data[0].numel() > 3:
plot_data = self._dim_reduction.fit_transform(
plot_data.view(plot_data.size(0), -1))
title = '{} of {}'.format(
self._dim_reduction.__class__.__name__, title)
viz.scatter(plot_data, Y=self.get_labels(split=0, prop=0) + 1,
opts={'title': title, 'webgl': True, 'markersize': 3,
'legend': legend})
def save(self, path):
if not os.path.isdir(path):
os.makedirs(path)
task_datasets = []
save_paths = []
for split_data, split_name in zip(self.datasets,
['train', 'val', 'test']):
save_path = os.path.join(path,
'{}_{}.pth'.format(self.name, split_name))
save_paths.append(save_path)
torch.save(split_data.tensors, save_path)
task_datasets.append(save_path)
logger.info('Task saved to {} ...'.format(save_paths))
metadata_file = os.path.join(path, '{}.meta'.format(self.name))
torch.save(self._meta(), metadata_file)
return task_datasets
def _meta(self):
meta = {
'source_concepts': [tuple(str(c) for c in cat) for cat in
self.src_concepts],
'transformation': str(self.transformation),
'creator': self.creator
}
return meta
def info(self, full=True):
infos = {
'data_path': self.save_path,
'split_names': self.split_names,
'id': self.id,
'x_dim': self.x_dim,
'n_classes': self.n_classes.tolist(),
'descriptor': self.name,
'full_descr': str(self),
}
if full:
infos['loss_fn'] = self.loss_fn
infos['statistics'] = self.statistics
return infos
def __repr__(self):
return "{}-way classification".format(len(self.src_concepts))
def __str__(self):
categories = '\n\t-'.join([str(c) for c in self.src_concepts])
descr = "{}-way classification created by {} ({} samples): \n\t {} \n\t-{}"
trans_descr = self.transformation
return descr.format(self.n_classes[0].item(), self.creator,
self.n_samples, trans_descr, categories)
def __eq__(self, other):
return all(
map(lambda x: torch.equal(*x),
zip(self.datasets[0].tensors,
other.datasets[0].tensors)))
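# Illustrative smoke test (the helper, names and toy data below are ours, not
# part of the benchmark): build a Task from two splits of fake image data
# with a single binary label column.
if __name__ == '__main__':
    def _dummy_loss(y_hat, y, reduction='none'):
        return torch.zeros(y_hat.size(0))

    fake_splits = []
    for n in (20, 10):  # train / val split sizes
        x = torch.rand(n, 3, 8, 8)
        y = torch.arange(n).remainder(2).unsqueeze(1)  # two balanced classes
        fake_splits.append((x, y))
    toy_task = Task('toy_task', fake_splits, _dummy_loss,
                    split_names=['train', 'val'])
    print(toy_task.n_classes, toy_task.x_dim, toy_task.n_samples)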
| CTrLBenchmark-master | ctrl/tasks/task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CTrLBenchmark-master | ctrl/tasks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import time
from types import SimpleNamespace
import numpy as np
import torch
import torch.nn.functional as F
from ctrl.concepts.concept import ComposedConcept
from ctrl.concepts.concept_tree import ConceptTree
from ctrl.tasks.task import Task
from torchvision import transforms
logger = logging.getLogger(__name__)
def loss(y_hat, y, reduction: str = 'none'):
"""
:param y_hat: Model predictions
:param y: Ground Truth
:param reduction:
:return:
"""
assert y.size(1) == 1 and torch.is_tensor(y_hat)
y = y.squeeze(1)
loss_val = F.cross_entropy(y_hat, y, reduction=reduction)
assert loss_val.dim() == 1
return loss_val
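# Illustrative usage of `loss` (toy shapes below are ours): per-sample
# cross-entropy for a batch of 4 predictions over 3 classes.
if __name__ == '__main__':
    fake_logits = torch.randn(4, 3)
    fake_targets = torch.randint(0, 3, (4, 1))
    print(loss(fake_logits, fake_targets).shape)  # torch.Size([4])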
def augment_samples(samples):
trans = transforms.Compose(
[
transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor()
])
aug_samples = []
for sample in samples:
for i in range(4):
aug_samples.append(trans(sample))
for sample in samples:
aug_samples.append(transforms.ToTensor()(transforms.ToPILImage()(sample)))
return torch.stack(aug_samples)
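# Note (illustrative): `augment_samples` returns 5x the input batch — four
# randomly flipped/cropped copies of every image, followed by one PIL
# round-trip copy of each original — so a (N, C, 32, 32) batch becomes a
# (5 * N, C, 32, 32) batch.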
def _generate_samples_from_descr(categories, attributes, n_samples_per_class,
augment, rnd):
use_cat_id, attributes = attributes
assert use_cat_id and not attributes, \
"usage of attributes isn't supporte in v1."
samples = []
labels = []
for i, cat_concepts in enumerate(categories):
mixture = ComposedConcept(cat_concepts, id=None)
cat_samples = []
cat_labels = []
for s_id, n in enumerate(n_samples_per_class):
split_samples, split_attrs = mixture._get_samples(n, attributes,
split_id=s_id, rng=rnd)
if s_id in augment:
split_samples = augment_samples(split_samples)
split_labels = torch.Tensor().long()
cat_id = torch.tensor([i]).expand(split_samples.shape[0], 1)
split_labels = torch.cat([split_labels, cat_id], dim=1)
cat_samples.append(split_samples)
cat_labels.append(split_labels)
samples.append(cat_samples)
labels.append(cat_labels)
if torch.is_tensor(samples[0][0]):
cat_func = torch.cat
else:
cat_func = np.concatenate
samples = (cat_func(split) for split in zip(*samples))
labels = (torch.cat(split) for split in zip(*labels))
return samples, labels
class TaskGenIter(object):
def __init__(self, task_generator):
self.task_gen = task_generator
self.n = 0
def __next__(self):
if len(self.task_gen.task_pool) > self.n:
t = self.task_gen.task_pool[self.n]
else:
assert self.n == len(self.task_gen.task_pool)
try:
t = self.task_gen.add_task()
except IndexError:
raise StopIteration
self.n += 1
return t
class TaskGenerator(object):
def __init__(self, concept_pool: ConceptTree, transformation_pool,
samples_per_class, split_names, strat,
seed: int, flatten, n_initial_classes, use_cat_id, tta,
*args, **kwargs):
"""
:param concepts: Concept pool from which we will sample when creating
new tasks.
:param transformation_pool: Transformation pool from which we will
select the operations to be applied on the data of new tasks.
:param samples_per_class: Initial number of samples per class
:param split_names: Name of the different data splits usually
(train, val, test)
:param strat: Strategy to use for the creation of new tasks
:param seed: The seed used for the samples selection
:param flatten:
:param n_initial_classes:
:param use_cat_id: Legacy prop used with attributes.
:param tta: use Test Time Augmentation
"""
super(TaskGenerator, self).__init__(*args, **kwargs)
self.task_pool = []
self.concept_pool = concept_pool
self.transformation_pool = transformation_pool
assert len(samples_per_class) == len(split_names)
self.n_samples_per_class = samples_per_class
self.split_names = split_names
self.rnd = random.Random(seed)
self.flatten = flatten
self.tta = tta
# For default task creation
self.n_initial_classes = n_initial_classes
self.use_cat_id = use_cat_id
self.strat = strat
self.contains_loaded_tasks = False
@property
def n_tasks(self):
return len(self.task_pool)
def add_task(self, name=None, save_path=None):
"""
Adds a new task to the current pool.
This task will be created using the current strategy `self.strat`
:param name: The name of the new task
:param save_path: If provided, the task will be saved under this path
:return: The new Task
"""
new_task_id = len(self.task_pool)
if new_task_id == 0:
concepts, attrs, trans, n = self._create_new_task(
self.concept_pool, self.transformation_pool)
else:
concepts = self.task_pool[-1].src_concepts
attrs = self.task_pool[-1].attributes
trans = self.task_pool[-1].transformation
n = self.task_pool[-1].n_samples_per_class
cur_task_spec = SimpleNamespace(src_concepts=concepts,
attributes=attrs,
transformation=trans,
n_samples_per_class=n,
)
cur_task_spec = self.strat.new_task(cur_task_spec, self.concept_pool,
self.transformation_pool,
self.task_pool)
assert len(cur_task_spec.n_samples_per_class) == len(self.split_names)
new_task = self._create_task(cur_task_spec, name, save_path)
new_task.id = new_task_id
self.task_pool.append(new_task)
return new_task
def load_task(self, task_name, load_path):
splits = ['train', 'val', 'test']
samples = []
save_paths = []
for split in splits:
file_path = os.path.join(load_path, '{}_{}.pth'.format(task_name, split))
save_paths.append(file_path)
assert os.path.isfile(file_path), file_path
xs, ys = torch.load(file_path)
samples.append((xs, ys))
metadata_file = os.path.join(load_path, '{}.meta'.format(task_name))
if os.path.isfile(metadata_file):
meta = torch.load(metadata_file)
else:
meta = {}
task = Task(task_name, samples, loss, split_names=self.split_names,
id=len(self.task_pool), **meta)
task.save_path = save_paths
self.task_pool.append(task)
self.contains_loaded_tasks = True
return task
def _create_task(self, task_spec, name, save_path):
concepts = task_spec.src_concepts
attributes = task_spec.attributes
transformation = task_spec.transformation
n_samples_per_class = task_spec.n_samples_per_class
samples = self.get_samples(concepts, attributes, transformation,
n_samples_per_class)
if self.flatten:
samples = [(x.view(x.size(0), -1), y) for x, y in samples]
task = Task(name, samples, loss, transformation, self.split_names,
source_concepts=concepts, attributes=attributes,
creator=self.strat.descr(), generator=self,
n_samples_per_class=n_samples_per_class,
save_path=save_path)
return task
def get_similarities(self, component=None):
"""
:param component: String representing the components across which the
similarities should be computed, can be any combination of :
- 'x' for p(x|z)
- 'y' for p(y|z)
- 'z' for p(z)
:return: A dict associating each component to an n_tasks x n_tasks
tensor containing the similarities between tasks over this component.
"""
if component is None:
component = 'xyz'
similarities = torch.zeros(self.n_tasks, self.n_tasks, len(component))
times = torch.zeros(len(component))
for i, t1 in enumerate(self.task_pool):
for j, t2 in enumerate(self.task_pool[i:]):
sim, time = self.get_similarity(t1, t2, component)
sim = torch.tensor(sim)
# Similarities are symmetric
similarities[i, i + j] = sim
similarities[i + j, i] = sim
times += torch.tensor(time)
for comp, time in zip(component, times.unbind()):
if time > 1:
logger.warning(
"Comparison of {} took {:4.2f}s".format(comp, time))
sim_dict = dict(zip(component, similarities.unbind(-1)))
return sim_dict
def get_similarity(self, t1, t2, component=None):
if component is None:
component = 'xyz'
res = []
times = []
for char in component:
start_time = time.time()
if char == 'x':
res.append(self.transformation_pool.transformations_sim(
t1.transformation, t2.transformation))
elif char == 'y':
res.append(self.concept_pool.y_attributes_sim(t1.attributes,
t2.attributes))
elif char == 'z':
res.append(self.concept_pool.categories_sim(t1.src_concepts,
t2.src_concepts))
else:
raise ValueError('Unknown component {}'.format(char))
times.append(time.time() - start_time)
return res, times
def get_samples(self, concepts, attributes, transformation,
n_samples_per_class):
augment = [1] if self.tta else []
samples, labels = _generate_samples_from_descr(concepts, attributes,
n_samples_per_class,
augment, np.random.default_rng(self.rnd.randint(0, int(1e9))))
# Apply the input transformation
samples = [transformation(x) for x in samples]
return [(x, y) for x, y in zip(samples, labels)]
def stream_infos(self, full=True):
"""
return a list containing the information of each task in the task_pool,
useful when the stream needs to be serialized (e.g. to be sent to
workers.)
"""
return [t.info(full) for t in self.task_pool]
def _create_new_task(self, concept_pool, transformation_pool, n_attributes=0):
logger.info('Creating new task from scratch')
concepts = concept_pool.get_compatible_concepts(self.n_initial_classes,
leaf_only=True,)
n_avail_attrs = len(concept_pool.attributes)
if n_attributes > n_avail_attrs:
raise ValueError('Can\'t select {} attributes, only {} available'
.format(n_attributes, n_avail_attrs))
attributes = self.rnd.sample(range(n_avail_attrs), n_attributes)
transformation = transformation_pool.get_transformation()
concepts = [(c,) for c in concepts]
return concepts, (self.use_cat_id, attributes), transformation, \
self.n_samples_per_class
def __str__(self):
descr = "Task stream containing {} tasks:\n\t".format(self.n_tasks)
tasks = '\n\t'.join(map(str, self.task_pool))
return descr + tasks
def __iter__(self):
return TaskGenIter(self)
| CTrLBenchmark-master | ctrl/tasks/task_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ctrl.transformations.transformation_tree import TransformationTree
from torch import nn
from tqdm import tqdm
class NoisyNNTransformationTree(TransformationTree):
def __init__(self, noise_min, noise_max, x_dim, z_dim, n_canonic_transfo,
n_var_per_trans, *args, **kwargs):
self.noise_min = noise_min
self.noise_max = noise_max
self.x_dim = x_dim
self.z_dim = z_dim
self.n_canonic_transfo = n_canonic_transfo
self.n_var_per_trans = n_var_per_trans
self.depth = 2
super().__init__(*args, **kwargs)
self._inv_index = {v: k for k, v in self._node_index.items()}
def build_tree(self):
first_module = nn.Sequential(nn.Linear(self.z_dim, self.z_dim),
nn.ReLU())
# node_name = '{}{}'.format(self.name, 'front')
node_name = 'front'
self.tree.add_node(self._node_index[self.name], name=self.name)
self.tree.add_node(self._node_index[node_name], name=node_name)
self.tree.add_edge(self._node_index[self.name],
self._node_index[node_name], f=first_module)
noise_source = torch.distributions.uniform.Uniform(self.noise_min,
self.noise_max)
for i in tqdm(range(self.n_canonic_transfo), desc='Init noisy x',
disable=self.n_canonic_transfo < 30):
lin = nn.Linear(self.z_dim, self.x_dim)
for j in range(self.n_var_per_trans):
mod = mod_lin(lin, noise_source)
node_name = (i, j)
self.tree.add_node(self._node_index[node_name], name=str(node_name))
self.tree.add_edge(self._node_index['front'],
self._node_index[node_name],
f=nn.Sequential(mod, nn.ReLU()))
self.leaf_nodes.add(self._node_index[node_name])
return self._node_index[self.name]
def transformations_sim(self, t1, t2):
t1 = self._inv_index[t1.path[-1]]
t2 = self._inv_index[t2.path[-1]]
return 0 if t1[0] != t2[0] else 1
def mod_lin(lin, noise_source):
noise = noise_source.sample(lin.weight.size())
new_lin = nn.Linear(lin.in_features, lin.out_features)
state_dict = lin.state_dict()
state_dict['weight'] = state_dict['weight'] + noise
new_lin.load_state_dict(state_dict)
return new_lin
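# Illustrative usage of `mod_lin` (layer sizes and noise range below are
# ours): perturb a linear layer's weights with uniform noise and check how
# far the weights moved.
if __name__ == '__main__':
    base = nn.Linear(8, 4)
    uniform_noise = torch.distributions.uniform.Uniform(0.01, 0.1)
    noisy = mod_lin(base, uniform_noise)
    print((noisy.weight - base.weight).abs().mean().item())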
| CTrLBenchmark-master | ctrl/transformations/noisy_nn_transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class Transformation(object):
def __init__(self, transfo_pool, path, trans_descr):
assert path[0] == transfo_pool.root_node
self.transfo_pool = transfo_pool
self.path = path
self.trans_descr = trans_descr
def __call__(self, X):
with torch.no_grad():
for u, v in zip(self.path, self.path[1:]):
f = self.transfo_pool.tree.edges()[u, v]['f']
X = f(X)
return X
def __str__(self):
return self.trans_descr | CTrLBenchmark-master | ctrl/transformations/transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import torch
from ctrl.transformations.transformation_tree import TransformationTree
from ctrl.transformations.utils import BatchedTransformation
from torchvision import transforms
from torchvision.transforms import RandomAffine
ROTATIONS = {
'0': 0,
# '90': 90,
# '180': 180,
# '270': 270
}
COLORS = [[255, 0, 0], [0, 255, 0], [0, 0, 255]]
OLD_BACKGOUND = [0]
SCALES = {
'full': 1,
# '3/4': 0.75,
# 'half': 0.5,
# '1/4': 0.25
}
def get_rotations():
transformations = {}
for name, angle in ROTATIONS.items():
trans = transforms.Compose([
transforms.ToPILImage(),
RandomAffine(degrees=(angle, angle)),
transforms.ToTensor()
])
transformations[name] = BatchedTransformation(trans)
return transformations
def get_scales():
transformations = {}
for name, scale in SCALES.items():
trans = transforms.Compose([
transforms.ToPILImage(),
RandomAffine(degrees=0, scale=(scale, scale)),
transforms.ToTensor()
])
transformations[name] = BatchedTransformation(trans)
return transformations
def change_background_color(images, old_background, new_background):
"""
:param images: BCHW
:return:
"""
assert old_background == [0]
if not torch.is_tensor(new_background):
new_background = torch.tensor(new_background, dtype=images.dtype)
if images.max() <= 1 and new_background.max() > 1:
new_background /= 255
if images.size(1) == 1 and len(new_background) == 3:
images = images.expand(-1, 3, -1, -1)
else:
assert images.size(1) == len(new_background)
# raise NotImplementedError(images.size(), new_background)
images = images.clone()
new_background = new_background.view(-1, 1, 1)
bg_ratio = images.max() - images
bg = bg_ratio * new_background
imgs = images + bg
# print(images[:, 0, :, :].std().item(),images[:, 1, :, :].std().item(),images[:, 2, :, :].std().item())
# print(imgs[:, 0, :, :].std().item(), imgs[:, 1, :, :].std().item(), imgs[:, 2, :, :].std().item())
return imgs
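# Illustrative usage of `change_background_color` (toy tensors below are
# ours): recolor the zero-valued background of a grayscale batch to red.
if __name__ == '__main__':
    toy_batch = torch.rand(2, 1, 8, 8)  # B x C x H x W, values in [0, 1]
    red_batch = change_background_color(toy_batch, OLD_BACKGOUND, [255, 0, 0])
    print(red_batch.shape)  # torch.Size([2, 3, 8, 8])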
def get_colors():
transformations = {}
for color in COLORS:
trans = partial(change_background_color, old_background=OLD_BACKGOUND,
new_background=color)
transformations[str(color)] = trans
return transformations
class RainbowTransformationTree(TransformationTree):
def __init__(self, *args, **kwargs):
self.n_rotations = None
self.n_colors = None
        self.n_scales = None
super(RainbowTransformationTree, self).__init__(*args, **kwargs)
def build_tree(self):
self.tree.add_node(self._node_index[self.name], name=self.name)
rotations = get_rotations()
colors = get_colors()
scales = get_scales()
levels = [rotations, scales, colors]
prev_nodes = [self.name]
for domain in levels:
prev_nodes = self._add_transfos(prev_nodes, domain)
self.leaf_nodes.update([self._node_index[node] for node in prev_nodes])
self.depth = len(levels)
return self._node_index[self.name]
def _add_transfos(self, parent_nodes, transfos):
nodes = []
for parent in parent_nodes:
for name, transfo in transfos.items():
node_name = '{}_{}'.format(parent, name)
self.tree.add_node(self._node_index[node_name], name=node_name,
last_transfo=name)
self.tree.add_edge(self._node_index[parent],
self._node_index[node_name],
f=transfo, )
nodes.append(node_name)
return nodes
def transformations_sim(self, t1, t2):
"""
arccos((tr(R)−1)/2)
:param t1:
:param t2:
:return:
"""
t1_nodes = [t1.transfo_pool.tree.nodes()[id]['last_transfo'] for id in
t1.path[1:]]
t2_nodes = [t2.transfo_pool.tree.nodes()[id]['last_transfo'] for id in
t2.path[1:]]
n_eq = 0
for op1, op2 in zip(t1_nodes, t2_nodes):
if op1 == op2:
n_eq += 1
return n_eq / (len(t1_nodes))
| CTrLBenchmark-master | ctrl/transformations/rainbow_transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
class TransformationPool(abc.ABC):
@abc.abstractmethod
def get_transformation(self, exclude_trans=None):
raise NotImplementedError
@abc.abstractmethod
def transformations_sim(self, t1, t2):
raise NotImplementedError
| CTrLBenchmark-master | ctrl/transformations/transformation_pool.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ctrl.transformations.transformation_tree import TransformationTree
from ctrl.transformations.utils import BatchedTransformation
from torchvision import transforms
from torchvision.transforms import RandomRotation
class ImgRotationTransformationTree(TransformationTree):
def __init__(self, n_rotations, max_degrees, *args, **kwargs):
self.n_rotations = n_rotations
self.max_degrees = max_degrees
super(ImgRotationTransformationTree, self).__init__(*args, **kwargs)
def build_tree(self):
self.tree.add_node(self._node_index[self.name], name=self.name)
for i in range(self.n_rotations):
node_name = 'rotate_{}'.format(i)
self.leaf_nodes.add(self._node_index[node_name])
degrees = self.rnd.uniform(-self.max_degrees, self.max_degrees)
trans = transforms.Compose([
transforms.ToPILImage(),
RandomRotation((degrees, degrees)),
transforms.ToTensor()
])
f = BatchedTransformation(trans)
self.tree.add_node(self._node_index[node_name], name=node_name)
self.tree.add_edge(self._node_index[self.name],
self._node_index[node_name],
f=f, degrees=degrees)
self.depth = 1
return self._node_index[self.name]
def transformations_sim(self, t1, t2):
"""
arccos((tr(R)−1)/2)
:param t1:
:param t2:
:return:
"""
theta_1 = self.tree.in_edges()[t1.path[-2:]]['degrees']
theta_2 = self.tree.in_edges()[t2.path[-2:]]['degrees']
theta = abs(theta_1 - theta_2) * np.pi/180
min_angle = np.arccos(np.cos(theta))
return 1 - min_angle / np.pi
| CTrLBenchmark-master | ctrl/transformations/img_rotations.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .identity_transformation import IdentityTransformation
from .img_rotations import ImgRotationTransformationTree
from .noisy_nn_transformation import NoisyNNTransformationTree
from .rainbow_transformation import RainbowTransformationTree
from .randperm_transformation import RandomPermutationsTransformation
from .transformation_tree import RandomNNTransformationTree | CTrLBenchmark-master | ctrl/transformations/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torchvision.transforms.functional as F
from PIL import Image
from ctrl.transformations.transformation_tree import TransformationTree
from ctrl.transformations.utils import BatchedTransformation
from torchvision.transforms import transforms
def load_or_convert_to_image(img):
if isinstance(img, str):
img = Image.open(img).convert('RGB')
elif isinstance(img, torch.Tensor) or isinstance(img, np.ndarray):
img = F.to_pil_image(img)
assert isinstance(img, Image.Image)
return img
def crop_if_not_square(img, max_size=72):
if min(img.size) > max_size:
img = F.resize(img, max_size, Image.BILINEAR)
if img.size[0] != img.size[1]:
img = F.center_crop(img, min(img.size))
return img
class IdentityTransformation(TransformationTree):
def __init__(self, format_image, *args, **kwargs):
self.format_image = format_image
super(IdentityTransformation, self).__init__(*args, **kwargs)
def build_tree(self):
self.tree.add_node(self._node_index[self.name], name=self.name)
node_name = 'Id'
self.leaf_nodes.add(self._node_index[node_name])
self.tree.add_node(self._node_index[node_name], name=node_name)
if self.format_image:
trans = transforms.Compose([
load_or_convert_to_image,
# transforms.ToPILImage(),
crop_if_not_square,
transforms.ToTensor()
])
f = BatchedTransformation(trans)
else:
f = lambda x: x
self.tree.add_edge(self._node_index[self.name],
self._node_index[node_name],
f=f)
self.depth = 1
return self._node_index[self.name]
| CTrLBenchmark-master | ctrl/transformations/identity_transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class BatchedTransformation(object):
def __init__(self, transfo, descr=None):
self.transfo = transfo
self.descr = descr
def __call__(self, batch):
if torch.is_tensor(batch):
batch = batch.unbind(0)
res = [self.transfo(elt) for elt in batch]
return torch.stack(res, 0)
def __str__(self):
if self.descr is None:
return super().__str__()
else:
return self.descr | CTrLBenchmark-master | ctrl/transformations/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from abc import ABC
from collections import defaultdict
from numbers import Number
import networkx as nx
from torch import nn
from ctrl.commons.tree import Tree
from ctrl.transformations.transformation import Transformation
from ctrl.transformations.transformation_pool import TransformationPool
logger = logging.getLogger(__name__)
class TransformationTree(TransformationPool, Tree, ABC):
def __init__(self, *args, **kwargs):
self._node_index = defaultdict()
self._node_index.default_factory = self._node_index.__len__
super().__init__(*args, **kwargs)
def get_transformation(self, exclude_trans=None, allowed_trans=None):
if exclude_trans is None:
exclude_trans = []
exclude_nodes = [trans.path[-1] for trans in exclude_trans]
if allowed_trans is not None:
allowed_nodes = set(trans.path[-1] for trans in allowed_trans)
else:
allowed_nodes = None
node = self.get_compatible_nodes(exclude_nodes=exclude_nodes,
force_nodes=allowed_nodes,
leaf_only=True)
all_paths = list(nx.all_simple_paths(self.tree, self.root_node, node))
selected_path = random.choice(all_paths)
path_descr = self.get_path_descr(selected_path)
return Transformation(self, selected_path, path_descr)
def transformations_sim(self, t1, t2):
return self.wu_palmer(t1.path[-1], t2.path[-1])
def edit_transformation(self, transformation, min_dist, max_dist):
dist = random.randint(min_dist, max_dist)
old_path = transformation.path.copy()
old_path = old_path[:-dist]
new_candidates = list(nx.all_simple_paths(self.tree, old_path[-1],
self.out_nodes))
selected_path = random.choice(new_candidates)
new_path = old_path + selected_path[1:]
        return Transformation(self, new_path, self.get_path_descr(new_path))
def get_path_descr(self, path):
return '->'.join([self.tree.nodes[itm]['name'] for itm in path])
class RandomNNTransformationTree(TransformationTree):
def __init__(self, depth, degree, x_dim, z_dim, non_lin, *args, **kwargs):
self.depth = depth
self.n_children = self._format_property(degree)
self.hidden_sizes = self._format_property(x_dim)
self.z_dim = z_dim
if non_lin == 'relu':
self.non_linearity = nn.ReLU
elif non_lin == 'tanh':
self.non_linearity = nn.Tanh
super().__init__(*args, **kwargs)
def _format_property(self, prop):
if isinstance(prop, Number):
prop = [prop]
if len(prop) == 1:
prop = prop * self.depth
assert len(prop) == self.depth
return prop
def build_tree(self):
self._build_tree(self.depth-1, self.n_children, self.name, self.z_dim, self.hidden_sizes)
self.tree.add_node(self._node_index[self.name], name=self.name)
return self._node_index[self.name]
def _build_tree(self, depth, n_children, parent_node, parent_dim, hidden_dims):
for i in range(n_children[0]):
module = nn.Sequential(
nn.Linear(parent_dim, hidden_dims[0]),
self.non_linearity())
node_name = '{}{}'.format(parent_node, i)
self.tree.add_node(self._node_index[node_name], name=node_name)
self.tree.add_edge(self._node_index[parent_node], self._node_index[node_name], f=module)
if depth > 0:
self._build_tree(depth - 1, n_children[1:], node_name, hidden_dims[0], hidden_dims[1:])
else:
self.leaf_nodes.add(self._node_index[node_name])
| CTrLBenchmark-master | ctrl/transformations/transformation_tree.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ctrl.transformations.transformation_tree import TransformationTree
from ctrl.transformations.utils import BatchedTransformation
from torchvision import transforms
class RandomPermutationsTransformation(TransformationTree):
def __init__(self, n_permutations, x_off, y_off, width, height, flatten,
*args, **kwargs):
self.n_permutations = n_permutations
self.x_off = x_off
self.y_off = y_off
self.width = width
self.height = height
self.flatten = flatten
super(RandomPermutationsTransformation, self).__init__(*args, **kwargs)
def build_tree(self):
self.tree.add_node(self._node_index[self.name], name=self.name)
for i in range(self.n_permutations):
node_name = 'permutation_{}'.format(i)
self.leaf_nodes.add(self._node_index[node_name])
perm = RandomPermutation(self.x_off, self.y_off, self.width,
self.height, self.flatten)
trans = transforms.Compose(
[BatchedTransformation(transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor()])),
perm])
# f = BatchedTransformation(trans)
self.tree.add_node(self._node_index[node_name], name=node_name)
self.tree.add_edge(self._node_index[self.name],
self._node_index[node_name], f=trans)
self.depth = 1
return self._node_index[self.name]
class RandomPermutation(object):
"""
Applies a constant random permutation to the images.
"""
def __init__(self, x_off=0, y_off=0, width=None, height=None,
flatten=False):
self.x_off = x_off
self.y_off = y_off
self.width = width
self.height = height
self.x_max = x_off + width
self.y_max = y_off + height
self.kernel = torch.randperm(width * height)
self.flatten = flatten
def __call__(self, input):
return rand_perm_(input.clone(), self.x_off, self.y_off, self.x_max,
self.y_max, self.kernel, self.flatten)
def rand_perm_(img, x, y, x_max, y_max, kernel, flatten):
"""
Applies INPLACE the random permutation defined in `kernel` to the image
`img` on the zone defined by `x`, `y`, `x_max`, `y_max`
:param img: Input image of dimension (B*C*W*H)
:param x: offset on x axis
:param y: offset on y axis
:param x_max: end of the zone to permute on the x axis
:param y_max: end of the zone to permute on the y axis
:param kernel: LongTensor of dim 1 containing one value for each point in
the zone to permute
:return: the permuted image.
"""
assert img.dim() == 4
if img.size(1) != 1:
raise NotImplementedError('Not Implemented for multi-channel images')
zone = img[:, :, x:x_max, y:y_max].contiguous()
img[:, :, x:x_max, y:y_max] = zone.view(zone.size(0), -1)\
.index_select(1, kernel).view(zone.size())
return img.view(img.size(0), -1) if flatten else img
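# Illustrative usage of `rand_perm_` (toy sizes below are ours): permute the
# full 4x4 area of a single-channel batch with a fixed random kernel. Note
# that `rand_perm_` modifies its input in place, hence the clone.
if __name__ == '__main__':
    toy = torch.arange(16.).view(1, 1, 4, 4)
    kernel = torch.randperm(4 * 4)
    permuted = rand_perm_(toy.clone(), 0, 0, 4, 4, kernel, flatten=False)
    print(permuted.shape)  # torch.Size([1, 1, 4, 4])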
| CTrLBenchmark-master | ctrl/transformations/randperm_transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import random
from functools import lru_cache
import networkx as nx
class Tree(abc.ABC):
"""
Abstract Tree structure containing basic attributes and methods.
# """
def __init__(self, name, seed=None):
super().__init__()
self.tree = nx.DiGraph()
self.name = name
self.leaf_nodes = set()
self.all_nodes = set()
self._seed = seed
self.rnd = random.Random(self._seed)
self.root_node = self.build_tree()
self._shortest_path_lengths = None
self._shortest_paths = None
@property
def shortest_path_lengths(self):
if self._shortest_path_lengths is None:
self._shortest_path_lengths = dict(
nx.shortest_path_length(self.tree.to_undirected()))
return self._shortest_path_lengths
@property
def shortest_paths(self):
if self._shortest_paths is None:
self._shortest_paths = dict(nx.shortest_path(self.tree))
return self._shortest_paths
@abc.abstractmethod
def build_tree(self):
raise NotImplementedError
@lru_cache(1000)
def lowest_common_ancestor(self, nodes):
"""
Computes the LCA of a bunch of nodes.
:param nodes: tuple of nodes
:return: the Lowest Common Ancestor of the nodes.
"""
# TODO change that using
# networkx::tree_all_pairs_lowest_common_ancestor
cur_lca = nodes[0]
for node in nodes[1:]:
cur_lca = nx.lowest_common_ancestor(self.tree, cur_lca, node)
return cur_lca
def wu_palmer(self, a, b):
"""
Compute the similarity between two nodes in the tree as
sim(a, b) = 2 * depth(lcs(a, b)) / (depth(a) + depth(b))
where lcs(a, b) is the Least Common Subsumer of two nodes (aka
Lowest Common Ancestor).
https://www.geeksforgeeks.org/nlp-wupalmer-wordnet-similarity/
"""
lcs = self.lowest_common_ancestor((a, b))
depth_lcs = self.shortest_path_lengths[self.root_node][lcs]
depth_a = self.shortest_path_lengths[self.root_node][a]
depth_b = self.shortest_path_lengths[self.root_node][b]
return 2 * depth_lcs / (depth_a + depth_b)
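    # Worked example (illustrative): in a tree root -> a -> {b, c} with
    # b -> d, the lowest common ancestor of d and c is a, so
    # wu_palmer(d, c) = 2 * depth(a) / (depth(d) + depth(c))
    #                 = 2 * 1 / (3 + 2) = 0.4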
def get_compatible_nodes(self, N=None, exclude_nodes=None, leaf_only=False,
preferred_lca_dist=-1, max_lca_dist=-1,
force_nodes=None,):
"""
Searches for N compatible nodes in the tree
:param N: Int, Number of nodes to select
:param exclude_nodes: Iterable, Nodes already selected, their parents and children will also be excluded from the search.
:param leaf_only: Bool, the selection will be made only on leaf concepts if True.
:return: A List of N compatible nodes.
:raises ValueError: If the selection is not possible.
"""
if exclude_nodes is None:
exclude_nodes = set()
else:
exclude_nodes = set(exclude_nodes)
if force_nodes is None:
available_nodes = self.leaf_nodes if leaf_only else self.all_nodes
else:
available_nodes = force_nodes
for node in exclude_nodes:
children = nx.dfs_preorder_nodes(self.tree, node)
parents = self.shortest_paths[self.root_node][node]
available_nodes = available_nodes.difference(children, parents)
if max_lca_dist == -1:
max_lca_dist = self.depth
cur_max_lca_dist = preferred_lca_dist
while cur_max_lca_dist <= max_lca_dist:
try:
return self._get_compatible_nodes(N,
available_nodes,
cur_max_lca_dist)
except ValueError:
# The problem doesn't have any solution, we will try with a
# broader search space if possible.
cur_max_lca_dist += 1
raise ValueError("Impossible to find new compatible Nodes")
def _get_compatible_nodes(self, N, candidates, max_lca_dist):
"""
Searches for a list of N compatible nodes from the list `candidates`.
Two nodes are compatible if they don't have a parent/descendants
relationship at any degress (i.e. there is no path going from A to B
nor from B to A in the tree).
A set of Nodes are compatible if every pair of nodes from the set is
compatible.
:param N: Number of compatible nodes to select.
:param candidates: List or Set of nodes from which the selection will be made.
:return: A list of compatible nodes of length N
:raises ValueError: If no such combination exists.
"""
# Make a copy of the list from which we will pick elements.
cur_candidates = list(candidates)
if N is None or N == 1:
# We are done, we initialize the result with a randomly selected
# element
return [self.rnd.choice(cur_candidates)]
# Before each trial, make sure that the problem can be solved
while len(cur_candidates) >= N:
cur_node = self.rnd.choice(cur_candidates)
descendants = nx.dfs_preorder_nodes(self.tree, cur_node)
ancestors = self.shortest_paths[self.root_node][cur_node]
# Remove the ancestors and descendants nodes from the candidates
# for the rest of the selection process
new_candidates = candidates.difference(descendants, ancestors)
if max_lca_dist != -1:
new_candidates = self.filter_candidates(new_candidates,
cur_node,
max_lca_dist)
try:
# Try to solve the sub problem using the updated candidates
other_nodes = self._get_compatible_nodes(N - 1, new_candidates,
max_lca_dist)
# We have a solution for the sub-problem, we add the current
# node to this solution
other_nodes.append(cur_node)
return other_nodes
except (ValueError, IndexError):
# There is no solution possible for the sub problem, we will
# try another node at current level
cur_candidates.remove(cur_node)
# The problem doesn't have any solution
raise ValueError("Impossible to find new compatible Nodes")
def filter_candidates(self, candidates, selected, max_lca_dist):
path_to_selected = self.shortest_paths[self.root_node][selected]
max_lca = path_to_selected[-(max_lca_dist + 1)]
max_lca_children = nx.dfs_preorder_nodes(self.tree, max_lca)
return candidates.intersection(max_lca_children)
| CTrLBenchmark-master | ctrl/commons/tree.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Common components shared across the project.
""" | CTrLBenchmark-master | ctrl/commons/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import plotly
def plotly_rgb_to_hex(rgb_colors):
"""
    Convert a list of RGB strings in the format used by plotly ("rgb(<R>,<G>,<B>)") to a list of hexadecimal codes.
:param rgb_colors: List of RGB integer strings in the format ["rgb(255,0,0)", "rgb(0,255,0)", ...]
:return: List of corresponding hex code strings ["#ff0000", "#00ff00", ...]
"""
# Get the R, G and B components for each string
color_codes = plotly_rgb_values(rgb_colors)
# Format each rgb code in hex
    colors = ['#{:02x}{:02x}{:02x}'.format(*cc) for cc in color_codes]
return colors
def hex_to_rgb(hex_colors,):
"""
"""
res = []
for color in hex_colors:
res.append([int(color[i:i+2], 16) for i in (1, 3, 5)])
return res
def rgba_to_pl(rgb_color, alpha=False):
"""
"""
# res = []
# for color in rgb_colors:
# if not alpha:
# color = color[:3]
    return '#{:02x}{:02x}{:02x}'.format(*rgb_color)
def plotly_rgb_values(rgb_colors):
rgb_values = []
for color in rgb_colors:
        vals = re.findall(r"rgb\(([0-9]+),\s?([0-9]+),\s?([0-9]+)\)", color)[0]
vals = [int(val) for val in vals]
rgb_values.append(vals)
return rgb_values
default_colors = plotly_rgb_to_hex(plotly.colors.DEFAULT_PLOTLY_COLORS)
rainbow_colors = plotly_rgb_values(map(lambda col: col[1], plotly.colors.PLOTLY_SCALES['Rainbow']))
__all__ = ['default_colors', 'rainbow_colors']
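# Illustrative usage (inputs below are ours): convert two plotly-style RGB
# strings to hex codes.
if __name__ == '__main__':
    print(plotly_rgb_to_hex(['rgb(255, 0, 0)', 'rgb(0, 128, 255)']))
    # ['#ff0000', '#0080ff']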
| CTrLBenchmark-master | ctrl/commons/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ctrl.strategies.task_creation_strategy import TaskCreationStrategy
class LabelPermutationStrategy(TaskCreationStrategy):
def __init__(self, *args, **kwargs):
super(LabelPermutationStrategy, self).__init__(*args, **kwargs)
def new_task(self, concepts, transformations, previous_tasks, n_samples_per_class):
if not previous_tasks:
return self._create_new_task(concepts, transformations)
prev_task = previous_tasks[-1]
new_concepts = prev_task.src_concepts.copy()
self.rnd.shuffle(new_concepts)
return new_concepts, prev_task.attributes, prev_task.transformation
| CTrLBenchmark-master | ctrl/strategies/label_permutation_strategy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ctrl.strategies.task_creation_strategy import TaskCreationStrategy
class MixedStrategy(TaskCreationStrategy):
def __init__(self, strategies, random_select, *args, **kwargs):
super().__init__(*args, **kwargs)
self.strategies = strategies
self.strategies_list = list(strategies.values())
# self.last_strat = None
self.random_select = random_select
assert not self.random_select
self.idx = 0
def new_task(self, task_spec, concepts, transformations, previous_tasks):
for strat in self.strategies_list:
task_spec = strat.new_task(task_spec, concepts, transformations,
previous_tasks)
return task_spec
def descr(self):
return 'Mixed<{}>'.format(list(self.strategies.keys()))
| CTrLBenchmark-master | ctrl/strategies/mixed_strategy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ctrl.strategies.task_creation_strategy import TaskCreationStrategy
from ctrl.transformations.identity_transformation import \
load_or_convert_to_image, crop_if_not_square
from ctrl.transformations.utils import BatchedTransformation
from torchvision.transforms import transforms
class InputDomainMutationStrategy(TaskCreationStrategy):
def __init__(self, min_edit, max_edit, with_replacement, trans_trajectory,
*args, **kwargs):
super(InputDomainMutationStrategy, self).__init__(*args, **kwargs)
self.min_edit = min_edit
self.max_edit = max_edit
self.with_replacement = with_replacement
self.trans_trajectory = trans_trajectory
self.idx = 0
def new_task(self, task_spec, concepts, trans, previous_tasks):
cur_task_id = self.idx
if self.trans_trajectory is not None:
cur_trans_id = self.trans_trajectory[cur_task_id]
first_usage = self.trans_trajectory.index(cur_trans_id)
if first_usage < cur_task_id:
allowed_trans = [previous_tasks[first_usage].transformation]
exclude = None
else:
allowed_trans = None
exclude = [t.transformation for t in previous_tasks if
hasattr(t.transformation, 'path')]
else:
exclude = None if self.with_replacement else \
[t.transformation for t in previous_tasks]
allowed_trans = None
        if self.trans_trajectory and cur_trans_id is None:
trans = transforms.Compose([
load_or_convert_to_image,
# transforms.ToPILImage(),
crop_if_not_square,
transforms.ToTensor()
])
new_transfo = BatchedTransformation(trans, 'Identity')
elif self.min_edit < 0 or self.max_edit < 0:
new_transfo = trans.get_transformation(exclude_trans=exclude,
allowed_trans=allowed_trans)
else:
new_transfo = trans.edit_transformation(task_spec.transformation,
self.min_edit,
self.max_edit)
task_spec.transformation = new_transfo
self.idx += 1
return task_spec
| CTrLBenchmark-master | ctrl/strategies/input_domain_strategy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from ctrl.concepts.concept import ComposedConcept
from ctrl.strategies.input_domain_strategy import TaskCreationStrategy
logger = logging.getLogger(__name__)
class MutationException(Exception):
pass
class RandomMutationStrategy(TaskCreationStrategy):
def __init__(self, operations, p_mutate, p_last, n_attr_min, n_attr_max, *args, **kwargs):
super().__init__(*args, **kwargs)
self.p_mutate = p_mutate
self.p_last = p_last
self.n_attr_min = n_attr_min
self.n_attr_max = n_attr_max
self._all_operators = {'add': self.add_category,
'remove': self.remove_category,
'merge': self.merge_classes,
'split': self.split_classes,
'input_transform': self.random_transformation}
if operations == 'all':
self.operations = list(self._all_operators.keys())
else:
assert all(op in self._all_operators for op in operations)
self.operations = operations
def new_task(self, concepts, transformations, previous_tasks, n_samples_per_class):
mutate = self.rnd.random() < self.p_mutate
if previous_tasks and mutate:
old_task = self.choose_task(previous_tasks)
try:
next_task = self.mutate_task(old_task, concepts, transformations)
except MutationException:
mutate = False
if not mutate or not previous_tasks:
logger.info('Creating new task from scratch')
n_attributes = self.rnd.randint(self.n_attr_min, self.n_attr_max)
next_task = self._create_new_task(concepts, transformations, n_attributes)
return next_task
def choose_task(self, task_pool):
assert task_pool, "Can't choose a task from empty pool"
if self.rnd.random() < self.p_last:
return task_pool[-1]
else:
return self.rnd.choice(task_pool)
    def mutate_task(self, task, concept_pool, transfo_pool):
logger.info('Mutate existing task: "{}"'.format(task))
new_task = None
avail_ops = self.operations.copy()
while avail_ops and new_task is None:
op = self.rnd.choice(avail_ops)
avail_ops.remove(op)
mutation_func = self._all_operators[op]
try:
                new_task = mutation_func(task, concept_pool, transfo_pool)
new_task.creator = op
except MutationException as e:
logger.exception(e)
if new_task is None:
raise MutationException("Impossible to mutate this Task")
return new_task
def add_category(self, task, concept_pool, transformation_pool):
logger.info('Trying to add_category')
concepts = list(task.concepts)
new_concept = concept_pool.get_compatible_concepts(exclude_concepts=concepts)
if new_concept is None:
raise MutationException('No compatible concept found.')
new_category = tuple(new_concept)
categories = task.src_concepts + [new_category]
return categories, task.attributes, task.transformation
def remove_category(self, task, concept_pool, transformation_pool):
logger.info('Trying to remove_category')
categories = task.src_concepts
if len(categories) < 3:
raise MutationException('Not enough classes to remove one.')
choice = self.rnd.choice(categories)
categories.remove(choice)
return categories, task.attributes, task.transformation
def merge_classes(self, task, concept_pool, transformation_pool):
logger.info('Trying to merge_classes')
categories = task.src_concepts
if len(categories) < 3:
raise MutationException('Not enough classes to merge.')
new_categories = concept_pool.merge_closest_categories(categories)
if new_categories is None:
raise MutationException('No couple of categories can be merged.')
return new_categories, task.attributes, task.transformation
def split_classes(self, task, concept_pool, transformation_pool):
logger.info('Trying to split_classes')
categories = task.src_concepts
split_candidates = concept_pool.get_widest_categories(categories)
def is_valid(candidate):
# A category can only be split if it contains several concepts or a higher level ComposedConcept
return len(candidate) > 1 or isinstance(candidate[0], ComposedConcept)
split_candidates = list(filter(is_valid, split_candidates))
if not split_candidates:
raise MutationException('No category can be split.')
cat_to_split = self.rnd.choice(split_candidates)
logger.info("splitting {}".format(cat_to_split))
new_categories = concept_pool.split_category(cat_to_split)
new_categories = [tuple(cat) for cat in new_categories]
categories.remove(cat_to_split)
categories.extend(new_categories)
return categories, task.attributes, task.transformation
def random_transformation(self, task, concept_pool, transformation_pool):
logger.info('Trying to random_transformation')
new_transformation = transformation_pool.get_transformation()
return task.src_concepts, task.attributes, new_transformation
| CTrLBenchmark-master | ctrl/strategies/random_mutation_strategy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .incremental_strategy import IncrementalStrategy
from .input_domain_strategy import InputDomainMutationStrategy
from .label_permutation_strategy import LabelPermutationStrategy
from .random_mutation_strategy import RandomMutationStrategy
from .split_strategy import SplitStrategy
from .attributes_strategy import AttributeStrategy
from .data_strategy import DataStrategy
from .mixed_strategy import MixedStrategy
from .task_creation_strategy import TaskCreationStrategy
| CTrLBenchmark-master | ctrl/strategies/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import logging
import random
logger = logging.getLogger(__name__)
class TaskCreationStrategy(abc.ABC):
def __init__(self, domain, seed,
concepts_preferred_lca_dist, concepts_max_lca_dist):
self.rnd = random.Random(seed)
self.domain = domain
self.concepts_preferred_lca_dist = concepts_preferred_lca_dist
self.concepts_max_lca_dist = concepts_max_lca_dist
@abc.abstractmethod
def new_task(self, task_spec, concepts, transformations, previous_tasks):
raise NotImplementedError
def descr(self):
return type(self).__name__
| CTrLBenchmark-master | ctrl/strategies/task_creation_strategy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ctrl.strategies.task_creation_strategy import TaskCreationStrategy
class IncrementalStrategy(TaskCreationStrategy):
def __init__(self, new_classes_per_task, *args, **kwargs):
super().__init__(*args, **kwargs)
self.new_classes_per_task = new_classes_per_task
def new_task(self, concepts, transformations, previous_tasks, n_samples_per_class):
if not previous_tasks:
return self._create_new_task(concepts, transformations)
prev_task = previous_tasks[-1]
new_concepts = concepts.get_compatible_concepts(self.new_classes_per_task, prev_task.concepts, True)
concepts = prev_task.src_concepts + [[c] for c in new_concepts]
return concepts, prev_task.attributes, prev_task.transformation
| CTrLBenchmark-master | ctrl/strategies/incremental_strategy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ctrl.strategies.task_creation_strategy import TaskCreationStrategy
class DataStrategy(TaskCreationStrategy):
def __init__(self, n_samples_per_class_options, random,
with_replacement, max_samples, min_samples, decay_rate, steps,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.n_samples_per_class_options = n_samples_per_class_options
self.random = random
self.with_replacement = with_replacement
if self.random and not self.with_replacement:
self.rnd.shuffle(self.n_samples_per_class_options)
self.max_samples = max_samples
self.min_samples = min_samples
self.decay_rate = decay_rate
self.steps = steps
self.idx = 0
def new_task(self, task_spec, concepts, transformations, previous_tasks):
if self.steps is not None:
n_samples = self._get_n_samples_schedule(len(previous_tasks))
elif self.max_samples is not None:
n_samples = self._decay_n_samples(len(previous_tasks))
else:
n_samples = self._get_n_samples_classic()
self.idx += 1
if isinstance(n_samples, int):
# If a single number is provided, it corresponds to the trains set
# size. We need to add the default sizes for remaining sets.
n_samples = [n_samples]
if len(n_samples) != len(task_spec.n_samples_per_class):
n_samples = [*n_samples,
*task_spec.n_samples_per_class[len(n_samples):]]
task_spec.n_samples_per_class = n_samples
return task_spec
def _get_n_samples_classic(self):
if self.with_replacement and self.random:
n_samples = self.rnd.choice(self.n_samples_per_class_options)
elif self.with_replacement:
# We use replacement but without random selection: we cycle through
# the list of options
idx = self.idx % len(self.n_samples_per_class_options)
n_samples = self.n_samples_per_class_options[idx]
else:
assert self.n_samples_per_class_options, 'Not enough data options'
n_samples = self.n_samples_per_class_options.pop(0)
return n_samples
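    # Example (sketch, with hypothetical options): if random=False,
    # with_replacement=True and n_samples_per_class_options=[10, 50, 100],
    # successive tasks get 10, 50, 100, 10, ... samples per class (cycling).
    # With with_replacement=False the options are popped from the front and
    # must therefore cover the whole stream.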
def _decay_n_samples(self, t):
n_samples = self.max_samples * np.exp(-self.decay_rate * t)
res = [int(round(n_samples)), int(round(n_samples/2))]
print(f'Using {res} samples')
return res
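    # Worked example (hypothetical values): with max_samples=5000 and
    # decay_rate=0.5, the third task (t=2) uses
    # 5000 * exp(-0.5 * 2) ~= 1839.4 samples, i.e. [1839, 920] for the first
    # two splits (presumably train and validation).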
def _get_n_samples_schedule(self, t):
cur_idx = 0
# next_step = self.n_samples_per_class_options[0]
while cur_idx < len(self.steps) and t >= self.steps[cur_idx]:
cur_idx += 1
print(f"CHOOSING FROM {self.n_samples_per_class_options[cur_idx]}")
        return self.rnd.choice(self.n_samples_per_class_options[cur_idx])
| CTrLBenchmark-master | ctrl/strategies/data_strategy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ctrl.strategies.input_domain_strategy import TaskCreationStrategy
class AttributeStrategy(TaskCreationStrategy):
def __init__(self, n_attrs_per_task, resample_classes, *args, **kwargs):
super().__init__(*args, **kwargs)
self.n_attrs_per_task = n_attrs_per_task
self.resample_classes = resample_classes
def new_task(self, concept_pool, transformations, previous_tasks, n_samples_per_class):
if not previous_tasks:
concepts, (use_cat_id, attrs), transfo = \
self._create_new_task(concept_pool, transformations, self.n_attrs_per_task)
else:
prev_attrs = set(attr for t in previous_tasks for attr in t.attributes[1])
avail_attrs = set(range(len(concept_pool.attributes))).difference(prev_attrs)
n_avail_attrs = len(avail_attrs)
if self.n_attrs_per_task > n_avail_attrs:
raise ValueError(
'Can\'t select {} attributes, only {} available'.format(
self.n_attrs_per_task, n_avail_attrs))
attributes = self.rnd.sample(list(avail_attrs), self.n_attrs_per_task)
prev_task = previous_tasks[-1]
if self.resample_classes:
concepts = concept_pool.get_compatible_concepts(
self.n_initial_classes,
leaf_only=True,
preferred_lca_dist=self.concepts_preferred_lca_dist,
max_lca_dist=self.concepts_max_lca_dist)
concepts = [(c,) for c in concepts]
else:
concepts = prev_task.src_concepts
use_cat_id = self.use_cat_id
attrs = attributes
transfo = prev_task.transformation
if not use_cat_id:
assert len(attrs) == 1, "Should have at max one attribute " \
"when not using the category ids, " \
"otherwise it's unclear what a class" \
" is (for n samples per class)."
return concepts, (use_cat_id, attrs), transfo
| CTrLBenchmark-master | ctrl/strategies/attributes_strategy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import networkx as nx
import numpy as np
from ctrl.strategies.task_creation_strategy import TaskCreationStrategy
class SplitStrategy(TaskCreationStrategy):
def __init__(self, reuse_attrs, with_replacement, traj=None,
first_level_weighting=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reuse_attrs = reuse_attrs
self.with_replacement = with_replacement
assert first_level_weighting in [None, 'class_uniform', 'ds_uniform']
self.first_level_weighting = first_level_weighting
self.traj = traj
self.idx = 0
self.concept_order = {}
self.all_used_concepts = []
def new_task(self, task_spec, concepts, transformations, previous_tasks):
if self.with_replacement:
old_concepts = set()
else:
old_concepts = {c for task in previous_tasks
for c in task.concepts}
if self.traj:
traj_step = self.traj[self.idx]
if traj_step[0] in self.concept_order:
assert all(itm in self.concept_order for itm in traj_step)
new_concepts = [self.concept_order[id] for id in traj_step]
else:
assert not any(itm in self.concept_order for itm in traj_step)
if isinstance(traj_step[0], str) and \
all([isinstance(step, int) for step in traj_step[1:]]):
branch = traj_step.pop(0)
nodes = None
elif all(isinstance(step, str) for step in traj_step):
branch = None
nodes = traj_step
else:
branch = None
nodes = None
new_concepts = concepts.get_compatible_concepts(len(traj_step),
old_concepts, True,
preferred_lca_dist=self.concepts_preferred_lca_dist,
max_lca_dist=self.concepts_max_lca_dist,
branch=branch, nodes=nodes)
for id, concept in zip(traj_step, new_concepts):
self.concept_order[id] = concept
# for id in traj_step:
# if id not in self.concept_order:
# new = concepts.get_compatible_concepts(
# 1, self.all_used_concepts, True,
# preferred_lca_dist=self.concepts_preferred_lca_dist,
# max_lca_dist=self.concepts_max_lca_dist)[0]
# self.all_used_concepts.append(new)
# self.concept_order[id] = new
# new_concepts.append(self.concept_order[id])
elif self.first_level_weighting is not None:
tree = concepts.tree
if self.first_level_weighting == 'class_uniform':
classes_per_ds = defaultdict(int)
for branch in tree.successors(concepts.root_node):
for node in nx.dfs_preorder_nodes(tree, branch):
if tree.out_degree(node) == 0:
classes_per_ds[branch] += 1
# for k, v in classes_per_ds.items():
# classes_per_ds[k] = 1 / v
n_tot = sum(classes_per_ds.values())
for k, v in classes_per_ds.items():
classes_per_ds[k] = v / n_tot
branches = list(classes_per_ds.keys())
probas = list(classes_per_ds.values())
else:
branches = list(tree.successors(concepts.root_node))
probas = [1/len(branches)]*len(branches)
b = np.random.choice(a=branches, size=1, p=probas)[0].descriptor
n_classes = len(task_spec.src_concepts)
new_concepts = concepts.get_compatible_concepts(n_classes,
old_concepts, True,
preferred_lca_dist=self.concepts_preferred_lca_dist,
max_lca_dist=self.concepts_max_lca_dist,
branch=b)
else:
n_classes = len(task_spec.src_concepts)
new_concepts = concepts.get_compatible_concepts(n_classes,
old_concepts, True,
preferred_lca_dist=self.concepts_preferred_lca_dist,
max_lca_dist=self.concepts_max_lca_dist)
new_concepts = [(c,) for c in new_concepts]
self.idx += 1
task_spec.src_concepts = new_concepts
if not self.reuse_attrs:
task_spec.attributes = (task_spec.attributes[0], [])
return task_spec
| CTrLBenchmark-master | ctrl/strategies/split_strategy.py |
"""
This module contains a bunch of code extracted from
https://github.com/TomVeniat/MNTDP in order to allow the usage of automatic
configuration and initialization on the CTrL benchmark.
"""
import collections
import os
from os import path
from copy import deepcopy
import yaml
from numpy.random import default_rng
from ctrl.instances.image_dataset_tree import ImageDatasetTree
from ctrl.instances.md_tree import MultiDomainDatasetTree
from ctrl.strategies import InputDomainMutationStrategy, SplitStrategy, \
IncrementalStrategy, RandomMutationStrategy, DataStrategy, \
AttributeStrategy, MixedStrategy, LabelPermutationStrategy
from ctrl.tasks.task_generator import TaskGenerator
from ctrl.transformations import RandomNNTransformationTree, \
ImgRotationTransformationTree, RandomPermutationsTransformation, \
IdentityTransformation, NoisyNNTransformationTree, \
RainbowTransformationTree
def get_component_by_name(name):
if name in ['cifar10_tree', 'cifar100_tree', 'mnist_tree', 'svhn_tree',
'fashion_mnist_tree', 'dtd_tree', 'aircraft_tree']:
return ImageDatasetTree
if name.startswith('md_tree'):
return MultiDomainDatasetTree
if name == 'nn_x_transformation':
return RandomNNTransformationTree
if name == 'img_rot_x_transformation':
return ImgRotationTransformationTree
if name == 'randperm_x_transformation':
return RandomPermutationsTransformation
if name == 'id_x_transformation':
return IdentityTransformation
if name == 'noisy_nn_x_transformation':
return NoisyNNTransformationTree
if name == 'rainbow_x_transformation':
return RainbowTransformationTree
if name == 'transfo':
return InputDomainMutationStrategy
if name == 'split':
return SplitStrategy
if name == 'incremental':
return IncrementalStrategy
if name == 'random':
return RandomMutationStrategy
if name == 'data':
return DataStrategy
if name == 'attributes':
return AttributeStrategy
if name.startswith('mixed'):
return MixedStrategy
if name == 'label_permut':
return LabelPermutationStrategy
if name == 'task_gen':
return TaskGenerator
raise NotImplementedError(name)
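# Example (sketch): the short names used in the YAML stream configs resolve to
# their implementation classes, e.g.
# >>> get_component_by_name('split')
# <class 'ctrl.strategies.split_strategy.SplitStrategy'>
# >>> get_component_by_name('cifar10_tree')
# <class 'ctrl.instances.image_dataset_tree.ImageDatasetTree'>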
def load_yaml(filename):
with open(filename, "r") as f:
return yaml.load(f, Loader=yaml.FullLoader)
def recursive_update(d, u):
"""
From Sacred (https://github.com/IDSIA/sacred).
Given two dictionaries d and u, update dict d recursively.
E.g.:
d = {'a': {'b' : 1}}
u = {'c': 2, 'a': {'d': 3}}
=> {'a': {'b': 1, 'd': 3}, 'c': 2}
"""
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
r = recursive_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
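# Example (mirrors the docstring above):
# >>> recursive_update({'a': {'b': 1}}, {'c': 2, 'a': {'d': 3}})
# {'a': {'b': 1, 'd': 3}, 'c': 2}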
def load_component_default_config(component_config, all_default_configs):
component_default_config = {}
if '_name' in component_config:
elt_default = deepcopy(all_default_configs.get(
component_config['_name'], {}))
default = load_component_default_config(elt_default,
all_default_configs)
recursive_update(default, elt_default)
component_default_config.update(default)
for key, val in component_config.items():
if isinstance(val, dict):
conf = load_component_default_config(val, all_default_configs)
if conf:
component_default_config[key] = conf
return component_default_config
def load_default_config(config):
fn = path.join(path.dirname(__file__), 'default_datasets.yaml')
# fn = f'./streams/default_datasets.yaml'
    if os.path.isfile(fn):
        comp_default_configs = load_yaml(fn)
    else:
        comp_default_configs = {}
if '_name' in config:
comp_default_config = load_component_default_config(config, comp_default_configs)
else:
comp_default_config = {}
for mod, mod_config in config.items():
if isinstance(mod_config, dict):
comp_default_config[mod] = load_component_default_config(mod_config, comp_default_configs)
return comp_default_config
def init_component(_rnd, **kwargs):
for k, v in kwargs.items():
if isinstance(v, dict):
v = init_component(_rnd=_rnd, **v)
kwargs[k] = v
if '_name' in kwargs:
comp_class = get_component_by_name(kwargs.pop('_name'))
return comp_class(seed=_rnd.integers(0, 1e9), **kwargs)
else:
return kwargs
def get_stream(name, seed=None):
config_path = path.join(path.dirname(__file__), f'{name}.yaml')
stream_config = load_yaml(config_path)
config = load_default_config(stream_config)
recursive_update(config, stream_config)
return init_component(default_rng(seed), **config)['task_gen']
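# Example usage (sketch): build a task generator from one of the YAML stream
# definitions shipped next to this module. The stream name 's_minus' is an
# assumption; any '<name>.yaml' present in this directory works, and iterating
# over the returned TaskGenerator is assumed to yield the stream's tasks.
# >>> task_gen = get_stream('s_minus', seed=0)
# >>> for task in task_gen:
# ...     print(task)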
| CTrLBenchmark-master | ctrl/streams/__init__.py |