# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import xpress as xp
# Wraps the xpress solver (https://pypi.org/project/xpress/, docs available at
# https://www.fico.com/fico-xpress-optimization/docs/latest/solver/optimizer/python/HTML/GUID-616C323F-05D8-3460-B0D7-80F77DA7D046.html)
class XpressSolver:
def __init__(self, timeout_s=None):
self.vars = []
self.constraints = []
self.maximize = True
self.timeout = timeout_s
self.pd_gap = (None, None)
self.pd_integral = None
def create_integer_var(self, name, lower_bound, upper_bound):
v = xp.var(name=name, lb=lower_bound, ub=upper_bound, vartype=xp.integer)
self.vars.append(v)
return v
def create_real_var(self, name, lower_bound, upper_bound):
v = xp.var(name=name, lb=lower_bound, ub=upper_bound, vartype=xp.continuous)
self.vars.append(v)
return v
def create_binary_var(self, name):
v = xp.var(name=name, vartype=xp.binary)
self.vars.append(v)
return v
def set_objective_function(self, equation, maximize):
self.of = equation
self.maximize = maximize
def add_constraint(self, cns):
self.constraints.append(cns)
def disable_presolver(self):
# TBD
pass
def disable_cuts(self):
# TBD
pass
def disable_heuristics(self):
# TBD
pass
def solve(self):
        # Solve the problem and return the result as a dictionary of values
        # indexed by the corresponding variables. Raises a RuntimeError if the
        # problem is infeasible.
p = self.as_xpress_problem()
# Make sure the problem is feasible
if p.iisfirst(0) == 0:
raise RuntimeError("Problem is not feasible")
# Solve and return the values for all the variables.
if self.timeout:
p.controls.maxtime = self.timeout
p.solve()
result = {}
for v in self.vars:
result[v] = p.getSolution(v)
# Record the value of the primal dual gap.
self.pd_gap = (p.getAttrib("mipbestobjval"), p.getAttrib("bestbound"))
self.pd_integral = p.getAttrib("primaldualintegral")
return result
def primal_dual_gap(self):
return self.pd_gap
def primal_dual_integral(self):
return self.pd_integral
    def load(self, mps_filename):
        # Not supported yet.
        raise NotImplementedError("Loading from MPS files is not supported yet")
    def export(self, lp_output_filename):
        p = self.as_xpress_problem()
        p.write(lp_output_filename, "lp")
def as_xpress_problem(self):
p = xp.problem(self.vars, self.of, self.constraints)
if self.maximize:
p.chgobjsense(xp.maximize)
else:
p.chgobjsense(xp.minimize)
return p
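# A minimal usage sketch of the wrapper above (hypothetical data; assumes the
# xpress package and a valid FICO Xpress license are available):
#
#   solver = XpressSolver(timeout_s=60)
#   x = solver.create_integer_var("x", 0, 10)
#   y = solver.create_integer_var("y", 0, 10)
#   solver.add_constraint(2 * x + 3 * y <= 12)
#   solver.set_objective_function(x + 2 * y, maximize=True)
#   values = solver.solve()  # dictionary mapping each variable to its value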
# ===== end of CL-LNS-main/xpress_solver.py =====
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import os.path
import tarfile
import zipfile
import ecole
import geco
import geco.generator
import glob
import re
import json
import pyscipopt
import hashlib
import string
import random
class InstanceLoader:
LOCAL_INSTANCE = {
"INDSET_test": "instances/INDSET_ER_6000/instance_ER4_*.cip",
"INDSET_train": "instances/INDSET_ER_6000/train/train_instance_ER4_*.cip",
}
ECOLE = {
# The settings are taken from the Gasse paper
# (https://papers.nips.cc/paper/2019/file/d14c2267d848abeb81fd590f371d39bd-Paper.pdf)
"SET_COVER_EASY": ecole.instance.SetCoverGenerator(n_rows=500, n_cols=500, density=0.05),
"SET_COVER_MEDIUM": ecole.instance.SetCoverGenerator(n_rows=500, n_cols=1000, density=0.05),
"SET_COVER_HARD": ecole.instance.SetCoverGenerator(n_rows=500, n_cols=2000, density=0.05),
"INDEPENDENT_SET_EASY": ecole.instance.IndependentSetGenerator(n_nodes=500),
"INDEPENDENT_SET_MEDIUM": ecole.instance.IndependentSetGenerator(n_nodes=1000),
"INDEPENDENT_SET_HARD": ecole.instance.IndependentSetGenerator(n_nodes=1500),
"AUCTION_EASY": ecole.instance.CombinatorialAuctionGenerator(n_items=100, n_bids=500),
"AUCTION_MEDIUM": ecole.instance.CombinatorialAuctionGenerator(n_items=200, n_bids=1000),
"AUCTION_HARD": ecole.instance.CombinatorialAuctionGenerator(n_items=300, n_bids=1500),
"FACILITY_EASY": ecole.instance.CapacitatedFacilityLocationGenerator(n_facilities=100, n_customers=100),
"FACILITY_MEDIUM": ecole.instance.CapacitatedFacilityLocationGenerator(n_facilities=100, n_customers=200),
"FACILITY_HARD": ecole.instance.CapacitatedFacilityLocationGenerator(n_facilities=100, n_customers=400),
}
GECO = {
# Instances from the GeCO generator
"KNAPSACK_UC": lambda seed: geco.mips.knapsack.pisinger.uncorrelated(n=1974, c=2864, seed=seed),
"KNAPSACK_WC": lambda seed: geco.mips.knapsack.pisinger.weakly_correlated(n=1974, c=2864, seed=seed),
"KNAPSACK_SC": lambda seed: geco.mips.knapsack.pisinger.strongly_correlated(n=1974, c=2864, seed=seed),
"KNAPSACK_ISC": lambda seed: geco.mips.knapsack.pisinger.inverse_strongly_correlated(n=1974, c=2864, seed=seed),
"KNAPSACK_ASC": lambda seed: geco.mips.knapsack.pisinger.almost_strongly_correlated(n=1974, c=2864, seed=seed),
"KNAPSACK_SUBSET_SUM": lambda seed: geco.mips.knapsack.pisinger.subset_sum(n=1974, c=2864, seed=seed),
"KNAPSACK_UWSW": lambda seed: geco.mips.knapsack.pisinger.uncorrelated_with_similar_weights(n=1974, c=2864, seed=seed),
#"KNAPSACK_SPANNER": lambda seed: geco.mips.knapsack.pisinger.spanner(v=345, m=2, n=995, distribution=**uncorrelated_distribution(), capacity=1720, seed=seed),
"KNAPSACK_PROFIT_CEILING": lambda seed: geco.mips.knapsack.pisinger.profit_ceiling(n=2974, c=1864, d=1.5, seed=seed),
"KNAPSACK_CIRCLE": lambda seed: geco.mips.knapsack.pisinger.circle(n=1974, c=2864, seed=seed),
"KNAPSACK_MSC": lambda seed: geco.mips.knapsack.pisinger.multiple_strongly_correlated(n=1974, c=2864, k1=1, k2=2, d=3, seed=seed),
"KNAPSACK_YANG": lambda seed: geco.mips.knapsack.yang.yang_instance(n=2368, seed=seed),
"SCHEDULING_HEINZ": lambda seed: geco.mips.scheduling.heinz.heinz_instance(number_of_facilities=43, number_of_tasks=114, seed=seed),
"SCHEDULING_HOOKER": lambda seed: geco.mips.scheduling.hooker.hooker_instance(number_of_facilities=23, number_of_tasks=73, time_steps=25, seed=seed),
"SET_PACKING": lambda seed: geco.mips.set_packing.yang.yang_instance(m=734, seed=seed),
"SET_COVER_SUN": lambda seed: geco.mips.set_cover.sun.sun_instance(n=1927, m=1467, seed=seed),
"SET_COVER_YANG": lambda seed: geco.mips.set_cover.yang.yang_instance(m=1513, seed=seed),
#"PRODUCTION_PLANNING": lambda seed: geco.mips.production_planning.tang.tang_instance(T=5, seed=seed),
"MAX_INDEPENDENT_SET": lambda seed: geco.mips.independent_set.barabasi_albert.barabasi_albert_instance(m=10, n=100, seed=seed),
"MAX_CUT": lambda seed: geco.mips.max_cut.tang.tang_instance(n=593, m=684, seed=seed),
"PACKING": lambda seed: geco.mips.packing.tang.tang_instance(n=321, m=428, seed=seed),
#"GRAPH_COLORING": lambda seed: geco.mips.graph_coloring.generic.assigment(seed=seed),
#"FACILITY_CORNUEJOLS": lambda seed: geco.mips.facility_location.cornuejols.cornuejols_instance(n_customers=385, n_facilities=683, ratio=.95, seed=seed),
}
GECO_MIPLIB = {
"MIPLIB_BENCHMARK": geco.mips.loading.miplib.benchmark_instances(),
"MIPLIB_EASY": geco.mips.loading.miplib.easy_instances(),
"MIPLIB_HARD": geco.mips.loading.miplib.hard_instances(),
"MIPLIB_OPEN": geco.mips.loading.miplib.open_instances(),
#"ORLIB": geco.mips.loading.orlib_load_instance(),
}
DATASETS = {
"BCOL": "mip_BCOL-CLS.tar.gz",
"CORLAT": "mip_COR-LAT.tar.gz",
"MIPLIB": "collection.zip",
"MIPLIB_FILTERED": "collection.zip",
"RCW2": "mip_RCW2.tar.gz",
"Regions200": "mip_Regions200.tar.gz",
}
COMPETITION = {
"ANONYMOUS": "anonymous.tar.gz",
"ITEM_PLACEMENT": "item_placement.tar.gz",
"LOAD_BALANCING": "load_balancing.tar.gz",
}
#DFLT_TMP_FILE_LOC = "/tmp/" + str(os.geteuid()) + "/"
#DFLT_TMP_FILE_LOC = "/tmp/" + str(2575) + "/"
DFLT_TMP_FILE_LOC = ""
def __init__(self, dataset_loc = "", tmp_file_loc = DFLT_TMP_FILE_LOC, mode="*", repeat=False, presolve=True, competition_settings=True, load_metadata=False, shard=0, shard_count=0, pprocess = False):
dataset_loc = os.path.expanduser(dataset_loc)
#try:
# os.mkdir(tmp_file_loc)
#except FileExistsError:
# pass
self.dataset_loc = dataset_loc
self.tmp_file_loc = tmp_file_loc
self.mode = mode
self.repeat = repeat
self.presolve = presolve
self.competition_settings=competition_settings
self.load_metadata = load_metadata
self.shard = shard
self.shard_count = shard_count
self.filtered_instances = []
self.post_process = pprocess
assert shard >= 0
assert (shard < shard_count or shard_count == 0)
@staticmethod
def hash_model(model):
letters = string.ascii_letters
tmp_file = '/tmp/' + ''.join(random.choice(letters) for i in range(10)) + '.lp'
model.as_pyscipopt().writeProblem(tmp_file)
with open(tmp_file, 'r') as f:
problem = f.read()
problem = problem.encode()
key = hashlib.blake2s(problem, digest_size=4).hexdigest()
return key
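    # Usage sketch: hash_model gives a short content hash of an ecole model,
    # handy for deduplicating generated instances (names hypothetical):
    #   key = InstanceLoader.hash_model(ecole_model)  # e.g. 'a1b2c3d4'
    #   if key in seen_keys: ...skip the duplicate instance...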
def load(self, dataset_name):
if not self.repeat:
for m in self.load_datasets(dataset_name):
yield m
else:
while True:
for m in self.load_datasets(dataset_name):
yield m
def load_datasets(self, dataset_name):
datasets = dataset_name.split('+')
for d in datasets:
for m in self.load_once(d):
yield m
print(self.filtered_instances)
def load_once(self, dataset_name):
if dataset_name in self.ECOLE:
return self.load_ecole(dataset_name)
elif dataset_name in self.GECO:
return self.load_geco(dataset_name)
elif dataset_name in self.GECO_MIPLIB:
return self.load_geco_miplib(dataset_name)
elif dataset_name in self.LOCAL_INSTANCE:
return self.load_local_instance(dataset_name)
elif dataset_name in self.COMPETITION:
return self.load_competition(dataset_name)
filename = self.DATASETS[dataset_name]
local_version = os.path.join(self.dataset_loc, filename)
if zipfile.is_zipfile(local_version):
return self.load_zip(local_version)
elif tarfile.is_tarfile(local_version):
filter = re.compile(".+mps|.+lp")
return self.load_tar(local_version, filter=filter)
else:
assert False
def setup(self, ecole_model):
if self.competition_settings:
#print("disabling")
# disable SCIP heuristics and restarts
scip_model = ecole_model.as_pyscipopt()
scip_model.setHeuristics(pyscipopt.scip.PY_SCIP_PARAMSETTING.OFF)
ecole_model.set_params({
'estimation/restarts/restartpolicy': 'n',
})
def preprocess(self, ecole_model):
self.setup(ecole_model)
#print(self.presolve)
        if self.presolve:
            return  # presolving is deliberately skipped here, even when requested
print("presolving mip")
ecole_model.presolve()
def load_zip(self, local_version):
with zipfile.ZipFile(local_version) as z:
if self.shard_count:
files = z.namelist()
shard = files[slice(self.shard, None, self.shard_count)]
else:
shard = z.namelist()
for member in shard:
f = z.extract(member, path=self.tmp_file_loc)
instance = os.path.join(self.tmp_file_loc, member)
                #yield instance  # bad hack kept for reference: yields raw paths when loading MIPLIB instances
#ecole_model = ecole.scip.Model.from_file(instance)
temp_model = pyscipopt.Model()
print(instance)
temp_model.readProblem(instance)
if temp_model.getNVars() != temp_model.getNBinVars():
continue
#self.filtered_instances.append(member)
#print(self.filtered_instances)
ecole_model = ecole.scip.Model.from_pyscipopt(temp_model)
self.preprocess(ecole_model)
if not ecole_model.is_solved:
yield ecole_model
def load_tar(self, local_version, filter=None, presolved=False):
with tarfile.open(local_version) as t:
members = t.getmembers()
            if self.shard_count:
members = members[slice(self.shard, None, self.shard_count)]
for member in members:
if not member.isfile():
continue
if filter and not filter.match(member.name):
continue
f = t.extract(member, path=self.tmp_file_loc)
instance = os.path.join(self.tmp_file_loc, member.name)
#ecole_model = ecole.scip.Model.from_file(instance)
temp_model = pyscipopt.Model()
temp_model.readProblem(instance)
ecole_model = ecole.scip.Model.from_pyscipopt(temp_model)
self.setup(ecole_model)
if self.presolve and not presolved:
ecole_model.presolve()
if ecole_model.is_solved:
continue
if not self.load_metadata:
yield ecole_model
else:
metadata_loc = member.name.replace('mps', 'json')
f = t.extract(metadata_loc, path=self.tmp_file_loc)
raw_metadata = os.path.join(self.tmp_file_loc, metadata_loc)
with open(raw_metadata) as f:
metadata = json.load(f)
yield (ecole_model, metadata)
def load_ecole(self, instance_type):
instances = self.ECOLE[instance_type]
instances.seed(self.shard)
for ecole_model in instances:
self.preprocess(ecole_model)
if not ecole_model.is_solved:
yield ecole_model
def load_geco(self, instance_type):
generator = self.GECO[instance_type]
for m in geco.generator.generate(generator, seed=self.shard):
ecole_model = ecole.scip.Model.from_pyscipopt(m)
self.preprocess(ecole_model)
if not ecole_model.is_solved:
yield ecole_model
def load_geco_miplib(self, instance_type):
# Sharding not supported yet
assert self.shard_count == 0
instances = self.GECO_MIPLIB[instance_type]
for m in instances:
ecole_model = ecole.scip.Model.from_pyscipopt(m)
self.preprocess(ecole_model)
if not ecole_model.is_solved:
yield ecole_model
def load_local_instance(self, instance_type):
# Sharding not supported yet
assert self.shard_count == 0
dir = self.LOCAL_INSTANCE[instance_type]
for instance in glob.glob(dir):
print(instance)
temp_model = pyscipopt.Model()
if self.post_process:
yield temp_model
continue
temp_model.readProblem(instance)
ecole_model = ecole.scip.Model.from_pyscipopt(temp_model)
#ecole_model = ecole.scip.Model.from_file(instance)
self.preprocess(ecole_model)
#self.setup(ecole_model)
yield ecole_model
def load_competition(self, instance_type):
filename = self.COMPETITION[instance_type]
local_version = os.path.join(self.dataset_loc, filename)
filter = re.compile(".+mps")
return self.load_tar(local_version, filter=filter, presolved=True)
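    # Sharding sketch: shard/shard_count split one dataset across workers by
    # taking every shard_count-th instance starting at index shard, e.g.:
    #   loader_a = InstanceLoader(dataset_loc="~/datasets", shard=0, shard_count=2)
    #   loader_b = InstanceLoader(dataset_loc="~/datasets", shard=1, shard_count=2)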
if __name__ == '__main__':
loader = InstanceLoader()
for m in loader.load("KNAPSACK_YANG"):
print(str(m))
break
# ===== end of CL-LNS-main/instance_loader.py =====
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import multiprocessing
import os
import re
import subprocess
import sys
import sysconfig
from distutils.version import LooseVersion
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, src_dir=""):
super(CMakeExtension, self).__init__(name, sources=[])
self.src_dir = os.path.abspath(src_dir)
class CMakeBuild(build_ext):
def run(self):
try:
cmake_version = subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
cmake_version = LooseVersion(
re.search(r"version\s*([\d.]+)", cmake_version.decode()).group(1))
if cmake_version < "3.14":
raise RuntimeError("CMake >= 3.14 is required.")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
ext_dir = os.path.abspath(os.path.dirname(
self.get_ext_fullpath(ext.name)))
cmake_args = ["-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" +
ext_dir, "-DPYTHON_EXECUTABLE=" + sys.executable]
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", f"-j{multiprocessing.cpu_count()}"]
env = os.environ.copy()
env["CXXFLAGS"] = f'{env.get("CXXFLAGS", "")} \
-DVERSION_INFO="{self.distribution.get_version()}"'
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(["cmake", ext.src_dir] +
cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(["cmake", "--build", "."] +
build_args, cwd=self.build_temp)
print() # Add an empty line for cleaner output
setup(
name="ml4co",
version="0.1",
packages=["ml4co", "ml4co.ops"],
description="",
long_description="",
# add extension module
ext_modules=[CMakeExtension("ml4co", "./ml4co")],
# add custom build_ext command
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
url="",
)
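# Typical invocations for this CMake-backed build (a sketch; exact flags depend
# on your toolchain):
#   python setup.py build_ext --inplace   # compile the ml4co extension in place
#   pip install .                         # full build + install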
# ===== end of CL-LNS-main/setup.py =====
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import submitit
import os
import argparse
from graph_datasets.bipartite_graph_loader import BipartiteGraphLoader
import torch
from torch import autograd
import glob
import torch.nn.functional as F
import torch_geometric
import time
from graph_datasets.bipartite_graph_dataset import BipartiteGraphDataset, BipartiteGraphDatasets
from neural_nets.gnn_policy import GNNPolicy
from neural_nets.losses import LogScoreLoss, LinearScoreLoss
from tensorboardX import SummaryWriter as SummaryWriter
import numpy as np
import math
from IPython import embed
from graph_datasets.bipartite_graph_observations import augment_variable_features_with_dynamic_ones
from torchmetrics.functional import auroc
from os.path import exists
import pickle
import sys
from pytorch_metric_learning import losses
from pytorch_metric_learning.distances import DotProductSimilarity
class Args:
pass
def multi_hot_encoding(input):
max_val = torch.max(input, -1, keepdim=True).values - 1.0e-10
multihot = input >= max_val
return multihot.float()
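# Quick illustration of multi_hot_encoding: every entry within 1e-10 of its
# row maximum is marked 1 (computed over the last dimension):
#   multi_hot_encoding(torch.tensor([[0.2, 0.9, 0.9],
#                                    [1.0, 0.3, 0.5]]))
#   -> tensor([[0., 1., 1.],
#              [1., 0., 0.]])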
initial_solution = dict()
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#DEVICE = 'cpu'
log_score_loss_function = LogScoreLoss().to(DEVICE)
linear_score_loss_function = LinearScoreLoss().to(DEVICE)
bce_loss_function = torch.nn.BCEWithLogitsLoss(reduction="none").to(DEVICE)
infoNCE_loss_function = losses.NTXentLoss(temperature=0.07,distance=DotProductSimilarity()).to(DEVICE)
#data_loc = "training_data/"
def pad_tensor(input, pad_sizes, normalize, pad_value=-1e10):
    """
    Splits a tensor according to pad_sizes, pads each chunk so they all have
    the same length, then stacks the chunks into a single tensor.
    """
    max_pad_size = pad_sizes.max()
    chunks = input.split(pad_sizes.cpu().numpy().tolist())
    processed = []
    for chunk in chunks:
        if normalize:
            # Normalize the scores to ensure they fall in the [-1, 1] range
            max_val = torch.max(abs(chunk))
            chunk = chunk / max_val
        processed.append(F.pad(chunk, (0, max_pad_size - chunk.size(0)), 'constant', pad_value))
    return torch.stack(processed, dim=0)
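# Example: with pad_sizes [1, 2], the tensor [1., 2., 3.] is split into [1.]
# and [2., 3.], and the shorter chunk is right-padded with pad_value:
#   pad_tensor(torch.tensor([1., 2., 3.]), torch.tensor([1, 2]), normalize=False)
#   -> tensor([[ 1.0000e+00, -1.0000e+10],
#              [ 2.0000e+00,  3.0000e+00]])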
def load_policy_from_checkpoint(args):
policy = GNNPolicy(args.gnn_type)
    try:
        ckpt = torch.load(args.warmstart, map_location=DEVICE)
    except Exception as e:
        print("Checkpoint " + args.warmstart + " not found, bailing out: " + str(e))
        sys.exit(1)
policy.load_state_dict(ckpt.state_dict())
#policy = policy.to(DEVICE)
#model_version = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print("Loaded checkpoint")
print(f"Will run evaluation on {DEVICE} device", flush=True)
#embed()
return policy
def process(args, policy, data_loader, optimizer=None):
"""
This function will process a whole epoch of training or validation, depending on whether an optimizer is provided.
"""
prefix = "Train" if optimizer else "Eval"
#embed()
if args.loss == "linear_score":
loss_function = linear_score_loss_function
elif args.loss == "log_score":
loss_function = log_score_loss_function
else:
loss_function = bce_loss_function
mean_loss = 0.0
mean_acc = 0.0
mean_auc = 0.0
mean_offby = 0.0
top_k = [1, 3, 5, 10]
k_acc = [0.0, 0.0, 0.0, 0.0]
n_iters = 0
n_samples_processed = 0
n_positive_samples = 0
n_negative_samples = 0
start = time.time()
n_samples_previously_processed = 0
history_window_size = 3
with torch.set_grad_enabled(optimizer is not None):
for batch in data_loader:
assert not torch.isnan(batch.constraint_features).any()
assert not torch.isnan(batch.edge_attr).any()
assert not torch.isnan(batch.variable_features).any()
assert not torch.isnan(batch.edge_index).any()
assert not torch.isinf(batch.constraint_features).any()
assert not torch.isinf(batch.edge_attr).any()
assert not torch.isinf(batch.variable_features).any()
assert not torch.isinf(batch.edge_index).any()
batch = batch.to(DEVICE)
# TO DO: Fix the dataset instead
if torch.isnan(batch.candidate_scores).any():
print("Skipping batch with NaN scores")
continue
global initial_solution
batch = augment_variable_features_with_dynamic_ones(batch, args, initial_solution)
# Compute the logits (i.e. pre-softmax activations) according to the policy on the concatenated graphs
try:
logits = policy(batch.constraint_features, batch.edge_index, batch.edge_attr, batch.variable_features)
except RuntimeError as e:
print("Skipping batch due to error: " + str(e))
continue
# Index the results by the candidates, and split and pad them
#pred_scores = pad_tensor(logits[batch.candidates], batch.nb_candidates, normalize=False)
pred_scores = pad_tensor(logits, batch.nb_candidates, normalize=False)
#pred_scores = torch.sigmoid(pred_scores)
true_scores = pad_tensor(batch.candidate_scores, batch.nb_candidates, normalize=False)
assert not torch.isnan(pred_scores).any()
assert not torch.isnan(true_scores).any()
#assert not torch.isnan(batch.candidate_choices).any()
if args.loss == "cross_entropy":
# Compute the usual cross-entropy classification loss
loss = F.cross_entropy(pred_scores, batch.candidate_choices)
elif args.loss == "bce":
multi_hot_labels = multi_hot_encoding(true_scores)
#print("lost function is bce")
raw_loss = bce_loss_function(pred_scores, multi_hot_labels)
batch_loss = torch.mean(raw_loss, 1)
loss_sum = torch.sum(torch.mul(batch_loss, batch.batch_weight))
loss = torch.div(loss_sum, torch.sum(batch.batch_weight))
elif args.loss == "nt_xent":
# # Try https://kevinmusgrave.github.io/pytorch-metric-learning/losses/#ntxentloss
# # Can also try https://kevinmusgrave.github.io/pytorch-metric-learning/losses/#supconloss.
# assert False # TBD
# loss = loss_function(pred_labels, true_labels)
#embed()
batch_size = pred_scores.shape[0]
multi_hot_labels = multi_hot_encoding(true_scores)
embeddings = torch.sigmoid(pred_scores)
anchor_positive = []
anchor_negative = []
positive_idx = []
negative_idx = []
total_sample = batch_size
#embed()
for i in range(batch_size):
if batch.batch_weight[i].item() == 1:
#embed()
#anchor.append(i)
if len(batch.info["positive_samples"][i]) == 0: #due to unknown bugs for SC
#embed()
continue
ground_truth_improvement = max(batch.info["positive_labels"][i])
for j in range(len(batch.info["positive_samples"][i])):
improvement_j = batch.info["positive_labels"][i][j]
if improvement_j >= ground_truth_improvement * 0.5:
anchor_positive.append(i)
positive_idx.append(total_sample)
embeddings = torch.cat([embeddings, torch.tensor([batch.info["positive_samples"][i][j]]).to(DEVICE)])
total_sample += 1
n_positive_samples += 1
for j in range(len(batch.info["negative_samples"][i])):
improvement_j = batch.info["negative_labels"][i][j]
if improvement_j <= ground_truth_improvement * 0.05:
anchor_negative.append(i)
negative_idx.append(total_sample)
embeddings = torch.cat([embeddings, torch.tensor([batch.info["negative_samples"][i][j]]).to(DEVICE)])
total_sample += 1
n_negative_samples += 1
triplets = (torch.tensor(anchor_positive).to(DEVICE), torch.tensor(positive_idx).to(DEVICE), torch.tensor(anchor_negative).to(DEVICE), torch.tensor(negative_idx).to(DEVICE))
loss = infoNCE_loss_function(embeddings, indices_tuple = triplets)
else:
# use the log or linear score loss
normalized_scores = normalize_tensor(batch.candidate_scores)
loss = loss_function(logits[batch.candidates], normalized_scores)
if math.isnan(loss.item()):
continue
assert not math.isnan(loss.item())
if not (loss.item() >= 0 or torch.sum(batch.batch_weight).item() == 0):
print("Error")
embed()
assert loss.item() >= 0 or torch.sum(batch.batch_weight).item() == 0, f"loss = {loss.item()}, #samples = {torch.sum(batch.batch_weight).item()}"
if optimizer is not None:
optimizer.zero_grad()
loss.backward()
optimizer.step()
#embed()
mean_loss += loss.item() * torch.sum(batch.batch_weight).item()
#mean_loss += loss_sum.item()
n_samples_processed += torch.sum(batch.batch_weight).item()# batch.num_graphs
n_iters += 1
#embed()
for i in range(multi_hot_labels.shape[0]):
if batch.batch_weight[i].item() == 0:
continue
mean_auc += auroc(torch.sigmoid(pred_scores)[i], multi_hot_labels.int()[i], pos_label = 1).item()
if n_iters % args.checkpoint_every == 0:
end = time.time()
speed = (n_samples_processed - n_samples_previously_processed) / (end - start)
start = time.time()
n_samples_previously_processed = n_samples_processed
print(f"{prefix} loss: {mean_loss/n_samples_processed:0.3f}, auc: {mean_auc/n_samples_processed:0.3f}, speed: {speed} samples/s")
if optimizer:
print("Checkpointing model")
torch.save(policy, args.checkpoint)
if n_samples_processed > 0:
mean_loss /= n_samples_processed
mean_acc /= n_samples_processed
mean_auc /= n_samples_processed
mean_offby /= n_samples_processed
for i in range(len(k_acc)):
k_acc[i] /= n_samples_processed
else:
mean_loss = float("inf")
mean_acc = 0
mean_offby = float("inf")
mean_auc = 0
for i in range(len(k_acc)):
k_acc[i] = 0
print("n_samples_processed", n_samples_processed)
return mean_loss, mean_auc #, mean_offby, k_acc
def train_model(args):
train_loader = BipartiteGraphLoader(args.train_db, shuffle=True, first_k=args.train_db_first_k)
valid_loader = BipartiteGraphLoader(args.valid_db, shuffle=False)
print(f"Training on {train_loader.num_examples()} examples")
print(f"Evaluating on {valid_loader.num_examples()} examples")
#from IPython import embed; embed()
print(F"Using DEVICE {DEVICE}")
tb_writer = SummaryWriter(log_dir=args.tensorboard, comment="neural_LNS")
policy = GNNPolicy(args.gnn_type).to(DEVICE)
    if args.warmstart is not None:
        print("Warmstarting training, loading from checkpoint %s" % (args.warmstart))
policy = load_policy_from_checkpoint(args)
policy = policy.to(DEVICE)
print(f"Checkpoint will be saved to {args.checkpoint}")
num_of_parameters = sum(p.numel() for p in policy.parameters() if p.requires_grad)
print("number of parameters =", num_of_parameters)
learning_rate = args.lr
best_valid_loss = float("inf")
last_improved = 0
optimizer = torch.optim.AdamW(policy.parameters(), lr=learning_rate, weight_decay=args.weight_decay, amsgrad=True)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 5, eta_min=learning_rate/10, verbose=False)
for epoch in range(args.num_epochs):
start = time.time()
print(f"Starting epoch {epoch+1}", flush=True)
with autograd.set_detect_anomaly(args.detect_anomalies):
train_iterator = train_loader.load(batch_size=args.batch_size) #32
train_loss, train_auc = process(args, policy, train_iterator, optimizer)
print(f"Train loss: {train_loss:0.3f}, Train auc: {train_auc:0.3f}")
valid_iterator = valid_loader.load(batch_size=args.batch_size) #32
valid_loss, valid_auc = process(args, policy, valid_iterator, None)
print(f"Valid loss: {valid_loss:0.3f}, Valid auc: {valid_auc:0.3f}")
end = time.time()
tb_writer.add_scalar("Train/Loss", train_loss, global_step=epoch)
tb_writer.add_scalar("Train/Auc", train_auc, global_step=epoch)
tb_writer.add_scalar("Valid/Loss", valid_loss, global_step=epoch)
tb_writer.add_scalar("Valid/Auc", valid_auc, global_step=epoch)
# Done with one epoch, we can freeze the normalization
policy.freeze_normalization()
# Anneal the learning rate if requested
if args.anneal_lr:
scheduler.step()
# Save the trained model
print(f"Done with epoch {epoch+1} in {end-start:.1f}s, checkpointing model", flush=True)
torch.save(policy, args.checkpoint+"_epoch%d"%(epoch))
# Check if we need to abort, adjust the learning rate, or just give up
if math.isnan(train_loss) or math.isnan(valid_loss):
print("NaN detected in loss, aborting")
break
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
last_improved = epoch
print("Checkpointing new best model in " + args.checkpoint + "_best")
torch.save(policy, args.checkpoint + "_best")
elif epoch - last_improved > args.give_up_after:
print("Validation loss didn't improve for too many epochs, giving up")
break
elif epoch - last_improved > args.decay_lr_after:
learning_rate /= 2
print(f"Adjusting the learning rate to {learning_rate}")
optimizer = torch.optim.Adam(policy.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 5, eta_min=learning_rate/10, verbose=False)
# Give the model some time to improve with the new learning rate
last_improved = epoch
def train(problem, gnn_type = "gat", feature_set = "feat2", batch_size = 32, warmstart = None, loss = "bce", notes = '', data_loc = None):
print("Starting training model on " + problem, flush=True)
print("gnn_type = ", gnn_type, "feature_set=", feature_set)
assert not (data_loc is None), "no training data location provided"
save_to_folder = "model/model_%s_%s_%s_%s_%s/" % (problem, feature_set, "no" if warmstart is None else "warmstart", loss, notes)
try:
os.mkdir(save_to_folder)
except OSError as error:
print(error)
args = Args()
args.problem = problem
args.num_epochs=30
args.batch_size = batch_size
args.lr=0.001
args.anneal_lr = False
args.decay_lr_after=20
args.give_up_after=100
args.train_db_first_k=None
args.weight_decay=0.00005
args.window_size = 3
args.loss = loss
args.gnn_type = gnn_type
experiment = feature_set + "_" + args.gnn_type
args.experiment = experiment
args.warmstart = warmstart
args.tensorboard = save_to_folder + "neural_LNS_" + problem + "_" + experiment + ".tb"
args.checkpoint = save_to_folder + "neural_LNS_" + problem + "_" + experiment + ".pt"
args.checkpoint_every=40
train_dbs = []
valid_dbs = []
dir = data_loc+"/*.db"
num_data_file = 0
for dataset in glob.glob(dir):
num_data_file += 1
    validation_cutoff = int(num_data_file * 0.125)
for i, dataset in enumerate(glob.glob(dir)):
        try:
            train_loader = BipartiteGraphLoader(dataset, shuffle=True)
        except Exception:
            continue
if train_loader.num_examples() == 0:
continue
if i >= validation_cutoff:
train_dbs.append(dataset)
else:
valid_dbs.append(dataset)
args.train_db = "+".join(train_dbs)
args.valid_db = "+".join(valid_dbs)
args.detect_anomalies = False
train_model(args)
torch.cuda.empty_cache()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--problem-set", default="INDSET_train",
help="Problem set")
parser.add_argument("--gnn-type", default="gat", type=str,
help="GNN type: gasse or gat")
parser.add_argument("--feature-set", default="feat2", type=str,
help="feat1: Gasse's feature only; feat2: Gasse+Khalil features; feat3: feat2+LB RELAX features")
parser.add_argument("--loss", default="nt_xent", type=str,
help="nt_xent: contrastive loss; bce: bce loss")
parser.add_argument("--data-loc", default=None, type=str,
help="Provide the dataset folder location")
parser.add_argument("--wind-size", default=3, type = int,
help="window size = the number of past incumbent features in features")
input_args = parser.parse_args()
if input_args.data_loc is None:
input_args.data_loc = "training_data/" + input_args.problem_set
train(input_args.problem_set, gnn_type = input_args.gnn_type, feature_set = input_args.feature_set, loss = input_args.loss, data_loc = input_args.data_loc)
# ===== end of CL-LNS-main/train_neural_LNS.py =====
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from graph_datasets.bipartite_graph import *
from graph_datasets.bipartite_graph_dataset import BipartiteGraphDataset
import graph_datasets.bipartite_graph_observations as bgo
from instance_loader import InstanceLoader
from ilp_model import Solution
import argparse
import copy
import random
import pyscipopt
from neural_nets.gnn_policy import GNNPolicy
from pyscipopt import quicksum
import time
import ecole
import networkx as nx
import pickle
import statistics
from graph_datasets.featurization_test import make_obs
import os
import sys
from IPython import embed
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
WIND_SIZE = 3
COLLECT_SOLVE_TIME_LIMIT = 60 * 60 * 1  # 1 hour
STEP_PER_COLLECT = 1
class MyEvent(pyscipopt.Eventhdlr):
def eventinit(self):
print("init event")
self._start_time = time.monotonic()
self.scip_log = []
self.start_time = time.monotonic()
self.model.catchEvent(pyscipopt.SCIP_EVENTTYPE.BESTSOLFOUND, self)
def eventexit(self):
print("exit event")
#self.model.dropEvent(pyscipopt.SCIP_EVENTTYPE.BESTSOLFOUND, self)
def eventexec(self, event):
print("exec event")
self.end_time = time.monotonic()
#obj = self.model.getPrimalbound()
#print(obj, self.end_time - self._start_time)
sol = self.model.getBestSol()
obj = self.model.getSolObjVal(sol)
Sol = Solution(self.model, sol, obj)
log_entry = dict()
log_entry['best_primal_sol'] = Sol
log_entry['best_primal_scip_sol'] = sol
log_entry['primal_bound'] = obj
log_entry['solving_time'] = self.end_time - self.start_time
log_entry['iteration_time'] = self.end_time - self.start_time
log_entry['selection_time'] = 0
var_index_to_value = dict()
for v in self.model.getVars():
v_name = v.name
v_value = Sol.value(v)
var_index_to_value[v_name] = v_value
log_entry['var_index_to_value'] = copy.deepcopy(var_index_to_value)
self.scip_log.append(log_entry)
self.start_time = self.end_time
#print(log_entry['primal_bound'], log_entry['solving_time'], self.end_time - self._start_time)
def run_vanilla_scip(model, args):
model = model.__repr__.__self__
event = MyEvent()
model.includeEventhdlr(
event,
"",
""
)
model.setParam("limits/time", args.time_limit)
if "AGGR" in args.destroy_heuristic:
print("Enabled aggressive mode for BnB with SCIP")
model.setHeuristics(pyscipopt.scip.PY_SCIP_PARAMSETTING.AGGRESSIVE)
model.optimize()
return event.scip_log
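# Usage sketch for the vanilla-SCIP baseline above (args is assumed to carry
# time_limit and destroy_heuristic; the model unwrapping mirrors the callers):
#   scip_log = run_vanilla_scip(ecole_model.as_pyscipopt(), args)
#   for entry in scip_log:
#       print(entry['primal_bound'], entry['solving_time'])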
NUM_OF_EXPERT_SAMPLES = 50
def isInteger(x):
    return abs(x - round(x)) <= 1e-8
def add_scip_config_to_mip_model(model, scip_config):
    for param, value in scip_config.items():
        model.setRealParam(param, value)
    return model
def scip_solve(model, incumbent_solution = None, scip_config = None, timer = None, get_initial_solution = False, primal_bound = None, prev_LNS_log = None, get_num_solutions = 1, mute = False, isMIPLIB = False):
start_time = time.monotonic()
if primal_bound is not None:
objective_sense = model.getObjectiveSense()
if objective_sense == "minimize":
model.addCons(model.getObjective() <= primal_bound + 1e-8)
#if not mute:
#print("---added a new constraint using the primal bound for minimization")
else:
model.addCons(model.getObjective() >= primal_bound - 1e-8)
#if not mute:
#print("---added a new constraint using the primal bound for maximization")
#print("added a new constraint using the primal bound")
#model = add_scip_config_to_mip_model(model, scip_config)
if scip_config is not None:
for param, value in scip_config.items():
#print(param, value)
model.setParam(param, value)
found = True
init_time = None
if get_initial_solution == True:
found = False
#runtime_left = model.getParam('limits/time')
runtime_left = 900
#time_limit = 610 if isMIPLIB else 10
time_limit = model.getParam('limits/time')
while not found and time_limit <= runtime_left:
#if time_limit * 2 >= runtime_left:
# time_limit = runtime_left
#time_limit = min(time_limit, runtime_left)
model.setParam('limits/time', time_limit)
start_time = time.monotonic()
#embed()
model.optimize()
end_time = time.monotonic()
init_time = end_time - start_time
num_solutions_found = model.getNSols()
found = (num_solutions_found > 0)
#runtime_left -= time_limit
            if time_limit >= runtime_left - 1e-3:
                break
            time_limit *= 2
            time_limit = min(time_limit, runtime_left)
else:
model.optimize()
end_time = time.monotonic()
init_time = end_time - start_time
if not mute:
print("finished optimizing sub mip")
end_time = time.monotonic()
status = model.getGap()#model.getStatus()
log_entry = None
if found == True:
if model.getNSols() == 0: # if no solution in a LNS iteration, then return the same copy of the previous log but change the runtime
if prev_LNS_log is None:
return -1, None
log_entry = dict()
for k, v in prev_LNS_log.items():
log_entry[k] = v
#log_entry = copy.deepcopy(prev_LNS_log)
log_entry['solving_time'] = init_time
return status, log_entry
sol = model.getBestSol()
obj = model.getSolObjVal(sol)
Sol = Solution(model, sol, obj)
log_entry = {}
log_entry['best_primal_sol'] = Sol
log_entry['best_primal_scip_sol'] = sol
log_entry['primal_bound'] = obj
if not (init_time is None):
log_entry['solving_time'] = init_time
log_entry['iteration_time'] = init_time
else:
log_entry['solving_time'] = end_time - start_time
log_entry['iteration_time'] = end_time - start_time
log_entry['selection_time'] = 0
var_index_to_value = dict()
for v in model.getVars():
v_name = v.name
v_value = Sol.value(v)
var_index_to_value[v_name] = v_value
log_entry['var_index_to_value'] = copy.deepcopy(var_index_to_value)
if get_num_solutions > 1:
var_index_to_values = dict()
for v in model.getVars():
var_index_to_values[v.name] = []
#embed()
sol_list = model.getSols()
obj_list = []
sol_list.reverse()
#if len(sol_list) > 30:
# sol_list= sol_list[:30]
            for sol in sol_list:
                obj = model.getSolObjVal(sol)
                Sol = Solution(model, sol, obj)
if primal_bound is not None:
objective_sense = model.getObjectiveSense()
if objective_sense == "minimize":
if obj >= primal_bound - 1e-8: continue
#model.addCons(model.getObjective() <= primal_bound + 1e-8)
else:
if obj <= primal_bound + 1e-8: continue
#model.addCons(model.getObjective() >= primal_bound - 1e-8)
for v in model.getVars():
v_name = v.name
v_value = Sol.value(v)
v_incumbent_value = incumbent_solution.value(v)
var_index_to_values[v_name].append(0 if round(v_value) == round(v_incumbent_value) else 1)
obj_list.append((obj, primal_bound))
log_entry['var_index_to_values'] = copy.deepcopy(var_index_to_values)
log_entry['primal_bounds'] = copy.deepcopy(obj_list)
#embed()
else:
log_entry['var_index_to_values'] = None
log_entry['primal_bounds'] = None
#log_entry['solving_time_calibrated'] = timer.elapsed_calibrated_time
#sol_data.write(log_entry, force_save_sol=True)
#print(sol)
return status, log_entry
def get_LP_relaxation_solution(model):
LP_relaxation = pyscipopt.Model(sourceModel = model, origcopy = True)
for var in LP_relaxation.getVars():
LP_relaxation.chgVarType(var, 'C')
scip_solve_LP_relaxation_config = {
'limits/time' : 300,
}
#status, log_entry = scip_solve(LP_relaxation, scip_config = scip_solve_LP_relaxation_config)
return scip_solve(LP_relaxation, scip_config = scip_solve_LP_relaxation_config)
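# Sketch: the helper above relaxes every variable to continuous ('C') and
# solves the resulting LP with a 300s cap; reading a fractional value back
# (names hypothetical):
#   status, entry = get_LP_relaxation_solution(model)
#   if entry is not None:
#       frac_val = entry['best_primal_sol'].value(some_var)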
def random_sample_variable_based(model, G, variables_to_nodes, neighborhood_size, pre_selected_pivot = None, pivot_num = 1):
all_int_variables = [v.name for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
pivot_node = []
for i in range(pivot_num):
sample_var = random.choice(all_int_variables)
while variables_to_nodes[sample_var] in pivot_node:
sample_var = random.choice(all_int_variables)
pivot_node.append(variables_to_nodes[sample_var])
if pre_selected_pivot is not None:
pivot_node = [variables_to_nodes[var] for var in pre_selected_pivot]
destroy_nodes = pivot_node
current_group = pivot_node
top = [v for v in G.nodes() if G.nodes[v]['bipartite'] == 0]
pos = nx.bipartite_layout(G, top)
for u, v in G.edges():
assert(G.nodes[u]['bipartite'] == 0)
assert(G.nodes[v]['bipartite'] == 1)
while len(destroy_nodes) < neighborhood_size:
new_group = []
for v in current_group:
for n in G.neighbors(v):
new_group.append(n)
#print(G.in_degree(n))
assert(G.nodes[n]['bipartite'] == 1)
new_group = list(set(new_group))
G_predecessors = []
for v in new_group:
for n in G.predecessors(v):
if not (G.nodes[n]["scip_variable"] in all_int_variables):
continue
G_predecessors.append(n)
assert(G.nodes[n]['bipartite'] == 0)
#new_group = [n for v in current_group for n in G.neighbors(v)]
#G_predecessors = [n for v in new_group for n in G.predecessors(v)]
G_predecessors = list(set(G_predecessors) - set(destroy_nodes))
if len(G_predecessors) == 0: break
for v in G_predecessors:
assert G.nodes[v]['bipartite'] == 0, str(v)
if len(G_predecessors) + len(destroy_nodes) <= neighborhood_size:
destroy_nodes = destroy_nodes + G_predecessors
else:
destroy_nodes = destroy_nodes + random.sample(G_predecessors, neighborhood_size - len(destroy_nodes))
current_group = copy.deepcopy(G_predecessors)
for v in destroy_nodes:
assert(G.nodes[v]["scip_variable"] in all_int_variables)
destroy_variables = [G.nodes[v]["scip_variable"] for v in destroy_nodes]
assert(len(destroy_variables) <= neighborhood_size)
return destroy_variables
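# The two helpers below calibrate per-variable scores so that the expected
# number of sampled (destroyed) variables matches neighborhood_size:
# normalize_score bisects an exponent m (raising scores in (0, 1] to a larger
# power shrinks their sum monotonically), while normalize_score2 bisects a
# temperature on the logits before the sigmoid; both stop at 1e-8 precision.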
def normalize_score(score, neighborhood_size):
l = 0
r = 100
while r - l > 1e-8:
m = (l + r) * 0.5
tp_score = torch.pow(score, m)
tp_sum = torch.sum(tp_score).item()
if tp_sum > neighborhood_size:
l = m
else:
r = m
return torch.pow(score, l)
def normalize_score2(logit, neighborhood_size):
l = 0
r = 1
while r - l > 1e-8:
m = (l + r) * 0.5
tp_logit = torch.mul(logit, m)
tp_score = torch.sigmoid(tp_logit)
tp_sum = torch.sum(tp_score).item()
if tp_sum < neighborhood_size:
r = m
else:
l = m
tp_logit = torch.mul(logit, l)
tp_score = torch.sigmoid(tp_logit)
return tp_score
#ML_info = (policy, observation, incumbent_history, LB_relaxation_history)
def create_neighborhood_with_heuristic(model, LNS_log, neighborhood_size = 20, heuristic = "RANDOM", bipartite_graph = None, variables_to_nodes = None, improved = None, num_samples = 30, eps_clip = 0.05, ML_info = None, original_neighborhood_size = None, get_num_solutions = 1):
if original_neighborhood_size is None:
original_neighborhood_size = neighborhood_size
all_variables = model.getVars()
all_int_variables = [v.name for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]] # currently only considering binary variables
objective_sense = model.getObjectiveSense()
obj_sense = 1 if objective_sense == "minimize" else -1
if heuristic == "RANDOM":
if neighborhood_size >= len(all_int_variables):
return all_int_variables, None
else:
return random.sample(all_int_variables, neighborhood_size), None
elif heuristic == "VARIABLE":
assert(bipartite_graph is not None)
assert(variables_to_nodes is not None)
return random_sample_variable_based(model, bipartite_graph, variables_to_nodes, neighborhood_size), None
elif "ML" in heuristic:
#embed()
ML_inference_start_time = time.monotonic()
assert ML_info is not None
local_branching_mip = pyscipopt.Model(sourceModel = model, origcopy = True)
#print(model)
#print(local_branching_mip)
incumbent_solution = LNS_log[-1]['best_primal_sol']
variables_equal_one = []
variables_equal_zero = []
all_int_variables = [v.name for v in local_branching_mip.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
for v in local_branching_mip.getVars():
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
if round(v_value) == 1:
variables_equal_one.append(v)
else:
variables_equal_zero.append(v)
#need to decide whether to use original neighborhood size or adaptive one
if "ORINH" in heuristic:
local_branching_mip.addCons(quicksum(v for v in variables_equal_zero) + quicksum( (1-v) for v in variables_equal_one) <= original_neighborhood_size)
print("constructed mip for local branching with neighorhood size %d" % (original_neighborhood_size))
else:
local_branching_mip.addCons(quicksum(v for v in variables_equal_zero) + quicksum( (1-v) for v in variables_equal_one) <= neighborhood_size)
print("constructed mip for local branching with neighorhood size %d" % (neighborhood_size))
int_var = [v for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
LB_relaxation_solution = []
if "feat1" in args.mode or "feat2" in args.mode:
#print("No LP solving")
for var in int_var:
LB_relaxation_solution.append(0)
LB_LP_relaxation_solution = LNS_log[-1]['best_primal_sol']
else:
LB_LP_relaxation_status, LB_LP_relaxation_log_entry = get_LP_relaxation_solution(local_branching_mip)
LB_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
for var in int_var:
LB_relaxation_solution.append(LB_LP_relaxation_solution.value(var))
#embed()
policy, observation, incumbent_history, _LB_relaxation_history = ML_info
LB_relaxation_history = copy.deepcopy(_LB_relaxation_history)
LB_relaxation_history.append(LB_relaxation_solution)
dynamic_features = torch.zeros((observation.column_features.shape[0], WIND_SIZE * 3), dtype = torch.float32)
number_of_history_added = 0
assert(len(incumbent_history) == len(LB_relaxation_history))
for i in reversed(range(len(LB_relaxation_history))):
dynamic_features[:, number_of_history_added*3] = torch.FloatTensor([1]*len(int_var))
dynamic_features[:, number_of_history_added*3+1] = torch.FloatTensor(incumbent_history[i])
if not ("feat1" in args.mode or "feat2" in args.mode):
dynamic_features[:, number_of_history_added*3+2] = torch.FloatTensor(LB_relaxation_history[i])
else:
dynamic_features[:, number_of_history_added*3+2] = torch.zeros(len(LB_relaxation_history[i]))
#print("No relaxation features")
number_of_history_added += 1
if number_of_history_added == WIND_SIZE:
break
observation.column_features[:, -WIND_SIZE * 3:] = dynamic_features
with torch.no_grad():
obs = (observation.row_features.to(DEVICE),
observation.edge_features.indices.to(DEVICE),
observation.edge_features.values.to(DEVICE),
observation.column_features.to(DEVICE))
logits = policy(*obs)
score = torch.sigmoid(logits)
distribution_destroy_variable = []
all_int_variables = [v.name for v in int_var]
for i, v in enumerate(model.getVars()):
if v.name in all_int_variables:
v_value = score[i].item()
v_logit = logits[i].item()
distribution_destroy_variable.append((v.name, v_value, v_logit))
distribution_destroy_variable.sort(key = lambda x: x[2])
#from IPython import embed; embed();
num_cand = len(distribution_destroy_variable)
info = dict()
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
destroy_variables = []
ML_inference_end_time = time.monotonic()
print("ML inference time=", ML_inference_end_time-ML_inference_start_time)
info["ML_time"] = ML_inference_end_time-ML_inference_start_time
#embed()
best_primal_bound = None
if "SAMPLE" in heuristic:
#embed()
normalized_score = normalize_score(score, neighborhood_size)
if torch.sum(normalized_score).item() > neighborhood_size * 1.5: #numerical issues
normalized_score = normalize_score2(logits, neighborhood_size)
#embed()
for i, v in enumerate(model.getVars()):
if v.name in all_int_variables:
v_value = normalized_score[i].item() #score[i].item()
coin_flip = random.uniform(0, 1)
if coin_flip <= v_value:
destroy_variables.append(v.name)
return destroy_variables, info
elif "GREEDY" in heuristic:
return [v_name for v_name, _, __ in distribution_destroy_variable[-min(neighborhood_size, num_cand):]], info
else:
assert False, "Unknown sampling methods for ML"
return destroy_variables, info
elif heuristic.startswith("LOCAL"):
local_branching_mip = pyscipopt.Model(sourceModel = model, origcopy = True)
#print(model)
#print(local_branching_mip)
incumbent_solution = LNS_log[-1]['best_primal_sol']
variables_equal_one = []
variables_equal_zero = []
#embed()
all_int_variables = [v.name for v in local_branching_mip.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
for v in local_branching_mip.getVars():
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
if round(v_value) == 1:
variables_equal_one.append(v)
else:
variables_equal_zero.append(v)
original_LP_relaxation_status, original_LP_relaxation_log_entry = None, None # get_LP_relaxation_solution(local_branching_mip)
local_branching_mip.addCons(quicksum(v for v in variables_equal_zero) + quicksum( (1-v) for v in variables_equal_one) <= neighborhood_size)
print("constructed mip for local branching")
scip_solve_local_branching_config = {
'limits/time' : 3600 if "LONG" in heuristic else 600,
}
if args.mode == "COLLECT" or args.collect_along_test == 1:
scip_solve_local_branching_config['limits/time'] = COLLECT_SOLVE_TIME_LIMIT
destroy_variables = []
if "RELAXATION" in heuristic:
LB_LP_relaxation_status, LB_LP_relaxation_log_entry = get_LP_relaxation_solution(local_branching_mip)
#original_LP_relaxation_solution = original_LP_relaxation_log_entry['best_primal_sol']
original_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
LB_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
both_integer = 0
LB_integer = 0
original_integer = 0
            for v in all_variables:
                if v.name in all_int_variables:
                    v_original_value = original_LP_relaxation_solution.value(v)
                    v_LB_value = LB_LP_relaxation_solution.value(v)
                    if isInteger(v_original_value) and isInteger(v_LB_value):
                        both_integer += 1
                    elif isInteger(v_original_value):
                        original_integer += 1
                    elif isInteger(v_LB_value):
                        LB_integer += 1
#print("---LB LP runtime", LB_LP_relaxation_log_entry['solving_time'])#, "original LP runtime", original_LP_relaxation_log_entry['solving_time'])
#print("---both integer", both_integer, "original integer", original_integer, "LB integer", LB_integer)
#print("---selecting using LP relaxation")
same_integer_value_inc_and_LB_LP = 0
same_integer_value_LB_and_LB_LP = 0
if "RS" in heuristic:
distribution_destroy_variable = []
for v in all_variables:
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
#v_LB_value = local_branching_solution.value(v)
v_LB_LP_value = LB_LP_relaxation_solution.value(v)
distribution_destroy_variable.append((v.name, abs(v_LB_LP_value - v_value), v_value))
best_destroy_variables = None
best_primal_bound = None
NUM_OF_EXPERT_SAMPLES = num_samples
for _ in range(NUM_OF_EXPERT_SAMPLES):
tmp_destroy_variables = []
for v_name, prob, t in distribution_destroy_variable:
coin_flip = random.uniform(0, 1)
#if coin_flip <= max(min(1 - eps_clip, prob), eps_clip):
if coin_flip <= (1 - 2 * eps_clip) * prob + eps_clip:
tmp_destroy_variables.append(v_name)
if NUM_OF_EXPERT_SAMPLES == 1:
info = dict()
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
return tmp_destroy_variables, info
sub_mip = create_sub_mip(model, tmp_destroy_variables, LNS_log[-1]['best_primal_sol'])
scip_solve_destroy_config = {
'limits/time' : 120,
}
status, log_entry = scip_solve(sub_mip, primal_bound = LNS_log[-1]['primal_bound'],
scip_config = scip_solve_destroy_config)
print("sample improvement", log_entry['primal_bound'])
if best_destroy_variables is None or log_entry['primal_bound'] * obj_sense < best_primal_bound * obj_sense:
best_primal_bound = log_entry['primal_bound']
best_destroy_variables = copy.deepcopy(tmp_destroy_variables)
print("best destroy variable chosen with obj =", best_primal_bound)
info = dict()
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
info["num_ori_relax_integer"] = original_integer
info["num_LB_relax_integer"] = LB_integer
info["num_both_integer"] = both_integer
return best_destroy_variables, info
elif "MI" in heuristic or "LI" in heuristic:
distribution_destroy_variable = []
for v in all_variables:
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
#v_LB_value = local_branching_solution.value(v)
v_LB_LP_value = LB_LP_relaxation_solution.value(v)
if True or abs(v_LB_LP_value - v_value) > 1e-8:
#distribution_destroy_variable.append((v.name, max(abs(v_LB_LP_value - v_value), 1 - abs(v_LB_LP_value - v_value)), v_value))
distribution_destroy_variable.append((v.name, abs(v_LB_LP_value - v_value), v_value))
distribution_destroy_variable.sort(key = lambda x: x[1])
#from IPython import embed; embed();
num_cand = len(distribution_destroy_variable)
info = dict()
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
info["num_ori_relax_integer"] = original_integer
info["num_LB_relax_integer"] = LB_integer
info["num_both_integer"] = both_integer
if "MI" in heuristic:
return [v_name for v_name, _, __ in distribution_destroy_variable[:min(num_cand, neighborhood_size)]], info
else:
return [v_name for v_name, _, __ in distribution_destroy_variable[-min(neighborhood_size, num_cand):]], info
#elif "LI" in heuristic:
# pass
else:
for v in all_variables:
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
#v_LB_value = local_branching_solution.value(v)
v_LB_LP_value = LB_LP_relaxation_solution.value(v)
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) == round(v_value):
same_integer_value_inc_and_LB_LP += 1
#print("---selecting using LP relaxation")
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) != round(v_value):
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
else:
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
#print("---num same integer values LB and LB LP", same_integer_value_LB_and_LB_LP)
#print("---num same integer values inc and LB LP", same_integer_value_inc_and_LB_LP)
#print("---num destroy variables", len(destroy_variables))
if len(destroy_variables) > neighborhood_size:
destroy_variables = random.sample(destroy_variables, neighborhood_size)
#print("num of variables selected by LB relaxation", len(destroy_variables), "with LP obj =", LB_LP_relaxation_log_entry['primal_bound'])
info = dict()
#"num_LB_relax_integer"
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
info["num_ori_relax_integer"] = original_integer
info["num_LB_relax_integer"] = LB_integer
info["num_both_integer"] = both_integer
return destroy_variables, info
else:
status, log_entry = scip_solve(local_branching_mip,
incumbent_solution = incumbent_solution,
primal_bound = LNS_log[-1]['primal_bound'],
prev_LNS_log = LNS_log[-1],
scip_config = scip_solve_local_branching_config,
get_num_solutions = get_num_solutions)
local_branching_solution = log_entry['best_primal_sol']
LB_LP_relaxation_status, LB_LP_relaxation_log_entry = get_LP_relaxation_solution(local_branching_mip)
#original_LP_relaxation_solution = original_LP_relaxation_log_entry['best_primal_sol']
if LB_LP_relaxation_log_entry is None:
original_LP_relaxation_solution = local_branching_solution
LB_LP_relaxation_solution = local_branching_solution
else:
original_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
LB_LP_relaxation_solution = LB_LP_relaxation_log_entry['best_primal_sol']
tmp_observation = dict()
tmp_observation["selected_by_LB"] = []
tmp_observation["selected_by_LB_relax"] = []
same_integer_value_inc_and_LB_LP = 0
same_integer_value_LB_and_LB_LP = 0
all_variables = local_branching_mip.getVars()
for v in all_variables:
if v.name in all_int_variables:
v_value = incumbent_solution.value(v)
v_LB_value = local_branching_solution.value(v)
v_LB_LP_value = LB_LP_relaxation_solution.value(v)
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) == round(v_LB_value):
same_integer_value_LB_and_LB_LP += 1
if round(v_LB_LP_value) == round(v_value):
same_integer_value_inc_and_LB_LP += 1
if heuristic.endswith("RELAXATION"):
print("---selecting using LP relaxation")
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) != round(v_value):
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
else:
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
#tmp_observation.append((v.name, v_value, v_LB_value, v_LB_LP_value))
else:
if round(v_LB_value) == round(v_value): continue
#destroy_variables.append(v.getIndex())
destroy_variables.append(v.name)
tmp_observation["selected_by_LB"].append((v.name, v_value, v_LB_value, v_LB_LP_value))
if isInteger(v_LB_LP_value):
if round(v_LB_LP_value) != round(v_value):
#destroy_variables.append(v.getIndex())
tmp_observation["selected_by_LB_relax"].append((v.name, v_value, v_LB_value, v_LB_LP_value))
else:
#destroy_variables.append(v.getIndex())
#destroy_variables.append(v.name)
tmp_observation["selected_by_LB_relax"].append((v.name, v_value, v_LB_value, v_LB_LP_value))
#print("---num same integer values LB and LB LP", same_integer_value_LB_and_LB_LP)
#print("---num same integer values inc and LB LP", same_integer_value_inc_and_LB_LP)
#print("num of variables selected by LB", len(destroy_variables), "with obj =", log_entry['primal_bound'], "runtime =", log_entry['solving_time'])
#print("selected by LB =", tmp_observation["selected_by_LB"])
#print("selected by LB relax=", tmp_observation["selected_by_LB_relax"])
assert(heuristic.endswith("RELAXATION") or len(destroy_variables) <= neighborhood_size)
info = dict()
info["LB_primal_solution"] = log_entry["primal_bound"]
info["LB_gap"] = status
info["LB_LP_relaxation_solution"] = LB_LP_relaxation_solution
if get_num_solutions > 1:
info["multiple_solutions"] = copy.deepcopy(log_entry['var_index_to_values'])
info["multiple_primal_bounds"] = copy.deepcopy(log_entry['primal_bounds'])
#return random.sample(all_int_variables, neighborhood_size)
return destroy_variables, info
def create_sub_mip(model, destroy_variables, incumbent_solution, local_branching_distance = None, mute = False):
sub_mip = pyscipopt.Model(sourceModel = model, origcopy = True)
num_free_variables = 0
all_variables = sub_mip.getVars()
    if len(destroy_variables) > 0:
        if isinstance(destroy_variables[0], str):
            destroy_variables_name = copy.deepcopy(destroy_variables)
        else:
            destroy_variables_name = [v.name for v in model.getVars() if v.getIndex() in destroy_variables]
else:
destroy_variables_name = []
variables_equal_one = []
variables_equal_zero = []
for v in all_variables:
if not (v.name in destroy_variables_name):
if not (v.vtype() in ["BINARY", "INTEGER"]):
continue
fixed_value = incumbent_solution.value(v)
sub_mip.chgVarLb(v, fixed_value)
sub_mip.chgVarLbGlobal(v, fixed_value)
sub_mip.chgVarUb(v, fixed_value)
sub_mip.chgVarUbGlobal(v, fixed_value)
#sub_mip.addCons(v >= fixed_value)
else:
            assert v.vtype() in ["BINARY", "INTEGER"], "destroy variable %s is not binary/integer but %s" % (v.name, v.vtype())
v_value = incumbent_solution.value(v)
if round(v_value) == 1:
variables_equal_one.append(v)
else:
variables_equal_zero.append(v)
num_free_variables += 1
if not mute:
print("num_free_variables =", num_free_variables)
if not (local_branching_distance is None):
if not mute:
print("added local branching constraint in sub-mip")
sub_mip.addCons(quicksum(v for v in variables_equal_zero) + quicksum( (1-v) for v in variables_equal_one) <= local_branching_distance)
return sub_mip
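# A minimal usage sketch (variable names are illustrative): free two integer
# variables around the incumbent and re-optimize the resulting sub-MIP.
#   sub_mip = create_sub_mip(model, ["x1", "x7"], incumbent_solution)
#   sub_mip.optimize()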
def get_bipartite_graph_representation(m, model): #m is a ecole mip model
model = m.as_pyscipopt()
bg = nx.DiGraph()
    #ecole.observation.NodeBipartite() did not work properly here, so we build
    #our own bipartite graph representation instead
var_name_to_index = dict()
for var in model.getVars():
var_name_to_index[var.name] = var.getIndex()
num_var = model.getNVars()
num_cons = model.getNConss()
for i in range(num_var):
bg.add_node(i)
bg.nodes[i]['bipartite'] = 0
for i in range(num_cons):
bg.add_node(i+num_var)
bg.nodes[i+num_var]['bipartite'] = 1
all_constraints = model.getConss()
for i, cons in enumerate(all_constraints):
var_in_cons = model.getValsLinear(cons)
for key, value in var_in_cons.items():
var_index = var_name_to_index[key]
bg.add_edge(var_index, i + num_var)
all_variables = list(model.getVars())
variables_to_nodes = dict()
for i, feat_dict in bg.nodes(data = True):
if i < len(all_variables):
#assert(i == all_variables[i].getIndex())
feat_dict.update({"scip_variable": all_variables[i].name})
variables_to_nodes.update({all_variables[i].name: i})
else:
break
for u, v in bg.edges():
assert(bg.nodes[u]['bipartite'] == 0)
assert(bg.nodes[v]['bipartite'] == 1)
return bg, variables_to_nodes
def print_log_entry_to_file(save_to_file, LNS_log):
with open(save_to_file, "wb") as f:
for log_entry in LNS_log:
log_entry.pop('best_primal_sol', None)
log_entry.pop('best_primal_scip_sol', None)
pickle.dump(LNS_log, f)
def extract_root_features(m, args, id):
m.disable_presolve()
observation, khalil = make_obs((bgo.BipartiteGraphObservations(), ecole.observation.Khalil2016(pseudo_candidates = True)), m, branching = False)
extract_end_time = time.monotonic()
branching_vars = np.array([i for i in range(observation.column_features.shape[0])])
observation.add_khalil_features(khalil, branching_vars)
return observation
def load_policy_from_checkpoint(args):
policy = GNNPolicy(args.gnn_type)
try:
ckpt = torch.load(args.model, map_location=DEVICE)
try_again = False
except Exception as e:
print("Checkpoint " + args.model + " not found, bailing out: " + str(e))
sys.exit(1)
policy.load_state_dict(ckpt.state_dict())
#policy = policy.to(DEVICE)
#model_version = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print("Loaded checkpoint")
print(f"Will run evaluation on {DEVICE} device", flush=True)
return policy
def get_perturbed_samples(args, model, destroy_variables, LNS_log, scip_solve_destroy_config, new_improvement, num_of_samples_to_generate, int_var):
var_name_to_index = dict()
fixed_variables = []
for i, var in enumerate(int_var):
var_name_to_index[var.name] = i
if not (var.name in destroy_variables):
fixed_variables.append(var.name)
primal_bound = LNS_log[-1]['primal_bound']
objective_sense = model.getObjectiveSense()
obj_sense = 1 if objective_sense == "minimize" else -1
collected_samples = []
primal_bounds = []
negative_labels = []
#embed()
for num_of_replaced_variables in range(5, len(destroy_variables)-1, 5):
no_negative_sample = 0
for t in range(90):
perturbed_destroy_variables = random.sample(destroy_variables, len(destroy_variables) - num_of_replaced_variables) + random.sample(fixed_variables, num_of_replaced_variables)
sub_mip = create_sub_mip(model, perturbed_destroy_variables, LNS_log[-1]['best_primal_sol'], mute = True)
scip_solve_destroy_config = {
'limits/time' : 240, # 240 for facilities 120 for the others
}
status, log_entry = scip_solve(sub_mip, incumbent_solution = LNS_log[-1]['best_primal_scip_sol'],
primal_bound = LNS_log[-1]['primal_bound'], scip_config = scip_solve_destroy_config, timer = None, prev_LNS_log = LNS_log[-1], mute = True)
improvement = abs(primal_bound - log_entry["primal_bound"])
improved = (obj_sense * (primal_bound - log_entry["primal_bound"]) > 1e-5)
new_primal_bound = log_entry["primal_bound"]
if (not improved) or (improvement < 0.05 * new_improvement):
print(f"Found negative samples with {num_of_replaced_variables} replaced, primal bound = {primal_bound}, new primal bound = {new_primal_bound}")
negative_sample = [0] * len(int_var)
for var_name in perturbed_destroy_variables:
negative_sample[var_name_to_index[var_name]] = 1
collected_samples.append(negative_sample)
primal_bounds.append((log_entry["primal_bound"], primal_bound))
negative_labels.append(improvement)
no_negative_sample = 0
else:
no_negative_sample += 1
if no_negative_sample >= 10:
print(f"No negative samples for 10 consecutive samples with {num_of_replaced_variables} variables replaced")
break
#print(f"This is not negative samples, primal bound = {primal_bound}, new primal bound = {new_primal_bound}")
if len(collected_samples) == num_of_samples_to_generate:
return collected_samples, primal_bounds, negative_labels
return collected_samples, primal_bounds, negative_labels
def run_LNS(m, args, id):
    # m: either an ecole.scip.Model, or an integer index into the problem set
    # (in which case the instance is re-loaded below)
instance_id = m
if type(instance_id) == int:
loader = InstanceLoader(presolve = args.presolve, competition_settings = False)
for i, _m in enumerate(loader.load(args.problem_set)):
if i == instance_id:
m = _m
break
observation = None
if (args.mode in ["COLLECT", "TEST_ML"]) or ("TEST_ML" in args.mode):
print("Initializing Ecole for feature extraction...This might take a few minutes")
observation = extract_root_features(m, args, id)
if type(instance_id) == int:
loader = InstanceLoader(presolve = args.presolve, competition_settings = False)
for i, _m in enumerate(loader.load(args.problem_set)):
if i == instance_id:
m = _m
break
model = m.as_pyscipopt()
int_var = [v for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
num_int_var = len(int_var) # currently only considering binary variables
args_neighborhood_size = args.neighborhood_size
if args.neighborhood_size == 0:
args.neighborhood_size = int(num_int_var * 0.2)
collection_local_branching_runtime = COLLECT_SOLVE_TIME_LIMIT
neighborhood_size = args.neighborhood_size
destroy_heuristic = args.destroy_heuristic
objective_sense = model.getObjectiveSense()
obj_sense = 1 if objective_sense == "minimize" else -1
print("Problem:",args.problem_set, instance_id)
print("Using destroy heuristics:", destroy_heuristic)
print("Neighborhood size:", neighborhood_size)
print("Preprocessing...")
if "VANILLA" in args.destroy_heuristic:
scip_log = run_vanilla_scip(model, args)
if args.save_log == 1:
print_log_entry_to_file("tmp/log/%s_%s_nhsize%d.txt"%(id, destroy_heuristic, 0), scip_log)
return
bg ,variables_to_nodes = get_bipartite_graph_representation(m, model)
if args.mode == "COLLECT" or args.collect_along_test == 1:
db = BipartiteGraphDataset(args.data_loc + "%s_%d.db"%(args.problem_set, instance_id))
#LB_relaxation_history = []
#incumbent_history = []
# find initial solution with SCIP
scip_solve_init_config = {
'limits/solutions' :10000,
'limits/time' : 610 if "MIPLIB" in args.problem_set else args.init_time_limit,
}
# scip_solve_init_config['limits/time'] = 300
timer = None
status, log_entry = scip_solve(model, scip_config = scip_solve_init_config, timer = timer,
get_initial_solution = True, isMIPLIB = "MIPLIB" in args.problem_set)
if log_entry is None:
print('Did not find incumbent solution for MIP: skipping this instance; try a longer runtime')
return
else:
print("initial solution obj =", log_entry['primal_bound'], "found in time", log_entry['solving_time'])
log_entry['limits/time'] = scip_solve_init_config['limits/time']
LNS_log = [log_entry]
improved = True
runtime_used = log_entry['solving_time']
count_no_improve = 0
print("solving steps limit =", args.num_solve_steps)
# initialize incumbent_history with the initial solution
if args.mode == "COLLECT" or "TEST_ML" in args.mode:
incumbent_solution = []
incumbent_history = []
improvement_history = []
LB_relaxation_history = []
for var in int_var:
incumbent_solution.append(log_entry["var_index_to_value"][var.name])
incumbent_history.append(incumbent_solution)
if "TEST_ML" in args.mode:
policy = load_policy_from_checkpoint(args)
policy = policy.to(DEVICE)
if "feat1" in args.mode:
observation.column_features[:, 23:] = torch.zeros(observation.column_features.shape[0], observation.column_features.shape[1]-23)
observation.column_features = torch.hstack((observation.column_features, torch.zeros(observation.column_features.shape[0], args.wind_size*3)))
#embed()
not_switched = True
if args.ml_neighborhood_size == 0:
args.ml_neighborhood_size = args.neighborhood_size
for s in range(args.num_solve_steps):
iteration_start_time = time.monotonic()
incumbent_solution = LNS_log[-1]['best_primal_scip_sol']
primal_bound = LNS_log[-1]['primal_bound']
ML_info = None
if "TEST_ML" in args.mode:
ML_info = (policy, observation, incumbent_history, LB_relaxation_history)
destroy_variables, info_destroy_heuristic = create_neighborhood_with_heuristic(model, LNS_log,
neighborhood_size = neighborhood_size, bipartite_graph = bg, variables_to_nodes = variables_to_nodes,
heuristic = destroy_heuristic, num_samples = args.num_samples, eps_clip = args.eps_clip,
            ML_info = ML_info, original_neighborhood_size = args.ml_neighborhood_size,  # note: ML-guided selection uses ml_neighborhood_size, not the adaptive one
get_num_solutions = 20 if args.mode == "COLLECT" else 1)
#print("destroy variables =", destroy_variables)
if "CONSTRAINTED_REPAIR" in args.destroy_heuristic:
sub_mip = create_sub_mip(model, destroy_variables, LNS_log[-1]['best_primal_sol'], local_branching_distance = args.neighborhood_size)
else:
sub_mip = create_sub_mip(model, destroy_variables, LNS_log[-1]['best_primal_sol'])
#print("sub mip created =", sub_mip)
scip_solve_destroy_config = {
'limits/time' : 120,
}
if args.mode == "COLLECT":
scip_solve_destroy_config['limits/time'] = collection_local_branching_runtime
status, log_entry = scip_solve(sub_mip, incumbent_solution = incumbent_solution,
primal_bound = LNS_log[-1]['primal_bound'], scip_config = scip_solve_destroy_config, timer = timer, prev_LNS_log = LNS_log[-1])
iteration_end_time = time.monotonic()
log_entry["iteration_time"] = iteration_end_time - iteration_start_time
log_entry["selection_time"] = log_entry["iteration_time"] - log_entry["solving_time"]
if "ML" in args.mode and "ML" in destroy_heuristic:
log_entry["ML_time"] = info_destroy_heuristic["ML_time"]
else:
log_entry["ML_time"] = 0
log_entry["destroy_variables"] = destroy_variables
log_entry["destroy_heuristic"] = destroy_heuristic
log_entry["neighborhood_size"] = neighborhood_size
if info_destroy_heuristic and "num_LB_relax_integer" in info_destroy_heuristic:
log_entry["num_LB_relax_integer"] = info_destroy_heuristic["num_LB_relax_integer"]
if info_destroy_heuristic and "num_ori_relax_integer" in info_destroy_heuristic:
log_entry["num_ori_relax_integer"] = info_destroy_heuristic["num_ori_relax_integer"]
if info_destroy_heuristic and "num_both_integer" in info_destroy_heuristic:
log_entry["num_both_integer"] = info_destroy_heuristic["num_both_integer"]
improvement = abs(primal_bound - log_entry["primal_bound"])
improved = (obj_sense * (primal_bound - log_entry["primal_bound"]) > 1e-5)
if improved == False:
if round(neighborhood_size * args.adaptive) < round(num_int_var * 0.5):
neighborhood_size = round(neighborhood_size * args.adaptive)
count_no_improve = 0
else:
neighborhood_size = round(num_int_var * 0.5)
count_no_improve += 1
if "GREEDY" in destroy_heuristic:
destroy_heuristic = destroy_heuristic.replace("GREEDY", "SAMPLE")
else:
count_no_improve = 0
LNS_log.append(log_entry)
if "TEST_ML" in args.mode and improved == True:
LB_relaxation_solution = []
incumbent_solution = []
relaxation_value = info_destroy_heuristic["LB_LP_relaxation_solution"]
for var in int_var:
LB_relaxation_solution.append(relaxation_value.value(var))
incumbent_solution.append(log_entry["var_index_to_value"][var.name])
LB_relaxation_history.append(LB_relaxation_solution)
incumbent_history.append(incumbent_solution)
improvement_history.append(improvement)
if (args.mode == "COLLECT" and improved == True) or (args.collect_along_test == 1 and s % STEP_PER_COLLECT == 0):
if args.collect_along_test == 1:
destroy_variables, info_destroy_heuristic = create_neighborhood_with_heuristic(model, LNS_log[:-1],
neighborhood_size = args.neighborhood_size if args.ml_neighborhood_size == 0 else args.ml_neighborhood_size,
bipartite_graph = bg, variables_to_nodes = variables_to_nodes,
heuristic = "LOCAL_BRANCHING", num_samples = args.num_samples, eps_clip = args.eps_clip,
ML_info = ML_info, original_neighborhood_size = args.neighborhood_size if args.ml_neighborhood_size == 0 else args.ml_neighborhood_size,
get_num_solutions = 20)
print("destroy variables =", destroy_variables)
assert info_destroy_heuristic is not None
relaxation_value = info_destroy_heuristic["LB_LP_relaxation_solution"]
assert relaxation_value is not None
#candidate_scores = [0] * num_int_var
candidate_scores = []
LB_relaxation_solution = []
incumbent_solution = []
for var in int_var:
if var.name in destroy_variables:
candidate_scores.append(1)
else:
candidate_scores.append(0)
LB_relaxation_solution.append(relaxation_value.value(var))
incumbent_solution.append(log_entry["var_index_to_value"][var.name])
new_improvement = abs(primal_bound - info_destroy_heuristic["LB_primal_solution"])
new_improved = (obj_sense * (primal_bound - info_destroy_heuristic["LB_primal_solution"]) > 1e-5)
if args.mode == "COLLECT" or (args.collect_along_test == 1 and improved == False and new_improved == True):
LB_relaxation_history.append(LB_relaxation_solution)
incumbent_history.append(incumbent_solution)
improvement_history.append(improvement)
negative_samples, negative_info, negative_labels = get_perturbed_samples(args, model, destroy_variables, LNS_log[:-1], scip_solve_destroy_config, new_improvement, 90, int_var)
candidates = [str(var.name) for var in int_var]
candidate_choice = None
info = dict()
positive_samples = []
positive_labels = []
for i in range(len(info_destroy_heuristic["multiple_primal_bounds"])):
positive_sample = [0] * len(int_var)
for j, var in enumerate(int_var):
positive_sample[j] = info_destroy_heuristic["multiple_solutions"][var.name][i]
positive_samples.append(positive_sample)
obj_info = info_destroy_heuristic["multiple_primal_bounds"][i]
positive_labels.append( abs(obj_info[0] - obj_info[1]))
info["num_positive_samples"] = len(positive_samples)
info["positive_samples"] = positive_samples
info["positive_labels"] = positive_labels
info["num_negative_samples"] = len(negative_samples)
info["negative_samples"] = negative_samples
info["negative_labels"] = negative_labels
info["#iteration"] = s
info["instance_id"] = id
info["incumbent_history"] = incumbent_history
info["LB_relaxation_history"] = LB_relaxation_history
info["neighborhood_size"] = args.neighborhood_size
info["LB_gap"] = info_destroy_heuristic["LB_gap"]
info["primal_bound"] = log_entry["primal_bound"] if args.mode == "COLLECT" else info_destroy_heuristic["LB_primal_solution"]
info["LB_runtime"] = log_entry["iteration_time"]
candidate_scores = torch.LongTensor(np.array(candidate_scores, dtype = np.int32))
graph = BipartiteGraph(observation.row_features, observation.edge_features.indices,
observation.edge_features.values, observation.column_features[:,:95],
candidates, candidate_choice, candidate_scores, info,
iteration = i, instance_id = id, incumbent_history = incumbent_history, LB_relaxation_history = LB_relaxation_history, improvement_history = improvement_history,
neighborhood_size = neighborhood_size)
if args.mode == "COLLECT" or (args.collect_along_test == 1 and new_improved == True):
assert len(LB_relaxation_history) + 1 == len(incumbent_history)
assert len(LB_relaxation_history) > 0
rslt = db.add(graph)
if not rslt:
print("Skipping duplicate datapoint")
else:
print("Saving to database")
if (improved == False and args.collect_along_test == 1 and new_improved == True):
LB_relaxation_history.pop()
incumbent_history.pop()
improvement_history.pop()
runtime_used += log_entry['iteration_time']
print("Finished LNS iteration #%d: obj_val = %.2f with time %.2f (total time used %.2f)" % (s, log_entry['primal_bound'], log_entry['iteration_time'], runtime_used))# -log_entry["ML_time"]))
if runtime_used >= args.time_limit: break
if args.save_log == 1:
print_log_entry_to_file("tmp/log/%s_%s_nhsize%d.txt"%(id, args.destroy_heuristic, args_neighborhood_size), LNS_log)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seed", default=0, type = int,
help="random seed")
parser.add_argument("--problem-set", default="INDSET_test",
help="Problem set")
parser.add_argument("--adaptive", default=1, type = float,
help = "adaptive neighborhood size")
parser.add_argument("--num-solve-steps", default=100000, type=int,
help="Number of LNS iterations")
parser.add_argument("--neighborhood-size", default=100, type=int,
help="Upper bound on the neighborhood size")
parser.add_argument("--ml-neighborhood-size", default=0, type=int,
help="ML neighborhood size")
parser.add_argument("--eps-clip", default=0.05, type=float,
help="Clipping on LB_relax::RS probablity, will affect actual neighborhood size")
parser.add_argument("--time-limit", default=3600, type=int,
help="Time limit per instance")
parser.add_argument("--init-time-limit", default=10, type=int,
help="Initial solution time limit")
parser.add_argument("--destroy-heuristic", default="RANDOM", type=str,
help="Destroy heuristics: RANDOM, LOCAL_BRANCHING, LOCAL_BRANCHING::RELAXATION, VARIABLE")
parser.add_argument("--mode", default="TEST", type=str,
help="Solving mode: COLLECT, TEST, TEST_ML")
parser.add_argument("--gnn-type", default="gat", type=str,
help="GNN type: gasse or gat")
parser.add_argument("--model", default=None, type=str,
help="Path to the ML model")
parser.add_argument("--num-samples", default=30, type=int,
help="Number of samples with sample-and-select-best heuristics")
parser.add_argument("--save-log", default=0, type = int,
help="save log (1) or not (0)")
parser.add_argument("--collect-along-test", default=0, type=int,
help="collect data along the trajectory generated by this one")
parser.add_argument("--wind-size", default=3, type = int,
help="window size = the number of past incumbent features in features")
parser.add_argument("--presolve", default=False, type = bool,
help="presolve or not")
args = parser.parse_args()
WIND_SIZE = args.wind_size
if args.mode == "COLLECT" or args.collect_along_test == 1:
if args.mode == "COLLECT":
args.destroy_heuristic = "LOCAL_BRANCHING"
try:
os.mkdir("training_data")
except OSError as error:
print(error)
try:
os.mkdir("training_data/" + args.problem_set)
except OSError as error:
print(error)
args.data_loc = "training_data/" + args.problem_set + "/"
print(args)
random.seed(args.seed)
    loader = InstanceLoader(presolve = args.presolve, competition_settings = False)  # presolve defaults to False; if changed here, keep it consistent with the loaders in run_LNS
if args.destroy_heuristic == "VANILLA":
args.adaptive = 1
for i, m in enumerate(loader.load(args.problem_set)):
model = m.as_pyscipopt()
#all_int_variables = [v.getIndex() for v in model.getVars() if v.vtype() in ["BINARY", "INTEGER"]]
name = args.problem_set + str(i)
if args.adaptive > 1:
name = args.problem_set + str(round(args.adaptive*100)) + "_" + str(i)
if args.mode == "COLLECT" or args.collect_along_test == 1:
name += "COLLECT"
run_LNS(i, args, id = name)
print("Finish LNS for MIP solving")
| CL-LNS-main | LNS.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pyscipopt as scip
# Wraps the SCIP solver under a common API
class ScipSolver:
def __init__(self, timeout_s=None):
self.constraints = []
self.maximize = True
self.timeout = timeout_s
self.model = scip.Model()
def create_integer_var(self, name, lower_bound, upper_bound):
v = self.model.addVar(name=name, lb=lower_bound, ub=upper_bound, vtype="I")
return v
def create_real_var(self, name, lower_bound, upper_bound):
v = self.model.addVar(name=name, lb=lower_bound, ub=upper_bound, vtype="C")
return v
def create_binary_var(self, name):
v = self.model.addVar(name=name, vtype="B")
return v
def set_objective_function(self, equation, maximize=True):
self.model.setObjective(equation)
if maximize:
self.model.setMaximize()
else:
self.model.setMinimize()
def add_constraint(self, cns):
self.model.addCons(cns)
def disable_presolver(self):
self.model.setPresolve(scip.SCIP_PARAMSETTING.OFF)
self.model.setBoolParam("lp/presolving", False)
def disable_cuts(self):
self.model.setSeparating(scip.SCIP_PARAMSETTING.OFF)
def disable_heuristics(self):
self.model.setHeuristics(scip.SCIP_PARAMSETTING.OFF)
def solve(self):
# Solve the problem. Return the result as a dictionary of values
# indexed by the corresponding variables or an empty dictionary if the
# problem is infeasible.
if self.timeout:
self.model.setParam('limits/time', self.timeout)
self.model.optimize()
sol = None
if self.model.getNSols() > 0:
sol = self.model.getBestSol()
return sol
def primal_dual_gap(self):
return (self.model.getObjVal(), self.model.getDualbound())
def primal_dual_integral(self):
# TBD
return None
def load(self, mps_filename):
self.model.readProblem(mps_filename)
def export(self, lp_output_filename):
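        # Not supported yet.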
assert False
def as_scip_model(self):
return self.model
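# A minimal usage sketch (values are illustrative): build a small MIP with the
# wrapper API defined above, solve it, and read the solution back through the
# returned pyscipopt Solution object.
if __name__ == "__main__":
    solver = ScipSolver(timeout_s=10)
    x = solver.create_integer_var("x", 0, 10)
    y = solver.create_integer_var("y", 0, 10)
    solver.add_constraint(2 * x + y <= 10)
    solver.set_objective_function(x + 3 * y, maximize=True)
    sol = solver.solve()
    if sol is not None:
        print("x =", sol[x], "y =", sol[y])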
| CL-LNS-main | scip_solver.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import torch
import torch.nn.init as init
#from neural_nets import prenorm
# GINConv network derived from https://arxiv.org/abs/1810.00826
# Added the ability to embed edge information as well.
class GINConv(torch_geometric.nn.MessagePassing):
def __init__(self, eps: float = 0.5, train_eps: bool = True):
#kwargs.setdefault('aggr', 'add')
#super(GINEConv, self).__init__(**kwargs)
super().__init__('add')
emb_size = 64
#self.final_norm = prenorm.Prenorm(emb_size, shift=False)
#self.feature_module_final = torch.nn.Sequential(
# self.final_norm,
# torch.nn.ReLU(),
# torch.nn.Linear(emb_size, emb_size)
#)
#self.feature_module_final = torch.nn.ReLU()
#self.post_conv_module = prenorm.Prenorm(emb_size, shift=False)
# output_layers
self.output_module = torch.nn.Sequential(
torch.nn.Linear(emb_size, emb_size * 2),
torch.nn.ReLU(),
torch.nn.Linear(emb_size * 2, emb_size * 4),
torch.nn.ReLU(),
torch.nn.Linear(emb_size * 4, emb_size * 2),
torch.nn.ReLU(),
torch.nn.Linear(emb_size * 2, emb_size),
)
#self.nn = nn
self.initial_eps = eps
if train_eps:
self.eps = torch.nn.Parameter(torch.Tensor([eps]))
else:
self.register_buffer('eps', torch.Tensor([eps]))
self.reset_parameters()
def reset_parameters(self):
for t in self.parameters():
if len(t.shape) == 2:
init.orthogonal_(t)
else:
init.normal_(t)
self.eps.data.fill_(self.initial_eps)
# def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
# edge_attr: OptTensor = None, size: Size = None) -> Tensor:
# """"""
# if isinstance(x, Tensor):
# x: OptPairTensor = (x, x)
# # Node and edge feature dimensionalites need to match.
# if isinstance(edge_index, Tensor):
# assert edge_attr is not None
# assert x[0].size(-1) == edge_attr.size(-1)
# elif isinstance(edge_index, SparseTensor):
# assert x[0].size(-1) == edge_index.size(-1)
# # propagate_type: (x: OptPairTensor, edge_attr: OptTensor)
# out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)
# x_r = x[1]
# if x_r is not None:
# out += (1 + self.eps) * x_r
# return self.nn(out)
#def message(self, x_j: Tensor, edge_attr: Tensor) -> Tensor:
#return F.relu(x_j + edge_attr)
    def freeze_normalization(self):
        # Normalization layers are currently disabled in this convolution, so
        # freezing is a no-op.
        pass
        #self.final_norm.freeze_normalization()
        #self.post_conv_module.freeze_normalization()

    def reset_normalization(self):
        # No-op, kept for API parity with the other convolutions that
        # GNNPolicy.freeze_normalization() calls into.
        pass

    @property
    def frozen(self):
        return False
def forward(self, left_features, edge_indices, edge_features, right_features):
"""
This method sends the messages, computed in the message method.
"""
output = self.propagate(edge_indices, size=(left_features.shape[0], right_features.shape[0]),
node_features=(left_features, right_features), edge_features=edge_features)
output += (1 + self.eps) * right_features
return self.output_module(output)
def message(self, node_features_j, edge_features):
output = torch.nn.functional.relu(node_features_j + edge_features)
return output
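# A minimal smoke test (shapes are illustrative): messages flow from 5 "left"
# nodes to 3 "right" nodes along 4 edges; every embedding is 64-dimensional to
# match the hard-coded emb_size above.
if __name__ == "__main__":
    conv = GINConv()
    left_features = torch.randn(5, 64)
    right_features = torch.randn(3, 64)
    edge_indices = torch.tensor([[0, 1, 2, 4], [0, 1, 2, 1]])  # (source, target)
    edge_features = torch.randn(4, 64)
    out = conv(left_features, edge_indices, edge_features, right_features)
    print(out.shape)  # torch.Size([3, 64])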
| CL-LNS-main | neural_nets/gin_convolution.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import torch
import torch.nn.init as init
from neural_nets import prenorm
# GATConvolution network derived https://arxiv.org/abs/2105.14491
# Added edge embedding as well
class GATConvolution(torch_geometric.nn.MessagePassing):
"""
Graph convolution layer. THis is the heart of our GNNPolicy
"""
def __init__(self,
negative_slope: float = 0.2, dropout: float = 0.,
**kwargs):
super().__init__('add')
emb_size = 64
self.heads = 8
self.in_channels = emb_size
self.out_channels = emb_size // self.heads
self.negative_slope = negative_slope
self.dropout = dropout
self.lin_l = torch.nn.Linear(self.in_channels, self.heads * self.out_channels, bias=True)
self.lin_r = torch.nn.Linear(self.in_channels, self.heads * self.out_channels, bias=True)
self.att = torch.nn.Parameter(torch.Tensor(1, self.heads, self.out_channels * 3))
# output_layer
self.output_module = torch.nn.Sequential(
torch.nn.Linear(2*emb_size, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size),
)
self.reset_parameters()
def reset_parameters(self):
init.orthogonal_(self.lin_l.weight)
init.orthogonal_(self.lin_r.weight)
init.orthogonal_(self.att)
def freeze_normalization(self):
pass
def reset_normalization(self):
pass
@property
def frozen(self):
return False
def forward(self, left_features, edge_indices, edge_features, right_features):
"""
This method sends the messages, computed in the message method.
"""
H, C = self.heads, self.out_channels
x_l = self.lin_l(left_features)
x_r = self.lin_r(right_features)
out = self.propagate(edge_indices, x=(x_l, x_r), size=(left_features.shape[0], right_features.shape[0]), edge_features=edge_features)
return self.output_module(torch.cat([out, right_features], dim=-1))
def message(self, x_j, x_i,
index,
edge_features):
x = torch.cat([x_i, x_j, edge_features], dim=-1)
x = torch.nn.functional.leaky_relu(x, self.negative_slope)
x = x.view(-1, self.heads, self.out_channels * 3)
alpha = (x * self.att).sum(dim=-1)
alpha = torch_geometric.utils.softmax(alpha, index)
alpha = torch.nn.functional.dropout(alpha, p=self.dropout, training=self.training)
x = x_j.view(-1, self.heads, self.out_channels) * alpha.unsqueeze(-1)
return x.view(-1, self.heads * self.out_channels)
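# A minimal smoke test mirroring the GIN example (shapes are illustrative):
# 5 "left" nodes attend into 3 "right" nodes along 4 edges, with
# 64-dimensional embeddings throughout.
if __name__ == "__main__":
    conv = GATConvolution()
    left_features = torch.randn(5, 64)
    right_features = torch.randn(3, 64)
    edge_indices = torch.tensor([[0, 1, 2, 4], [0, 1, 2, 1]])  # (source, target)
    edge_features = torch.randn(4, 64)
    out = conv(left_features, edge_indices, edge_features, right_features)
    print(out.shape)  # torch.Size([3, 64])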
| CL-LNS-main | neural_nets/gat_convolution.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class LogScoreLoss(torch.nn.Module):
"""
Loss function to weight sample loss by confidence in the target value
"""
def __init__(self):
super().__init__()
self.register_buffer("eps", torch.tensor([1e-6]))
def weight(self, input, target):
max_tgt = torch.max(target, dim=-1, keepdim=True).values
return torch.maximum(input, target) / max_tgt
def forward(self, input, target):
# Avoid division by zero
target = torch.maximum(target, self.eps)
main_loss = torch.log(input / target).abs()
# Handle predictions smaller than eps
neg_domain = (input / target - self.eps).abs() + torch.log(self.eps).abs()
loss = torch.where(input / target < self.eps, neg_domain, main_loss)
assert not torch.isnan(loss).any()
weighted = loss * self.weight(input, target)
assert not torch.isnan(weighted).any()
return weighted.mean()
class LinearScoreLoss(torch.nn.Module):
"""
Loss function to weight sample loss by confidence in the target value
"""
def __init__(self):
super().__init__()
self.register_buffer("eps", torch.tensor([1e-6]))
def weight(self, input, target):
max_tgt = torch.max(target, dim=-1, keepdim=True).values
return torch.maximum(input, target) / max_tgt
def forward(self, input, target):
# Avoid division by zero
target = torch.maximum(target, self.eps)
loss = (input - target).abs() / target
weighted = loss * self.weight(input, target)
return weighted.mean()
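# A minimal sketch of the weighting behaviour (values are illustrative):
# predictions close to their targets incur a small loss, and samples whose
# values are large relative to the per-row maximum get proportionally more
# weight.
if __name__ == "__main__":
    pred = torch.tensor([[0.9, 2.0, 5.0]])
    target = torch.tensor([[1.0, 2.0, 4.0]])
    print("log score loss:", LogScoreLoss()(pred, target).item())
    print("linear score loss:", LinearScoreLoss()(pred, target).item())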
| CL-LNS-main | neural_nets/losses.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn.init as init
from neural_nets import gat_convolution
from neural_nets import gin_convolution
from neural_nets import gasse_convolution
from neural_nets import prenorm
# Implements the branching policy described in
# https://papers.nips.cc/paper/2019/hash/d14c2267d848abeb81fd590f371d39bd-Abstract.html
class GNNPolicy(torch.nn.Module):
def __init__(self, gnn_type="gasse"):
super().__init__()
emb_size = 64
cons_nfeats = 10
edge_nfeats = 2
        var_nfeats = 104  # hard-coded; ideally this would be derived from the observation's feature size
# Constraint embedding
self.cons_norm = prenorm.Prenorm(cons_nfeats)
self.cons_embedding = torch.nn.Sequential(
self.cons_norm,
torch.nn.Linear(cons_nfeats, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size),
torch.nn.ReLU(),
)
# Edge embedding
self.edge_norm = prenorm.Prenorm(edge_nfeats)
self.edge_embedding = torch.nn.Sequential(
self.edge_norm,
torch.nn.Linear(edge_nfeats, emb_size),
)
#self.edge_embedding = torch.nn.Linear(edge_nfeats, emb_size)
# Variable embedding
self.var_norm = prenorm.Prenorm(var_nfeats, preserve_features=[2])
self.var_embedding = torch.nn.Sequential(
self.var_norm,
torch.nn.Linear(var_nfeats, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size),
torch.nn.ReLU(),
)
if gnn_type == "gasse":
self.conv_v_to_c = gasse_convolution.GasseConvolution()
self.conv_c_to_v = gasse_convolution.GasseConvolution()
elif gnn_type == "gin":
self.conv_v_to_c = gin_convolution.GINConv()
self.conv_c_to_v = gin_convolution.GINConv()
else:
self.conv_v_to_c = gat_convolution.GATConvolution()
self.conv_c_to_v = gat_convolution.GATConvolution()
self.output_module = torch.nn.Sequential(
torch.nn.Linear(emb_size, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, 1, bias=False),
)
self.reset_parameters()
def reset_parameters(self):
for t in self.parameters():
if len(t.shape) == 2:
init.orthogonal_(t)
else:
init.normal_(t)
def freeze_normalization(self):
if not self.cons_norm.frozen:
self.cons_norm.freeze_normalization()
self.edge_norm.freeze_normalization()
self.var_norm.freeze_normalization()
self.conv_v_to_c.reset_normalization()
self.conv_c_to_v.reset_normalization()
return False
if not self.conv_v_to_c.frozen:
self.conv_v_to_c.freeze_normalization()
self.conv_c_to_v.reset_normalization()
return False
if not self.conv_c_to_v.frozen:
self.conv_c_to_v.freeze_normalization()
return False
return True
def forward(self, constraint_features, edge_indices, edge_features, variable_features):
reversed_edge_indices = torch.stack([edge_indices[1], edge_indices[0]], dim=0)
# First step: linear embedding layers to a common dimension (64)
constraint_features = self.cons_embedding(constraint_features)
edge_features = self.edge_embedding(edge_features)
variable_features = self.var_embedding(variable_features)
# Two half convolutions
constraint_features = self.conv_v_to_c(variable_features, reversed_edge_indices, edge_features, constraint_features)
variable_features = self.conv_c_to_v(constraint_features, edge_indices, edge_features, variable_features)
# A final MLP on the variable features
output = self.output_module(variable_features).squeeze(-1)
return output
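# A minimal smoke test on a fabricated bipartite graph (3 constraints, 5
# variables); feature widths must match the hard-coded sizes above. eval()
# keeps the Prenorm layers from updating their running statistics.
if __name__ == "__main__":
    policy = GNNPolicy("gat").eval()
    constraint_features = torch.randn(3, 10)
    variable_features = torch.randn(5, 104)
    edge_indices = torch.tensor([[0, 1, 2, 2], [0, 2, 3, 4]])  # constraint -> variable
    edge_features = torch.randn(4, 2)
    with torch.no_grad():
        scores = policy(constraint_features, edge_indices, edge_features, variable_features)
    print(scores.shape)  # torch.Size([5]), one score per variable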
| CL-LNS-main | neural_nets/gnn_policy.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class PrenormOld(torch.nn.Module):
def __init__(self, num_features, shift=True, scale=True, eps=1e-5):
super().__init__()
self.num_features = num_features
self.register_buffer("eps", torch.tensor([eps], requires_grad=False))
self.reset_normalization()
def _check_input_dim(self, input):
if input.dim() != 2:
raise ValueError("expected a 2D input (got {}D input)".format(input.dim()))
def freeze_normalization(self):
self.frozen = torch.tensor([True], dtype=torch.bool)
def reset_normalization(self):
self.register_buffer("running_mean", torch.zeros([num_features]))
self.register_buffer("running_var", torch.ones([num_features]))
self.register_buffer("num_batches_tracked", torch.tensor(0, dtype=torch.long, requires_grad=False))
self.register_buffer("frozen", torch.tensor([False], dtype=torch.bool, requires_grad=False))
# The input format should be [batch, features]
def forward(self, input):
self._check_input_dim(input)
running_mean = self.running_mean
if self.training and not self.frozen:
batch_mean = torch.mean(input, dim=[0])
batch_var = torch.mean((input - batch_mean).pow(2), dim=[0])
if self.num_batches_tracked < 10000:
momentum = (
float(self.num_batches_tracked) / (self.num_batches_tracked + 1)
)
else:
momentum = 0.9999
self.num_batches_tracked += 1
running_mean = (
momentum * self.running_mean + (1.0 - momentum) * batch_mean
).detach()
self.running_mean = running_mean.detach()
self.running_var = (
momentum * self.running_var + (1.0 - momentum) * batch_var
).detach()
return (input - self.running_mean) / torch.sqrt(
torch.max(self.eps, self.running_var)
)
class Prenorm(torch.nn.Module):
def __init__(self, num_features, shift=True, scale=True, preserve_features=[]):
super().__init__()
self.num_features = num_features
self.preserve_features = preserve_features
self.register_buffer("avg", torch.zeros([num_features], dtype=torch.double))
self.register_buffer("var", torch.zeros([num_features], dtype=torch.double))
self.register_buffer("count", torch.zeros([1]))
self.register_buffer("frozen", torch.tensor([False], dtype=torch.bool, requires_grad=False))
if shift:
self.register_buffer("shift", torch.zeros([num_features]))
else:
self.shift = None
if scale:
self.register_buffer("scale", torch.ones([num_features]))
else:
self.scale = None
def freeze_normalization(self):
self.frozen = torch.tensor([True], dtype=torch.bool).detach()
def reset_normalization(self):
self.avg.zero_()
self.var.zero_()
self.count.zero_()
self.count += 1
self.frozen.zero_()
def forward(self, input):
if self.training and not self.frozen:
# Online mean and variance estimation from Chan et al
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
assert len(input.shape) == 2
assert self.num_features == input.shape[-1], f"Expected input dimension of size {self.num_features}, got {input.shape[-1]}."
with torch.no_grad():
assert not torch.isnan(input).any()
assert not torch.isnan(self.var).any()
assert not torch.isnan(self.scale).any()
assert not torch.isnan(self.count).any()
sample_count = float(input.shape[0])
sample_var, sample_avg = torch.var_mean(input.to(torch.float64), dim=0)
assert not torch.isnan(sample_avg).any()
assert not torch.isnan(sample_var).any()
delta = sample_avg - self.avg
assert self.count + sample_count > 0
m2 = (self.var * self.count + sample_var * sample_count + torch.square(delta) * self.count * sample_count / (
self.count + sample_count))
assert not torch.isnan(m2).any()
self.avg = (self.avg * self.count + sample_avg * sample_count) / (self.count + sample_count)
assert not torch.isnan(self.avg).any()
self.count += sample_count
self.var = m2 / self.count
if self.shift is not None:
self.shift = -self.avg.to(torch.float32)
assert not torch.isnan(self.shift).any()
if self.scale is not None:
var = torch.where(torch.eq(self.var, 0), self.var.new_ones([self.num_features]), self.var)
assert not torch.isnan(var).any()
#assert not torch.isinf(var).any()
assert (var > 0).all()
self.scale = torch.rsqrt(var).to(torch.float32)
assert not torch.isnan(self.scale).any()
for f in self.preserve_features:
self.shift[f] = 0.0
self.scale[f] = 1.0
output = input
if self.shift is not None:
output = output + self.shift
if self.scale is not None:
output = output * self.scale
assert not torch.any(torch.isnan(output))
return output
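# A minimal sketch of the online normalization (values are illustrative): in
# training mode each forward pass folds the batch statistics into the running
# mean/variance, so after a few batches the output is roughly zero-mean and
# unit-variance.
if __name__ == "__main__":
    norm = Prenorm(4)
    norm.train()
    for _ in range(5):
        out = norm(torch.randn(256, 4) * 3.0 + 1.0)
    print(out.mean(dim=0), out.std(dim=0))  # roughly 0 and 1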
| CL-LNS-main | neural_nets/prenorm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import torch
import torch.nn.init as init
from neural_nets import prenorm
# Implements the graph convolution described in
# https://papers.nips.cc/paper/2019/hash/d14c2267d848abeb81fd590f371d39bd-Abstract.html
class GasseConvolution(torch_geometric.nn.MessagePassing):
"""
Graph convolution layer. THis is the heart of our GNNPolicy
"""
def __init__(self):
super().__init__('add')
emb_size = 64
self.feature_module_left = torch.nn.Linear(emb_size, emb_size)
self.feature_module_edge = torch.nn.Linear(emb_size, emb_size, bias=False)
self.feature_module_right = torch.nn.Linear(emb_size, emb_size, bias=False)
self.final_norm = prenorm.Prenorm(emb_size, shift=False)
self.feature_module_final = torch.nn.Sequential(
self.final_norm,
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size)
)
self.post_conv_module = prenorm.Prenorm(emb_size, shift=False)
# output_layers
self.output_module = torch.nn.Sequential(
torch.nn.Linear(2*emb_size, emb_size),
torch.nn.ReLU(),
torch.nn.Linear(emb_size, emb_size),
)
self.reset_parameters()
def reset_parameters(self):
for t in self.parameters():
if len(t.shape) == 2:
init.orthogonal_(t)
else:
init.normal_(t)
def reset_normalization(self):
self.final_norm.reset_normalization()
self.post_conv_module.reset_normalization()
@property
def frozen(self):
return self.final_norm.frozen and self.post_conv_module.frozen
def freeze_normalization(self):
if not self.final_norm.frozen:
self.final_norm.freeze_normalization()
self.post_conv_module.reset_normalization()
return False
if not self.post_conv_module.frozen:
self.post_conv_module.freeze_normalization()
return False
return True
def forward(self, left_features, edge_indices, edge_features, right_features):
"""
This method sends the messages, computed in the message method.
"""
output = self.propagate(edge_indices, size=(left_features.shape[0], right_features.shape[0]),
node_features=(left_features, right_features), edge_features=edge_features)
return self.output_module(torch.cat([self.post_conv_module(output), right_features], dim=-1))
def message(self, node_features_i, node_features_j, edge_features):
output = self.feature_module_final(self.feature_module_left(node_features_i)
+ self.feature_module_edge(edge_features)
+ self.feature_module_right(node_features_j))
return output
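# A minimal smoke test mirroring the GIN/GAT examples (shapes are
# illustrative). eval() keeps the internal Prenorm layers from updating their
# running statistics.
if __name__ == "__main__":
    conv = GasseConvolution().eval()
    left_features = torch.randn(5, 64)
    right_features = torch.randn(3, 64)
    edge_indices = torch.tensor([[0, 1, 2, 4], [0, 1, 2, 1]])  # (source, target)
    edge_features = torch.randn(4, 64)
    out = conv(left_features, edge_indices, edge_features, right_features)
    print(out.shape)  # torch.Size([3, 64])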
| CL-LNS-main | neural_nets/gasse_convolution.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| CL-LNS-main | ml4co/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import hypothesis
import hypothesis.strategies as st
import unittest
import torch
import torch.nn.functional as F
from ml4co.ops.split_and_pad import SplitAndPadFunction
def split_and_pad_ref(input, sizes, feature_size=0, padding_value=0):
feature_size = max(feature_size, sizes.max().item())
inputs = input.split(sizes.detach().cpu().tolist())
outputs = [
F.pad(x, (0, feature_size - x.size(0)), "constant", padding_value)
for x in inputs
]
return torch.stack(outputs, dim=0)
class SplitAndPadFunctionTest(unittest.TestCase):
def setUp(self):
self.split_and_pad = SplitAndPadFunction.apply
@hypothesis.given(
batch_size=st.integers(1, 200),
inner_size=st.integers(10, 500),
feature_size=st.sampled_from([0, 500]),
padding_value=st.floats(min_value=-10.0, max_value=10.0),
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]))
@hypothesis.settings(deadline=None)
def test_forward(self, batch_size, inner_size, feature_size, padding_value,
device):
sizes = torch.randint(low=1,
high=inner_size,
size=(batch_size, ),
device=device)
input_size = sizes.sum().item()
x = torch.randn(input_size, device=device)
y = self.split_and_pad(x, sizes, feature_size, padding_value)
y_ref = split_and_pad_ref(x, sizes, feature_size, padding_value)
torch.testing.assert_allclose(y, y_ref)
@hypothesis.given(
batch_size=st.integers(1, 100),
inner_size=st.integers(10, 500),
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]))
@hypothesis.settings(deadline=None)
def test_backward(self, batch_size, inner_size, device):
sizes = torch.randint(low=1,
high=inner_size,
size=(batch_size, ),
device=device)
input_size = sizes.sum().item()
x = torch.randn(input_size, device=device)
x_ref = x.detach().clone()
x.requires_grad_(True)
x_ref.requires_grad_(True)
y = self.split_and_pad(x, sizes)
y_ref = split_and_pad_ref(x_ref, sizes)
dy = torch.randn_like(y)
y.backward(dy)
y_ref.backward(dy)
torch.testing.assert_allclose(x.grad, x_ref.grad)
if __name__ == "__main__":
unittest.main()
| CL-LNS-main | ml4co/ops/split_and_pad_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import hypothesis
import hypothesis.strategies as st
import unittest
import torch
from ml4co.ops.prenorm import PrenormFunction
def prenorm_ref(input, running_m0, running_m1, running_m2, eps):
m0 = input.size(0)
m2, m1 = torch.var_mean(input, dim=0, unbiased=False)
n = m0 + running_m0
c = 0 if n == 0 else running_m0 / n
delta = running_m1 - m1
m1 += c * delta
m2 = m2 * m0 + running_m2 + delta.square() * c * m0
scale = (m2 / n + eps).rsqrt()
bias = -scale * m1
return m1, m2, scale, bias
class PrenormFunctionTest(unittest.TestCase):
def setUp(self):
self.prenorm = PrenormFunction.apply
@hypothesis.given(
outer_size=st.integers(2, 100),
inner_size=st.integers(1, 200),
running_m0=st.integers(0, 10),
eps=st.floats(min_value=0, max_value=1e-3),
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]))
@hypothesis.settings(deadline=None)
def test_prenorm(self, outer_size, inner_size, running_m0, eps, device):
x = torch.randn(outer_size, inner_size, device=device)
if running_m0 == 0:
running_m1 = torch.zeros((inner_size, ), device=device)
running_m2 = torch.zeros((inner_size, ), device=device)
else:
running_m1 = torch.randn((inner_size, ), device=device)
running_m2 = torch.randn((inner_size, ), device=device)
m1, m2, scale, bias = self.prenorm(x, running_m0, running_m1,
running_m2, eps)
m1_ref, m2_ref, scale_ref, bias_ref = prenorm_ref(
x, running_m0, running_m1, running_m2, eps)
torch.testing.assert_allclose(m1, m1_ref)
torch.testing.assert_allclose(m2, m2_ref)
torch.testing.assert_allclose(scale, scale_ref)
torch.testing.assert_allclose(bias, bias_ref)
if __name__ == "__main__":
unittest.main()
| CL-LNS-main | ml4co/ops/prenorm_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| CL-LNS-main | ml4co/ops/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
import ml4co
torch.ops.load_library(
os.path.join(os.path.dirname(os.path.dirname(ml4co.__file__)),
"libml4co_ops.so"))
class SplitAndPadFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, sizes, feature_size=0, padding_value=0.0):
ctx.save_for_backward(sizes)
return torch.ops.ml4co_ops.split_and_pad(input, sizes, feature_size,
padding_value)
@staticmethod
def backward(ctx, grad_output):
sizes, = ctx.saved_tensors
grad_input = torch.ops.ml4co_ops.split_and_pad_backward(
grad_output, sizes)
return grad_input, None, None, None
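# A minimal usage sketch, assuming libml4co_ops.so was built and is loadable.
# The semantics mirror the reference implementation in split_and_pad_test.py:
# the flat input is split into chunks of the given sizes, each chunk is
# right-padded (here with -1.0) up to the longest chunk, and the chunks are
# stacked into a 2D tensor.
if __name__ == "__main__":
    x = torch.arange(6, dtype=torch.float32)
    sizes = torch.tensor([2, 1, 3])
    y = SplitAndPadFunction.apply(x, sizes, 0, -1.0)
    print(y)  # [[0., 1., -1.], [2., -1., -1.], [3., 4., 5.]]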
| CL-LNS-main | ml4co/ops/split_and_pad.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
import ml4co
torch.ops.load_library(
os.path.join(os.path.dirname(os.path.dirname(ml4co.__file__)),
"libml4co_ops.so"))
class PrenormFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, running_m0, running_m1, running_m2, eps=0.0):
return torch.ops.ml4co_ops.prenorm(input, running_m0, running_m1,
running_m2, eps)
@staticmethod
def backward(ctx, grad_m1, grad_m2, grad_scale, grad_bias):
raise NotImplementedError
| CL-LNS-main | ml4co/ops/prenorm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ecole
import ilp_model
import numpy as np
import torch
from typing import Any, Callable, Optional, Tuple
import rloptim.utils.data_utils as data_utils
from rloptim.core.types import Tensor, NestedTensor
from rloptim.envs.env import Env
from rloptim.envs.wrappers import TimeLimitWrapper
import competition.common.environments as competition_env
import competition.common.rewards as competition_rewards
from instance_loader import InstanceLoader
class EcoleWrapper(Env):
def __init__(self,
dataset: str,
problem_set: str,
observation_function: ecole.observation.NodeBipartite,
timeout: int = 900):
super(EcoleWrapper, self).__init__()
self._env = None
self._dataset = dataset
self._problem_set = problem_set
self._observation_function = observation_function
self._timeout = timeout
self._instance_loader = InstanceLoader(dataset_loc=self._dataset,
load_metadata=True)
@property
def env(self) -> Optional[ecole.environment.Environment]:
return self._env
@property
def dataset(self) -> str:
return self._dataset
@property
def problem_set(self) -> str:
return self._problem_set
@property
def observation_function(self) -> ecole.observation.NodeBipartite:
return self._observation_function
def reset(self, **kwargs) -> NestedTensor:
instance_data = self._instance_loader(self._problem_set)
if isinstance(instance_data, ecole.core.scip.Model):
instance = instance_data
model = ilp_model.Model(instance)
model.find_initial_solution()
bounds = model.get_primal_dual_bounds()
initial_primal_bound = bounds[0]
initial_dual_bound = bounds[1]
else:
instance, metadata = instance_data
initial_primal_bound = metadata["primal_bound"]
initial_dual_bound = metadata["dual_bound"]
reward_function = competition_rewards.TimeLimitDualIntegral()
reward_function.set_parameters(
initial_primal_bound=initial_primal_bound,
initial_dual_bound=initial_dual_bound,
objective_offset=0)
self._env = competition_env.Branching(
time_limit=self._timeout,
observation_function=(self._observation_function,
ecole.observation.Khalil2016(
pseudo_candidates=True)),
reward_function=-reward_function,
information_function={
"nb_nodes": ecole.reward.NNodes(),
"time": ecole.reward.SolvingTime()
})
obs, action_set, reward, done, _ = self._env.reset(
instance, objective_limit=initial_primal_bound)
obs = self._parse_obs(obs, action_set)
action_set = torch.from_numpy(action_set.astype(np.int64))
reward = torch.tensor([reward])
done = torch.tensor([done])
return {
"obs": obs,
"action_set": action_set,
"reward": reward,
"done": done
}
def step(self, action: NestedTensor) -> NestedTensor:
if isinstance(action, dict):
action = action["action"]
action = data_utils.to_numpy(action)
obs, action_set, reward, done, _ = self._env.step(action)
obs = self._parse_obs(obs, action_set)
action_set = torch.from_numpy(action_set.astype(np.int64))
reward = torch.tensor([reward], dtype=torch.float32)
done = torch.tensor([done])
return {
"obs": obs,
"action_set": action_set,
"reward": reward,
"done": done
}
def _parse_obs(self, obs: Any, action_set: Any) -> Tuple[torch.Tensor]:
bgo, khalil = obs
bgo.add_khalil_features(khalil, action_set)
bgo.check_features()
obs = (bgo.row_features, bgo.edge_features.indices,
bgo.edge_features.values, bgo.column_features)
return obs
def close(self) -> None:
if self._env is not None:
self._env.close()
def seed(self, seed: Optional[int] = None) -> None:
if self._env is not None:
self._env.seed(seed)
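# A minimal usage sketch (dataset path and problem-set name are hypothetical):
#   env = EcoleWrapper(dataset="path/to/dataset", problem_set="INDSET_train",
#                      observation_function=ecole.observation.NodeBipartite())
#   ts = env.reset()
#   while not ts["done"].item():
#       action = ts["action_set"][0]  # e.g. always branch on the first candidate
#       ts = env.step({"action": action})
#   env.close()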
| CL-LNS-main | ml4co/rl/env/ecole_wrapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import numpy as np
import string
import random
import os
import sys
import graph_datasets.evaluation_data as ed
import ilp_solver
class EvaluationDataMiningTest(unittest.TestCase):
def setUp(self):
solver = ilp_solver.ILPSolver(engine="scip")
x1 = solver.create_integer_var("x1")
x2 = solver.create_integer_var("x2")
solver.add_constraint(10 * x1 + 15 * x2 >= 100)
solver.add_constraint(20 * x1 + 16 * x2 >= 160)
solver.add_constraint(17 * x1 + 11 * x2 >= 130)
# Minimize the objective
solver.set_objective_function(80 * x1 + 95 * x2, maximize=False)
self.instance1 = solver.as_scip_model()
solver = ilp_solver.ILPSolver(engine="scip")
x1 = solver.create_integer_var("x1")
x2 = solver.create_integer_var("x2")
x3 = solver.create_integer_var("x3")
solver.add_constraint(10 * x1 + 15 * x2 >= 100)
solver.add_constraint(20 * x1 + 16 * x2 >= 160)
solver.add_constraint(17 * x3 + 11 * x2 >= 130)
# Minimize the objective
solver.set_objective_function(80 * x1 + 95 * x2 + 17 * x3, maximize=False)
self.instance2 = solver.as_scip_model()
letters = string.ascii_letters
self.db = []
for i in range(3):
self.db.append('/tmp/' + ''.join(random.choice(letters) for i in range(10)))
def tearDown(self):
for db in self.db:
try:
os.remove(db)
except:
pass
def testSingleVersion(self):
data = ed.EvaluationData(self.db[0])
# Format is instance, model_version, step_id, primal, dual, nb_nodes, timestamp
data.add(self.instance1, "v1", 1, 0, 123, 2, 1.0)
data.add(self.instance1, "v1", 2, 0, 125, 4, 1.5)
data.commit()
miner = ed.EvaluationDataMining(self.db[0], ["v1"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5]})
def testMultipleVersions(self):
data = ed.EvaluationData(self.db[1])
# Format is instance, model_version, step_id, primal, dual, nb_nodes, timestamp
data.add(self.instance1, "v1", 1, 0, 123, 2, 1.0)
data.add(self.instance1, "v1", 2, 0, 125, 4, 1.5)
data.add(self.instance1, "v2", 4, 0, 321, 3, 2.0)
data.add(self.instance1, "v2", 5, 0, 432, 7, 2.7)
data.commit()
miner = ed.EvaluationDataMining(self.db[1], ["v1"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5]})
miner = ed.EvaluationDataMining(self.db[1], ["v2"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v2": [7]})
self.assertEqual(integrals_over_nodes, {"v2": [1284.0]})
self.assertEqual(integrals_over_time, {"v2": [224.70000000000005]})
miner = ed.EvaluationDataMining(self.db[1], ["v1", "v2"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4], "v2": [4]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0], "v2": [321.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5], "v2": [0.0]})
def testMultipleVersionsMultipleInstances(self):
data = ed.EvaluationData(self.db[2])
# Format is instance, model_version, step_id, primal, dual, nb_nodes, timestamp
data.add(self.instance1, "v1", 1, 0, 123, 2, 1.0)
data.add(self.instance1, "v1", 2, 0, 125, 4, 1.5)
data.add(self.instance1, "v2", 4, 0, 321, 3, 2.0)
data.add(self.instance1, "v2", 5, 0, 432, 7, 2.7)
data.add(self.instance2, "v1", 11, 0, 1123, 12, 11.0)
data.add(self.instance2, "v1", 12, 0, 1125, 14, 11.5)
data.add(self.instance2, "v2", 14, 0, 1321, 13, 12.0)
data.add(self.instance2, "v2", 15, 0, 1432, 17, 12.7)
data.commit()
miner = ed.EvaluationDataMining(self.db[2], ["v1"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4, 14]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0, 2246.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5, 561.5]})
miner = ed.EvaluationDataMining(self.db[2], ["v2"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v2": [7, 17]})
self.assertEqual(integrals_over_nodes, {"v2": [1284.0, 5284.0]})
self.assertEqual(integrals_over_time, {"v2": [224.70000000000005, 924.699999999999]})
miner = ed.EvaluationDataMining(self.db[2], ["v1", "v2"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
self.assertEqual(nb_nodes, {"v1": [4, 14], "v2": [4, 14]})
self.assertEqual(integrals_over_nodes, {"v1": [246.0, 2246.0], "v2": [321.0, 1321.0]})
self.assertEqual(integrals_over_time, {"v1": [61.5, 561.5], "v2": [0, 0]})
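    # Disabled debugging helper (note the leading underscore): it mines a local
    # results database and ends with a failing assertion so that the printed
    # metrics show up in the test output.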
def _testRealResults(self):
miner = ed.EvaluationDataMining("/data/home/benoitsteiner/ml4co-dev/ml4co/results.db", ["SCIP", "09/09/2021 17:26:14"])
nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
print(str(nb_nodes))
print(str(integrals_over_nodes))
print(str(integrals_over_time))
self.assertEqual(1, 2) | CL-LNS-main | graph_datasets/evaluation_data_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sqlite3
import pickle
from pathlib import Path
import hashlib
import string
import random
import base64
import functools
class SolvedMilpDataset():
"""
This class stores the best solution found for a collection of milp instances.
"""
def __init__(self, sample_db, read_only=False, best_solution_only=True):
self.best_solution_only = best_solution_only
if best_solution_only:
self.sql_insert = "REPLACE"
else:
self.sql_insert = "INSERT"
p = Path(sample_db)
if not p.parent.exists():
p.parent.mkdir(exist_ok=True)
already_created = p.exists()
assert already_created or not read_only
uri = "file:" + sample_db
if read_only:
uri += "?mode=ro"
self.db = sqlite3.connect(uri, uri=True)
self.cur = self.db.cursor()
# Create table if needed
if not already_created:
if best_solution_only:
self.cur.execute('''CREATE TABLE milp (id text primary key, problem text, solution text, objective_sense text, objective_value float, gap float)''')
else:
self.cur.execute('''CREATE TABLE milp (id text key, problem text, solution text, objective_sense text, objective_value float, gap float)''')
self.cur.execute('''CREATE INDEX id_index ON milp (id)''')
def __del__(self):
self.db.close()
@functools.lru_cache(maxsize=16)
def _model_to_key_pb(self, model):
letters = string.ascii_letters
tmp_file = '/tmp/' + ''.join(random.choice(letters) for i in range(10)) + '.lp'
model.writeProblem(tmp_file)
with open(tmp_file, 'r') as f:
problem = f.read()
problem = problem.encode()
key = hashlib.sha256(problem).hexdigest()
return key, problem
    def _better_solution_exists(self, key, obj_sense, obj_value):
        query = f"SELECT objective_value FROM milp WHERE id = \'{key}\'"
        self.cur.execute(query)
        rslt = self.cur.fetchone()
        if rslt is None:
            return False
        old_value = rslt[0]
        return (obj_sense == "minimize" and old_value < obj_value) or \
               (obj_sense == "maximize" and old_value > obj_value)
def get_one(self, model):
"""
        Load a single recorded solution for the specified model.
        Returns a ({key: value}, obj_value) tuple, where key is the index of a
        variable in the array returned by model.getVars(transformed=True), value
        is the value of this variable in the solution, and obj_value is the
        objective value of the solution.
"""
key, _ = self._model_to_key_pb(model)
query = f"SELECT solution, objective_value FROM milp WHERE id = \'{key}\'"
self.cur.execute(query)
rslt = self.cur.fetchone()
solution = base64.b64decode(rslt[0].encode())
solution = pickle.loads(solution)
obj_value = rslt[1]
return (solution, obj_value)
def get_all(self, model):
"""
        Load all recorded solutions for the specified model.
        Yields ({key: value}, obj_value) tuples, where key is the index of a
        variable in the array returned by model.getVars(transformed=True), value
        is the value of this variable in the solution, and obj_value is the
        objective value of the solution.
"""
key, _ = self._model_to_key_pb(model)
query = f"SELECT solution, objective_value FROM milp WHERE id = \'{key}\'"
self.cur.execute(query)
rslts = self.cur.fetchmany()
while len(rslts) > 0:
for rslt in rslts:
solution = base64.b64decode(rslt[0].encode())
solution = pickle.loads(solution)
obj_value = rslt[1]
yield (solution, obj_value)
rslts = self.cur.fetchmany()
def add(self, model, solution, objective_value, gap):
"""
Stores the solution and variable assignment for the specified model.
"""
# Insert a row of data or replace it if a better solution is found
key, problem = self._model_to_key_pb(model)
obj_sense = model.getObjectiveSense()
if self.best_solution_only and self._better_solution_exists(key, obj_sense, objective_value):
return
sol = {}
vars = model.getVars(transformed=True)
for i in range(len(vars)):
v = vars[i]
val = solution[v]
sol[i] = val
sol = pickle.dumps(sol)
problem = base64.b64encode(problem).decode()
sol = base64.b64encode(sol).decode()
query = f"{self.sql_insert} INTO milp VALUES (\'{key}\', \'{problem}\', \'{sol}\', \'{obj_sense}\', {objective_value}, {gap})"
self.cur.execute(query)
self.db.commit()
def merge(self, dataset):
"""
Add another dataset to the current one
"""
query = "SELECT id, problem, solution, objective_sense, objective_value, gap FROM milp"
sample_cnt = 0
for milp in dataset.cur.execute(query):
key = milp[0]
obj_sense = milp[3]
obj_value = milp[4]
if self.best_solution_only and self._better_solution_exists(key, obj_sense, obj_value):
continue
insert = f"{self.sql_insert} INTO milp VALUES (\'{milp[0]}\', \'{milp[1]}\', \'{milp[2]}\', \'{milp[3]}\', {milp[4]}, {milp[5]})"
self.cur.execute(insert)
sample_cnt += 1
if sample_cnt % 1000 == 0:
self.db.commit()
self.db.commit()
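

# Minimal usage sketch (assumptions: PySCIPOpt is available and /tmp is writable;
# the toy model below is illustrative and not part of this project).
if __name__ == "__main__":
    import pyscipopt

    model = pyscipopt.Model()
    x = model.addVar(vtype="I", name="x", lb=0, ub=10)
    model.addCons(x >= 3)
    model.setObjective(x, "minimize")
    model.optimize()

    dataset = SolvedMilpDataset("/tmp/solved_milp_example.db")
    dataset.add(model, model.getBestSol(), model.getObjVal(), model.getGap())
    solution, obj_value = dataset.get_one(model)
    print(obj_value)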
| CL-LNS-main | graph_datasets/solved_milp_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ecole
import torch
import numpy as np
import math
import time
def augment_variable_features_with_dynamic_ones(batch, args, initial_solution=None):
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#DEVICE = 'cpu'
nh_size_threshold = dict() # filter out training data below certain neighborhood size threshold
window_size = args.window_size
# to add features of the last $window_size improving solutions in LNS
# each window contains 1. whether we have the solution 2. incumbent values 3. LB relax values
dynamic_feature_size = window_size * 3
static_feature_size = batch.variable_features.shape[-1]
    dynamic_features = torch.zeros((batch.variable_features.shape[0], dynamic_feature_size), dtype=torch.float32)
if "feat1" in args.experiment: #feat1: no Khalil's feature and no LB relax feature
batch.variable_features[:,23:] = torch.zeros(batch.variable_features.shape[0], batch.variable_features.shape[1] - 23)
assert len(batch.incumbent_history) == len(batch.LB_relaxation_history)
tot_variables = 0
batch_weight = []
batch_n_candidates = []
#embed()
for i in range(len(batch.LB_relaxation_history)):
        # Drop the current incumbent so incumbent_history lines up with LB_relaxation_history
batch.incumbent_history[i].pop()
assert len(batch.incumbent_history[i]) == len(batch.LB_relaxation_history[i])
number_of_history_added = 0
number_of_variables = len(batch.LB_relaxation_history[i][0])
total_candidates = torch.sum(batch.candidate_scores[tot_variables:tot_variables+number_of_variables])
batch_n_candidates.append(total_candidates)
#print(total_candidates)
        if args.problem in nh_size_threshold and total_candidates < nh_size_threshold[args.problem]:
batch_weight.append(0)
#print("============No positive labels=============")
else:
batch_weight.append(1)
for j in reversed(range(len(batch.LB_relaxation_history[i]))):
assert number_of_variables == len(batch.incumbent_history[i][j])
assert number_of_variables == len(batch.LB_relaxation_history[i][j])
dynamic_features[tot_variables:tot_variables+number_of_variables, number_of_history_added*3] = torch.FloatTensor([1]*number_of_variables)
dynamic_features[tot_variables:tot_variables+number_of_variables, number_of_history_added*3+1] = torch.FloatTensor(batch.incumbent_history[i][j])
if "feat1" in args.experiment or "feat2" in args.experiment:
dynamic_features[tot_variables:tot_variables+number_of_variables, number_of_history_added*3+2] = torch.zeros(len(batch.LB_relaxation_history[i][j]))
else:
dynamic_features[tot_variables:tot_variables+number_of_variables, number_of_history_added*3+2] = torch.FloatTensor(batch.LB_relaxation_history[i][j])
number_of_history_added += 1
if number_of_history_added == window_size:
break
#print(number_of_history_added)
tot_variables += number_of_variables
#embed()
assert tot_variables == batch.variable_features.shape[0]
dynamic_features = dynamic_features.to(DEVICE)
    # Workaround for a data-collection bug: some samples already contain the dynamic
    # feature columns (104 in total), so overwrite the last 9 in place instead of appending
if batch.variable_features.shape[-1] == 104:
batch.variable_features[:,-9:] = dynamic_features
else:
all_features = torch.hstack((batch.variable_features, dynamic_features))
batch.variable_features = all_features
#print("batch valid sample %d / %d"% (sum(batch_weight), len(batch_weight)))
batch_weight = torch.tensor(batch_weight)
#embed()
batch.batch_weight = batch_weight.to(DEVICE)
return batch
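

# Layout of the dynamic block built above (one row per variable), as derived from the
# loop: for each of the last `window_size` improving solutions, most recent first,
# three entries are stored per variable:
#   [solution-present flag, incumbent value, LP relaxation value]
# Windows without a recorded solution are left as zeros.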
class MilpEdgeFeatures():
def __init__(self, indices, values):
self.indices = indices
self.values = values
class MilpProblemObservation():
def __init__(self, column_features, row_features, edge_features):
self.column_features = column_features
self.row_features = row_features
self.edge_features = edge_features
def add_LB_relaxation_value(self, LB_relaxation_value):
pass
def add_khalil_features(self, khalil, action_set):
# Validate and cleanup the Khalil features
assert khalil.features.shape[0] == len(action_set)
khalil_features = np.nan_to_num(khalil.features.astype(np.float32),
posinf=1e6,
neginf=-1e6)
# Concatenate the khalil features with the existing features
column_feature_size = self.column_features.shape[-1]
khalil_feature_size = khalil_features.shape[-1]
total_feature_size = column_feature_size + khalil_feature_size
col_features = torch.zeros(
(self.column_features.shape[0], total_feature_size),
dtype=torch.float32)
col_features[:, :column_feature_size] = self.column_features
col_features[action_set.astype(np.int32),
column_feature_size:] = torch.from_numpy(khalil_features)
self.column_features = col_features
def check_features(self):
assert not torch.any(torch.isinf(self.row_features))
assert not torch.any(torch.isinf(self.column_features))
assert not torch.any(torch.isinf(self.edge_features.indices))
assert not torch.any(torch.isinf(self.edge_features.values))
assert not torch.any(torch.isnan(self.row_features))
assert not torch.any(torch.isnan(self.column_features))
assert not torch.any(torch.isnan(self.edge_features.indices))
assert not torch.any(torch.isnan(self.edge_features.values))
# Complement the basic Gasse features with some of our own:
# Lower and upper bound for each variable
# Coefficients associated with each variable in the objective function
# Lower and upper bound for each constraint
class BipartiteGraphObservations(ecole.observation.NodeBipartite):
def __init__(self, check_for_nans=True, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_for_nans = check_for_nans
self.num_calls = 0
self.feature_extraction_time = 0
self.feature_cleanup_time = 0
self.extra_col_feature_extraction_time = 0
self.extra_row_feature_extraction_time = 0
self.feature_normalization_time = 0
self.feature_merge_time = 0
self.total_time = 0
def before_reset(self, model):
super().before_reset(model)
#model.write_problem("/tmp/pb.lp")
def extract(self, model, done):
if done:
return None
start = time.monotonic()
# Extract the Gasse features
base_obs = super().extract(model, done)
stop = time.monotonic()
self.feature_extraction_time += stop - start
scip_model = model.as_pyscipopt()
#sense = scip_model.getObjectiveSense()
#assert(sense == "minimize")
        # Delete the incumbent column features. They are always NaN when the SCIP heuristics are turned off.
        #print(base_obs.variable_features.shape)
base_obs.variable_features = np.delete(base_obs.variable_features, 14, axis=1)
base_obs.variable_features = np.delete(base_obs.variable_features, 13, axis=1)
stop = time.monotonic()
self.feature_cleanup_time += stop - start
assert not np.isinf(base_obs.variable_features.astype(np.float32)).any()
#total_col_features = 3 + base_obs.column_features.shape[-1]
extra_col_features = np.empty((base_obs.variable_features.shape[0], 6), dtype=np.float32)
cols = scip_model.getLPColsData()
assert(len(cols) == base_obs.variable_features.shape[0])
vars = scip_model.getVars(transformed=True)
assert(len(vars) == base_obs.variable_features.shape[0])
for i in range(base_obs.variable_features.shape[0]):
col = cols[i]
assert i == col.getLPPos()
#print("BASIS = " + str(col.getBasisStatus()))
#print("POS = " + str(col.getLPPos()))
#print("POVArS = " + str(col.getVar()))
#print(str(base_obs.column_features[i]), flush=True)
#print(str(base_obs.column_features[i][6]))
#print("LB = " + str(col.getLb()))
#print("UB = " + str(col.getUb()))
extra_col_features[i, 0] = col.getLb()
extra_col_features[i, 1] = col.getUb()
var = vars[i]
assert i == var.getCol().getLPPos()
assert var.ptr() == col.getVar().ptr()
extra_col_features[i, 2] = var.getLbGlobal()
extra_col_features[i, 3] = var.getUbGlobal()
extra_col_features[i, 4] = var.getObj()
assert var.getLPSol() == col.getPrimsol()
extra_col_features[i, 5] = var.getLPSol()
#print("OBJ = " + str(var.getObj()))
#print("LP SOL = " + str(var.getLPSol()))
assert col.getLb() == var.getLbLocal()
assert col.getUb() == var.getUbLocal()
#var_map[var.getIndex()] = var
stop = time.monotonic()
self.extra_col_feature_extraction_time += stop - start
assert not np.isinf(extra_col_features).any()
#extra_col_features[:, 3:] = base_obs.column_features
#base_obs.column_features = torch.from_numpy(extra_col_features)
#total_row_features = 3 + base_obs.row_features.shape[-1]
extra_row_features = np.empty((base_obs.row_features.shape[0], 5), dtype=np.float32)
rows = scip_model.getLPRowsData()
assert len(rows) <= base_obs.row_features.shape[0]
ecole_cns_id = 0
for i in range(len(rows)):
row = rows[i]
assert i == row.getLPPos()
# If a constraint has both a lhs and a rhs, ecole will create 2 constraints under the hood
lhs_set = not scip_model.isInfinity(abs(row.getLhs()))
rhs_set = not scip_model.isInfinity(abs(row.getRhs()))
assert lhs_set or rhs_set
if lhs_set:
cns = -row.getLhs()
extra_row_features[ecole_cns_id, 0] = cns
extra_row_features[ecole_cns_id, 1] = math.copysign(1, cns)
extra_row_features[ecole_cns_id, 2] = row.getConstant()
extra_row_features[ecole_cns_id, 3] = row.getOrigintype()
extra_row_features[ecole_cns_id, 4] = row.isIntegral()
ecole_cns_id += 1
if rhs_set:
cns = row.getRhs()
extra_row_features[ecole_cns_id, 0] = cns
extra_row_features[ecole_cns_id, 1] = math.copysign(1, cns)
extra_row_features[ecole_cns_id, 2] = row.getConstant()
extra_row_features[ecole_cns_id, 3] = row.getOrigintype()
extra_row_features[ecole_cns_id, 4] = row.isIntegral()
ecole_cns_id += 1
#extra_row_features[i, 0] = -row.getLhs()
#extra_row_features[i, 1] = row.getRhs()
#extra_row_features[i, 1] = row.getConstant()
#lhs = row.getLhs()
#print("- LHS = " + str(lhs))
#rhs = row.getRhs()
#print("- RHS = " + str(rhs))
#cons = row.getConstant()
#print("- CONS = " + str(cons))
#print("- POS: " + str(pos))
#val = row.getVals()
#print("- VALS = " + str(val))
#for col in row.getCols():
# print("- COLS: " + str(cols))
#row = scip_model.getTransformedCons(row)
#lhs = row.getLhs()
#print("- LHS = " + str(lhs))
#rhs = row.getRhs()
#print("- RHS = " + str(rhs))
#cons = row.getConstant()
#print("- CONS = " + str(cons))
#pos = row.getLPPos()
#print("- POS: " + str(pos))
#val = row.getVals()
#print("- VALS = " + str(val))
#node_id += 1
assert ecole_cns_id == base_obs.row_features.shape[0]
stop = time.monotonic()
self.extra_row_feature_extraction_time += stop - start
#extra_row_features[:, 3:] = base_obs.row_features
#base_obs.row_features = torch.from_numpy(extra_row_features)
#vars = scip_model.getVars(transformed=False)
#for var in vars:
# print("VAR = " + str(var) + ": " + str(var.getCol()) + " " + str(var.getObj()))
#vars = scip_model.getVars(transformed=True)
#i = 0
#for var in vars:
# print("TRANSFORMED VAR = " + str(var) + ": " + str(var.getCol()) + " " + str(var.getObj()))
# assert i == var.getCol().getLPPos()
# i += 1
# #print("LB = " + str(var.getLbOriginal()) + "/" + str(var.getLbLocal()) + "/" + str(var.getLbGlobal()))
# #print("UB = " + str(var.getUbOriginal()) + "/" + str(var.getUbLocal()) + "/" + str(var.getUbGlobal()))
#conss = scip_model.getConss()
#assert(len(conss) == base_obs.row_features.shape[0])
#for cons in conss:
# print(str(cons))
#obj = scip_model.getObjective()
#print("OBJ = " + str(obj))
#params = model.get_params()
#print("PARAMS: " + str(params))
#lp_columns = model.lp_columns()
#print("LP_COLUMNS " + str(lp_columns))
#lp_rows = model.lp_rows()
#print("LP_ROWS " + str(lp_rows))
#constraints = scip_model.getConss()
#print("CNS: " + str(constraints))
#constraints = scip_model.getNConss()
#print("NCNS: " + str(len(cols)) + " vs " + str(base_obs.column_features.shape[0]), flush=True)
#print("NROWS: " + str(len(rows)) + " vs " + str(base_obs.row_features.shape[0]), flush=True)
#print("CNS: " + str(base_obs.row_features))
#print("EDGES: " + str(base_obs.edge_features.indices))
#print("EDG VALS: " + str(base_obs.edge_features.values))
#print("VARS: " + str(base_obs.column_features), flush=True)
#print("WHOLE FEATURIZATION" + str(base_obs))
##############
# MORE STUFF
#scip_model.getRowLPActivity()
# Normalize the objective features
factor = 1.0 / np.max(np.absolute(extra_col_features[:, 4]))
extra_col_features[:, 4] *= factor
# Store both normalized and unormalized constraints
new_edge_values = np.tile(base_obs.edge_features.values.astype(np.float32).reshape(-1, 1), (1, 2))
#assert not np.any(np.isnan(new_edge_values))
cns_id = base_obs.edge_features.indices[0, :]
cns = extra_row_features[cns_id, 0]
div = np.maximum(1e-6, np.abs(cns))
new_edge_values[:, 1] /= div
#assert not np.any(np.isnan(new_edge_values))
stop = time.monotonic()
self.feature_normalization_time += stop - start
column_features = torch.from_numpy(np.concatenate([extra_col_features, base_obs.variable_features.astype(np.float32)], axis=1))
row_features = torch.from_numpy(np.concatenate([extra_row_features, base_obs.row_features.astype(np.float32)], axis=1))
edge_features = MilpEdgeFeatures(torch.from_numpy(base_obs.edge_features.indices.astype(np.int64)), torch.from_numpy(new_edge_values))
obs = MilpProblemObservation(column_features, row_features, edge_features)
stop = time.monotonic()
self.feature_merge_time += stop - start
if self.check_for_nans:
assert not torch.any(torch.isnan(obs.row_features))
assert not torch.any(torch.isnan(obs.column_features))
assert not torch.any(torch.isnan(obs.edge_features.indices))
assert not torch.any(torch.isnan(obs.edge_features.values))
assert not torch.any(torch.isinf(obs.row_features))
assert not torch.any(torch.isinf(obs.column_features))
assert not torch.any(torch.isinf(obs.edge_features.indices))
assert not torch.any(torch.isinf(obs.edge_features.values))
stop = time.monotonic()
self.total_time += stop - start
self.num_calls += 1
'''
print("feature_extraction_time", self.feature_extraction_time)
print("feature_cleanup_time", self.feature_cleanup_time)
print("extra_col_feature_extraction_time", self.extra_col_feature_extraction_time)
print("extra_row_feature_extraction_time", self.extra_row_feature_extraction_time)
print("feature_normalization_time", self.feature_normalization_time)
print("feature_merge_time", self.feature_merge_time)
print("total_time", self.total_time)
'''
return obs
def timings(self):
if self.num_calls == 0:
return ""
timing = f"observation time = {self.feature_extraction_time/self.num_calls: >.4f} {self.feature_cleanup_time/self.num_calls: >.4f} {self.extra_col_feature_extraction_time/self.num_calls: >.4f} {self.extra_row_feature_extraction_time/self.num_calls: >.4f} {self.feature_normalization_time/self.num_calls: >.4f} {self.feature_merge_time/self.num_calls: >.4f} {self.total_time/self.num_calls: >.4f}"
return timing
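

# Usage sketch (assumptions: ecole instance generation is acceptable for a smoke
# test, and the instance reaches a branching node; any ecole.scip.Model works).
if __name__ == "__main__":
    obs_func = BipartiteGraphObservations()
    model = next(ecole.instance.SetCoverGenerator(n_rows=50, n_cols=100))
    obs_func.before_reset(model)
    dyn = ecole.dynamics.BranchingDynamics()
    dyn.reset_dynamics(model)
    obs = obs_func.extract(model, done=False)
    print(obs.column_features.shape, obs.row_features.shape)
    print(obs_func.timings())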
| CL-LNS-main | graph_datasets/bipartite_graph_observations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ecole.typing
class DualBound(ecole.typing.InformationFunction):
def __init__(self):
super().__init__()
def before_reset(self, model):
super().before_reset(model)
def extract(self, model, done):
m = model.as_pyscipopt()
dual_bound = m.getDualbound()
return dual_bound
class Gap(ecole.typing.InformationFunction):
def __init__(self):
super().__init__()
def before_reset(self, model):
super().before_reset(model)
def extract(self, model, done):
m = model.as_pyscipopt()
gap = m.getGap()
return gap
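

# Usage sketch (assumption: these information functions are passed to an ecole
# environment via `information_function`, like the built-in ones).
if __name__ == "__main__":
    import ecole

    env = ecole.environment.Branching(
        information_function={"dual_bound": DualBound(), "gap": Gap()})
    model = next(ecole.instance.SetCoverGenerator(n_rows=50, n_cols=100))
    obs, action_set, reward, done, info = env.reset(model)
    print(info["dual_bound"], info["gap"])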
| CL-LNS-main | graph_datasets/informations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import torch
import random
import string
import os
import graph_datasets.bipartite_graph as bg
import graph_datasets.bipartite_graph_dataset as bgd
import graph_datasets.bipartite_graph_loader as bgl
class BipartiteGraphLoaderTest(unittest.TestCase):
def build_db(self, seed=None):
random.seed(seed)
letters = string.ascii_letters
name = '/tmp/' + ''.join(random.choice(letters) for i in range(10))
if os.path.exists(name):
os.remove(name)
db = bgd.BipartiteGraphDataset(name)
# create a graph with 2 variables and 1 constraint
db.add(bg.BipartiteGraph(torch.FloatTensor([123]), torch.IntTensor([[0, 1], [0, 0]]), torch.FloatTensor([32, 21]), torch.FloatTensor([78, 910]), torch.LongTensor([0]), [0], torch.FloatTensor([0.65]), [0]))
# create a graph with 3 variables and 2 constraints
db.add(bg.BipartiteGraph(torch.FloatTensor([456, 567]), torch.IntTensor([[0, 1, 1, 2], [0, 0, 1, 1]]), torch.FloatTensor([654, 765, 876, 987]), torch.FloatTensor([987, 109, 111]), torch.LongTensor([1, 2]), [0], torch.FloatTensor([0.56, 0.12]), [0]))
db.commit()
return name
def testLoadAsPTGeom(self):
name = self.build_db(seed="pt_geom")
loader = bgl.BipartiteGraphLoader(name, shuffle=False)
gen = loader.load(batch_size=1, format="pt_geom")
g1 = next(gen)
self.assertEqual(g1.constraint_features, torch.FloatTensor([123]))
self.assertTrue(torch.equal(g1.variable_features, torch.FloatTensor([78, 910])))
self.assertTrue(torch.equal(g1.edge_attr, torch.FloatTensor([32, 21])))
g2 = next(gen)
self.assertTrue(torch.equal(g2.constraint_features, torch.FloatTensor([456, 567])))
self.assertTrue(torch.equal(g2.variable_features, torch.FloatTensor([987, 109, 111])))
self.assertTrue(torch.equal(g2.edge_attr, torch.FloatTensor([654, 765, 876, 987])))
def testLoadAsDGL(self):
name = self.build_db(seed="dgl")
loader = bgl.BipartiteGraphLoader(name, shuffle=False)
gen = loader.load(batch_size=1, format="dgl")
g1 = next(gen)
self.assertTrue(torch.equal(g1.nodes['variables'].data['variable_features'], torch.FloatTensor([78, 910])))
self.assertTrue(torch.equal(g1.nodes['variables'].data['fsb_scores'], torch.FloatTensor([0.65, -1.0e10])))
self.assertEqual(g1.nodes['constraints'].data['constraint_features'], torch.FloatTensor([123]))
self.assertTrue(torch.equal(g1.edges['edges'].data['edge_attr'], torch.FloatTensor([32, 21])))
self.assertTrue(g1.has_edges_between(0, 0, ("variables", "edges", "constraints")))
self.assertTrue(g1.has_edges_between(1, 0, ("variables", "edges", "constraints")))
g2 = next(gen)
self.assertTrue(torch.equal(g2.nodes['variables'].data['variable_features'], torch.FloatTensor([987, 109, 111])))
self.assertTrue(torch.equal(g2.nodes['variables'].data['fsb_scores'], torch.FloatTensor([-1.0e10, 0.56, 0.12])))
self.assertTrue(torch.equal(g2.nodes['constraints'].data['constraint_features'], torch.FloatTensor([456, 567])))
self.assertTrue(torch.equal(g2.edges['edges'].data['edge_attr'], torch.FloatTensor([654, 765, 876, 987])))
self.assertTrue(g2.has_edges_between(0, 0, ("variables", "edges", "constraints")))
self.assertTrue(g2.has_edges_between(1, 0, ("variables", "edges", "constraints")))
self.assertTrue(g2.has_edges_between(1, 1, ("variables", "edges", "constraints")))
self.assertTrue(g2.has_edges_between(2, 1, ("variables", "edges", "constraints")))
def testLoadAsNTX(self):
name = self.build_db(seed="ntx")
loader = bgl.BipartiteGraphLoader(name, shuffle=False)
gen = loader.load(batch_size=1, format="ntx")
g1 = next(gen)
# TODO: figure out how to check the graph
#nx.write_gpickle(g1, "/tmp/g1.pickle")
#with open('/tmp/g1.txt', mode='w') as f:
# print(str(g1), file=f)
g2 = next(gen)
#nx.write_gpickle(g2, "/tmp/g2.pickle")
#with open('/tmp/g2.txt', mode='w') as f:
# print(str(g2), file=f)
reached_end = False
try:
_ = next(gen)
        except StopIteration:
reached_end = True
self.assertTrue(reached_end)
| CL-LNS-main | graph_datasets/bipartite_graph_loader_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pyscipopt import Eventhdlr
from pyscipopt import SCIP_EVENTTYPE
class DualBoundEventHandler(Eventhdlr):
def __init__(self, initial_bound=None):
super().__init__()
self.initial_bound = initial_bound
if initial_bound:
self.events = [(initial_bound, 0, 0)]
self.last_dual = initial_bound
else:
self.events = []
self.last_dual = float("NaN")
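    # Usage sketch (as comments; names below are illustrative and registration
    # follows the standard PySCIPOpt pattern):
    #   handler = DualBoundEventHandler()
    #   model.includeEventhdlr(handler, "DualBound", "records dual bound improvements")
    #   model.optimize()
    #   handler.events  # list of (dual_bound, solving_time_s, nb_nodes) tuples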
def eventinit(self):
self.model.catchEvent(SCIP_EVENTTYPE.LPEVENT, self)
def eventexit(self):
self.model.dropEvent(SCIP_EVENTTYPE.LPEVENT, self)
def eventexec(self, event):
dual = self.model.getDualbound()
if dual != self.last_dual:
if self.initial_bound:
if self.model.getObjectiveSense() == "minimize":
dual = max(dual, self.initial_bound)
else:
dual = min(dual, self.initial_bound)
self.last_dual = dual
time = self.model.getSolvingTime()
nb_nodes = self.model.getNNodes()
self.events.append((dual, time, nb_nodes))
#print(f"CAUGHT EVENT {dual} at t={time} nb_nodes={nb_nodes}", flush=True)
return {} | CL-LNS-main | graph_datasets/event_handlers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sqlite3
from pathlib import Path
import hashlib
import string
import random
import functools
from collections import defaultdict
class EvaluationData():
def __init__(self, db, read_only=False):
p = Path(db)
if not p.parent.exists():
p.parent.mkdir(exist_ok=True)
already_created = p.exists()
assert already_created or not read_only
uri = "file:" + db
if read_only:
uri += "?mode=ro"
self.db = sqlite3.connect(uri, uri=True)
self.cur = self.db.cursor()
# Create table if needed
if not already_created:
self.cur.execute('''CREATE TABLE eval_data (instance_id string not null, model_version string not null, step_id integer not null, nb_nodes integer not null, timestamp float, primal float, dual float)''')
self.cur.execute('''CREATE INDEX per_instance_id ON eval_data(instance_id)''')
self.cur.execute('''CREATE INDEX per_model_version ON eval_data(model_version)''')
self.db.commit()
self.added_rows = 0
def __del__(self):
self.db.commit()
self.db.close()
@functools.lru_cache(maxsize=16)
def _instance_to_key(self, model):
letters = string.ascii_letters
tmp_file = '/tmp/' + ''.join(random.choice(letters) for i in range(10)) + '.lp'
model.writeProblem(tmp_file)
with open(tmp_file, 'r') as f:
problem = f.read()
problem = problem.encode()
key = hashlib.sha256(problem).hexdigest()
return key
def add(self, instance, model_version, step_id, primal, dual, nb_nodes, timestamp):
instance_id = self._instance_to_key(instance)
self.cur.execute(f"INSERT INTO eval_data VALUES (\'{instance_id}\', \'{model_version}\', {step_id}, {nb_nodes}, {timestamp}, {primal}, {dual})")
self.added_rows += 1
if self.added_rows % 1000 == 0:
self.db.commit()
def commit(self):
self.db.commit()
class EvaluationDataMining():
def __init__(self, db, models):
self.db = EvaluationData(db, read_only=True)
self.models = models
def compute_metrics(self):
model_filter = f"model_version == '{self.models[0]}' "
for m in self.models[1:]:
model_filter += f"OR model_version == '{m}' "
query = f"SELECT DISTINCT instance_id FROM eval_data WHERE {model_filter}"
#print(query)
self.db.cur.execute(query)
instances = self.db.cur.fetchall()
#print(str(instances))
integrals_over_time = defaultdict(lambda: [])
integrals_over_nodes = defaultdict(lambda: [])
nb_nodes = defaultdict(lambda: [])
for instance in instances:
instance_id = instance[0]
max_nb_nodes = 1e100
for version in self.models:
query = f"SELECT MAX(nb_nodes) FROM eval_data WHERE instance_id == '{instance_id}' AND model_version == '{version}'"
#print(query)
self.db.cur.execute(query)
num_nodes = self.db.cur.fetchone()
#print(str(num_nodes))
max_nb_nodes = min(max_nb_nodes, int(num_nodes[0]))
for version in self.models:
#print(version)
nb_nodes[version].append(max_nb_nodes)
integral_over_time = 0
integral_over_nodes = 0
query = f"SELECT nb_nodes, dual, timestamp FROM eval_data WHERE instance_id == '{instance_id}' AND model_version == '{version}' AND nb_nodes <= {max_nb_nodes} ORDER BY nb_nodes ASC"
#print(query)
first = True
last_dual = 0
last_nb_nodes = 0
last_timestamp = 0
for rslt in self.db.cur.execute(query):
#print("ORDERED RSLT:" + str(rslt))
if not first:
integral_over_time += last_dual * (float(rslt[2]) - last_timestamp)
integral_over_nodes += last_dual * (int(rslt[0]) - last_nb_nodes)
first = False
last_dual = float(rslt[1])
last_nb_nodes = int(rslt[0])
last_timestamp = float(rslt[2])
if last_nb_nodes < max_nb_nodes:
integral_over_nodes += last_dual * (max_nb_nodes - last_nb_nodes)
integrals_over_time[version].append(integral_over_time)
integrals_over_nodes[version].append(integral_over_nodes)
return (nb_nodes, integrals_over_nodes, integrals_over_time)
def draw_in_tensorboard(self):
pass
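

# Usage sketch (assumptions: PySCIPOpt is available and /tmp is writable; the toy
# instance and the "v1" version string are illustrative).
if __name__ == "__main__":
    import pyscipopt

    instance = pyscipopt.Model()
    x = instance.addVar(vtype="I", lb=0, ub=10)
    instance.setObjective(x, "minimize")

    data = EvaluationData("/tmp/eval_example.db")
    data.add(instance, "v1", step_id=0, primal=10.0, dual=1.0, nb_nodes=5, timestamp=0.1)
    data.add(instance, "v1", step_id=1, primal=8.0, dual=2.0, nb_nodes=9, timestamp=0.4)
    data.commit()

    miner = EvaluationDataMining("/tmp/eval_example.db", ["v1"])
    nb_nodes, integrals_over_nodes, integrals_over_time = miner.compute_metrics()
    print(nb_nodes, integrals_over_nodes, integrals_over_time)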
| CL-LNS-main | graph_datasets/evaluation_data.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import sqlite3
import pickle
import base64
import random
from pathlib import Path
from graph_datasets.bipartite_graph import BipartiteGraph
import intervaltree
import zlib
import torch
class BipartiteGraphDataset(torch_geometric.data.Dataset):
"""
This class encodes a collection of graphs, as well as a method to load such graphs from the disk.
It can be used in turn by the data loaders provided by pytorch geometric.
"""
def __init__(self, sample_db, query_opt=False, read_only=False, first_k=None):
super().__init__(root=None, transform=None, pre_transform=None)
self.query_opt = query_opt
p = Path(sample_db)
if not p.parent.exists():
p.parent.mkdir(exist_ok=True)
already_created = p.exists()
assert already_created or not read_only
uri = "file:" + sample_db
if read_only:
uri += "?mode=ro"
self.db = sqlite3.connect(uri, uri=True)
self.cur = self.db.cursor()
# Create table if needed
if not already_created:
self.cur.execute('''CREATE TABLE samples (id integer primary key asc, features text not null unique)''')
#self.cur.execute('''CREATE UNIQUE INDEX per_id ON samples(id)''')
self.cur.execute('''INSERT INTO samples VALUES (-1, \'0\')''')
self.db.commit()
self.sample_cnt = 0
else:
self.cur.execute("SELECT features FROM samples WHERE id = -1")
rslt = self.cur.fetchone()
self.sample_cnt = int(rslt[0])
if first_k is not None:
self.sample_cnt = min(self.sample_cnt, first_k)
print(f"Use first_k = {first_k}. Dataset size = {self.sample_cnt}")
def __del__(self):
self.db.close()
def len(self):
return self.sample_cnt
def get(self, index):
"""
Load a bipartite graph observation as saved on the disk during data collection.
"""
#print("here: get")
#assert False
#from IPython import embed; embed()
if self.query_opt:
# Ignore the requested index, so we can stream data
rslt = self.cur.fetchone()
if rslt is None:
query = "SELECT features FROM samples WHERE id >= 0"
self.cur.execute(query)
rslt = self.cur.fetchone()
assert rslt is not None
else:
# Fetch the data at the requested index. This is much slower
query = f"SELECT features FROM samples WHERE id = {index}"
self.cur.execute(query)
rslt = self.cur.fetchone()
entry = base64.b64decode(rslt[0].encode())
try:
raw = zlib.decompress(entry)
        except zlib.error:
# Old uncompressed dataset
raw = entry
graph = pickle.loads(raw)
#from IPython import embed; embed()
#if torch.sum(graph.candidate_scores).item() < 25:
# return None
#if index % 2 ==0 :
# return None
return graph
def add(self, graph):
"""
Add a bipartite graph observation to the dataset. Only adds the observation if it wasn't
already present in the dataset
"""
# Insert a row of data
raw = pickle.dumps(graph)
compressed = zlib.compress(raw, level=9)
sample = base64.b64encode(compressed).decode()
query = f"INSERT INTO samples VALUES ({self.sample_cnt}, \'{sample}\')"
try:
self.cur.execute(query)
self.sample_cnt += 1
self.commit()
return True
except sqlite3.IntegrityError:
return False
def merge(self, dataset):
"""
Add another dataset to the current one
"""
query = "SELECT features FROM samples WHERE id >= 0"
for sample in dataset.cur.execute(query):
insert = f"INSERT INTO samples VALUES ({self.sample_cnt}, \'{sample[0]}\')"
try:
self.cur.execute(insert)
self.sample_cnt += 1
except sqlite3.IntegrityError:
continue
if self.sample_cnt % 1000 == 0:
self.commit()
self.commit()
def merge_multiple(self, datasets):
"""
Add several other datasets to the current one
"""
query = "SELECT features FROM samples WHERE id >= 0"
for dataset in datasets:
dataset.cur.execute(query)
done = False
while not done:
idx = random.randint(0, len(datasets)-1)
dataset = datasets[idx]
sample = dataset.cur.fetchone()
if sample is None:
datasets.pop(idx)
if len(datasets) == 0:
done = True
else:
insert = f"INSERT INTO samples VALUES ({self.sample_cnt}, \'{sample[0]}\')"
try:
self.cur.execute(insert)
self.sample_cnt += 1
except sqlite3.IntegrityError:
continue
if self.sample_cnt % 1000 == 0:
self.commit()
self.commit()
def commit(self):
query = f"INSERT OR REPLACE INTO samples VALUES (-1, \'{self.sample_cnt}\')"
self.cur.execute(query)
self.db.commit()
class BipartiteGraphDatasets(torch_geometric.data.Dataset):
"""
Allows training on the data from multiple datasets.
"""
def __init__(self, databases, query_opt=False, first_k=None):
super().__init__(root=None, transform=None, pre_transform=None)
if first_k:
            first_k = max(1, first_k // len(databases))
self.dbs = intervaltree.IntervalTree()
self.sample_cnt = 0
for db in databases:
p = Path(db)
assert p.exists()
dataset = BipartiteGraphDataset(db, query_opt, read_only=True, first_k=first_k)
new_samples = dataset.len()
self.dbs[self.sample_cnt:self.sample_cnt+new_samples] = dataset
self.sample_cnt += new_samples
def len(self):
return self.sample_cnt
def get(self, index):
"""
Load a bipartite graph observation as saved on the disk during data collection.
"""
rslt = None
#while rslt is None:
d = self.dbs[index].pop()
db = d.data
index -= d.begin
rslt = db.get(index)
return rslt
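

# Usage sketch (assumptions: the shard .db files were produced earlier by
# BipartiteGraphDataset.add during data collection; paths are illustrative).
if __name__ == "__main__":
    merged = BipartiteGraphDataset("/tmp/merged_samples.db")
    shards = [BipartiteGraphDataset(p, read_only=True)
              for p in ("/tmp/shard_a.db", "/tmp/shard_b.db")]
    merged.merge_multiple(shards)
    print(merged.len())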
| CL-LNS-main | graph_datasets/bipartite_graph_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import ecole
import torch
import torch_geometric
import numpy as np
import pyscipopt
import graph_datasets.bipartite_graph as bg
import graph_datasets.bipartite_graph_dataset as bgd
import graph_datasets.bipartite_graph_observations as bgo
import ilp_solver
import os
import time
def advance_to_root_node(model, branching):
"""Utility to advance a model to the root node."""
if branching:
dyn = ecole.dynamics.BranchingDynamics()
#print("BranchingDynamics")
else:
dyn = ecole.dynamics.PrimalSearchDynamics()
#print("PrimalSearchDynamics")
model = dyn.reset_dynamics(model)
return model
def make_obs(obs_func, model, branching=True):
"""Utility function to extract observation on root node."""
start = time.monotonic()
if isinstance(obs_func, tuple):
for f in obs_func:
f.before_reset(model)
else:
obs_func.before_reset(model)
stop = time.monotonic()
advance_to_root_node(model, branching)
stop = time.monotonic()
if isinstance(obs_func, tuple):
rslt = []
for f in obs_func:
rslt.append(f.extract(model, False))
return rslt
else:
return obs_func.extract(model, False)
def disable_all(solver):
solver.disable_presolver()
solver.disable_cuts()
solver.disable_heuristics()
class FeaturizationTest(unittest.TestCase):
def setUp(self):
# Create a small ILP problem
solver1 = ilp_solver.ILPSolver(engine="scip")
x1 = solver1.create_integer_var("x1")
x2 = solver1.create_integer_var("x2")
solver1.add_constraint(10 * x1 + 15 * x2 >= 100.23)
solver1.add_constraint(20 * x1 + 16 * x2 >= 161.8)
solver1.add_constraint(17 * x1 + 11 * x2 >= 129.42)
# Minimize the objective
solver1.set_objective_function(80 * x1 + 95 * x2, maximize=False)
disable_all(solver1)
scip_model = solver1.as_scip_model()
self.model1 = ecole.scip.Model.from_pyscipopt(scip_model)
#self.model1.transform_prob()
solver2 = ilp_solver.ILPSolver(engine="scip")
x1 = solver2.create_integer_var("x1")
x2 = solver2.create_integer_var("x2")
solver2.add_constraint(20 * x1 + 30 * x2 <= 200)
solver2.add_constraint(40 * x1 + 32 * x2 <= 320)
solver2.add_constraint(34 * x1 + 22 * x2 <= 260)
# Minimize the objective
solver2.set_objective_function(80 * x1 + 95 * x2, maximize=True)
disable_all(solver2)
scip_model = solver2.as_scip_model()
self.model2 = ecole.scip.Model.from_pyscipopt(scip_model)
#self.model2.transform_prob()
solver3 = ilp_solver.ILPSolver(engine="scip")
x0 = solver3.create_integer_var("x0")
x1 = solver3.create_integer_var("x1")
x2 = solver3.create_integer_var("x2")
solver3.add_constraint(20 * x1 + 30 * x2 >= 200)
solver3.add_constraint(40 * x1 + 32 * x2 >= 320)
solver3.add_constraint(34 * x1 + 22 * x2 >= 260)
solver3.add_constraint(2 * x0 + 3 * x1 == 12)
# Minimize the objective
solver3.set_objective_function(87.3 * x1 + 93.2 * x2, maximize=False)
disable_all(solver3)
scip_model = solver3.as_scip_model()
self.model3 = ecole.scip.Model.from_pyscipopt(scip_model)
#self.model3.transform_prob()
def testBranchingFeaturization(self):
observation = make_obs(bgo.BipartiteGraphObservations(), self.model1)
#print("VARS1: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 8.4211e-01,
5.8809e+00, 6.4414e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.8809e+00,
8.8086e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 1.0000e+00,
2.7614e+00, 7.6491e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.7614e+00,
7.6143e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00]])""")
#print("CNS1: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[-1.0023e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-5.5598e+00, -9.9375e-01, 1.0000e+00, -1.9779e-03, 0.0000e+00],
[-1.6180e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.3172e+00, -9.8082e-01, 1.0000e+00, -5.6137e-04, 0.0000e+00],
[-1.2942e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.3916e+00, -9.5634e-01, 0.0000e+00, 0.0000e+00, 1.6667e-01]])""")
#print("EDGES1: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])""")
#print("EDGE VALS1: " + str(observation.edge_features.values))
self.assertEqual(str(observation.edge_features.values),
"""tensor([[-10.0000, -0.0998],
[-15.0000, -0.1497],
[-20.0000, -0.1236],
[-16.0000, -0.0989],
[-17.0000, -0.1314],
[-11.0000, -0.0850]])""")
observation = make_obs(bgo.BipartiteGraphObservations(), self.model2)
#print("VARS2: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, -8.4211e-01,
5.7143e+00, -6.4414e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.7143e+00,
7.1429e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, -1.0000e+00,
2.8571e+00, -7.6491e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.8571e+00,
8.5714e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00]])""")
#print("CNS2: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[ 2.0000e+02, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
5.5470e+00, -9.9375e-01, 1.0000e+00, -4.9448e-04, 0.0000e+00],
[ 3.2000e+02, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
6.2470e+00, -9.8082e-01, 1.0000e+00, -1.4034e-04, 0.0000e+00],
[ 2.6000e+02, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
6.4202e+00, -9.5634e-01, 0.0000e+00, -0.0000e+00, 1.6667e-01]])""")
#print("EDGES2: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])""")
#print("EDGE VALS2: " + str(observation.edge_features.values), flush=True)
self.assertEqual(str(observation.edge_features.values),
"""tensor([[20.0000, 0.1000],
[30.0000, 0.1500],
[40.0000, 0.1250],
[32.0000, 0.1000],
[34.0000, 0.1308],
[22.0000, 0.0846]])""")
observation = make_obs(bgo.BipartiteGraphObservations(), self.model3)
#print("VARS3: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 0.0000e+00,
-2.7931e+00, 0.0000e+00, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, -2.7931e+00,
2.0690e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 9.3670e-01,
5.8621e+00, 6.8363e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.8621e+00,
8.6207e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 1.0000e+00,
2.7586e+00, 7.2983e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.7586e+00,
7.5862e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00]])""")
#print("CNS3: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[-2.0000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-5.5470e+00, -9.8646e-01, 1.0000e+00, -4.6740e-04, 0.0000e+00],
[-3.2000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.2470e+00, -9.8975e-01, 0.0000e+00, 0.0000e+00, 1.6667e-01],
[-2.6000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.4202e+00, -9.7044e-01, 1.0000e+00, -2.5171e-04, 0.0000e+00],
[-1.2000e+01, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-3.3282e+00, -5.6881e-01, 1.0000e+00, 0.0000e+00, 1.6667e-01],
[ 1.2000e+01, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
3.3282e+00, 5.6881e-01, 1.0000e+00, -0.0000e+00, 1.6667e-01]])""")
#print("EDGES3: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
[1, 2, 1, 2, 1, 2, 0, 1, 0, 1]])""")
#print("EDGE VALS3: " + str(observation.edge_features.values))
self.assertEqual(str(observation.edge_features.values),
"""tensor([[-20.0000, -0.1000],
[-30.0000, -0.1500],
[-40.0000, -0.1250],
[-32.0000, -0.1000],
[-34.0000, -0.1308],
[-22.0000, -0.0846],
[ -2.0000, -0.1667],
[ -3.0000, -0.2500],
[ 2.0000, 0.1667],
[ 3.0000, 0.2500]])""")
def testPrimalSearchFeatures(self):
observation = make_obs(bgo.BipartiteGraphObservations(), self.model3, branching=False)
#print("VARS: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 0.0000e+00,
-2.7931e+00, 0.0000e+00, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, -2.7931e+00,
2.0690e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 9.3670e-01,
5.8621e+00, 6.8363e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.8621e+00,
8.6207e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 1.0000e+00,
2.7586e+00, 7.2983e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.7586e+00,
7.5862e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00]])""")
#print("CNS: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[-2.0000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-5.5470e+00, -9.8646e-01, 1.0000e+00, -4.6740e-04, 0.0000e+00],
[-3.2000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.2470e+00, -9.8975e-01, 0.0000e+00, 0.0000e+00, 1.6667e-01],
[-2.6000e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.4202e+00, -9.7044e-01, 1.0000e+00, -2.5171e-04, 0.0000e+00],
[-1.2000e+01, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-3.3282e+00, -5.6881e-01, 1.0000e+00, 0.0000e+00, 1.6667e-01],
[ 1.2000e+01, 1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
3.3282e+00, 5.6881e-01, 1.0000e+00, -0.0000e+00, 1.6667e-01]])""")
#print("EDGES: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
[1, 2, 1, 2, 1, 2, 0, 1, 0, 1]])""")
#print("EDGE VALS: " + str(observation.edge_features.values))
self.assertEqual(str(observation.edge_features.values),
"""tensor([[-20.0000, -0.1000],
[-30.0000, -0.1500],
[-40.0000, -0.1250],
[-32.0000, -0.1000],
[-34.0000, -0.1308],
[-22.0000, -0.0846],
[ -2.0000, -0.1667],
[ -3.0000, -0.2500],
[ 2.0000, 0.1667],
[ 3.0000, 0.2500]])""")
def testKhalilFeaturization(self):
observation, khalil = make_obs((bgo.BipartiteGraphObservations(), ecole.observation.Khalil2016()), self.model1)
branching_vars = np.array([0, 1])
observation.add_khalil_features(khalil, branching_vars)
print("VARS: " + str(observation.column_features), flush=True)
self.assertEqual(str(observation.column_features),
"""tensor([[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 8.4211e-01,
5.8809e+00, 6.4414e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 5.8809e+00,
8.8086e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00, 1.6000e+01, 1.6000e+01,
0.0000e+00, 3.0000e+00, 2.0000e+00, 0.0000e+00, 2.0000e+00,
2.0000e+00, 3.0000e+00, 1.5667e+01, 4.1899e+00, 1.0000e+01,
2.0000e+01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 1.1914e-01, 1.1914e-01, 1.1914e-01, 8.8086e-01,
1.3526e-01, 1.0000e+00, 1.0495e-01, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 2.0000e+00, 0.0000e+00, 2.0000e+00,
2.0000e+00, 5.0000e-01, 5.0000e-01, 5.0000e-01, 1.0000e+00,
-1.0000e+00, -1.1610e-01, -9.0719e-02, 4.0000e-01, 6.0714e-01,
1.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 1.0000e+00,
0.0000e+00, 2.0000e+00, 3.0000e+01, 1.5000e+01, 3.5355e+00,
1.0000e+01, 2.0000e+01, 6.7778e-02, 9.5556e-01, 4.7778e-01,
5.4997e-02, 4.0000e-01, 5.5556e-01, 6.7778e-02, 9.5556e-01,
4.7778e-01, 5.4997e-02, 4.0000e-01, 5.5556e-01, 1.2429e+00,
1.6000e+01, 8.0000e+00, 6.0609e-01, 7.1429e+00, 8.8571e+00],
[-9.2234e+18, 9.2234e+18, -9.2234e+18, 9.2234e+18, 1.0000e+00,
2.7614e+00, 7.6491e-01, 0.0000e+00, 1.0000e+00, 0.0000e+00,
0.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 2.7614e+00,
7.6143e-01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.0000e+00, 0.0000e+00, 0.0000e+00, 1.9000e+01, 1.9000e+01,
0.0000e+00, 3.0000e+00, 2.0000e+00, 0.0000e+00, 2.0000e+00,
2.0000e+00, 3.0000e+00, 1.4000e+01, 2.1602e+00, 1.1000e+01,
1.6000e+01, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 2.3857e-01, 2.3857e-01, 2.3857e-01, 7.6143e-01,
3.1332e-01, 1.0000e+00, 1.8166e-01, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 2.0000e+00, 0.0000e+00, 2.0000e+00,
2.0000e+00, 5.0000e-01, 5.0000e-01, 5.0000e-01, 1.0000e+00,
-1.0000e+00, -1.3017e-01, -7.8336e-02, 3.9286e-01, 6.0000e-01,
1.0000e+00, 1.0000e+00, 1.0000e+00, 0.0000e+00, 1.0000e+00,
0.0000e+00, 2.0000e+00, 3.1000e+01, 1.5500e+01, 3.5355e-01,
1.5000e+01, 1.6000e+01, 6.7778e-02, 1.0444e+00, 5.2222e-01,
5.4997e-02, 4.4444e-01, 6.0000e-01, 6.7778e-02, 1.0444e+00,
5.2222e-01, 5.4997e-02, 4.4444e-01, 6.0000e-01, 1.2429e+00,
1.9000e+01, 9.5000e+00, 2.6769e+00, 5.7143e+00, 1.3286e+01]])""")
print("CNS: " + str(observation.row_features))
self.assertEqual(str(observation.row_features),
"""tensor([[-1.0023e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-5.5598e+00, -9.9375e-01, 1.0000e+00, -1.9779e-03, 0.0000e+00],
[-1.6180e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.3172e+00, -9.8082e-01, 1.0000e+00, -5.6137e-04, 0.0000e+00],
[-1.2942e+02, -1.0000e+00, 0.0000e+00, 2.0000e+00, 1.0000e+00,
-6.3916e+00, -9.5634e-01, 0.0000e+00, 0.0000e+00, 1.6667e-01]])""")
print("EDGES: " + str(observation.edge_features.indices))
self.assertEqual(str(observation.edge_features.indices),
"""tensor([[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])""")
print("EDGE VALS: " + str(observation.edge_features.values))
self.assertEqual(str(observation.edge_features.values),
"""tensor([[-10.0000, -0.0998],
[-15.0000, -0.1497],
[-20.0000, -0.1236],
[-16.0000, -0.0989],
[-17.0000, -0.1314],
[-11.0000, -0.0850]])""")
if __name__ == "__main__":
unittest.main()
| CL-LNS-main | graph_datasets/featurization_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| CL-LNS-main | graph_datasets/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import graph_datasets.bipartite_graph_dataset as bgd
import torch_geometric
#import dgl
import random
import torch
class BipartiteGraphLoader:
def __init__(self, db, shuffle=True, first_k=None):
self.shuffle = shuffle
dbs = db.split('+')
if len(dbs) == 1:
self.data = bgd.BipartiteGraphDataset(db, query_opt=not shuffle, read_only=True, first_k=first_k)
else:
self.data = bgd.BipartiteGraphDatasets(dbs, query_opt=not shuffle, first_k=first_k)
def num_examples(self):
return self.data.sample_cnt
def load(self, batch_size=32, format="pt_geom"):
#from IPython import embed;embed()
if format == "pt_geom":
#print("here")
def my_collate(batch):
#embed()
#print(len(batch))
#batch = list(filter(lambda x: torch.sum(x.candidate_scores) > 0.5 * x.info["neighborhood_size"], batch))
#return None
#from IPython import embed; embed()
                batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
loader = torch_geometric.loader.DataLoader(self.data, batch_size, shuffle=self.shuffle)#, collate_fn=my_collate)
for ptg in loader:
#from IPython import embed;embed()
yield ptg
return
        elif format == 'dgl':
            import dgl  # lazy import: the module-level import is commented out above
            k = self.data.len()
permutation = random.sample(range(k), k)
graphs = []
for loc in permutation:
ptg = self.data.get(loc)
ntx = ptg.to_networkx()
#print("here")
#from IPython import embed;embed()
dgl_graph = dgl.bipartite_from_networkx(ntx, utype='variables', etype='edges', vtype='constraints',
u_attrs=['variable_features'], e_attrs=['edge_attr'], v_attrs=['constraint_features'])
# Annotate the variables with other information
num_variables = dgl_graph.nodes("variables").size(0)
fsb_scores = torch.full((num_variables,), -1.0e10) #, dype=torch.float)
candidate_scores = ntx.graph["candidate_scores"]
branching_candidates = ntx.graph["candidates"]
num_candidates = branching_candidates.size(0)
for i in range(num_candidates):
candidate_id = branching_candidates[i]
candidate_score = candidate_scores[i]
assert candidate_score >= 0
fsb_scores[candidate_id] = candidate_score
dgl_graph.nodes['variables'].data['fsb_scores'] = fsb_scores
graphs.append(dgl_graph)
if len(graphs) == batch_size:
yield dgl.batch(graphs)
graphs = []
return
assert format == 'ntx'
k = self.data.len()
permutation = random.sample(range(k), k)
batch = []
for loc in permutation:
ptg = self.data.get(loc)
ntx = ptg.to_networkx()
batch.append(ntx)
if len(batch) == batch_size:
yield batch
batch = []
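

# Usage sketch (assumption: "/tmp/samples.db" was produced during data collection;
# the path is illustrative).
if __name__ == "__main__":
    loader = BipartiteGraphLoader("/tmp/samples.db", shuffle=True)
    print(loader.num_examples())
    for batch in loader.load(batch_size=8, format="pt_geom"):
        print(batch.num_graphs)
        break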
| CL-LNS-main | graph_datasets/bipartite_graph_loader.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch_geometric
import torch
import numpy as np
import networkx as nx
class BipartiteGraph(torch_geometric.data.Data):
"""
This class encode a node bipartite graph observation as returned by the `ecole.observation.NodeBipartite`
observation function in a format understood by the pytorch geometric data handlers.
"""
def __init__(self, constraint_features, edge_indices, edge_features, variable_features,
candidates, candidate_choice, candidate_scores, info,
iteration = None, instance_id = None, incumbent_history = None, LB_relaxation_history = None, improvement_history = None, neighborhood_size = None):
super().__init__()
self.constraint_features = constraint_features
self.edge_index = edge_indices
self.edge_attr = edge_features
self.variable_features = variable_features
#print("Variable features shape", variable_features.shape)
self.candidates = candidates
self.nb_candidates = len(candidates) if candidates is not None else 0
self.candidate_choices = candidate_choice
self.candidate_scores = candidate_scores
self.info = info
# We must tell pytorch geometric how many nodes there are, for indexing purposes
self.num_nodes = constraint_features.shape[0] if constraint_features is not None else 0
self.num_nodes += variable_features.shape[0] if variable_features is not None else 0
self.iteration = iteration
self.instance_id = instance_id
self.incumbent_history = incumbent_history
self.LB_relaxation_history = LB_relaxation_history
self.improvement_history = improvement_history
self.neighborhood_size = neighborhood_size
def __inc__(self, key, value, *args, **kwargs):
"""
We overload the pytorch geometric method that tells how to increment indices when concatenating graphs
for those entries (edge index, candidates) for which this is not obvious.
"""
if key == 'edge_index':
return torch.tensor([[self.constraint_features.shape[0]], [self.variable_features.shape[0]]])
elif key == 'candidates':
return self.variable_features.shape[0]
else:
            return super().__inc__(key, value, *args, **kwargs)
def to_networkx(self):
G = nx.DiGraph(candidates=self.candidates, candidate_scores=self.candidate_scores,
nb_candidates=self.nb_candidates, candidate_choice=self.candidate_choices,
info=self.info)
G.add_nodes_from(range(self.num_nodes))
num_vars = self.variable_features.shape[0]
#print(num_vars)
for i, (v, u) in enumerate(self.edge_index.T.tolist()):
G.add_edge(u, v+num_vars)
#print(u, v)
assert 0 <= u and u < num_vars, str(u)
assert v >= 0, str(v)
G[u][v+num_vars]["edge_attr"] = self.edge_attr[i]
for i, feat_dict in G.nodes(data=True):
if i < num_vars:
feat_dict.update({"variable_features": self.variable_features[i].squeeze()})
feat_dict.update({"bipartite": 0})
else:
feat_dict.update({"constraint_features": self.constraint_features[i-num_vars].squeeze()})
feat_dict.update({"bipartite": 1})
for u, v in G.edges():
#print(u, v, G.nodes[u]['bipartite'], G.nodes[v]['bipartite'], num_vars)
assert(G.nodes[u]['bipartite'] == 0)
assert(G.nodes[v]['bipartite'] == 1)
return G
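

# Construction sketch with toy tensors (real observations come from the feature
# extraction code; the shapes and values here are illustrative only).
if __name__ == "__main__":
    g = BipartiteGraph(
        constraint_features=torch.FloatTensor([[1.0]]),
        edge_indices=torch.LongTensor([[0, 0], [0, 1]]),  # constraint 0 touches variables 0 and 1
        edge_features=torch.FloatTensor([[2.0], [3.0]]),
        variable_features=torch.FloatTensor([[4.0], [5.0]]),
        candidates=torch.LongTensor([0]),
        candidate_choice=torch.LongTensor([0]),
        candidate_scores=torch.FloatTensor([1.0]),
        info={})
    print(g.num_nodes)  # 3: one constraint node plus two variable nodes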
| CL-LNS-main | graph_datasets/bipartite_graph.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ecole.typing
import competition.common.rewards as competition_rewards
# Returns the relative improvement in dual bound since the last step
class Dual(ecole.typing.RewardFunction):
def __init__(self):
self.parameters = competition_rewards.IntegralParameters()
super().__init__(wall=True, bound_function=lambda model: (
self.parameters.offset,
self.parameters.initial_primal_bound))
def set_parameters(self, objective_offset=None, initial_primal_bound=None, initial_dual_bound=None):
self.parameters = competition_rewards.IntegralParameters(
offset=objective_offset,
initial_primal_bound=initial_primal_bound,
initial_dual_bound=initial_dual_bound)
def before_reset(self, model):
self.parameters.fetch_values(model)
super().before_reset(model)
self.last_dual_bound = self.parameters.initial_dual_bound
def extract(self, model, done):
if done:
return 0
m = model.as_pyscipopt()
dual_bound = m.getDualbound()
reward = abs(dual_bound - self.last_dual_bound) / abs(self.last_dual_bound)
self.last_dual_bound = dual_bound
return reward
# Returns the relative improvement in the primal/dual gap since the last step
class PrimalDualGap(ecole.typing.RewardFunction):
def __init__(self):
self.parameters = competition_rewards.IntegralParameters()
super().__init__(wall=True, bound_function=lambda model: (
self.parameters.offset,
self.parameters.initial_primal_bound))
def set_parameters(self, objective_offset=None, initial_primal_bound=None, initial_dual_bound=None):
self.parameters = competition_rewards.IntegralParameters(
offset=objective_offset,
initial_primal_bound=initial_primal_bound,
initial_dual_bound=initial_dual_bound)
def before_reset(self, model):
self.parameters.fetch_values(model)
super().before_reset(model)
self.last_gap = abs(self.parameters.initial_dual_bound - self.parameters.initial_primal_bound) / min(abs(self.parameters.initial_dual_bound), abs(self.parameters.initial_primal_bound))
def extract(self, model, done):
if done:
return 0
m = model.as_pyscipopt()
gap = m.getGap()
reward = (self.last_gap - gap) / self.last_gap
self.last_gap = gap
return reward
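

# Note: both rewards are step-wise relative improvements. For the dual bound the
# reward at step t is |d_t - d_{t-1}| / |d_{t-1}|; for the primal/dual gap it is
# (g_{t-1} - g_t) / g_{t-1}, so positive values indicate progress toward optimality.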
| CL-LNS-main | graph_datasets/step_rewards.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import ilp_solver
import random
import string
from graph_datasets.solved_milp_dataset import SolvedMilpDataset
class SolvedMilpDatasetTest(unittest.TestCase):
def setUp(self):
# Create a small ILP problem
solver = ilp_solver.ILPSolver(engine="scip")
x1 = solver.create_integer_var("x1")
x2 = solver.create_integer_var("x2")
solver.add_constraint(10 * x1 + 15 * x2 >= 100.23)
solver.add_constraint(20 * x1 + 16 * x2 >= 161.8)
solver.add_constraint(17 * x1 + 11 * x2 >= 129.42)
solver.set_objective_function(80 * x1 + 95 * x2, maximize=False)
self.model = solver.as_scip_model()
self.model.optimize()
self.solution = self.model.getBestSol()
self.obj_value = self.model.getObjVal()
self.gap = self.model.getGap()
letters = string.ascii_letters
self.db_name = '/tmp/' + ''.join(random.choice(letters) for i in range(10))
self.db_name2 = '/tmp/' + ''.join(random.choice(letters) for i in range(10))
def test_read_write(self):
dataset = SolvedMilpDataset(self.db_name)
dataset.add(self.model, self.solution, self.obj_value, self.gap)
a, b = dataset.get_one(self.model)
sol = {}
for v in self.model.getVars():
val = self.solution[v]
sol[v.getIndex()] = val
self.assertEqual(a, sol)
self.assertEqual(b, self.obj_value)
def test_missing_entry(self):
dataset = SolvedMilpDataset(self.db_name)
        with self.assertRaises(Exception):
            dataset.get_one(self.model)
def test_overwrite(self):
dataset = SolvedMilpDataset(self.db_name)
dataset.add(self.model, self.solution, 10, 23)
dataset.add(self.model, self.solution, 1.0, 21)
a, b = dataset.get_one(self.model)
sol = {}
for v in self.model.getVars():
val = self.solution[v]
sol[v.getIndex()] = val
self.assertEqual(a, sol)
self.assertEqual(b, 1.0)
dataset.add(self.model, self.solution, 2.0, 22)
a, b = dataset.get_one(self.model)
self.assertEqual(b, 1.0)
def test_multiple_entries(self):
dataset = SolvedMilpDataset(self.db_name2, best_solution_only=False)
dataset.add(self.model, self.solution, 50.0, 23)
dataset.add(self.model, self.solution, 10.0, 21)
dataset.add(self.model, self.solution, 2.0, 22)
expected_obj_value = 50.0
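        # get_all is expected to yield the stored entries in insertion order,
        # so the objective values come back as 50.0, then 10.0, then 2.0.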
for a, b in dataset.get_all(self.model):
sol = {}
for v in self.model.getVars():
val = self.solution[v]
sol[v.getIndex()] = val
self.assertEqual(a, sol)
self.assertEqual(b, expected_obj_value)
expected_obj_value /= 5
def test_aggregate(self):
dataset1 = SolvedMilpDataset(self.db_name)
dataset1.add(self.model, self.solution, 10, 25)
_, b = dataset1.get_one(self.model)
self.assertEqual(b, 10)
dataset2 = SolvedMilpDataset(self.db_name)
dataset2.add(self.model, self.solution, 1, 11)
dataset1.merge(dataset2)
_, b = dataset1.get_one(self.model)
self.assertEqual(b, 1)
dataset3 = SolvedMilpDataset(self.db_name)
dataset3.add(self.model, self.solution, 5, 17)
dataset1.merge(dataset3)
_, b = dataset1.get_one(self.model)
        self.assertEqual(b, 1)
if __name__ == "__main__":
    unittest.main()
 | CL-LNS-main | graph_datasets/solved_milp_dataset_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import ecole
import torch
import torch_geometric
import numpy as np
import string
import random
import os
import sys
import graph_datasets.bipartite_graph as bg
import graph_datasets.bipartite_graph_dataset as bgd
import ilp_solver
def advance_to_root_node(model):
"""Utility to advance a model to the root node."""
dyn = ecole.dynamics.BranchingDynamics()
model = dyn.reset_dynamics(model)
return model
def make_obs(obs_func, model):
"""Utility function to extract observation on root node."""
obs_func.before_reset(model)
advance_to_root_node(model)
return obs_func.extract(model, False)
class BipartiteGraphDatasetTest(unittest.TestCase):
def setUp(self):
# Create a small ILP problem
solver = ilp_solver.ILPSolver(engine="scip")
x1 = solver.create_integer_var("x1")
x2 = solver.create_integer_var("x2")
solver.add_constraint(10 * x1 + 15 * x2 >= 100)
solver.add_constraint(20 * x1 + 16 * x2 >= 160)
solver.add_constraint(17 * x1 + 11 * x2 >= 130)
# Minimize the objective
solver.set_objective_function(80 * x1 + 95 * x2, maximize=False)
scip_model = solver.as_scip_model()
self.model = ecole.scip.Model.from_pyscipopt(scip_model)
self.model.disable_presolve()
self.model.disable_cuts()
letters = string.ascii_letters
self.db = []
for i in range(6):
self.db.append('/tmp/' + ''.join(random.choice(letters) for i in range(10)))
def tearDown(self):
for db in self.db:
try:
os.remove(db)
            except OSError:
pass
def testBipartiteGraphQueries(self):
db = bgd.BipartiteGraphDataset(self.db[0], query_opt=False)
g0 = bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0])
db.add(g0)
g1 = bg.BipartiteGraph(np.array([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0])
db.add(g1)
g2 = bg.BipartiteGraph(np.array([2]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0])
db.add(g2)
t0 = db.get(0)
t1 = db.get(1)
t2 = db.get(2)
self.assertEqual(t0.constraint_features, g0.constraint_features)
self.assertEqual(t1.constraint_features, g1.constraint_features)
self.assertEqual(t2.constraint_features, g2.constraint_features)
def testBipartiteGraphIterationNoOpt(self):
db = bgd.BipartiteGraphDataset(self.db[1], query_opt=False)
db.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
db.add(bg.BipartiteGraph(np.array([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(2, db.len())
for i in range(5):
_ = db.get(i % 2)
def testBipartiteGraphIterationOpt(self):
db = bgd.BipartiteGraphDataset(self.db[2], query_opt=True)
db.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
db.add(bg.BipartiteGraph(np.array([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(2, db.len())
for i in range(5):
_ = db.get(i % 2)
def _testDuplicateEntries(self):
db = bgd.BipartiteGraphDataset(self.db[3], query_opt=True)
rslt1 = db.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
rslt2 = db.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(True, rslt1)
self.assertEqual(False, rslt2)
self.assertEqual(1, db.len())
def _testMerge(self):
db1 = bgd.BipartiteGraphDataset(self.db[4], query_opt=True)
rslt1 = db1.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(True, rslt1)
db2 = bgd.BipartiteGraphDataset(self.db[5], query_opt=True)
rslt2 = db2.add(bg.BipartiteGraph(np.array([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(True, rslt2)
rslt2 = db2.add(bg.BipartiteGraph(np.array([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(True, rslt2)
db1.merge(db2)
self.assertEqual(2, db1.len())
class BipartiteGraphDatasetTests(unittest.TestCase):
def setUp(self) -> None:
letters = string.ascii_letters
self.db = []
for i in range(6):
self.db.append('/tmp/' + ''.join(random.choice(letters) for i in range(10)))
def tearDown(self):
for db in self.db:
try:
os.remove(db)
            except OSError:
pass
def _testBipartiteGraphExtraction(self):
db1 = bgd.BipartiteGraphDataset(self.db[0])
db1.add(bg.BipartiteGraph(torch.tensor([0]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
db1.add(bg.BipartiteGraph(torch.tensor([1]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(db1.len(), 2)
db2 = bgd.BipartiteGraphDataset(self.db[1])
db2.add(bg.BipartiteGraph(torch.tensor([2]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
db2.add(bg.BipartiteGraph(torch.tensor([3]), np.array([[0], [0]]), [0], np.array([0]), [0], [0], [0], [0]))
self.assertEqual(db2.len(), 2)
db = bgd.BipartiteGraphDatasets([self.db[0], self.db[1]])
self.assertEqual(db.get(0).constraint_features, torch.tensor([0]))
self.assertEqual(db.get(1).constraint_features, torch.tensor([1]))
self.assertEqual(db.get(2).constraint_features, torch.tensor([2]))
self.assertEqual(db.get(3).constraint_features, torch.tensor([3]))
for i in range(4):
t = db.get(i)
self.assertEqual(t.constraint_features, torch.tensor([i]))
if __name__ == "__main__":
unittest.main()
| CL-LNS-main | graph_datasets/bipartite_graph_dataset_test.py |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
LineByLineTextDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
eval_data_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate=False):
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
else:
return TextDataset(
tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
tokenizer.add_special_tokens({'additional_special_tokens': ['<|belief|>', '<|endofbelief|>', '<|action|>', '<|endofaction|>', \
'<|response|>', '<|endofresponse|>', '<|context|>', '<|endofcontext|>', '<|user|>', '<|system|>', \
'<|task|>', '<|endoftask|>', '<|chitchat|>', '<|endofchitchat|>']})
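    # These tokens delimit the SimpleTOD-style training sequences produced by
    # gen_delex.py, e.g. <|context|> <|user|> ... <|endofcontext|> <|belief|> ...
    # <|endofbelief|> <|action|> ... <|endofaction|> <|response|> ... <|endofresponse|>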
if model_args.model_name_or_path:
model = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelWithLMHead.from_config(config)
model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
train_dataset = get_dataset(data_args, tokenizer=tokenizer) if training_args.do_train else None
eval_dataset = get_dataset(data_args, tokenizer=tokenizer, evaluate=True) if training_args.do_eval else None
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
prediction_loss_only=True,
)
# Training
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
result = {"perplexity": perplexity}
output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
results.update(result)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| accentor-main | run_language_modeling.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import random
import argparse
import os
def clean(x):
return x.replace("\n", "").replace("\r", "").replace("\t", " ").strip()
parser = argparse.ArgumentParser()
parser.add_argument("--data", default="./accentor-sgd/", type=str, required=False, help="path to SGD")
args = parser.parse_args()
random.seed(42)
pairs = {}
for s in ["train", "dev", "test"]:
pairs[s] = []
fns = os.listdir(args.data + s)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue") or not fn.endswith(".json"):
continue
with open(args.data + s + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
for i in range(len(data)):
t = ''
for j in range(len(data[i]["turns"])):
for ps in ["beginning", "end"]:
if ps in data[i]["turns"][j]:
for k in range(len(data[i]["turns"][j][ps])):
if data[i]["turns"][j][ps][k]["label"] == "good":
pair = [t, clean(data[i]["turns"][j][ps][k]["candidate"])]
pairs[s] += [pair]
if t != '':
t += ' '
if j % 2 == 0:
t += 'user: '
else:
t += 'system: '
t += clean(data[i]["turns"][j]["utterance"])
for s in pairs:
print(s, len(pairs[s]))
random.shuffle(pairs["train"])
for s in ["train", "dev", "test"]:
with open("parlai_"+(s if s != "dev" else "valid")+".txt", "w", encoding='utf8') as f:
for i in range(len(pairs[s])):
f.write("text:" + pairs[s][i][0] + "\t" + "labels:" + pairs[s][i][1] + "\tepisode_done:True\n")
| accentor-main | gen_parlai_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from utils import bleuscorer
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--inference", default="dev.inference.gpt2_10epoch_1e-3_fp16.json", type=str, required=False, help='inference file')
parser.add_argument("--datafolder", default="./simpletod/", type=str, required=False, help='data folder')
parser.add_argument("--predictionfolder", default="./prediction/", type=str, required=False, help='prediction folder')
parser.add_argument("--split", default="dev", type=str, required=False, help="[dev,test]")
args = parser.parse_args()
inference = args.inference
datafolder = args.datafolder
predictionfolder = args.predictionfolder
folder = args.split + "/"
if inference.endswith(".txt"):
with open(inference, "r") as f:
predict = f.read().strip().split("\n")
predict = [a.strip() for a in predict]
else:
with open(inference, "r") as f:
predict = json.load(f)
idx = 0
cnt = 0
seen_services = set()
with open(datafolder + "train/" + "schema.json", "r") as f:
schema = json.load(f)
for i in range(len(schema)):
seen_services.add(schema[i]["service_name"])
domain_slots = set()
with open(datafolder + folder + "schema.json", "r") as f:
schema = json.load(f)
for i in range(len(schema)):
for j in range(len(schema[i]["slots"])):
assert(" " not in schema[i]["slots"][j])
domain_slots.add(schema[i]["service_name"].split("_")[0].lower() + " " + schema[i]["slots"][j]["name"].lower())
fns = os.listdir(datafolder + folder)
fns.sort()
act_precision = []
act_recall = []
seen_act_precision = []
seen_act_recall = []
unseen_act_precision = []
unseen_act_recall = []
bleu = []
bleua = []
bleub = []
seenbleu = []
seenbleua = []
seenbleub = []
unseenbleu = []
unseenbleua = []
unseenbleub = []
for fn in fns:
if not fn.startswith("dialogue"):
continue
if fn.startswith("dialogues_and_metrics.json"):
continue
with open(datafolder + folder + fn, "r") as f:
data = json.load(f)
for i in range(len(data)):
for j in range(1, len(data[i]["turns"]), 2):
cnt += 1
if idx >= len(predict):
continue
belief = predict[idx].split("<|belief|>")
if len(belief) >= 2 and "<|endofbelief|>" in belief[1]:
belief = belief[1].split("<|endofbelief|>")[0].strip()
else:
belief = ""
action = predict[idx].split("<|action|>")
if len(action) >= 2 and "<|endofaction|>" in action[1]:
action = action[1].split("<|endofaction|>")[0].strip()
else:
action = ""
response = predict[idx].split("<|response|>")
if len(response) >= 2:
response = response[1].split("<|")[0].strip()
else:
response = ""
data[i]["turns"][j]["response"] = response
seen = True
for k in range(len(data[i]["turns"][j-1]["frames"])):
if data[i]["turns"][j-1]["frames"][k]["service"] not in seen_services:
seen = False
parsedbelief = belief.split(", ")
for k in range(len(parsedbelief)):
parsed = False
for ds in domain_slots:
if parsedbelief[k].startswith(ds):
parsedbelief[k] = [ds, parsedbelief[k][len(ds):].strip()]
parsed = True
break
if not parsed:
parsedbelief[k] = [parsedbelief[k]]
k = 1
while k < len(parsedbelief):
if len(parsedbelief[k]) == 1:
parsedbelief[k-1] += parsedbelief[k]
del parsedbelief[k]
else:
k += 1
if len(parsedbelief) >= 1:
if parsedbelief[0][0] not in domain_slots:
del parsedbelief[0]
parsedbelief = {x[0]:x[1:] for x in parsedbelief}
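                # Example (hypothetical values): the belief string
                # "restaurants city San Jose, restaurants cuisine Mexican"
                # parses into {"restaurants city": ["San Jose"],
                #              "restaurants cuisine": ["Mexican"]}.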
parsedaction = action.split(", ")
for k in range(len(parsedaction)):
parsedaction[k] = parsedaction[k].strip().split()
k = 0
while k < len(parsedaction):
if len(parsedaction[k]) <= 1 or len(parsedaction[k]) > 3:
del parsedaction[k]
else:
k += 1
act_gt = set()
for k in range(len(data[i]["turns"][j]["frames"][0]["actions"])):
act_gt.add((data[i]["turns"][j]["frames"][0]["actions"][k]["act"].lower() + " " + data[i]["turns"][j]["frames"][0]["actions"][k]["slot"]).strip())
act_p = set()
for k in range(len(parsedaction)):
act_p.add(' '.join(parsedaction[k][1:]))
act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
if seen:
seen_act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
seen_act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
else:
unseen_act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
unseen_act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
bleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
bleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
bleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
if seen:
seenbleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
seenbleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
seenbleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
else:
unseenbleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
unseenbleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
unseenbleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
for k in range(len(data[i]["turns"][j-1]["frames"])):
data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"] = {}
for ds in parsedbelief:
if ds.split()[0].lower() == data[i]["turns"][j-1]["frames"][k]["service"].split("_")[0].lower():
data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"][ds.split()[1]] = parsedbelief[ds]
idx += 1
if not os.path.exists(predictionfolder + folder):
os.makedirs(predictionfolder + folder)
with open(predictionfolder + folder + fn, "w") as f:
json.dump(data, f, indent=1)
act_precision = sum(act_precision) / len(act_precision)
act_recall = sum(act_recall) / len(act_recall)
print("act", act_precision, act_recall, 2*act_precision*act_recall/(act_precision+act_recall))
print('bleu:', sum(bleu)/len(bleu)) #BLEU-4_{orig}
print('bleua:', sum(bleua)/len(bleua)) #BLEU-4_{aug}
#print('bleub:', sum(bleub)/len(bleub))
seen_act_precision = sum(seen_act_precision) / len(seen_act_precision)
seen_act_recall = sum(seen_act_recall) / len(seen_act_recall)
print("act (seen):", seen_act_precision, seen_act_recall, 2*seen_act_precision*seen_act_recall/(seen_act_precision+seen_act_recall))
unseen_act_precision = sum(unseen_act_precision) / len(unseen_act_precision)
unseen_act_recall = sum(unseen_act_recall) / len(unseen_act_recall)
print("act (unseen):", unseen_act_precision, unseen_act_recall, 2*unseen_act_precision*unseen_act_recall/(unseen_act_precision+unseen_act_recall))
print('bleu (seen):', sum(seenbleu)/len(seenbleu))
print('bleua (seen):', sum(seenbleua)/len(seenbleua))
#print('bleub (seen):', sum(seenbleub)/len(seenbleub))
print('bleu (unseen):', sum(unseenbleu)/len(unseenbleu))
print('bleua (unseen):', sum(unseenbleua)/len(unseenbleua))
#print('bleub (unseen):', sum(unseenbleub)/len(unseenbleub))
if __name__ == '__main__':
main()
| accentor-main | gen_predict.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import copy
import random
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--all", default=False, type=bool, required=False, help="use all dialogues rather than only augmented dialogues")
parser.add_argument("--delexlevel", default=2, type=int, required=False, help="0: no delex; 1: delex values in \"slots\"; 2: delex values in both \"slots\" and \"actions\"")
parser.add_argument("--data", default="./accentor-sgd/", type=str, required=False, help="path to SGD")
parser.add_argument("--target", default="./simpletod/", type=str, required=False, help="path to output")
args = parser.parse_args()
datafolder = args.data
targetfolder = args.target
for folder in ["train", "dev", "test"]:
if not os.path.exists(targetfolder + folder):
os.makedirs(targetfolder + folder)
inlm = []
inlme = []
inlma = []
inlmb = []
incc = []
inlmf = []
fns = os.listdir(datafolder + folder)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue"):
with open(datafolder + folder + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
with open(targetfolder + folder + "/" + fn, "w", encoding='utf8') as f:
json.dump(data, f, indent=1)
continue
with open(datafolder + folder + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
i = 0
while i < len(data):
dbs = []
slots = {}
canmap = {}
vmap = {}
for j in range(len(data[i]["turns"])):
if data[i]["turns"][j]["speaker"] != "SYSTEM":
continue
if "service_results" in data[i]["turns"][j]["frames"][0]:
dbs += data[i]["turns"][j]["frames"][0]["service_results"]
if len(data[i]["turns"][j]["frames"][0]["slots"]) != 0:
slots = {}
for k in range(len(data[i]["turns"][j]["frames"][0]["actions"])):
assert(len(data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"]) == len(data[i]["turns"][j]["frames"][0]["actions"][k]["values"]))
for l in range(len(data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"])):
canmap[data[i]["turns"][j]["frames"][0]["actions"][k]["values"][l]] = data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"][l]
vmap[data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"][l]] = data[i]["turns"][j]["frames"][0]["actions"][k]["values"][l]
for k in range(len(data[i]["turns"][j]["frames"][0]["slots"])):
s = data[i]["turns"][j]["frames"][0]["slots"][k]["slot"]
slots[s] = data[i]["turns"][j]["utterance"][data[i]["turns"][j]["frames"][0]["slots"][k]["start"]:data[i]["turns"][j]["frames"][0]["slots"][k]["exclusive_end"]]
db = {}
for k in range(len(dbs)):
matched = True
for s in slots:
if s not in dbs[k]:
matched = False
break
if dbs[k][s] != canmap[slots[s]]:
matched = False
break
if matched:
db = copy.deepcopy(dbs[k])
for s in db:
if db[s] in vmap:
db[s] = vmap[db[s]]
break
data[i]["turns"][j]["frames"][0]["selecteddbslots"] = slots
data[i]["turns"][j]["frames"][0]["selecteddb"] = db
for j in range(1, len(data[i]["turns"]), 2):
domain = data[i]["turns"][j]["frames"][0]["service"].split("_")[0].lower()
assert(data[i]["turns"][j]["speaker"] == "SYSTEM")
assert(len(data[i]["turns"][j]["frames"]) == 1)
slots = copy.deepcopy(data[i]["turns"][j]["frames"][0]["slots"])
slots.sort(key = lambda x : -x["start"])
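                    # Sorting spans by descending start offset lets the
                    # replacements below run right-to-left, so earlier
                    # character offsets remain valid after each substitution.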
delex = data[i]["turns"][j]["utterance"]
delexed = set()
if args.delexlevel >= 1:
for k in range(1, len(slots)):
assert(slots[k-1]["start"] >= slots[k]["exclusive_end"])
for k in range(len(slots)):
domain_slot = domain + "_" + slots[k]["slot"]
delex = delex[:slots[k]["start"]] + "[" + domain_slot + "]" + delex[slots[k]["exclusive_end"]:]
delexed.add(domain_slot)
if args.delexlevel >= 2:
slots2 = copy.deepcopy(data[i]["turns"][j]["frames"][0]["actions"])
slots2 = [x for x in slots2 if len(x["values"]) > 0]
slots2.sort(key = lambda x : -len(x["values"][0]))
for k in range(len(slots2)):
domain_slot = domain + "_" + slots2[k]["slot"]
if domain_slot in delexed:
continue
for l in range(len(slots2[k]["values"])):
delex = delex.replace(slots2[k]["values"][l], "[" + domain_slot + "]")
delexed.add(domain_slot)
data[i]["turns"][j]["delex"] = delex
target = ''
belief = []
for k in range(len(data[i]["turns"][j-1]["frames"])):
for slot in data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"]:
belief += [[data[i]["turns"][j-1]["frames"][k]["service"].split("_")[0].lower(), slot, data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"][slot]]]
belief.sort(key = lambda x : x[0] + " " + x[1])
for k in range(len(belief)):
belief[k][2].sort()
belief[k][2] = belief[k][2][0]
belief = [x[0] + " " + x[1] + " " + x[2] for x in belief]
target += '<|belief|> ' + ", ".join(belief) + ' <|endofbelief|> '
action = copy.deepcopy(data[i]["turns"][j]["frames"][0]["actions"])
action.sort(key = lambda x : x["act"])
action = [domain + " " + x["act"].lower() + " " + x["slot"] for x in action]
targetaug = []
delexaug = []
tcpos = []
tcneg = []
for k in range(len(data[i]["turns"][j]["beginning"])):
if "social" in data[i]["turns"][j]["beginning"][k]["justification"] or "useful" in data[i]["turns"][j]["beginning"][k]["justification"]:
delexaug += [data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' ' + delex]
targetaug += [target + '<|action|> ' + "chitchat, " + ", ".join(action) + ' <|endofaction|> ' + '<|response|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' ' + delex + ' <|endofresponse|>']
tcpos += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' <|endofchitchat|> ']
else:
tcneg += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' <|endofchitchat|> ']
for k in range(len(data[i]["turns"][j]["end"])):
if "social" in data[i]["turns"][j]["end"][k]["justification"] or "useful" in data[i]["turns"][j]["end"][k]["justification"]:
delexaug += [delex + ' ' + data[i]["turns"][j]["end"][k]["candidate"].strip()]
targetaug += [target + '<|action|> ' + ", ".join(action) + ", chitchat" + ' <|endofaction|> ' + '<|response|> ' + delex + ' ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofresponse|>']
tcpos += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofchitchat|> ']
else:
tcneg += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofchitchat|> ']
target += '<|action|> ' + ", ".join(action) + ' <|endofaction|> '
target += '<|response|> ' + delex + ' <|endofresponse|>'
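                    # Example target (hypothetical values):
                    #   <|belief|> restaurants city San Jose <|endofbelief|>
                    #   <|action|> restaurants inform city <|endofaction|>
                    #   <|response|> It is in [restaurants_city]. <|endofresponse|>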
data[i]["turns"][j]["target"] = target
data[i]["turns"][j]["targetaug"] = targetaug
data[i]["turns"][j]["delexaug"] = delexaug
context = '<|context|> '
for k in range(j):
if k % 2 == 0:
context += '<|user|> '
else:
context += '<|system|> '
context += data[i]["turns"][k]["utterance"] + " "
context += '<|endofcontext|>'
data[i]["turns"][j]["context"] = context
inlm += [(context + target).replace("\n", " ").replace("\r", "")]
assert("\n" not in inlm[-1])
inlme += [(context).replace("\n", " ").replace("\r", "")]
if len(targetaug) != 0:
for k in range(len(targetaug)):
inlma += [(context + targetaug[k]).replace("\n", " ").replace("\r", "")]
inlmb += [(context + targetaug[k]).replace("\n", " ").replace("\r", "")]
inlmf += [(context + tcpos[k] + targetaug[k]).replace("\n", " ").replace("\r", "")]
for l in range(len(tcneg)):
inlmf += [(context + tcneg[l] + targetaug[k]).replace("\n", " ").replace("\r", "")]
else:
inlmb += [(context + target).replace("\n", " ").replace("\r", "")]
for k in range(len(tcneg)):
inlmf += [(context + tcneg[k] + target).replace("\n", " ").replace("\r", "")]
incc += [context.replace('<|context|>', '').replace('<|endofcontext|>', '').replace('<|user|>', 'user:').replace('<|system|>', 'system:').replace('\t', ' ').strip(), '[DONE]']
i += 1
with open(targetfolder + folder + "/" + fn, "w") as f:
json.dump(data, f, indent=1)
random.shuffle(inlm)
with open("lm.input."+folder+".txt", "w", encoding='utf8') as f: #SimpleTOD
f.write('\n'.join(inlm))
with open("lm.input."+folder+".eval.txt", "w", encoding='utf8') as f: #used as the input during evaluation of SimpleTOD and SimpleTOD extension
f.write('\n'.join(inlme))
with open("lm.input."+folder+".aug.txt", "w", encoding='utf8') as f: #SimpleTOD extension (augmented responses only)
f.write('\n'.join(inlma))
with open("lm.input."+folder+".both.txt", "w", encoding='utf8') as f: #SimpleTOD extension (all responses)
f.write('\n'.join(inlmb))
with open("lm.input."+folder+".cc.txt", "w", encoding='utf8') as f: #cc: chitchat
f.write('\n'.join(incc+['[EXIT]']))
with open("lm.input."+folder+".ff.txt", "w", encoding='utf8') as f: #ff: free-form
f.write('\n'.join(inlmf))
if __name__ == '__main__':
random.seed(42)
main()
| accentor-main | gen_delex.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
import argparse
import numpy as np
import json
from tqdm import tqdm
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
parser = argparse.ArgumentParser()
parser.add_argument("--no_cuda", action="store_true", help="avoid using CUDA when available")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--model_name_or_path", type=str, default="output", help="path to pre-trained model or shortcut name")
parser.add_argument("--input", type=str, help="input text file, each line corresponding to one instance")
parser.add_argument("--output", type=str, help="output file")
parser.add_argument("--eos_token_id", type=int, default=None, help="eos token id")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--jobid", type=int, default=0, help="jobid")
parser.add_argument("--jobnum", type=int, default=1, help="jobnum")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
set_seed(args)
model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path, pad_token='<PAD>')
model.to(args.device)
with open(args.input, "r") as f:
prompts = f.read().strip().split("\n")
batch_size = args.batch_size
ret = []
for batch in tqdm(range(args.jobid, len(prompts), batch_size * args.jobnum)):
prompt_text = prompts[batch: batch+batch_size]
encodings_dict = tokenizer.batch_encode_plus(prompt_text, max_length=None, pad_to_max_length=True)
input_ids = torch.tensor(encodings_dict['input_ids'])
attn_mask = torch.tensor(encodings_dict['attention_mask'])
seq_len = len(input_ids[0])
num_tokens_to_produce = 1024 - seq_len
pad_token_id = tokenizer.pad_token_id
eos_token_id = args.eos_token_id
if eos_token_id is None:
eos_token_id = tokenizer.eos_token_id
eos_not_in_sents = torch.ones(input_ids.shape[0]).long()
last_non_masked_idx = torch.sum(attn_mask, dim=1) - 1
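    # For right-padded batches, the first generated token must be predicted
    # from each sequence's last non-pad position; start_idx replicates that
    # index across the vocabulary dimension so it can be used with gather()
    # on the first decoding step.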
    start_idx = last_non_masked_idx.view(-1, 1).repeat(1, tokenizer.vocab_size + len(tokenizer.additional_special_tokens)).unsqueeze(1)
past = None
position_ids = torch.tensor([list(range(seq_len)) for i in range(input_ids.shape[0])])
for i, position_ids_slice in enumerate(position_ids):
position_ids_slice[last_non_masked_idx[i]:] = position_ids_slice[last_non_masked_idx[i]]
input_ids = input_ids.to(args.device)
attn_mask = attn_mask.to(args.device)
eos_not_in_sents = eos_not_in_sents.to(args.device)
start_idx = start_idx.to(args.device)
position_ids = position_ids.to(args.device)
for step in range(num_tokens_to_produce):
outputs = model(input_ids, attention_mask=attn_mask, position_ids=position_ids)
if step == 0:
next_token_logits = outputs[0].gather(1, start_idx).squeeze(1)
else:
next_token_logits = outputs[0][:, -1, :]
next_tokens = torch.argmax(next_token_logits, dim=-1)
eos_not_in_sents.mul_(next_tokens.ne(eos_token_id).long())
tokens_to_add = next_tokens * (eos_not_in_sents) + pad_token_id * (1 - eos_not_in_sents)
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
attn_mask = torch.cat([attn_mask, torch.ones((attn_mask.shape[0], 1)).long().to(args.device)], dim=1)
position_ids = torch.cat([position_ids, (position_ids[:, -1] + 1).unsqueeze(-1)], dim=1)
if torch.max(eos_not_in_sents) == 0:
break
ret += [tokenizer.decode(output, skip_special_tokens=False, clean_up_tokenization_spaces=True).replace("<|endoftext|>", "") for output in input_ids]
with open(args.output, "w") as f:
json.dump(ret, f, indent=1)
| accentor-main | run_generation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import random
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--data", default="./simpletod/", type=str, required=False, help="path to delexed & augmented SGD")
args = parser.parse_args()
def clean(x):
return x.replace("\n", "").replace("\r", "").replace("\t", " ").strip()
random.seed(42)
pairs = {}
pos = {}
tot = {}
for s in ["train", "dev", "test"]:
pairs[s] = []
pos[s] = 0
tot[s] = 0
fns = os.listdir(args.data + s)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue") or not fn.endswith(".json"):
continue
with open(args.data + s + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
for i in range(len(data)):
t = ''
for j in range(len(data[i]["turns"])):
for ps in ["beginning", "end"]:
if ps in data[i]["turns"][j]:
for k in range(len(data[i]["turns"][j][ps])):
tot[s] += 1
if data[i]["turns"][j][ps][k]["label"] == "good":
pair = [t, data[i]["turns"][j]["delex"], clean(data[i]["turns"][j][ps][k]["candidate"]), 1 if ps == "beginning" else 2]
pairs[s] += [pair]
pos[s] += 1
else:
pair = [t, data[i]["turns"][j]["delex"], clean(data[i]["turns"][j][ps][k]["candidate"]), 0]
pairs[s] += [pair]
if t != '':
t += ' '
if j % 2 == 0:
t += 'user: '
else:
t += 'system: '
t += clean(data[i]["turns"][j]["utterance"])
for s in pos:
print(s, pos[s], tot[s], pos[s]/tot[s])
for s in pairs:
print(s, len(pairs[s]))
random.shuffle(pairs["train"])
with open("arranger_input.json", "w", encoding='utf8') as f:
json.dump(pairs, f, indent=1)
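# Each stored pair is [dialogue context, delexed system response, chit-chat
# candidate, label], where label 0 = bad candidate, 1 = good at the beginning
# of the response, and 2 = good at the end.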
| accentor-main | gen_arranger_input.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import nltk
def bleuscorer(hyps, refs):
#print(hyps, refs)
bleu = []
for hyp, ref in zip(hyps, refs):
hyp = hyp.split()
ref = [a.split() for a in ref]
#hyp = nltk.word_tokenize(hyp)
#ref = [nltk.word_tokenize(a) for a in ref]
bleu += [nltk.translate.bleu_score.sentence_bleu(ref, hyp)]
return sum(bleu) / len(bleu)
if __name__ == '__main__':
print(bleuscorer(['the the the the the the the', 'there is a cat', 'it is'], [["the cat is on the mat", "there is a cat on the mat"], ["there is a cat on the mat"], ["it is true"]]))
| accentor-main | utils.py |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForMultipleChoice, BertTokenizer,
RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer)
from transformers import AdamW, get_linear_schedule_with_warmup
import torch.nn as nn
from utils_multiple_choice import (convert_examples_to_features, processors)
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, RobertaConfig)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForMultipleChoice, BertTokenizer),
'roberta': (RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer),
}
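# Example invocation (script name, paths, and task name are placeholders; the
# required flags match the argument parser in main() below):
#   python run_multiple_choice.py --model_type roberta --model_name_or_path roberta-base \
#     --task_name <task> --data_dir ./data --output_dir ./output --do_train --do_eval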
def select_field(features, field):
return [
[
choice[field]
for choice in feature.choices_features
]
for feature in features
]
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
best_dev_acc, best_dev_loss = 0.0, 99999999999.0
best_steps = 0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert'] else None,
'labels': batch[3]}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
if results["eval_acc"] > best_dev_acc:
best_dev_acc = results["eval_acc"]
best_dev_loss = results["eval_loss"]
best_steps = global_step
if args.do_test:
results_test = evaluate(args, model, tokenizer, test=True)
for key, value in results_test.items():
tb_writer.add_scalar('test_{}'.format(key), value, global_step)
logger.info("test acc: %s, loss: %s, global steps: %s", str(results_test['eval_acc']), str(results_test['eval_loss']), str(global_step))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
logger.info("Average loss: %s at global step: %s", str((tr_loss - logging_loss)/args.logging_steps), str(global_step))
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_vocabulary(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step, best_steps
def evaluate(args, model, tokenizer, prefix="", test=False):
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=not test, test=test)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert'] else None,
'labels': batch[3]}
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
output_logits_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_logits.txt")
with open(output_logits_file, "w") as writer:
logits_list = list(preds)
for i in range(len(logits_list)):
for j in range(len(logits_list[i])):
writer.write(str(logits_list[i][j]))
if j == len(logits_list[i]) - 1:
writer.write("\n")
else:
writer.write(" ")
preds = np.argmax(preds, axis=1)
acc = simple_accuracy(preds, out_label_ids)
result = {"eval_acc": acc, "eval_loss": eval_loss}
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(str(prefix) + " is test:" + str(test)))
writer.write("model =%s\n" % str(args.model_name_or_path))
writer.write("total batch size=%d\n" % (args.per_gpu_train_batch_size * args.gradient_accumulation_steps *
(torch.distributed.get_world_size() if args.local_rank != -1 else 1)))
writer.write("train num epochs=%d\n" % args.num_train_epochs)
writer.write("fp16 =%s\n" % args.fp16)
writer.write("max seq length =%d\n" % args.max_seq_length)
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False, test=False):
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
processor = processors[task]()
# Load data features from cache or dataset file
if evaluate:
cached_mode = 'dev'
elif test:
cached_mode = 'test'
else:
cached_mode = 'train'
    assert not (evaluate and test)
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
cached_mode,
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if evaluate:
examples = processor.get_dev_examples(args.data_dir)
elif test:
examples = processor.get_test_examples(args.data_dir)
else:
examples = processor.get_train_examples(args.data_dir)
logger.info("Training number: %s", str(len(examples)))
features = convert_examples_to_features(
examples,
label_list,
args.max_seq_length,
tokenizer,
pad_on_left=False,
pad_token_segment_id=0
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long)
all_label_ids = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true', help='Whether to run test on the test set')
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
best_steps = 0
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss, best_steps = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
if not args.do_train:
args.output_dir = args.model_name_or_path
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
if args.do_test and args.local_rank in [-1, 0]:
if not args.do_train:
args.output_dir = args.model_name_or_path
checkpoints = [args.output_dir]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix, test=True)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
if best_steps:
logger.info("best steps of eval acc is the following checkpoints: %s", best_steps)
return results
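# Example invocation (paths, model choice, and hyper-parameters below are
# illustrative assumptions; "acc" is the arranger task registered in
# utils_multiple_choice.py):
#
#   python run_multiple_choice.py \
#       --data_dir . --task_name acc \
#       --model_type roberta --model_name_or_path roberta-base \
#       --output_dir ./acc_arranger_roberta_base_3epoch \
#       --do_train --do_eval --do_test --do_lower_case \
#       --per_gpu_train_batch_size 8 --learning_rate 5e-5 \
#       --num_train_epochs 3.0 --overwrite_output_dir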
if __name__ == "__main__":
main()
| accentor-main | run_multiple_choice.py |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import os
import sys
from io import open
import json
import csv
import glob
import tqdm
from typing import List
from transformers import PreTrainedTokenizer
import random
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for multiple choice"""
def __init__(self, example_id, question, contexts, endings, label=None):
"""Constructs a InputExample.
Args:
example_id: Unique id for the example.
contexts: list of str. The untokenized text of the first sequence (context of corresponding question).
question: string. The untokenized text of the second sequence (question).
            endings: list of str. The multiple-choice options. Its length must equal the length of contexts.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.example_id = example_id
self.question = question
self.contexts = contexts
self.endings = endings
self.label = label
class InputFeatures(object):
def __init__(self,
example_id,
choices_features,
label
):
self.example_id = example_id
self.choices_features = [
{
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
for input_ids, input_mask, segment_ids in choices_features
]
self.label = label
class DataProcessor(object):
"""Base class for data converters for multiple choice data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
class ACCProcessor(DataProcessor):
def __init__(self):
self.D = [[], [], []]
datasetfile = "arranger_input.json"
with open(datasetfile, "r") as f:
data = json.load(f)
for sid in range(2):
dt = ["train", "dev"][sid]
for i in range(len(data[dt])):
d = [data[dt][i][0].lower(), data[dt][i][1].lower(), data[dt][i][2].lower(), data[dt][i][3]]
self.D[sid] += [d]
sid = 2
for fns in [["lm.input.dev.cc.txt", "lm.output.dev.cc.txt", "dev.inference.gpt2_10epoch_1e-3_fp16.json"],
["lm.input.test.cc.txt", "lm.output.test.cc.txt", "test.inference.gpt2_10epoch_1e-3_fp16.json"]]:
with open(fns[0], "r") as f:
data = f.read().split("\n")[0:-1:2]
data_d = data
with open(fns[1], "r") as f:
data = f.read()
data = data.split("[TransformerGenerator]:")[1:]
for i in range(len(data)):
data[i] = data[i].split("\n")[0].strip()
data_cc = data
with open(fns[2], "r") as f:
data = json.load(f)
for i in range(len(data)):
data[i] = data[i].split("<|response|>")
if len(data[i]) == 1:
data[i] += ['']
elif len(data[i]) > 2:
data[i] = ["<|response|>".join(data[i][:-2]), data[i][-1]]
self.D[2] += [[data_d[i].strip(), data[i][1], data_cc[i].strip(), 0]]
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return ["0", "1", "2"]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
acc_id = "%s-%d" % (set_type, i)
examples.append(
InputExample(
example_id=acc_id,
question="",
contexts=[data[i][0], data[i][0], data[i][0]],
endings=[data[i][1], data[i][2] + " " + data[i][1], data[i][1] + " " + data[i][2]],
label=str(data[i][3])))
return examples
def convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
pad_token_segment_id=0,
pad_on_left=False,
pad_token=0,
mask_padding_with_zero=True,
) -> List[InputFeatures]:
"""
Loads a data file into a list of `InputFeatures`
"""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
choices_features = []
for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):
text_a = context
if example.question.find("_") != -1:
text_b = example.question.replace("_", ending)
else:
text_b = example.question + " " + ending
inputs = tokenizer.encode_plus(
text_a,
text_b,
add_special_tokens=True,
max_length=max_length,
)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length
assert len(attention_mask) == max_length
assert len(token_type_ids) == max_length
choices_features.append((input_ids, attention_mask, token_type_ids))
label = label_map[example.label]
if ex_index < 2:
logger.info("*** Example ***")
logger.info("race_id: {}".format(example.example_id))
for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features):
logger.info("choice: {}".format(choice_idx))
logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
logger.info("attention_mask: {}".format(' '.join(map(str, attention_mask))))
logger.info("token_type_ids: {}".format(' '.join(map(str, token_type_ids))))
logger.info("label: {}".format(label))
features.append(
InputFeatures(
example_id=example.example_id,
choices_features=choices_features,
label=label,
)
)
return features
processors = {
"acc": ACCProcessor,
}
MULTIPLE_CHOICE_TASKS_NUM_LABELS = {
    "acc": 3,
}
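if __name__ == "__main__":
    # Minimal usage sketch of the feature-conversion pipeline. The tokenizer
    # name and the toy dialogue below are illustrative assumptions, not data
    # shipped with this repository.
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    demo = InputExample(
        example_id="demo-0",
        question="",
        contexts=["i need a cheap hotel ."] * 3,
        endings=[
            "sure , i can help .",
            "hotels are fun ! sure , i can help .",
            "sure , i can help . hotels are fun !",
        ],
        label="0",
    )
    feats = convert_examples_to_features([demo], ["0", "1", "2"], 64, tokenizer)
    print(len(feats[0].choices_features), "choices encoded,",
          len(feats[0].choices_features[0]["input_ids"]), "tokens each")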
| accentor-main | utils_multiple_choice.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
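# For each dev/test turn, this script stitches together the dialogue context,
# the predicted task-oriented response and belief state (delimited by
# <|task|>, <|belief|>, etc.), and the decoded chit-chat candidate,
# producing one input line per turn for the rewriter model.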
for fns in [["./lm.input.dev.eval.txt", "./lm.output.dev.cc.txt", "./dev.inference.gpt2_10epoch_1e-3_fp16.json", "lm.input.dev.eval.ff.txt"],
["./lm.input.test.eval.txt", "./lm.output.test.cc.txt", "./test.inference.gpt2_10epoch_1e-3_fp16.json", "lm.input.test.eval.ff.txt"]]:
with open(fns[0], "r", encoding='utf8') as f:
context = f.read().strip().split("\n")
with open(fns[1], "r", encoding='utf8') as f:
cc = f.read().strip()
cc = cc.split("[TransformerGenerator]:")[1:]
for i in range(len(cc)):
cc[i] = cc[i].split("\n")[0].strip()
with open(fns[2], "r", encoding='utf8') as f:
task = json.load(f)
print(len(context), len(cc), len(task))
assert(len(context) == len(cc))
assert(len(cc) == len(task))
with open(fns[3], "w", encoding='utf8') as f:
for i in range(len(cc)):
t = task[i].split("<|response|>")
if len(t) >= 2:
t = t[-1].strip()
else:
t = ""
b = task[i].split("<|belief|>")
if len(b) >= 2:
b = b[1].split("<|endofbelief|>")
if len(b) == 2:
b = b[0]
else:
b = ""
else:
b = ""
f.write(context[i] + " <|task|> " + t + " <|endoftask|> <|chitchat|> " + cc[i] + ' <|endofchitchat|> <|belief|>' + b + "<|endofbelief|>\n")
| accentor-main | gen_rewriter_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
with open("./acc_arranger_roberta_base_3epoch/is_test_true_eval_logits.txt", "r") as f:
model_outputs = f.read().strip().split("\n")
for i in range(len(model_outputs)):
model_outputs[i] = model_outputs[i].split()
for j in range(len(model_outputs[i])):
model_outputs[i][j] = float(model_outputs[i][j])
assert(len(model_outputs[i]) == 3)
print(len(model_outputs))
for fns in [["./lm.input.dev.cc.txt", "./lm.output.dev.cc.txt", "./dev.inference.gpt2_10epoch_1e-3_fp16.json", "./dev.inference.arranger_3epoch.json"],
["./lm.input.test.cc.txt", "./lm.output.test.cc.txt", "./test.inference.gpt2_10epoch_1e-3_fp16.json", "./test.inference.arranger_3epoch.json"]]:
with open(fns[0], "r") as f:
data = f.read().split("\n")[0:-1:2]
print(len(data))
data_d = data
with open(fns[1], "r") as f:
data = f.read()
data = data.split("[TransformerGenerator]:")[1:]
for i in range(len(data)):
data[i] = data[i].split("\n")[0].strip()
print(len(data))
data_cc = data
with open(fns[2], "r") as f:
data = json.load(f)
print(len(data))
eval_data = []
for i in range(len(data)):
data[i] = data[i].split("<|response|>")
if len(data[i]) == 1:
data[i] += ['']
elif len(data[i]) > 2:
data[i] = ["<|response|>".join(data[i][:-2]), data[i][-1]]
eval_data += [[data_d[i].strip(), data[i][1], data_cc[i].strip(), 0]]
print(len(eval_data))
stats = {0:0, 1:0, 2:0}
for i in range(len(data)):
assert(len(model_outputs[i]) == 3)
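        # Arranger prediction: 0 = keep the task response as-is,
        # 1 = prepend the chit-chat candidate, 2 = append it (applied below).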
o = 0
for j in range(1, 3):
if model_outputs[i][j] > model_outputs[i][o]:
o = j
stats[o] += 1
if o == 0:
data[i] = "<|response|>".join(data[i])
elif o == 1:
data[i] = data[i][0] + "<|response|> " + data_cc[i].strip() + " " + data[i][1].strip()
else:
data[i] = data[i][0] + "<|response|> " + data[i][1].strip() + " " + data_cc[i].strip()
print(len(data), len(model_outputs))
print(stats)
model_outputs = model_outputs[len(data):]
with open(fns[3], "w", encoding='utf8') as f:
json.dump(data, f, indent=1)
| accentor-main | gen_arranger_output.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--source", default="./MultiWOZ_2.1/data.json", type=str, required=False, help="Path to the MultiWOZ dataset.")
args = parser.parse_args()
with open("candidates-multiwoz.json", "r", encoding='utf8') as f:
augmentation = json.load(f)
with open(args.source, "r", encoding='utf8') as f:
data = json.load(f)
data = {x:data[x] for x in data if x in augmentation}
for x in data:
for i in range(1, len(data[x]["log"]), 2):
data[x]["log"][i]["beginning"] = []
data[x]["log"][i]["end"] = []
for cc in augmentation[x]:
data[x]["log"][cc[0]][cc[1]] += [{"candidate": cc[2], "label": cc[3], "justification": cc[4]}]
with open("accentor-multiwoz-1k.json", "w", encoding='utf8') as f:
json.dump(data, f, indent=1, ensure_ascii=False)
| accentor-main | v1.0/accentor-multiwoz.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import argparse
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--source", default="./dstc8-schema-guided-dialogue", type=str, required=False, help="Path to the SGD dataset.")
parser.add_argument("--target", default="./accentor-sgd", type=str, required=False, help="The target directory to store ACCENTOR-SGD.")
args = parser.parse_args()
with open("candidates-sgd.json", "r", encoding='utf8') as f:
augmentation = json.load(f)
for subdir in ["train", "dev", "test"]:
targetdir = os.path.join(args.target, subdir)
sourcedir = os.path.join(args.source, subdir)
os.makedirs(targetdir, exist_ok=True)
fns = os.listdir(sourcedir)
for fn in fns:
if not fn.endswith(".json"):
continue
with open(os.path.join(sourcedir, fn), "r", encoding='utf8') as f:
data = json.load(f)
if fn.startswith("dialogue"):
for i in range(len(data)):
for j in range(1, len(data[i]["turns"]), 2):
data[i]["turns"][j]["beginning"] = []
data[i]["turns"][j]["end"] = []
for cc in augmentation[subdir + data[i]["dialogue_id"]]:
data[i]["turns"][cc[0]][cc[1]] += [{"candidate": cc[2], "label": cc[3], "justification": cc[4]}]
with open(os.path.join(targetdir, fn), "w", encoding='utf8') as f:
json.dump(data, f, indent=1, ensure_ascii=False)
| accentor-main | v1.0/accentor-sgd.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from Slimmable - https://github.com/JiahuiYu/slimmable_networks
import torch
class CrossEntropyLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification """
def forward(self, output, target):
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
cross_entropy_loss = -torch.bmm(target, output_log_prob)
return cross_entropy_loss.mean()
class KLLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification
output: output logits of the student network
target: output logits of the teacher network
T: temperature
KL(p||q) = Ep \log p - \Ep log q
"""
def forward(self, output, soft_logits, target=None, temperature=1., alpha=0.9):
output, soft_logits = output / temperature, soft_logits / temperature
soft_target_prob = torch.nn.functional.softmax(soft_logits, dim=1)
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
kd_loss = -torch.sum(soft_target_prob * output_log_prob, dim=1)
if target is not None:
n_class = output.size(1)
target = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
ce_loss = -torch.bmm(target, output_log_prob).squeeze()
            loss = alpha * temperature * temperature * kd_loss + (1.0 - alpha) * ce_loss
else:
loss = kd_loss
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
class CrossEntropyLossSmooth(torch.nn.modules.loss._Loss):
def __init__(self, label_smoothing=0.1):
super(CrossEntropyLossSmooth, self).__init__()
self.eps = label_smoothing
""" label smooth """
def forward(self, output, target):
n_class = output.size(1)
one_hot = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = one_hot * (1 - self.eps) + self.eps / n_class
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
loss = -torch.bmm(target, output_log_prob)
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
def f_divergence(q_logits, p_logits, alpha, iw_clip=1e3):
assert isinstance(alpha, float)
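    # alpha-divergence special cases: alpha -> 0 recovers KL(q||p) and
    # alpha -> 1 recovers KL(p||q); other alphas interpolate via the clipped
    # power of the importance ratio p/q below.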
q_prob = torch.nn.functional.softmax(q_logits, dim=1).detach()
p_prob = torch.nn.functional.softmax(p_logits, dim=1).detach()
q_log_prob = torch.nn.functional.log_softmax(q_logits, dim=1) #gradient is only backpropagated here
importance_ratio = p_prob / q_prob
if abs(alpha) < 1e-3:
importance_ratio = importance_ratio.clamp(0, iw_clip)
f = -importance_ratio.log()
f_base = 0
rho_f = importance_ratio.log() - 1.0
elif abs(alpha - 1.0) < 1e-3:
f = importance_ratio * importance_ratio.log()
f_base = 0
rho_f = importance_ratio
else:
iw_alpha = torch.pow(importance_ratio, alpha)
iw_alpha = iw_alpha.clamp(0, iw_clip)
f = iw_alpha / alpha / (alpha - 1.0)
f_base = 1.0 / alpha / (alpha - 1.0)
rho_f = iw_alpha / alpha + f_base
loss = torch.sum(q_prob * (f - f_base), dim=1)
grad_loss = -torch.sum(q_prob * rho_f * q_log_prob, dim=1)
return loss, grad_loss
"""
It's often necessary to clip the maximum
gradient value (e.g., 1.0) when using this adaptive KD loss
"""
class AdaptiveLossSoft(torch.nn.modules.loss._Loss):
def __init__(self, alpha_min=-1.0, alpha_max=1.0, iw_clip=5.0):
super(AdaptiveLossSoft, self).__init__()
self.alpha_min = alpha_min
self.alpha_max = alpha_max
self.iw_clip = iw_clip
def forward(self, output, target, alpha_min=None, alpha_max=None):
alpha_min = alpha_min or self.alpha_min
alpha_max = alpha_max or self.alpha_max
loss_left, grad_loss_left = f_divergence(output, target, alpha_min, iw_clip=self.iw_clip)
loss_right, grad_loss_right = f_divergence(output, target, alpha_max, iw_clip=self.iw_clip)
ind = torch.gt(loss_left, loss_right).float()
loss = ind * grad_loss_left + (1.0 - ind) * grad_loss_right
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
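if __name__ == "__main__":
    # Smoke test / usage sketch; the batch size, class count, temperature,
    # and alpha values below are illustrative assumptions, not the settings
    # used by the training recipes.
    torch.manual_seed(0)
    student_logits = torch.randn(8, 10, requires_grad=True)
    teacher_logits = torch.randn(8, 10)
    labels = torch.randint(0, 10, (8,))
    kd = KLLossSoft()
    kd_loss = kd(student_logits, teacher_logits, target=labels, temperature=4.0, alpha=0.9)
    print("kd loss:", kd_loss.item())
    adaptive = AdaptiveLossSoft(alpha_min=-1.0, alpha_max=1.0, iw_clip=5.0)
    ada_loss = adaptive(student_logits, teacher_logits)
    ada_loss.backward()
    # As noted above, clip the gradient magnitude when using the adaptive loss.
    torch.nn.utils.clip_grad_value_([student_logits], 1.0)
    print("adaptive loss:", ada_loss.item())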
| AlphaNet-main | loss_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import random
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
import models
from utils.config import setup
import utils.comm as comm
import utils.saver as saver
from data.data_loader import build_data_loader
from evaluate import attentive_nas_eval as attentive_nas_eval
import utils.logging as logging
import argparse
"""
using multiple nodes to run evolutionary search:
1) each GPU will evaluate its own sub-networks
2) all evaluation results will be aggregated on GPU 0
"""
parser = argparse.ArgumentParser(description='AlphaNet parallel evolutionary search')
parser.add_argument('--config-file', default='./configs/parallel_supernet_evo_search.yml')
parser.add_argument('--machine-rank', default=0, type=int,
help='machine rank, distributed setting')
parser.add_argument('--num-machines', default=1, type=int,
help='number of nodes, distributed setting')
parser.add_argument('--dist-url', default="tcp://127.0.0.1:10001", type=str,
help='init method, distributed setting')
parser.add_argument('--seed', default=1, type=int,
help='default random seed')
run_args = parser.parse_args()
logger = logging.get_logger(__name__)
def eval_worker(gpu, ngpus_per_node, args):
args.gpu = gpu # local rank, local machine cuda id
args.local_rank = args.gpu
args.batch_size = args.batch_size_per_gpu
global_rank = args.gpu + args.machine_rank * ngpus_per_node
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=global_rank
)
# Setup logging format.
logging.setup_logging("stdout.log", 'w')
# synchronize is needed here to prevent a possible timeout after calling
# init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
args.rank = comm.get_rank() # global rank
torch.cuda.set_device(args.gpu)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# build the supernet
logger.info("=> creating model '{}'".format(args.arch))
model = models.model_factory.create_model(args)
model.cuda(args.gpu)
model = comm.get_parallel_model(model, args.gpu) #local rank
# define loss function (criterion)
criterion = nn.CrossEntropyLoss().cuda()
## load dataset, train_sampler: distributed
train_loader, val_loader, train_sampler = build_data_loader(args)
assert args.resume
#reloading model
model.module.load_weights_from_pretrained_models(args.resume)
if train_sampler:
train_sampler.set_epoch(0)
targeted_min_flops = args.evo_search.targeted_min_flops
targeted_max_flops = args.evo_search.targeted_max_flops
# run evolutionary search
parent_popu = []
for idx in range(args.evo_search.parent_popu_size):
if idx == 0:
cfg = model.module.sample_min_subnet()
else:
cfg = model.module.sample_active_subnet_within_range(
targeted_min_flops, targeted_max_flops
)
cfg['net_id'] = f'net_{idx % args.world_size}_evo_0_{idx}'
parent_popu.append(cfg)
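    # Pareto frontier: maps a FLOPs bucket (rounded to evo_search.step) to the
    # best-accuracy sub-network config found so far in that bucket.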
pareto_global = {}
for evo in range(args.evo_search.evo_iter):
# partition the set of candidate sub-networks
# and send them to each GPU for parallel evaluation
# sub-networks to be evaluated on GPU {args.rank}
my_subnets_to_be_evaluated = {}
n_evaluated = len(parent_popu) // args.world_size * args.world_size
for cfg in parent_popu[:n_evaluated]:
if cfg['net_id'].startswith(f'net_{args.rank}_'):
my_subnets_to_be_evaluated[cfg['net_id']] = cfg
# aggregating all evaluation results
eval_results = attentive_nas_eval.validate(
my_subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
)
# update the Pareto frontier
# in this case, we search the best FLOPs vs. accuracy trade-offs
for cfg in eval_results:
f = round(cfg['flops'] / args.evo_search.step) * args.evo_search.step
if f not in pareto_global or pareto_global[f]['acc1'] < cfg['acc1']:
pareto_global[f] = cfg
# next batch of sub-networks to be evaluated
parent_popu = []
# mutate
for idx in range(args.evo_search.mutate_size):
while True:
old_cfg = random.choice(list(pareto_global.values()))
cfg = model.module.mutate_and_reset(old_cfg, prob=args.evo_search.mutate_prob)
flops = model.module.compute_active_subnet_flops()
if flops >= targeted_min_flops and flops <= targeted_max_flops:
break
cfg['net_id'] = f'net_{idx % args.world_size}_evo_{evo}_mutate_{idx}'
parent_popu.append(cfg)
# cross over
for idx in range(args.evo_search.crossover_size):
while True:
cfg1 = random.choice(list(pareto_global.values()))
cfg2 = random.choice(list(pareto_global.values()))
cfg = model.module.crossover_and_reset(cfg1, cfg2)
flops = model.module.compute_active_subnet_flops()
if flops >= targeted_min_flops and flops <= targeted_max_flops:
break
cfg['net_id'] = f'net_{idx % args.world_size}_evo_{evo}_crossover_{idx}'
parent_popu.append(cfg)
if __name__ == '__main__':
    # set up environment
args = setup(run_args.config_file)
args.dist_url = run_args.dist_url
args.machine_rank = run_args.machine_rank
args.num_nodes = run_args.num_machines
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.num_nodes
assert args.world_size > 1, "only support DDP settings"
# Use torch.multiprocessing.spawn to launch distributed processes: the
# eval_worker process function
mp.spawn(eval_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
raise NotImplementedError
| AlphaNet-main | parallel_supernet_evo_search.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from AttentiveNAS (https://github.com/facebookresearch/AttentiveNAS)
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
import operator
from datetime import date
import torch
import torch.nn as nn
#from torch.utils.tensorboard import SummaryWriter
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from data.data_loader import build_data_loader
from utils.config import setup
import utils.saver as saver
from utils.progress import AverageMeter, ProgressMeter, accuracy
import utils.comm as comm
import utils.logging as logging
from evaluate import attentive_nas_eval as attentive_nas_eval
from solver import build_optimizer, build_lr_scheduler
import models
from copy import deepcopy
import numpy as np
import loss_ops as loss_ops
parser = argparse.ArgumentParser(description='AlphaNet Training')
parser.add_argument('--config-file', default=None, type=str,
help='training configuration')
parser.add_argument('--machine-rank', default=0, type=int,
help='machine rank, distributed setting')
parser.add_argument('--num-machines', default=1, type=int,
help='number of nodes, distributed setting')
parser.add_argument('--dist-url', default="tcp://127.0.0.1:10001", type=str,
help='init method, distributed setting')
logger = logging.get_logger(__name__)
def build_args_and_env(run_args):
assert run_args.config_file and os.path.isfile(run_args.config_file), 'cannot locate config file'
args = setup(run_args.config_file)
args.config_file = run_args.config_file
#load config
assert args.distributed and args.multiprocessing_distributed, 'only support DDP training'
args.distributed = True
args.machine_rank = run_args.machine_rank
args.num_nodes = run_args.num_machines
args.dist_url = run_args.dist_url
args.models_save_dir = os.path.join(args.models_save_dir, args.exp_name)
if not os.path.exists(args.models_save_dir):
os.makedirs(args.models_save_dir)
#backup config file
saver.copy_file(args.config_file, '{}/{}'.format(args.models_save_dir, os.path.basename(args.config_file)))
args.checkpoint_save_path = os.path.join(
args.models_save_dir, 'alphanet.pth.tar'
)
args.logging_save_path = os.path.join(
args.models_save_dir, f'stdout.log'
)
return args
def main():
run_args = parser.parse_args()
args = build_args_and_env(run_args)
random.seed(args.seed)
torch.manual_seed(args.seed)
#cudnn.deterministic = True
#warnings.warn('You have chosen to seed training. '
# 'This will turn on the CUDNN deterministic setting, '
# 'which can slow down your training considerably! '
# 'You may see unexpected behavior when restarting '
# 'from checkpoints.')
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.num_nodes
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
raise NotImplementedError
assert args.world_size > 1, 'only support ddp training'
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu # local rank, local machine cuda id
args.local_rank = args.gpu
args.batch_size = args.batch_size_per_gpu
args.batch_size_total = args.batch_size * args.world_size
#rescale base lr
args.lr_scheduler.base_lr = args.lr_scheduler.base_lr * (max(1, args.batch_size_total // 256))
# set random seed, make sure all random subgraph generated would be the same
random.seed(args.seed)
torch.manual_seed(args.seed)
if args.gpu:
torch.cuda.manual_seed(args.seed)
global_rank = args.gpu + args.machine_rank * ngpus_per_node
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=global_rank
)
# Setup logging format.
logging.setup_logging(args.logging_save_path, 'w')
logger.info(f"Use GPU: {args.gpu}, machine rank {args.machine_rank}, num_nodes {args.num_nodes}, \
gpu per node {ngpus_per_node}, world size {args.world_size}")
# synchronize is needed here to prevent a possible timeout after calling
# init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
args.rank = comm.get_rank() # global rank
args.local_rank = args.gpu
torch.cuda.set_device(args.gpu)
# build model
logger.info("=> creating model '{}'".format(args.arch))
model = models.model_factory.create_model(args)
model.cuda(args.gpu)
# use sync batchnorm
if getattr(args, 'sync_bn', False):
model.apply(
lambda m: setattr(m, 'need_sync', True))
model = comm.get_parallel_model(model, args.gpu) #local rank
logger.info(model)
criterion = loss_ops.CrossEntropyLossSmooth(args.label_smoothing).cuda(args.gpu)
soft_criterion = loss_ops.AdaptiveLossSoft(args.alpha_min, args.alpha_max, args.iw_clip).cuda(args.gpu)
if not getattr(args, 'inplace_distill', True):
soft_criterion = None
## load dataset, train_sampler: distributed
train_loader, val_loader, train_sampler = build_data_loader(args)
args.n_iters_per_epoch = len(train_loader)
logger.info( f'building optimizer and lr scheduler, \
local rank {args.gpu}, global rank {args.rank}, world_size {args.world_size}')
optimizer = build_optimizer(args, model)
lr_scheduler = build_lr_scheduler(args, optimizer)
# optionally resume from a checkpoint
if args.resume:
saver.load_checkpoints(args, model, optimizer, lr_scheduler, logger)
logger.info(args)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
args.curr_epoch = epoch
logger.info('Training lr {}'.format(lr_scheduler.get_lr()[0]))
# train for one epoch
acc1, acc5 = train_epoch(epoch, model, train_loader, optimizer, criterion, args, \
soft_criterion=soft_criterion, lr_scheduler=lr_scheduler)
if comm.is_master_process() or args.distributed:
# validate supernet model
validate(
train_loader, val_loader, model, criterion, args
)
if comm.is_master_process():
# save checkpoints
saver.save_checkpoint(
args.checkpoint_save_path,
model,
optimizer,
lr_scheduler,
args,
epoch,
)
def train_epoch(
epoch,
model,
train_loader,
optimizer,
criterion,
args,
soft_criterion=None,
lr_scheduler=None,
):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
model.train()
end = time.time()
num_updates = epoch * len(train_loader)
for batch_idx, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# total subnets to be sampled
num_subnet_training = max(2, getattr(args, 'num_arch_training', 2))
optimizer.zero_grad()
### compute gradients using sandwich rule ###
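        # Sandwich rule: backprop the largest sub-network against the hard
        # labels, then distill the smallest and several random sub-networks
        # from the largest one's detached soft predictions; gradients are
        # accumulated over all sampled sub-networks before one optimizer step.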
# step 1 sample the largest network, apply regularization to only the largest network
drop_connect_only_last_two_stages = getattr(args, 'drop_connect_only_last_two_stages', True)
model.module.sample_max_subnet()
model.module.set_dropout_rate(args.dropout, args.drop_connect, drop_connect_only_last_two_stages) #dropout for supernet
output = model(images)
loss = criterion(output, target)
loss.backward()
with torch.no_grad():
soft_logits = output.clone().detach()
#step 2. sample the smallest network and several random networks
sandwich_rule = getattr(args, 'sandwich_rule', True)
model.module.set_dropout_rate(0, 0, drop_connect_only_last_two_stages) #reset dropout rate
for arch_id in range(1, num_subnet_training):
if arch_id == num_subnet_training-1 and sandwich_rule:
model.module.sample_min_subnet()
else:
model.module.sample_active_subnet()
            # calculating loss
output = model(images)
if soft_criterion:
loss = soft_criterion(output, soft_logits)
else:
assert not args.inplace_distill
loss = criterion(output, target)
loss.backward()
        # clip gradients if specified
if getattr(args, 'grad_clip_value', None):
torch.nn.utils.clip_grad_value_(model.parameters(), args.grad_clip_value)
optimizer.step()
#accuracy measured on the local batch
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
corr1, corr5, loss = acc1*args.batch_size, acc5*args.batch_size, loss.item()*args.batch_size #just in case the batch size is different on different nodes
stats = torch.tensor([corr1, corr5, loss, args.batch_size], device=args.gpu)
dist.barrier() # synchronizes all processes
dist.all_reduce(stats, op=torch.distributed.ReduceOp.SUM)
corr1, corr5, loss, batch_size = stats.tolist()
acc1, acc5, loss = corr1/batch_size, corr5/batch_size, loss/batch_size
losses.update(loss, batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
else:
losses.update(loss.item(), images.size(0))
top1.update(acc1, images.size(0))
top5.update(acc5, images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
num_updates += 1
if lr_scheduler is not None:
lr_scheduler.step()
if batch_idx % args.print_freq == 0:
progress.display(batch_idx, logger)
return top1.avg, top5.avg
def validate(
train_loader,
val_loader,
model,
criterion,
args,
distributed = True,
):
subnets_to_be_evaluated = {
'attentive_nas_min_net': {},
'attentive_nas_max_net': {},
}
acc1_list, acc5_list = attentive_nas_eval.validate(
subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
bn_calibration = True,
)
if __name__ == '__main__':
main()
| AlphaNet-main | train_alphanet.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from AttentiveNAS (https://github.com/facebookresearch/AttentiveNAS)
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
from datetime import date
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import models
from utils.config import setup
from utils.flops_counter import count_net_flops_and_params
import utils.comm as comm
import utils.saver as saver
from data.data_loader import build_data_loader
from utils.progress import AverageMeter, ProgressMeter, accuracy
import argparse
parser = argparse.ArgumentParser(description='Test AlphaNet Models')
parser.add_argument('--config-file', default='./configs/eval_alphanet_models.yml')
parser.add_argument('--model', default='a0', type=str, choices=['a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a5_1', 'a6'])
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
run_args = parser.parse_args()
if __name__ == '__main__':
args = setup(run_args.config_file)
args.model = run_args.model
args.gpu = run_args.gpu
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
args.__dict__['active_subnet'] = args.__dict__['pareto_models'][args.model]
print(args.active_subnet)
train_loader, val_loader, train_sampler = build_data_loader(args)
## init static attentivenas model with weights inherited from the supernet
model = models.model_factory.create_model(args)
model.to(args.gpu)
model.eval()
# bn running stats calibration following Slimmable (https://arxiv.org/abs/1903.05134)
# please consider trying a different random seed if you see a small accuracy drop
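    # The supernet's BN running statistics were accumulated over many
    # sub-networks and do not match this specific subnet, so a few forward
    # passes over training batches are used to re-estimate them.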
with torch.no_grad():
model.reset_running_stats_for_calibration()
for batch_idx, (images, _) in enumerate(train_loader):
if batch_idx >= args.post_bn_calibration_batch_num:
break
images = images.cuda(args.gpu, non_blocking=True)
model(images) #forward only
model.eval()
with torch.no_grad():
criterion = nn.CrossEntropyLoss().cuda()
from evaluate.imagenet_eval import validate_one_subnet
acc1, acc5, loss, flops, params = validate_one_subnet(val_loader, model, criterion, args)
print(acc1, acc5, flops, params)
| AlphaNet-main | test_alphanet.py |
"""For pip."""
from glob import glob
from os.path import basename, splitext
from setuptools import find_packages, setup
exec(open("src/fonduer/_version.py").read())
setup(
name="fonduer",
version=__version__,
description="Knowledge base construction system for richly formatted data.",
long_description=open("README.rst").read(),
packages=find_packages("src"),
package_dir={"": "src"},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
install_requires=[
"beautifulsoup4>=4.8.0",
"editdistance>=0.5.2, <0.6.0",
"snorkel>=0.9.5, <0.10.0",
"emmental>=0.0.6, <0.1.0",
"lxml>=4.2.5, <5.0.0",
"mlflow>=1.1.0, <2.0.0",
"numpy>=1.11, <2.0",
"pyyaml>=5.1, <6.0",
"scipy>=1.1.0, <2.0.0",
"spacy>=2.1.3, <2.4.0",
"sqlalchemy[postgresql]>=1.3.7, <2.0.0",
"torch>=1.3.1,<2.0.0",
"tqdm>=4.36.0, <5.0.0",
"treedlib>=0.1.3, <0.2.0",
"wand>=0.4.4, <0.6.0",
"ipython",
"deprecation",
],
extras_require={
"spacy_ja": ["fugashi[unidic-lite]>=0.2.3"],
"spacy_zh": ["jieba>=0.39, <0.40"],
},
keywords=["fonduer", "knowledge base construction", "richly formatted data"],
package_data={"fonduer": ["py.typed"]},
url="https://github.com/HazyResearch/fonduer",
classifiers=[ # https://pypi.python.org/pypi?:action=list_classifiers
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
],
project_urls={
"Tracker": "https://github.com/HazyResearch/fonduer/issues",
"Source": "https://github.com/HazyResearch/fonduer",
},
python_requires=">=3.6",
author="Hazy Research",
author_email="[email protected]",
license="MIT",
)
| fonduer-master | setup.py |
"""conftest.py file that defines shared fixture functions.
See https://docs.pytest.org/en/stable/fixture.html#conftest-py-sharing-fixture-functions
"""
import os
import psycopg2
import pytest
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from fonduer.meta import Meta
DB = "fonduer_test"
if "CI" in os.environ:
CONN_STRING = (
f"postgresql://{os.environ['PGUSER']}:{os.environ['PGPASSWORD']}"
+ f"@{os.environ['POSTGRES_HOST']}:{os.environ['POSTGRES_PORT']}/{DB}"
)
else:
CONN_STRING = f"postgresql://127.0.0.1:5432/{DB}"
@pytest.fixture
def database_session():
"""Fixture function that creates and drops a database.
As a setup, a database is created and an SQLAlchemy session is passed to a test.
As a teardown, the database is dropped after the test runs.
"""
if "CI" in os.environ:
con = psycopg2.connect(
host=os.environ["POSTGRES_HOST"],
port=os.environ["POSTGRES_PORT"],
user=os.environ["PGUSER"],
password=os.environ["PGPASSWORD"],
)
else:
con = psycopg2.connect(host="127.0.0.1", port="5432")
# Setup
con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cursor = con.cursor()
cursor.execute(f'create database "{DB}";')
session = Meta.init(CONN_STRING).Session()
yield session
# Teardown
engine = session.get_bind()
session.close()
engine.dispose()
Meta.engine = None
cursor.execute(f'drop database "{DB}";')
cursor.close()
con.close()
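# Usage sketch: pytest injects this fixture by parameter name. A hypothetical
# consumer:
#
#   def test_documents_table_is_empty(database_session):
#       from fonduer.parser.models import Document
#       assert database_session.query(Document).count() == 0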
| fonduer-master | tests/conftest.py |
"""Fonduer unit tests."""
| fonduer-master | tests/__init__.py |
"""Test Fonduer meta."""
import logging
import os
import psycopg2
import pytest
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from sqlalchemy.exc import OperationalError
from fonduer import Meta
from fonduer.candidates.models import mention_subclass
from tests.conftest import CONN_STRING, DB
logger = logging.getLogger(__name__)
def test_meta_connection_strings(database_session):
"""Simple sanity checks for validating postgres connection strings."""
with pytest.raises(ValueError):
Meta.init("postgresql" + DB).Session()
with pytest.raises(ValueError):
Meta.init("sqlite://somethingsilly" + DB).Session()
with pytest.raises(OperationalError):
Meta.init("postgresql://somethingsilly:5432/").Session()
session = Meta.init("postgresql://localhost:5432/" + DB).Session()
engine = session.get_bind()
session.close()
engine.dispose()
assert Meta.DBNAME == DB
def test_subclass_before_meta_init():
"""Test if mention (candidate) subclass can be created before Meta init."""
# Test if mention (candidate) subclass can be created
Part = mention_subclass("Part")
logger.info(f"Create a mention subclass '{Part.__tablename__}'")
# Setup a database
con = psycopg2.connect(
host=os.environ["POSTGRES_HOST"],
port=os.environ["POSTGRES_PORT"],
user=os.environ["PGUSER"],
password=os.environ["PGPASSWORD"],
)
con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cursor = con.cursor()
cursor.execute(f'create database "{DB}";')
session = Meta.init(CONN_STRING).Session()
# Test if another mention subclass can be created
Temp = mention_subclass("Temp")
logger.info(f"Create a mention subclass '{Temp.__tablename__}'")
# Teardown the database
session.close()
Meta.engine.dispose()
Meta.engine = None
cursor.execute(f'drop database "{DB}";')
cursor.close()
con.close()
| fonduer-master | tests/test_meta.py |
"""Unit tests that involve postgres access."""
import logging
from fonduer.candidates import CandidateExtractor, MentionExtractor, MentionFigures
from fonduer.candidates.matchers import LambdaFunctionFigureMatcher
from fonduer.candidates.models import (
Candidate,
Mention,
candidate_subclass,
mention_subclass,
)
from fonduer.parser import Parser
from fonduer.parser.models import Document, Sentence
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser.visual_parser import PdfVisualParser
from tests.shared.hardware_matchers import part_matcher, temp_matcher, volt_matcher
from tests.shared.hardware_spaces import (
MentionNgramsPart,
MentionNgramsTemp,
MentionNgramsVolt,
)
from tests.shared.hardware_throttlers import temp_throttler
logger = logging.getLogger(__name__)
def test_preprocessor_parse_file_called_once(database_session, mocker):
"""Test if DocPreprocessor._parse_file is called only once during parser.apply."""
max_docs = 1
session = database_session
docs_path = "tests/data/html/"
# Set up preprocessor, parser, and spy on preprocessor
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
spy = mocker.spy(doc_preprocessor, "_parse_file")
corpus_parser = Parser(session)
# Check if udf.last_docs is empty.
assert len(corpus_parser.get_last_documents()) == 0
# Parsing
corpus_parser.apply(doc_preprocessor)
# Check if udf.last_docs is correctly updated.
assert len(corpus_parser.get_last_documents()) == max_docs
# doc_preprocessor._parse_file should be called only once (#434).
spy.assert_called_once()
def test_cand_gen_cascading_delete(database_session):
"""Test cascading the deletion of candidates."""
# GitHub Actions gives 2 cores
# help.github.com/en/actions/reference/virtual-environments-for-github-hosted-runners
PARALLEL = 2
max_docs = 1
session = database_session
docs_path = "tests/data/html/"
pdf_path = "tests/data/pdf/"
# Parsing
logger.info("Parsing...")
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
corpus_parser = Parser(
session,
structural=True,
lingual=True,
visual_parser=PdfVisualParser(pdf_path),
)
corpus_parser.apply(doc_preprocessor, parallelism=PARALLEL)
assert session.query(Document).count() == max_docs
assert session.query(Sentence).count() == 799
docs = session.query(Document).order_by(Document.name).all()
# Mention Extraction
part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
temp_ngrams = MentionNgramsTemp(n_max=2)
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
mention_extractor = MentionExtractor(
session, [Part, Temp], [part_ngrams, temp_ngrams], [part_matcher, temp_matcher]
)
mention_extractor.clear_all()
mention_extractor.apply(docs, parallelism=PARALLEL)
assert session.query(Mention).count() == 93
assert session.query(Part).count() == 70
assert session.query(Temp).count() == 23
part = session.query(Part).order_by(Part.id).all()[0]
temp = session.query(Temp).order_by(Temp.id).all()[0]
logger.info(f"Part: {part.context}")
logger.info(f"Temp: {temp.context}")
# Candidate Extraction
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
candidate_extractor = CandidateExtractor(
session, [PartTemp], throttlers=[temp_throttler]
)
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
assert session.query(PartTemp).count() == 1431
assert session.query(Candidate).count() == 1431
assert docs[0].name == "112823"
assert len(docs[0].parts) == 70
assert len(docs[0].temps) == 23
# Delete from parent class should cascade to child
x = session.query(Candidate).first()
session.query(Candidate).filter_by(id=x.id).delete(synchronize_session="fetch")
assert session.query(Candidate).count() == 1430
assert session.query(PartTemp).count() == 1430
# Test that deletion of a Candidate does not delete the Mention
x = session.query(PartTemp).first()
candidate = session.query(PartTemp).filter_by(id=x.id).first()
session.delete(candidate)
assert session.query(PartTemp).count() == 1429
assert session.query(Temp).count() == 23
assert session.query(Part).count() == 70
# Clearing Mentions should also delete Candidates
mention_extractor.clear()
assert session.query(Mention).count() == 0
assert session.query(Part).count() == 0
assert session.query(Temp).count() == 0
assert session.query(PartTemp).count() == 0
assert session.query(Candidate).count() == 0
def test_too_many_clients_error_should_not_happen(database_session):
"""Too many clients error should not happens."""
PARALLEL = 32
logger.info("Parallel: {PARALLEL}")
def do_nothing_matcher(fig):
return True
max_docs = 1
session = database_session
docs_path = "tests/data/html/"
pdf_path = "tests/data/pdf/"
# Parsing
logger.info("Parsing...")
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
corpus_parser = Parser(
session,
structural=True,
lingual=True,
visual_parser=PdfVisualParser(pdf_path),
)
corpus_parser.apply(doc_preprocessor, parallelism=PARALLEL)
docs = session.query(Document).order_by(Document.name).all()
# Mention Extraction
part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
temp_ngrams = MentionNgramsTemp(n_max=2)
volt_ngrams = MentionNgramsVolt(n_max=1)
figs = MentionFigures(types="png")
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
Volt = mention_subclass("Volt")
Fig = mention_subclass("Fig")
fig_matcher = LambdaFunctionFigureMatcher(func=do_nothing_matcher)
mention_extractor = MentionExtractor(
session,
[Part, Temp, Volt, Fig],
[part_ngrams, temp_ngrams, volt_ngrams, figs],
[part_matcher, temp_matcher, volt_matcher, fig_matcher],
)
mention_extractor.apply(docs, parallelism=PARALLEL)
# Candidate Extraction
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
PartVolt = candidate_subclass("PartVolt", [Part, Volt])
# Test that no throttler in candidate extractor
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt]
) # Pass, no throttler
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
candidate_extractor.clear_all(split=0)
# Test with None in throttlers in candidate extractor
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt], throttlers=[temp_throttler, None]
)
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
def test_parse_error_doc_skipping(database_session):
"""Test skipping of faulty htmls."""
faulty_doc_path = "tests/data/html_faulty/ext_diseases_missing_table_tag.html"
preprocessor = HTMLDocPreprocessor(faulty_doc_path)
session = database_session
corpus_parser = Parser(session)
corpus_parser.apply(preprocessor)
# This returns documents that apply() was called on
assert corpus_parser.last_docs == {"ext_diseases_missing_table_tag"}
# This returns only documents that are successfully parsed.
assert corpus_parser.get_last_documents() == []
| fonduer-master | tests/test_postgres.py |
"""Fonduer unit tests for extracting candidates."""
import logging
import pickle
from typing import Optional
import pytest
from fonduer.candidates import (
CandidateExtractor,
MentionCaptions,
MentionCells,
MentionDocuments,
MentionExtractor,
MentionFigures,
MentionNgrams,
MentionParagraphs,
MentionSections,
MentionSentences,
MentionTables,
)
from fonduer.candidates.candidates import CandidateExtractorUDF
from fonduer.candidates.matchers import (
DoNothingMatcher,
LambdaFunctionFigureMatcher,
LambdaFunctionMatcher,
PersonMatcher,
)
from fonduer.candidates.mentions import MentionExtractorUDF, Ngrams
from fonduer.candidates.models import candidate_subclass, mention_subclass
from fonduer.parser.models import Sentence
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser.visual_parser import PdfVisualParser
from fonduer.utils.data_model_utils import get_col_ngrams, get_row_ngrams
from tests.parser.test_parser import get_parser_udf
from tests.shared.hardware_matchers import part_matcher, temp_matcher, volt_matcher
from tests.shared.hardware_spaces import (
MentionNgramsPart,
MentionNgramsTemp,
MentionNgramsVolt,
)
from tests.shared.hardware_throttlers import temp_throttler, volt_throttler
logger = logging.getLogger(__name__)
def parse_doc(docs_path: str, file_name: str, pdf_path: Optional[str] = None):
"""Parse documents from given path."""
max_docs = 1
logger.info("Parsing...")
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
doc = next(doc_preprocessor._parse_file(docs_path, file_name))
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
visual=True if pdf_path else False,
visual_parser=PdfVisualParser(pdf_path) if pdf_path else None,
language="en",
)
doc = parser_udf.apply(doc)
return doc
def test_ngram_split():
"""Test ngram split."""
ngrams = Ngrams(split_tokens=["-", "/"])
sent = Sentence()
# When a split_token appears in the middle of the text.
sent.text = "New-Text"
sent.words = ["New-Text"]
sent.char_offsets = [0]
sent.abs_char_offsets = [0]
result = list(ngrams.apply(sent))
assert len(result) == 3
assert result[0].get_span() == "New-Text"
assert result[1].get_span() == "New"
assert result[2].get_span() == "Text"
# When a text ends with a split_token.
sent.text = "New-"
sent.words = ["New-"]
result = list(ngrams.apply(sent))
assert len(result) == 2
assert result[0].get_span() == "New-"
assert result[1].get_span() == "New"
# When a text starts with a split_token.
sent.text = "-Text"
sent.words = ["-Text"]
result = list(ngrams.apply(sent))
assert len(result) == 2
assert result[0].get_span() == "-Text"
assert result[1].get_span() == "Text"
# When more than one split_token appears.
sent.text = "New/Text-Word"
sent.words = ["New/Text-Word"]
result = list(ngrams.apply(sent))
assert len(result) == 6
spans = [r.get_span() for r in result]
assert "New/Text-Word" in spans
assert "New" in spans
assert "New/Text" in spans
assert "Text" in spans
assert "Text-Word" in spans
assert "Word" in spans
sent.text = "A-B/C-D"
sent.words = ["A-B/C-D"]
result = list(ngrams.apply(sent))
assert len(result) == 10
spans = [r.get_span() for r in result]
assert "A-B/C-D" in spans
assert "A-B/C" in spans
assert "B/C-D" in spans
assert "A-B" in spans
assert "C-D" in spans
assert "B/C" in spans
assert "A" in spans
assert "B" in spans
assert "C" in spans
assert "D" in spans
ngrams = Ngrams(split_tokens=["~", "~~"])
sent = Sentence()
sent.text = "a~b~~c~d"
sent.words = ["a~b~~c~d"]
sent.char_offsets = [0]
sent.abs_char_offsets = [0]
result = list(ngrams.apply(sent))
assert len(result) == 10
spans = [r.get_span() for r in result]
assert "a~b~~c~d" in spans
assert "a" in spans
assert "a~b" in spans
assert "a~b~~c" in spans
assert "b" in spans
assert "b~~c" in spans
assert "b~~c~d" in spans
assert "c" in spans
assert "c~d" in spans
assert "d" in spans
ngrams = Ngrams(split_tokens=["~a", "a~"])
sent = Sentence()
sent.text = "~a~b~~c~d"
sent.words = ["~a~b~~c~d"]
sent.char_offsets = [0]
sent.abs_char_offsets = [0]
result = list(ngrams.apply(sent))
assert len(result) == 2
spans = [r.get_span() for r in result]
assert "~a~b~~c~d" in spans
assert "~b~~c~d" in spans
ngrams = Ngrams(split_tokens=["-", "/", "*"])
sent = Sentence()
sent.text = "A-B/C*D"
sent.words = ["A-B/C*D"]
sent.char_offsets = [0]
sent.abs_char_offsets = [0]
result = list(ngrams.apply(sent))
assert len(result) == 10
spans = [r.get_span() for r in result]
assert "A-B/C*D" in spans
assert "A" in spans
assert "A-B" in spans
assert "A-B/C" in spans
assert "B" in spans
assert "B/C" in spans
assert "B/C*D" in spans
assert "C" in spans
assert "C*D" in spans
assert "D" in spans
def test_span_char_start_and_char_end():
"""Test chart_start and char_end of TemporarySpan that comes from Ngrams.apply."""
ngrams = Ngrams()
sent = Sentence()
sent.text = "BC548BG"
sent.words = ["BC548BG"]
sent.char_offsets = [0]
sent.abs_char_offsets = [0]
result = list(ngrams.apply(sent))
assert len(result) == 1
assert result[0].get_span() == "BC548BG"
assert result[0].char_start == 0
assert result[0].char_end == 6
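# Note on the invariant checked above (an observation from this test, not
# documented Fonduer API): char_end is inclusive, so for any span
# char_end == char_start + len(get_span()) - 1; the 7-character "BC548BG"
# starts at 0 and ends at 6.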
def test_cand_gen():
"""Test extracting candidates from mentions from documents."""
def do_nothing_matcher(fig):
return True
docs_path = "tests/data/html/112823.html"
pdf_path = "tests/data/pdf/"
doc = parse_doc(docs_path, "112823", pdf_path)
# Mention Extraction
part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
temp_ngrams = MentionNgramsTemp(n_max=2)
volt_ngrams = MentionNgramsVolt(n_max=1)
figs = MentionFigures(types="png")
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
Volt = mention_subclass("Volt")
Fig = mention_subclass("Fig")
fig_matcher = LambdaFunctionFigureMatcher(func=do_nothing_matcher)
with pytest.raises(ValueError):
MentionExtractor(
"dummy",
[Part, Temp, Volt],
[part_ngrams, volt_ngrams], # Fail, mismatched arity
[part_matcher, temp_matcher, volt_matcher],
)
with pytest.raises(ValueError):
MentionExtractor(
"dummy",
[Part, Temp, Volt],
[part_ngrams, temp_matcher, volt_ngrams],
[part_matcher, temp_matcher], # Fail, mismatched arity
)
mention_extractor_udf = MentionExtractorUDF(
[Part, Temp, Volt, Fig],
[part_ngrams, temp_ngrams, volt_ngrams, figs],
[part_matcher, temp_matcher, volt_matcher, fig_matcher],
)
doc = mention_extractor_udf.apply(doc)
assert len(doc.parts) == 70
assert len(doc.volts) == 33
assert len(doc.temps) == 23
assert len(doc.figs) == 31
part = doc.parts[0]
volt = doc.volts[0]
temp = doc.temps[0]
logger.info(f"Part: {part.context}")
logger.info(f"Volt: {volt.context}")
logger.info(f"Temp: {temp.context}")
# Candidate Extraction
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
PartVolt = candidate_subclass("PartVolt", [Part, Volt])
with pytest.raises(ValueError):
CandidateExtractor(
"dummy",
[PartTemp, PartVolt],
throttlers=[
temp_throttler,
volt_throttler,
volt_throttler,
], # Fail, mismatched arity
)
with pytest.raises(ValueError):
CandidateExtractor(
"dummy",
[PartTemp], # Fail, mismatched arity
throttlers=[temp_throttler, volt_throttler],
)
    # Test a candidate extractor with no throttlers
candidate_extractor_udf = CandidateExtractorUDF(
[PartTemp, PartVolt], [None, None], False, False, True # Pass, no throttler
)
doc = candidate_extractor_udf.apply(doc, split=0)
assert len(doc.part_temps) == 1610
assert len(doc.part_volts) == 2310
# Clear
doc.part_temps = []
doc.part_volts = []
    # Test a candidate extractor with None among the throttlers
candidate_extractor_udf = CandidateExtractorUDF(
[PartTemp, PartVolt], [temp_throttler, None], False, False, True
)
doc = candidate_extractor_udf.apply(doc, split=0)
assert len(doc.part_temps) == 1431
assert len(doc.part_volts) == 2310
# Clear
doc.part_temps = []
doc.part_volts = []
candidate_extractor_udf = CandidateExtractorUDF(
[PartTemp, PartVolt], [temp_throttler, volt_throttler], False, False, True
)
doc = candidate_extractor_udf.apply(doc, split=0)
assert len(doc.part_temps) == 1431
assert len(doc.part_volts) == 1993
assert len(doc.parts) == 70
assert len(doc.volts) == 33
assert len(doc.temps) == 23
def test_ngrams():
"""Test n-gram limits in mention extraction."""
file_name = "lincoln_short"
docs_path = f"tests/data/pure_html/{file_name}.html"
doc = parse_doc(docs_path, file_name)
# Mention Extraction
Person = mention_subclass("Person")
person_ngrams = MentionNgrams(n_max=3)
person_matcher = PersonMatcher()
mention_extractor_udf = MentionExtractorUDF(
[Person], [person_ngrams], [person_matcher]
)
doc = mention_extractor_udf.apply(doc)
assert len(doc.persons) == 123
mentions = doc.persons
assert len([x for x in mentions if x.context.get_num_words() == 1]) == 44
assert len([x for x in mentions if x.context.get_num_words() > 3]) == 0
# Test for unigram exclusion
for mention in doc.persons[:]:
doc.persons.remove(mention)
assert len(doc.persons) == 0
person_ngrams = MentionNgrams(n_min=2, n_max=3)
mention_extractor_udf = MentionExtractorUDF(
[Person], [person_ngrams], [person_matcher]
)
doc = mention_extractor_udf.apply(doc)
assert len(doc.persons) == 79
mentions = doc.persons
assert len([x for x in mentions if x.context.get_num_words() == 1]) == 0
assert len([x for x in mentions if x.context.get_num_words() > 3]) == 0
def test_row_col_ngram_extraction():
"""Test whether row/column ngrams list is empty, if mention is not in a table."""
file_name = "lincoln_short"
docs_path = f"tests/data/pure_html/{file_name}.html"
doc = parse_doc(docs_path, file_name)
# Mention Extraction
place_ngrams = MentionNgramsTemp(n_max=4)
Place = mention_subclass("Place")
def get_row_and_column_ngrams(mention):
row_ngrams = list(get_row_ngrams(mention))
col_ngrams = list(get_col_ngrams(mention))
if not mention.sentence.is_tabular():
assert len(row_ngrams) == 0
assert len(col_ngrams) == 0
else:
assert not any(x is None for x in row_ngrams)
assert not any(x is None for x in col_ngrams)
if "birth_place" in row_ngrams:
return True
else:
return False
birthplace_matcher = LambdaFunctionMatcher(func=get_row_and_column_ngrams)
mention_extractor_udf = MentionExtractorUDF(
[Place], [place_ngrams], [birthplace_matcher]
)
doc = mention_extractor_udf.apply(doc)
def test_mention_longest_match():
"""Test longest match filtering in mention extraction."""
file_name = "lincoln_short"
docs_path = f"tests/data/pure_html/{file_name}.html"
doc = parse_doc(docs_path, file_name)
# Mention Extraction
name_ngrams = MentionNgramsPart(n_max=3)
place_ngrams = MentionNgramsTemp(n_max=4)
Name = mention_subclass("Name")
Place = mention_subclass("Place")
def is_birthplace_table_row(mention):
if not mention.sentence.is_tabular():
return False
ngrams = get_row_ngrams(mention, lower=True)
if "birth_place" in ngrams:
return True
else:
return False
birthplace_matcher = LambdaFunctionMatcher(
func=is_birthplace_table_row, longest_match_only=False
)
mention_extractor_udf = MentionExtractorUDF(
[Name, Place],
[name_ngrams, place_ngrams],
[PersonMatcher(), birthplace_matcher],
)
doc = mention_extractor_udf.apply(doc)
mentions = doc.places
mention_spans = [x.context.get_span() for x in mentions]
assert "Sinking Spring Farm" in mention_spans
assert "Farm" in mention_spans
assert len(mention_spans) == 23
# Clear manually
for mention in doc.places[:]:
doc.places.remove(mention)
birthplace_matcher = LambdaFunctionMatcher(
func=is_birthplace_table_row, longest_match_only=True
)
mention_extractor_udf = MentionExtractorUDF(
[Name, Place],
[name_ngrams, place_ngrams],
[PersonMatcher(), birthplace_matcher],
)
doc = mention_extractor_udf.apply(doc)
mentions = doc.places
mention_spans = [x.context.get_span() for x in mentions]
assert "Sinking Spring Farm" in mention_spans
assert "Farm" not in mention_spans
assert len(mention_spans) == 4
def test_multimodal_cand():
"""Test multimodal candidate generation."""
file_name = "radiology"
docs_path = f"tests/data/pure_html/{file_name}.html"
doc = parse_doc(docs_path, file_name)
assert len(doc.sentences) == 35
# Mention Extraction
ms_doc = mention_subclass("m_doc")
ms_sec = mention_subclass("m_sec")
ms_tab = mention_subclass("m_tab")
ms_fig = mention_subclass("m_fig")
ms_cell = mention_subclass("m_cell")
ms_para = mention_subclass("m_para")
ms_cap = mention_subclass("m_cap")
ms_sent = mention_subclass("m_sent")
m_doc = MentionDocuments()
m_sec = MentionSections()
m_tab = MentionTables()
m_fig = MentionFigures()
m_cell = MentionCells()
m_para = MentionParagraphs()
m_cap = MentionCaptions()
m_sent = MentionSentences()
ms = [ms_doc, ms_cap, ms_sec, ms_tab, ms_fig, ms_para, ms_sent, ms_cell]
m = [m_doc, m_cap, m_sec, m_tab, m_fig, m_para, m_sent, m_cell]
matchers = [DoNothingMatcher()] * 8
mention_extractor_udf = MentionExtractorUDF(ms, m, matchers)
doc = mention_extractor_udf.apply(doc)
assert len(doc.m_docs) == 1
assert len(doc.m_caps) == 2
assert len(doc.m_secs) == 5
assert len(doc.m_tabs) == 2
assert len(doc.m_figs) == 2
assert len(doc.m_paras) == 30
assert len(doc.m_sents) == 35
assert len(doc.m_cells) == 21
# Candidate Extraction
cs_doc = candidate_subclass("cs_doc", [ms_doc])
cs_sec = candidate_subclass("cs_sec", [ms_sec])
cs_tab = candidate_subclass("cs_tab", [ms_tab])
cs_fig = candidate_subclass("cs_fig", [ms_fig])
cs_cell = candidate_subclass("cs_cell", [ms_cell])
cs_para = candidate_subclass("cs_para", [ms_para])
cs_cap = candidate_subclass("cs_cap", [ms_cap])
cs_sent = candidate_subclass("cs_sent", [ms_sent])
candidate_extractor_udf = CandidateExtractorUDF(
[cs_doc, cs_sec, cs_tab, cs_fig, cs_cell, cs_para, cs_cap, cs_sent],
[None, None, None, None, None, None, None, None],
False,
False,
True,
)
doc = candidate_extractor_udf.apply(doc, split=0)
assert len(doc.cs_docs) == 1
assert len(doc.cs_caps) == 2
assert len(doc.cs_secs) == 5
assert len(doc.cs_tabs) == 2
assert len(doc.cs_figs) == 2
assert len(doc.cs_paras) == 30
assert len(doc.cs_sents) == 35
assert len(doc.cs_cells) == 21
def test_pickle_subclasses():
"""Test if mention/candidate subclasses and their objects can be pickled."""
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
logger.info("Test if mention/candidate subclasses are picklable")
pickle.loads(pickle.dumps(Part))
pickle.loads(pickle.dumps(Temp))
pickle.loads(pickle.dumps(PartTemp))
logger.info("Test if their objects are pickable")
part = Part()
temp = Temp()
parttemp = PartTemp()
pickle.loads(pickle.dumps(part))
pickle.loads(pickle.dumps(temp))
pickle.loads(pickle.dumps(parttemp))
def test_candidate_with_nullable_mentions():
"""Test if mentions can be NULL."""
docs_path = "tests/data/html/112823.html"
pdf_path = "tests/data/pdf/"
doc = parse_doc(docs_path, "112823", pdf_path)
# Mention Extraction
MentionTemp = mention_subclass("MentionTemp")
temp_ngrams = MentionNgramsTemp(n_max=2)
mention_extractor_udf = MentionExtractorUDF(
[MentionTemp],
[temp_ngrams],
[temp_matcher],
)
doc = mention_extractor_udf.apply(doc)
assert len(doc.mention_temps) == 23
# Candidate Extraction
CandidateTemp = candidate_subclass("CandidateTemp", [MentionTemp], nullables=[True])
candidate_extractor_udf = CandidateExtractorUDF(
[CandidateTemp], [None], False, False, True
)
doc = candidate_extractor_udf.apply(doc, split=0)
# The number of extracted candidates should be that of mentions + 1 (NULL)
assert len(doc.candidate_temps) == len(doc.mention_temps) + 1
# Extracted candidates should include one with NULL mention.
assert None in [c[0] for c in doc.candidate_temps]
| fonduer-master | tests/candidates/test_candidates.py |
"""Fonduer candidate unit tests."""
| fonduer-master | tests/candidates/__init__.py |
"""Fonduer unit tests for matchers."""
from unittest.mock import Mock
import pytest
from nltk.stem.porter import PorterStemmer
from fonduer.candidates.matchers import (
Concat,
DateMatcher,
DictionaryMatch,
Intersect,
Inverse,
LambdaFunctionFigureMatcher,
LambdaFunctionMatcher,
LocationMatcher,
MiscMatcher,
NumberMatcher,
OrganizationMatcher,
PersonMatcher,
RegexMatchEach,
RegexMatchSpan,
Union,
)
from fonduer.candidates.mentions import MentionFigures, MentionNgrams
from fonduer.candidates.models.span_mention import TemporarySpanMention
from fonduer.parser.lingual_parser.spacy_parser import SpacyParser
from fonduer.parser.models import Document, Figure, Sentence
@pytest.fixture()
def doc_setup():
"""Set up document."""
doc = Document(id=1, name="test", stable_id="1::document:0:0")
doc.text = "This is apple"
lingual_parser = SpacyParser("en")
for parts in lingual_parser.split_sentences(doc.text):
parts["document"] = doc
Sentence(**parts)
return doc
def test_union(doc_setup):
"""Test union matcher."""
doc = doc_setup
space = MentionNgrams(n_min=1, n_max=2)
tc: TemporarySpanMention
assert set(tc.get_span() for tc in space.apply(doc)) == {
"This is",
"is apple",
"This",
"is",
"apple",
}
# Match any span that contains "apple"
matcher0 = RegexMatchSpan(
rgx=r"apple", search=True, full_match=True, longest_match_only=False
)
assert set(tc.get_span() for tc in matcher0.apply(space.apply(doc))) == {
"is apple",
"apple",
}
# Match any span that contains "this" (case insensitive)
matcher1 = RegexMatchSpan(
rgx=r"this", search=False, full_match=False, longest_match_only=False
)
assert set(tc.get_span() for tc in matcher1.apply(space.apply(doc))) == {
"This is",
"This",
}
matcher = Union(matcher0, matcher1, longest_match_only=False)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"is apple",
"apple",
"This is",
"This",
}
# longest_match_only of each matcher is ignored.
matcher = Union(matcher0, matcher1, longest_match_only=True)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"This is",
"is apple",
}
# Unsupported option should raise an exception
with pytest.raises(Exception):
Union(matcher0, matcher1, long_match_only=False)
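# The tests above read Union as "spans accepted by any child matcher", with
# longest_match_only applied by the composite itself. A minimal sketch of that
# composition (SimpleUnion is a hypothetical stand-in, not Fonduer's Union,
# which additionally handles longest_match_only):
class SimpleUnion:
    def __init__(self, *children):
        self.children = children
    def apply(self, spans):
        spans = list(spans)  # materialize so every child sees all spans
        seen = set()
        for child in self.children:
            for span in child.apply(iter(spans)):
                if span not in seen:  # de-duplicate across children
                    seen.add(span)
                    yield span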
def test_intersect(doc_setup):
"""Test intersect matcher."""
doc = doc_setup
space = MentionNgrams(n_min=1, n_max=3)
tc: TemporarySpanMention
# Match any span that contains "apple"
matcher0 = RegexMatchSpan(
rgx=r"apple", search=True, full_match=True, longest_match_only=False
)
assert set(tc.get_span() for tc in matcher0.apply(space.apply(doc))) == {
"This is apple",
"is apple",
"apple",
}
# Match any span that contains "this" (case insensitive)
matcher1 = RegexMatchSpan(
rgx=r"this", search=False, full_match=False, longest_match_only=False
)
assert set(tc.get_span() for tc in matcher1.apply(space.apply(doc))) == {
"This is apple",
"This is",
"This",
}
# Intersection of matcher0 and matcher1
matcher = Intersect(matcher0, matcher1, longest_match_only=False)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"This is apple"
}
# Intersection of matcher0 and matcher0
matcher = Intersect(matcher0, matcher0, longest_match_only=False)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"This is apple",
"is apple",
"apple",
}
# longest_match_only=True overrides that of child matchers.
matcher = Intersect(matcher0, matcher0, longest_match_only=True)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"This is apple"
}
def test_inverse(doc_setup):
"""Test inverse matcher."""
doc = doc_setup
space = MentionNgrams(n_min=1, n_max=2)
tc: TemporarySpanMention
assert set(tc.get_span() for tc in space.apply(doc)) == {
"This is",
"is apple",
"This",
"is",
"apple",
}
# Match any span that contains "apple" with longest_match_only=False
matcher0 = RegexMatchSpan(
rgx=r"apple", search=True, full_match=True, longest_match_only=False
)
assert set(tc.get_span() for tc in matcher0.apply(space.apply(doc))) == {
"is apple",
"apple",
}
# Take an inverse
matcher = Inverse(matcher0, longest_match_only=False)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"This is",
"This",
"is",
}
# longest_match_only=True
matcher = Inverse(matcher0, longest_match_only=True)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"This is"}
# Match any span that contains "apple" with longest_match_only=True
matcher0 = RegexMatchSpan(
rgx=r"apple", search=True, full_match=True, longest_match_only=True
)
assert set(tc.get_span() for tc in matcher0.apply(space.apply(doc))) == {"is apple"}
# longest_match_only=False on Inverse is in effect.
matcher = Inverse(matcher0, longest_match_only=False)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"This is",
"This",
"is",
}
# longest_match_only=True on Inverse is in effect.
matcher = Inverse(matcher0, longest_match_only=True)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"This is"}
# Check if Inverse raises an error when no child matcher is provided.
with pytest.raises(ValueError):
Inverse()
# Check if Inverse raises an error when two child matchers are provided.
with pytest.raises(ValueError):
Inverse(matcher0, matcher0)
def test_concat(doc_setup):
"""Test Concat matcher."""
doc = doc_setup
space = MentionNgrams(n_min=1, n_max=2)
# Match any span that contains "this"
matcher0 = RegexMatchSpan(
rgx=r"this", search=False, full_match=False, longest_match_only=False
)
# Match any span that contains "is"
matcher1 = RegexMatchSpan(
rgx=r"is", search=False, full_match=False, longest_match_only=False
)
matcher = Concat(matcher0, matcher1)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"This is"}
# Test if matcher raises an error when _f is given non-TemporarySpanMention
with pytest.raises(ValueError):
list(matcher.apply(doc.sentences[0].words))
# Test if an error is raised when the number of child matchers is not 2.
matcher = Concat(matcher0)
with pytest.raises(ValueError):
list(matcher.apply(space.apply(doc)))
# Test with left_required=False
matcher = Concat(matcher0, matcher1, left_required=False)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"This is",
"is apple",
}
# Test with right_required=False
matcher = Concat(matcher0, matcher1, right_required=False)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"This is"}
# Test with permutations=False
matcher = Concat(matcher1, matcher0, permutations=False)
assert set(matcher.apply(space.apply(doc))) == set()
# Test with permutations=True
matcher = Concat(matcher1, matcher0, permutations=True)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"This is"}
# TODO: Add a test for ignore_sep=False
def test_dictionary_match(doc_setup):
"""Test DictionaryMatch matcher."""
doc = doc_setup
space = MentionNgrams(n_min=1, n_max=1)
# Test with a list of str
matcher = DictionaryMatch(d=["this"])
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"This"}
# Test without a dictionary
with pytest.raises(Exception):
DictionaryMatch()
# TODO: test with plural words
matcher = DictionaryMatch(d=["is"], stemmer=PorterStemmer())
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"is"}
# Test if matcher raises an error when _f is given non-TemporarySpanMention
matcher = DictionaryMatch(d=["this"])
with pytest.raises(ValueError):
list(matcher.apply(doc.sentences[0].words))
def test_do_not_use_stemmer_when_UnicodeDecodeError():
"""Test DictionaryMatch when stemmer causes UnicodeDecodeError."""
stemmer = PorterStemmer()
matcher = DictionaryMatch(d=["is"], stemmer=stemmer)
# _stem(w) should return a word stem.
assert matcher._stem("caresses") == "caress"
stemmer.stem = Mock(
side_effect=UnicodeDecodeError("dummycodec", b"\x00\x00", 1, 2, "Dummy !")
)
matcher = DictionaryMatch(d=["is"], stemmer=stemmer)
# _stem(w) should return w as stemmer.stem raises UnicodeDecodeError.
assert matcher._stem("caresses") == "caresses"
def test_lambda_function_matcher(doc_setup):
"""Test DictionaryMatch matcher."""
doc = doc_setup
space = MentionNgrams(n_min=1, n_max=1)
# Test with a lambda function
matcher = LambdaFunctionMatcher(func=lambda x: True)
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"This",
"is",
"apple",
}
# Test if matcher raises an error when _f is given non-TemporarySpanMention
with pytest.raises(ValueError):
list(matcher.apply(doc.sentences[0].words))
    # Test if an error is raised when a func is not provided.
with pytest.raises(Exception):
LambdaFunctionMatcher()
def test_regex_match(doc_setup):
"""Test RegexMatch matcher."""
doc = doc_setup
space = MentionNgrams(n_min=1, n_max=2)
    # A wrong option name should raise an exception
with pytest.raises(Exception):
RegexMatchSpan(regex=r"apple")
# Test if matcher raises an error when _f is given non-TemporarySpanMention
matcher = RegexMatchSpan(rgx=r"apple")
with pytest.raises(ValueError):
list(matcher.apply(doc.sentences[0].words))
matcher = RegexMatchEach(rgx=r"apple")
with pytest.raises(ValueError):
list(matcher.apply(doc.sentences[0].words))
# Test if RegexMatchEach works as expected.
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"apple"}
# Test ignore_case option
matcher = RegexMatchEach(rgx=r"Apple", ignore_case=False)
assert list(matcher.apply(space.apply(doc))) == []
# Test sep option
matcher = RegexMatchSpan(rgx=r"isapple", sep=" ")
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"is apple"}
def test_ner_matchers():
"""Test different ner type matchers."""
# Set up a document
doc = Document(id=1, name="test", stable_id="1::document:0:0")
doc.text = " ".join(
[
"Tim Cook was born in USA in 1960.",
"He is the CEO of Apple.",
"He sold 100 million of iPhone.",
]
)
lingual_parser = SpacyParser("en")
for parts in lingual_parser.split_sentences(doc.text):
parts["document"] = doc
Sentence(**parts)
# Manually attach ner_tags as the result from spacy may fluctuate.
doc.sentences[0].ner_tags = [
"PERSON",
"PERSON",
"O",
"O",
"O",
"GPE",
"O",
"DATE",
"O",
]
doc.sentences[1].ner_tags = ["O", "O", "O", "O", "O", "ORG", "O"]
doc.sentences[2].ner_tags = ["O", "O", "CARDINAL", "CARDINAL", "O", "MISC", "O"]
# the length of words and that of ner_tags should match.
assert len(doc.sentences[0].words) == len(doc.sentences[0].ner_tags)
assert len(doc.sentences[1].words) == len(doc.sentences[1].ner_tags)
space = MentionNgrams(n_min=1, n_max=2)
# Test if PersonMatcher works as expected
matcher = PersonMatcher()
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"Tim Cook"}
# Test if LocationMatcher works as expected
matcher = LocationMatcher()
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"USA"}
# Test if DateMatcher works as expected
matcher = DateMatcher()
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"1960"}
# Test if OrganizationMatcher works as expected
matcher = OrganizationMatcher()
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"Apple"}
# Test if NumberMatcher works as expected
matcher = NumberMatcher()
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {
"100 million"
}
# Test if MiscMatcher works as expected
matcher = MiscMatcher()
assert set(tc.get_span() for tc in matcher.apply(space.apply(doc))) == {"iPhone"}
def test_figure_matcher(doc_setup):
"""Test matchers for figures."""
doc = doc_setup
# Create two dummy figures
Figure(id=2, document=doc)
Figure(id=3, document=doc)
assert len(doc.figures) == 2
space = MentionFigures()
assert len(list(space.apply(doc))) == 2
# Set up a matcher that matches figures with id==2.
matcher = LambdaFunctionFigureMatcher(
        func=lambda tf: tf.figure.id == 2
)
# Test if matcher only matches the first figure.
assert len(list(matcher.apply(space.apply(doc)))) == 1
assert set(tf.figure.id for tf in matcher.apply(space.apply(doc))) == {2}
# The keyword arg should be "func"
with pytest.raises(Exception):
LambdaFunctionFigureMatcher(
            function=lambda tf: tf.figure.id == 2
)
# LambdaFunctionFigureMatcher only supports TemporaryFigureMention.
space = MentionNgrams(n_min=1, n_max=2)
with pytest.raises(ValueError):
list(matcher.apply(space.apply(doc)))
| fonduer-master | tests/candidates/test_matchers.py |
"""Fonduer MLflow unit tests."""
import os
from pathlib import Path
from typing import Any, Dict, List
from unittest.mock import MagicMock
import emmental.meta
import mlflow
import mlflow.pyfunc
import numpy as np
import pandas as pd
import pytest
import yaml
from emmental.model import EmmentalModel
from packaging import version
from snorkel.labeling.model import LabelModel
from fonduer.candidates import CandidateExtractor, MentionExtractor
from fonduer.candidates.models import candidate_subclass, mention_subclass
from fonduer.candidates.models.candidate import candidate_subclasses
from fonduer.candidates.models.mention import mention_subclasses
from fonduer.features.featurizer import Featurizer
from fonduer.features.models import FeatureKey
from fonduer.packaging import FonduerModel, log_model, save_model
from fonduer.packaging.fonduer_model import (
_get_default_conda_env,
_load_candidate_classes,
_load_mention_classes,
_save_candidate_classes,
_save_mention_classes,
)
from fonduer.parser import Parser
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.supervision.labeler import Labeler
from fonduer.supervision.models import LabelKey
from tests.shared.hardware_fonduer_model import HardwareFonduerModel
from tests.shared.hardware_lfs import LF_storage_row
from tests.shared.hardware_matchers import part_matcher, temp_matcher
from tests.shared.hardware_spaces import MentionNgramsPart, MentionNgramsTemp
from tests.shared.hardware_subclasses import Part, PartTemp, Temp
from tests.shared.hardware_throttlers import temp_throttler
artifact_path = "fonduer_model"
@pytest.fixture
def setup_common_components():
"""Set up mention/candidate extractor."""
preprocessor = HTMLDocPreprocessor("tests/data/html/")
parser = Parser(None)
mention_extractor = MentionExtractor(
None,
[Part, Temp],
[MentionNgramsPart(parts_by_doc=None, n_max=3), MentionNgramsTemp(n_max=2)],
[part_matcher, temp_matcher],
)
candidate_extractor = CandidateExtractor(None, [PartTemp], [temp_throttler])
return {
"preprocessor": preprocessor,
"parser": parser,
"mention_extractor": mention_extractor,
"candidate_extractor": candidate_extractor,
}
def test_convert_features_to_matrix():
"""Test _convert_features_to_matrix."""
features: List[Dict[str, Any]] = [
{"keys": ["key1", "key2"], "values": [0.0, 0.1]},
{"keys": ["key1", "key2"], "values": [1.0, 1.1]},
]
key_names: List[str] = ["key1", "key2"]
F = FonduerModel.convert_features_to_matrix(features, key_names)
D = np.array([[0.0, 0.1], [1.0, 1.1]])
assert (F.todense() == D).all()
def test_convert_features_to_matrix_limited_keys():
"""Test _convert_features_to_matrix with limited keys."""
features: List[Dict[str, Any]] = [
{"keys": ["key1", "key2"], "values": [0.0, 0.1]},
{"keys": ["key1", "key2"], "values": [1.0, 1.1]},
]
F = FonduerModel.convert_features_to_matrix(features, ["key1"])
D = np.array([[0.0], [1.0]])
assert (F.todense() == D).all()
def test_convert_labels_to_matrix():
"""Test _convert_labels_to_matrix."""
labels: List[Dict[str, Any]] = [
{"keys": ["key1", "key2"], "values": [0, 1]},
{"keys": ["key1", "key2"], "values": [1, 2]},
]
key_names: List[str] = ["key1", "key2"]
L = FonduerModel.convert_labels_to_matrix(labels, key_names)
D = np.array([[-1, 0], [0, 1]])
assert (L == D).all()
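# A minimal sketch of what the two converters above appear to do (hypothetical
# helpers; FonduerModel's real implementations may differ): features become a
# sparse float matrix with one column per key name, and labels become a dense
# matrix whose values are shifted down by one so that 0 maps to ABSTAIN (-1).
from scipy.sparse import lil_matrix
def features_to_matrix_sketch(rows, key_names):
    index = {k: j for j, k in enumerate(key_names)}
    F = lil_matrix((len(rows), len(key_names)))
    for i, row in enumerate(rows):
        for key, value in zip(row["keys"], row["values"]):
            if key in index:  # keys outside key_names are silently dropped
                F[i, index[key]] = value
    return F.tocsr()
def labels_to_matrix_sketch(rows, key_names):
    index = {k: j for j, k in enumerate(key_names)}
    L = np.full((len(rows), len(key_names)), -1, dtype=np.int64)
    for i, row in enumerate(rows):
        for key, value in zip(row["keys"], row["values"]):
            if key in index:
                L[i, index[key]] = value - 1  # shift so 0 becomes ABSTAIN
    return L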
@pytest.mark.dependency()
def test_save_subclasses():
"""Test if subclasses can be saved."""
mention_class = mention_subclass("test_mention_class")
_save_mention_classes([mention_class], "./")
assert os.path.exists("./mention_classes.pkl")
candidate_class = candidate_subclass("test_candidate_class", [mention_class])
_save_candidate_classes([candidate_class], "./")
assert os.path.exists("./candidate_classes.pkl")
@pytest.mark.dependency(depends=["test_save_subclasses"])
def test_load_subclasses():
"""Test if subclasses can be loaded."""
_load_mention_classes("./")
assert "test_mention_class" in mention_subclasses
mention_class, _ = mention_subclasses["test_mention_class"]
_load_candidate_classes("./")
assert "test_candidate_class" in candidate_subclasses
candidate_class, _ = candidate_subclasses["test_candidate_class"]
assert candidate_class.mentions[0] == mention_class
@pytest.mark.dependency()
def test_save_model(tmp_path: Path, setup_common_components: Dict):
"""Test if a Fonduer model can be saved."""
kwargs = setup_common_components
featurizer = Featurizer(None, [PartTemp])
# Mock the get_keys()
featurizer.get_keys = MagicMock(return_value=[FeatureKey(name="key1")])
emmental.meta.init_config()
save_model(
HardwareFonduerModel(),
os.path.join(tmp_path, artifact_path),
**kwargs,
code_paths=[
"tests"
        ],  # pass a directory name to preserve the directory hierarchy
featurizer=featurizer,
emmental_model=EmmentalModel(),
word2id={"foo": 1},
)
assert os.path.exists(os.path.join(tmp_path, artifact_path))
log_model(
HardwareFonduerModel(),
artifact_path,
**kwargs,
code_paths=[
"tests"
        ],  # pass a directory name to preserve the directory hierarchy
featurizer=featurizer,
emmental_model=EmmentalModel(),
word2id={"foo": 1},
)
@pytest.mark.dependency(depends=["test_save_model"])
def test_load_model(tmp_path: Path):
"""Test if a saved model can be loaded."""
# Load from a saved model
mlflow.pyfunc.load_model(
os.path.join(tmp_path, "../test_save_model0", artifact_path)
)
@pytest.mark.dependency()
def test_save_label_model(tmp_path: Path, setup_common_components: Dict):
"""Test if a Fonduer model with a LabelModel as a classifier."""
kwargs = setup_common_components
labeler = Labeler(None, [PartTemp])
# Mock the get_keys()
labeler.get_keys = MagicMock(return_value=[LabelKey(name="key1")])
lfs = [[LF_storage_row]]
label_models = [LabelModel()]
save_model(
HardwareFonduerModel(),
os.path.join(tmp_path, artifact_path),
**kwargs,
code_paths=[
"tests"
        ],  # pass a directory name to preserve the directory hierarchy
model_type="label",
labeler=labeler,
lfs=lfs,
label_models=label_models,
)
assert os.path.exists(os.path.join(tmp_path, artifact_path))
@pytest.mark.dependency(depends=["test_save_label_model"])
def test_load_label_model(tmp_path: Path):
"""Test if a saved model can be loaded."""
# Load from a saved model
mlflow.pyfunc.load_model(
os.path.join(tmp_path, "../test_save_label_model0", artifact_path)
)
def test_save_with_conda_yaml(tmp_path: Path, setup_common_components: Dict):
"""Test if a model can be saved with a conda yaml file."""
kwargs = setup_common_components
labeler = Labeler(None, [PartTemp])
# Mock the get_keys()
labeler.get_keys = MagicMock(return_value=[LabelKey(name="key1")])
lfs = [[LF_storage_row]]
label_models = [LabelModel()]
# Create a conda yaml file
with open(tmp_path.joinpath("my_conda.yaml"), "w") as f:
yaml.dump(_get_default_conda_env(), f)
# Save a model with a conda yaml file.
save_model(
HardwareFonduerModel(),
os.path.join(tmp_path, artifact_path),
**kwargs,
conda_env=tmp_path.joinpath("my_conda.yaml"),
code_paths=[
"tests"
        ],  # pass a directory name to preserve the directory hierarchy
model_type="label",
labeler=labeler,
lfs=lfs,
label_models=label_models,
)
    # The provided conda yaml file is saved as "conda.yaml".
assert os.path.exists(os.path.join(tmp_path, artifact_path, "conda.yaml"))
def test_predict(mocker, setup_common_components: Dict):
"""Test if a Fonduer model can predict."""
kwargs = setup_common_components
featurizer = Featurizer(None, [PartTemp])
# Mock the get_keys()
featurizer.get_keys = MagicMock(return_value=[FeatureKey(name="key1")])
emmental.meta.init_config()
# Log the model with FonduerModel()
log_model(
FonduerModel(),
artifact_path,
**kwargs,
code_paths=[
"tests"
        ],  # pass a directory name to preserve the directory hierarchy
featurizer=featurizer,
emmental_model=EmmentalModel(),
word2id={"foo": 1},
)
# Load the model
fonduer_model = mlflow.pyfunc.load_model(
os.path.join(mlflow.active_run().info.artifact_uri, artifact_path)
)
with pytest.raises(NotImplementedError):
_ = fonduer_model.predict(
pd.DataFrame(data={"html_path": ["tests/data/html/112823.html"]})
)
# Log the model with HardwareFonduerModel()
log_model(
HardwareFonduerModel(),
artifact_path,
**kwargs,
code_paths=[
"tests"
        ],  # pass a directory name to preserve the directory hierarchy
featurizer=featurizer,
emmental_model=EmmentalModel(),
word2id={"foo": 1},
)
# Load the model
fonduer_model = mlflow.pyfunc.load_model(
os.path.join(mlflow.active_run().info.artifact_uri, artifact_path)
)
# Mock the _classify as we don't test the implementation of _classify here.
mock_output = pd.DataFrame(data={"col1": ["val1"], "col2": ["val2"]})
# Input both html_path and pdf_html
if version.parse(mlflow.__version__) >= version.parse("1.9.0"):
fonduer_model._model_impl._classify = MagicMock(return_value=mock_output)
spy = mocker.spy(fonduer_model._model_impl, "_process")
else:
fonduer_model._classify = MagicMock(return_value=mock_output)
spy = mocker.spy(fonduer_model, "_process")
output = fonduer_model.predict(
pd.DataFrame(
data={
"html_path": ["tests/data/html/112823.html"],
"pdf_path": ["tests/data/pdf/112823.pdf"],
}
)
)
spy.assert_called_once_with(
"tests/data/html/112823.html", "tests/data/pdf/112823.pdf"
)
assert output.equals(
pd.DataFrame(
data={
"col1": ["val1"],
"col2": ["val2"],
"html_path": ["tests/data/html/112823.html"],
}
)
)
# Input only html_path
spy.reset_mock()
output = fonduer_model.predict(
pd.DataFrame(data={"html_path": ["tests/data/html/112823.html"]})
)
spy.assert_called_once_with("tests/data/html/112823.html", None)
assert output.equals(
pd.DataFrame(
data={
"col1": ["val1"],
"col2": ["val2"],
"html_path": ["tests/data/html/112823.html"],
}
)
)
# Input html_path that does not exist
spy.reset_mock()
with pytest.raises(ValueError):
_ = fonduer_model.predict(
pd.DataFrame(data={"html_path": ["tests/data/html/foo.html"]})
)
# Test when _classify produces multiple relations per doc.
mock_output = pd.DataFrame(data={"col0": ["00", "10"], "col1": ["01", "11"]})
if version.parse(mlflow.__version__) >= version.parse("1.9.0"):
fonduer_model._model_impl._classify = MagicMock(return_value=mock_output)
else:
fonduer_model._classify = MagicMock(return_value=mock_output)
output = fonduer_model.predict(
pd.DataFrame(data={"html_path": ["tests/data/html/112823.html"]})
)
assert output.equals(
pd.DataFrame(
data={
"col0": ["00", "10"],
"col1": ["01", "11"],
"html_path": [
"tests/data/html/112823.html",
"tests/data/html/112823.html",
],
}
)
)
| fonduer-master | tests/packaging/test_fonduer_model.py |
"""Fonduer packaging unit tests."""
| fonduer-master | tests/packaging/__init__.py |
"""Fonduer learning utils' unit tests."""
from fonduer.candidates.models import Candidate
from fonduer.learning.utils import confusion_matrix
def test_confusion_matrix():
"""Test the confusion matrix."""
# Synthesize candidates
cand1 = Candidate(id=1, type="type")
cand2 = Candidate(id=2, type="type")
cand3 = Candidate(id=3, type="type")
cand4 = Candidate(id=4, type="type")
# pred and gold as set
pred = {cand1, cand2, cand3}
gold = {cand1, cand2, cand4}
(TP, FP, FN) = confusion_matrix(pred, gold)
assert TP == {cand1, cand2}
assert FP == {cand3}
assert FN == {cand4}
# pred as list
pred = [cand1, cand2, cand3]
(TP, FP, FN) = confusion_matrix(pred, gold)
assert TP == {cand1, cand2}
assert FP == {cand3}
assert FN == {cand4}
# test if the order of elements does not affect the output
pred = [cand3, cand2, cand1]
(TP, FP, FN) = confusion_matrix(pred, gold)
assert TP == {cand1, cand2}
assert FP == {cand3}
assert FN == {cand4}
# Assume the followings are entities
pred = {"1", "2", "3"}
gold = {"1", "2", "4"}
(TP, FP, FN) = confusion_matrix(pred, gold)
assert TP == {"1", "2"}
assert FP == {"3"}
assert FN == {"4"}
| fonduer-master | tests/learning/test_utils.py |
"""Fonduer learning unit tests."""
| fonduer-master | tests/learning/__init__.py |
"""Fonduer featurization unit tests."""
import itertools
import logging
import pytest
from fonduer.candidates import MentionNgrams
from fonduer.candidates.candidates import CandidateExtractorUDF
from fonduer.candidates.mentions import MentionExtractorUDF
from fonduer.candidates.models import candidate_subclass, mention_subclass
from fonduer.features import FeatureExtractor
from fonduer.features.featurizer import FeaturizerUDF
from tests.candidates.test_candidates import parse_doc
from tests.shared.hardware_matchers import part_matcher, temp_matcher, volt_matcher
logger = logging.getLogger(__name__)
def test_unary_relation_feature_extraction():
"""Test extracting unary candidates from mentions from documents."""
docs_path = "tests/data/html/112823.html"
pdf_path = "tests/data/pdf/"
# Parsing
doc = parse_doc(docs_path, "112823", pdf_path)
assert len(doc.sentences) == 799
# Mention Extraction
part_ngrams = MentionNgrams(n_max=1)
Part = mention_subclass("Part")
mention_extractor_udf = MentionExtractorUDF([Part], [part_ngrams], [part_matcher])
doc = mention_extractor_udf.apply(doc)
assert doc.name == "112823"
assert len(doc.parts) == 62
part = doc.parts[0]
logger.info(f"Part: {part.context}")
# Candidate Extraction
PartRel = candidate_subclass("PartRel", [Part])
candidate_extractor_udf = CandidateExtractorUDF([PartRel], None, False, False, True)
doc = candidate_extractor_udf.apply(doc, split=0)
# Featurization based on default feature library
featurizer_udf = FeaturizerUDF([PartRel], FeatureExtractor())
    # Test featurization with the default feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_default_feats = len(key_set)
# Featurization with only textual feature
feature_extractors = FeatureExtractor(features=["textual"])
featurizer_udf = FeaturizerUDF([PartRel], feature_extractors=feature_extractors)
    # Test featurization with the textual feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_textual_features = len(key_set)
# Featurization with only tabular feature
feature_extractors = FeatureExtractor(features=["tabular"])
featurizer_udf = FeaturizerUDF([PartRel], feature_extractors=feature_extractors)
    # Test featurization with the tabular feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_tabular_features = len(key_set)
# Featurization with only structural feature
feature_extractors = FeatureExtractor(features=["structural"])
featurizer_udf = FeaturizerUDF([PartRel], feature_extractors=feature_extractors)
    # Test featurization with the structural feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_structural_features = len(key_set)
# Featurization with only visual feature
feature_extractors = FeatureExtractor(features=["visual"])
featurizer_udf = FeaturizerUDF([PartRel], feature_extractors=feature_extractors)
    # Test featurization with the visual feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_visual_features = len(key_set)
assert (
n_default_feats
== n_textual_features
+ n_tabular_features
+ n_structural_features
+ n_visual_features
)
def test_binary_relation_feature_extraction():
"""Test extracting candidates from mentions from documents."""
docs_path = "tests/data/html/112823.html"
pdf_path = "tests/data/pdf/"
# Parsing
doc = parse_doc(docs_path, "112823", pdf_path)
assert len(doc.sentences) == 799
# Mention Extraction
part_ngrams = MentionNgrams(n_max=1)
temp_ngrams = MentionNgrams(n_max=1)
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
mention_extractor_udf = MentionExtractorUDF(
[Part, Temp], [part_ngrams, temp_ngrams], [part_matcher, temp_matcher]
)
doc = mention_extractor_udf.apply(doc)
assert len(doc.parts) == 62
assert len(doc.temps) == 16
part = doc.parts[0]
temp = doc.temps[0]
logger.info(f"Part: {part.context}")
logger.info(f"Temp: {temp.context}")
# Candidate Extraction
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
candidate_extractor_udf = CandidateExtractorUDF(
[PartTemp], None, False, False, True
)
doc = candidate_extractor_udf.apply(doc, split=0)
# Manually set id as it is not set automatically b/c a database is not used.
    for i, cand in enumerate(doc.part_temps):
        cand.id = i
n_cands = len(doc.part_temps)
# Featurization based on default feature library
featurizer_udf = FeaturizerUDF([PartTemp], FeatureExtractor())
    # Test featurization with the default feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_default_feats = len(key_set)
# Example feature extractor
def feat_ext(candidates):
candidates = candidates if isinstance(candidates, list) else [candidates]
for candidate in candidates:
yield candidate.id, f"cand_id_{candidate.id}", 1
# Featurization with one extra feature extractor
feature_extractors = FeatureExtractor(customize_feature_funcs=[feat_ext])
featurizer_udf = FeaturizerUDF([PartTemp], feature_extractors=feature_extractors)
    # Test featurization with the default feature library plus one extra feature extractor
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_default_w_customized_features = len(key_set)
# Example spurious feature extractor
def bad_feat_ext(candidates):
raise RuntimeError()
# Featurization with a spurious feature extractor
feature_extractors = FeatureExtractor(customize_feature_funcs=[bad_feat_ext])
featurizer_udf = FeaturizerUDF([PartTemp], feature_extractors=feature_extractors)
    # Test that featurization raises an error with a spurious feature extractor
logger.info("Featurizing with a spurious feature extractor...")
with pytest.raises(RuntimeError):
features = featurizer_udf.apply(doc)
# Featurization with only textual feature
feature_extractors = FeatureExtractor(features=["textual"])
featurizer_udf = FeaturizerUDF([PartTemp], feature_extractors=feature_extractors)
    # Test featurization with the textual feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_textual_features = len(key_set)
# Featurization with only tabular feature
feature_extractors = FeatureExtractor(features=["tabular"])
featurizer_udf = FeaturizerUDF([PartTemp], feature_extractors=feature_extractors)
    # Test featurization with the tabular feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_tabular_features = len(key_set)
# Featurization with only structural feature
feature_extractors = FeatureExtractor(features=["structural"])
featurizer_udf = FeaturizerUDF([PartTemp], feature_extractors=feature_extractors)
    # Test featurization with the structural feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_structural_features = len(key_set)
# Featurization with only visual feature
feature_extractors = FeatureExtractor(features=["visual"])
featurizer_udf = FeaturizerUDF([PartTemp], feature_extractors=feature_extractors)
    # Test featurization with the visual feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_visual_features = len(key_set)
assert (
n_default_feats
== n_textual_features
+ n_tabular_features
+ n_structural_features
+ n_visual_features
)
assert n_default_w_customized_features == n_default_feats + n_cands
def test_multinary_relation_feature_extraction():
"""Test extracting candidates from mentions from documents."""
docs_path = "tests/data/html/112823.html"
pdf_path = "tests/data/pdf/"
# Parsing
doc = parse_doc(docs_path, "112823", pdf_path)
assert len(doc.sentences) == 799
# Mention Extraction
part_ngrams = MentionNgrams(n_max=1)
temp_ngrams = MentionNgrams(n_max=1)
volt_ngrams = MentionNgrams(n_max=1)
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
Volt = mention_subclass("Volt")
mention_extractor_udf = MentionExtractorUDF(
[Part, Temp, Volt],
[part_ngrams, temp_ngrams, volt_ngrams],
[part_matcher, temp_matcher, volt_matcher],
)
doc = mention_extractor_udf.apply(doc)
assert len(doc.parts) == 62
assert len(doc.temps) == 16
assert len(doc.volts) == 33
part = doc.parts[0]
temp = doc.temps[0]
volt = doc.volts[0]
logger.info(f"Part: {part.context}")
logger.info(f"Temp: {temp.context}")
logger.info(f"Volt: {volt.context}")
# Candidate Extraction
PartTempVolt = candidate_subclass("PartTempVolt", [Part, Temp, Volt])
candidate_extractor_udf = CandidateExtractorUDF(
[PartTempVolt], None, False, False, True
)
doc = candidate_extractor_udf.apply(doc, split=0)
# Manually set id as it is not set automatically b/c a database is not used.
    for i, cand in enumerate(doc.part_temp_volts):
        cand.id = i
n_cands = len(doc.part_temp_volts)
# Featurization based on default feature library
featurizer_udf = FeaturizerUDF([PartTempVolt], FeatureExtractor())
    # Test featurization with the default feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_default_feats = len(key_set)
# Example feature extractor
def feat_ext(candidates):
candidates = candidates if isinstance(candidates, list) else [candidates]
for candidate in candidates:
yield candidate.id, f"cand_id_{candidate.id}", 1
# Featurization with one extra feature extractor
feature_extractors = FeatureExtractor(customize_feature_funcs=[feat_ext])
featurizer_udf = FeaturizerUDF(
[PartTempVolt], feature_extractors=feature_extractors
)
    # Test featurization with the default feature library plus one extra feature extractor
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_default_w_customized_features = len(key_set)
# Example spurious feature extractor
def bad_feat_ext(candidates):
raise RuntimeError()
# Featurization with a spurious feature extractor
feature_extractors = FeatureExtractor(customize_feature_funcs=[bad_feat_ext])
featurizer_udf = FeaturizerUDF(
[PartTempVolt], feature_extractors=feature_extractors
)
    # Test that featurization raises an error with a spurious feature extractor
logger.info("Featurizing with a spurious feature extractor...")
with pytest.raises(RuntimeError):
features = featurizer_udf.apply(doc)
# Featurization with only textual feature
feature_extractors = FeatureExtractor(features=["textual"])
featurizer_udf = FeaturizerUDF(
[PartTempVolt], feature_extractors=feature_extractors
)
    # Test featurization with the textual feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_textual_features = len(key_set)
# Featurization with only tabular feature
feature_extractors = FeatureExtractor(features=["tabular"])
featurizer_udf = FeaturizerUDF(
[PartTempVolt], feature_extractors=feature_extractors
)
    # Test featurization with the tabular feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_tabular_features = len(key_set)
# Featurization with only structural feature
feature_extractors = FeatureExtractor(features=["structural"])
featurizer_udf = FeaturizerUDF(
[PartTempVolt], feature_extractors=feature_extractors
)
    # Test featurization with the structural feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_structural_features = len(key_set)
# Featurization with only visual feature
feature_extractors = FeatureExtractor(features=["visual"])
featurizer_udf = FeaturizerUDF(
[PartTempVolt], feature_extractors=feature_extractors
)
    # Test featurization with the visual feature library
features_list = featurizer_udf.apply(doc)
features = itertools.chain.from_iterable(features_list)
key_set = set([key for feature in features for key in feature["keys"]])
n_visual_features = len(key_set)
assert (
n_default_feats
== n_textual_features
+ n_tabular_features
+ n_structural_features
+ n_visual_features
)
assert n_default_w_customized_features == n_default_feats + n_cands
| fonduer-master | tests/features/test_features.py |
"""Fonduer feature unit tests."""
| fonduer-master | tests/features/__init__.py |
"""Fonduer table utils' tests."""
import logging
from fonduer.utils.utils_table import _min_range_diff
def test_min_range_diff(caplog):
"""Test the minimum range calculation for table utils."""
caplog.set_level(logging.INFO)
assert _min_range_diff(((0, 5), (0, 5))) == 0
assert _min_range_diff(((1, 5), (3, 6))) == 0
assert _min_range_diff(((1, 2), (2, 3))) == 0
assert _min_range_diff(((3, 6), (1, 4))) == 0
assert _min_range_diff(((1, 2), (3, 4))) == 1
assert _min_range_diff(((3, 4), (1, 2))) == 1
assert _min_range_diff(((3, 4), (1, 2)), absolute=False) == 1
assert _min_range_diff(((1, 2), (3, 4)), absolute=False) == -1
assert _min_range_diff(((0, 5), (0, 5), (0, 5))) == 0
assert _min_range_diff(((1, 5), (3, 6), (2, 4))) == 0
assert _min_range_diff(((3, 4), (1, 2), (4, 5))) == 1
assert _min_range_diff(((1, 2), (3, 4), (5, 6)), absolute=False) == -1
assert _min_range_diff(((1, 2), (3, 4), (4, 6)), absolute=False) == 0
assert _min_range_diff(((1, 2), (3, 4), (4, 6), (5, 8)), absolute=False) == 0
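# One reading consistent with every assertion above (a sketch, not Fonduer's
# implementation of _min_range_diff): pick one coordinate from each inclusive
# range, look at the differences between consecutive picks, and return the
# difference of smallest magnitude over all picks; `absolute` controls whether
# the sign is kept.
from itertools import product
def _min_range_diff_sketch(ranges, absolute=True):
    best = None
    for c in product(*[range(s, e + 1) for s, e in ranges]):
        for x, y in zip(c, c[1:]):  # differences between consecutive picks
            d = abs(x - y) if absolute else x - y
            if best is None or abs(d) < abs(best):
                best = d
    return best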
| fonduer-master | tests/utils/test_utils_table.py |
"""Fonduer visualizer unit tests."""
from fonduer.candidates import MentionNgrams
from fonduer.candidates.candidates import CandidateExtractorUDF
from fonduer.candidates.matchers import OrganizationMatcher
from fonduer.candidates.mentions import MentionExtractorUDF
from fonduer.candidates.models import candidate_subclass, mention_subclass
from tests.candidates.test_candidates import parse_doc
def test_visualizer():
"""Unit test of visualizer using the md document."""
from fonduer.utils.visualizer import Visualizer, get_box # noqa
docs_path = "tests/data/html_simple/md.html"
pdf_path = "tests/data/pdf_simple/"
# Grab the md document
doc = parse_doc(docs_path, "md", pdf_path)
assert doc.name == "md"
organization_ngrams = MentionNgrams(n_max=1)
Org = mention_subclass("Org")
organization_matcher = OrganizationMatcher()
mention_extractor_udf = MentionExtractorUDF(
[Org], [organization_ngrams], [organization_matcher]
)
doc = mention_extractor_udf.apply(doc)
Organization = candidate_subclass("Organization", [Org])
candidate_extractor_udf = CandidateExtractorUDF(
[Organization], None, False, False, True
)
doc = candidate_extractor_udf.apply(doc, split=0)
# Take one candidate
cand = doc.organizations[0]
pdf_path = "tests/data/pdf_simple"
vis = Visualizer(pdf_path)
# Test bounding boxes
boxes = [get_box(mention.context) for mention in cand.get_mentions()]
for box in boxes:
assert box.top <= box.bottom
assert box.left <= box.right
assert boxes == [mention.context.get_bbox() for mention in cand.get_mentions()]
# Test visualizer
vis.display_candidates([cand])
def test_get_pdf_dim():
"""Test get_pdf_dim on different pages."""
from fonduer.utils.visualizer import get_pdf_dim # noqa
assert get_pdf_dim("tests/data/pdf/BC546A_Series_B14-521026.pdf") == (729, 1032)
assert get_pdf_dim("tests/data/pdf/BC546A_Series_B14-521026.pdf", page=1) == (
729,
1032,
)
assert get_pdf_dim("tests/data/pdf/BC546A_Series_B14-521026.pdf", page=6) == (
612,
792,
)
| fonduer-master | tests/utils/test_visualizer.py |
"""Fonduer unit tests' utils."""
| fonduer-master | tests/utils/__init__.py |
"""Fonduer load config unit tests."""
import os
from fonduer.utils.config import get_config
def test_load_config():
"""Simple sanity check for loading feature config."""
# Check that default is loaded
defaults = get_config()
assert defaults["featurization"]["textual"]["window_feature"]["size"] == 3
assert defaults["learning"]["LSTM"]["emb_dim"] == 100
assert defaults["learning"]["LSTM"]["bias"] is False
# Check that file is loaded if present
settings = get_config(os.path.dirname(__file__))
assert settings["featurization"]["textual"]["window_feature"]["size"] == 8
assert settings["learning"]["LSTM"]["bias"] is False
# Check that defaults are used for unspecified settings
assert (
settings["featurization"]["tabular"]["unary_features"]["get_head_ngrams"]["max"]
== 2
)
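# The fallback behavior above is the classic "recursive merge of a settings
# dict over defaults". A minimal sketch of that merge (hypothetical helper;
# fonduer.utils.config's internals, settings file name, and loading logic may
# differ):
def deep_merge(defaults, overrides):
    merged = dict(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)  # recurse into sections
        else:
            merged[key] = value  # scalar override wins
    return merged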
| fonduer-master | tests/utils/test_config.py |
"""Fonduer UDF utils' unit tests."""
import logging
import numpy as np
from fonduer.utils.utils_udf import shift_label_matrix, unshift_label_matrix
def test_shift_label_matrix(caplog):
"""Test the label matrix shifter and unshifter."""
caplog.set_level(logging.INFO)
"""
L is a dense label matrix (ABSTAIN as -1) with values:
-1 0
1 -1
"""
L = np.array([[-1, 0], [1, -1]])
"""
L_sparse is a sparse label matrix (ABSTAIN as 0)
0 1
2 0
"""
L_sparse = shift_label_matrix(L)
assert np.array_equal(L, unshift_label_matrix(L_sparse))
assert L_sparse.count_nonzero() == 2
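# Given the comments above, shifting adds 1 to every label (ABSTAIN moves from
# -1 to 0, which sparse formats store implicitly) and unshifting densifies and
# subtracts 1. A minimal sketch (hypothetical helpers, not the functions under
# test, which may differ in sparse format and dtype):
from scipy.sparse import csr_matrix
def shift_sketch(L):
    return csr_matrix(L + 1)  # only non-ABSTAIN entries remain non-zero
def unshift_sketch(L_sparse):
    return np.asarray(L_sparse.todense()) - 1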
| fonduer-master | tests/utils/test_utils_udf.py |
"""Fonduer data model's visual utils' unit tests."""
import pytest
from fonduer.candidates.mentions import MentionNgrams
from fonduer.parser.lingual_parser.spacy_parser import SpacyParser
from fonduer.parser.models import Document, Sentence
from fonduer.utils.data_model_utils.visual import get_horz_ngrams, get_vert_ngrams
@pytest.fixture()
def doc_setup():
"""Set up document."""
doc = Document(id=1, name="test", stable_id="1::document:0:0")
doc.text = "This is apple. That is orange. Where is banaba? I like Apple."
lingual_parser = SpacyParser("en")
# Split sentences
for parts in lingual_parser.split_sentences(doc.text):
parts["document"] = doc
Sentence(**parts)
# Enrich sentences
for _ in lingual_parser.enrich_sentences_with_NLP(doc.sentences):
pass
# Pick one sentence and add visual information
# so that all the words get aligned horizontally.
sentence: Sentence = doc.sentences[0]
sentence.page = [1, 1, 1, 1]
sentence.top = [0, 0, 0, 0]
sentence.bottom = [10, 10, 10, 10]
sentence.left = [0, 10, 20, 30]
sentence.right = [10, 20, 30, 40]
# Assume the 2nd sentence is horizontally aligned with 1st.
sentence: Sentence = doc.sentences[1]
sentence.page = [1, 1, 1, 1]
sentence.top = [0, 0, 0, 0]
sentence.bottom = [10, 10, 10, 10]
sentence.left = [40, 50, 60, 70]
sentence.right = [50, 60, 70, 80]
# Assume the 3rd sentence is vertically aligned with 1st.
sentence: Sentence = doc.sentences[2]
sentence.page = [1, 1, 1, 1]
sentence.top = [10, 10, 10, 10]
sentence.bottom = [20, 20, 20, 20]
sentence.left = [0, 10, 20, 30]
sentence.right = [10, 20, 30, 40]
# Assume the 4th sentence is in 2nd page.
sentence: Sentence = doc.sentences[3]
sentence.page = [2, 2, 2, 2]
sentence.top = [0, 0, 0, 0]
sentence.bottom = [10, 10, 10, 10]
sentence.left = [0, 10, 20, 30]
sentence.right = [10, 20, 30, 40]
return doc
def test_get_vert_ngrams(doc_setup):
"""Test if get_vert_ngrams works."""
doc = doc_setup
sentence: Sentence = doc.sentences[0]
# Assert this sentence is visual.
assert sentence.is_visual()
# Assert this sentence is not tabular.
assert not sentence.is_tabular()
# Create 1-gram span mentions
space = MentionNgrams(n_min=1, n_max=1)
mentions = [tc for tc in space.apply(doc)]
assert len(mentions) == len([word for sent in doc.sentences for word in sent.words])
# Pick "apple" span mention.
mention = mentions[2]
assert mention.get_span() == "apple"
# from_sentence=True (ie ngrams from all aligned Sentences but its Sentence)
ngrams = list(get_vert_ngrams(mention))
assert ngrams == ["where", "is", "banaba", "?"]
def test_get_horz_ngrams(doc_setup):
"""Test if get_horz_ngrams works."""
doc = doc_setup
sentence: Sentence = doc.sentences[0]
# Assert this sentence is visual.
assert sentence.is_visual()
# Assert this sentence is not tabular.
assert not sentence.is_tabular()
# Create 1-gram span mentions
space = MentionNgrams(n_min=1, n_max=1)
mentions = [tc for tc in space.apply(doc)]
assert len(mentions) == len([word for sent in doc.sentences for word in sent.words])
# Pick "apple" span mention.
mention = mentions[2]
assert mention.get_span() == "apple"
# from_sentence=True (ie ngrams from all aligned Sentences but its Sentence)
ngrams = list(get_horz_ngrams(mention))
assert ngrams == ["that", "is", "orange", "."]
# Check the from_sentence=False (ie all aligned ngrams but itself)
assert mention.get_span() == "apple"
ngrams = list(get_horz_ngrams(mention, from_sentence=False))
assert ngrams == ["this", "is", ".", "that", "is", "orange", "."]
# Check attrib="lemmas"
ngrams = list(get_horz_ngrams(mention, attrib="lemmas"))
assert ngrams == ["that", "be", "orange", "."]
# Check attrib="pos_tags"
ngrams = list(get_horz_ngrams(mention, attrib="pos_tags"))
assert ngrams == ["dt", "vbz", "jj", "."]
# Check lower option
ngrams = list(get_horz_ngrams(mention, lower=False, from_sentence=False))
assert ngrams == ["This", "is", ".", "That", "is", "orange", "."]
# Pick "This" span mention.
mention = mentions[0]
assert mention.get_span() == "This"
ngrams = list(get_horz_ngrams(mention, from_sentence=False))
assert ngrams == ["is", "apple", ".", "that", "is", "orange", "."]
# Check n_max=2
ngrams = list(get_horz_ngrams(mention, n_max=2, from_sentence=False))
assert ngrams == [
"is apple",
"apple.",
"is",
"apple",
".",
"that is",
"is orange",
"orange.",
"that",
"is",
"orange",
".",
]
def test_get_ngrams_that_match_in_string(doc_setup):
"""Test if ngrams can be obtained even if they match mention's span in string."""
doc = doc_setup
sentence: Sentence = doc.sentences[0]
# Assert this sentence is visual.
assert sentence.is_visual()
# Assert this sentence is not tabular.
assert not sentence.is_tabular()
# Create 1-gram span mentions
space = MentionNgrams(n_min=1, n_max=1)
mentions = [tc for tc in space.apply(doc)]
assert len(mentions) == len([word for sent in doc.sentences for word in sent.words])
# Pick "is" from the apple sentence that matches "is" in the orange sentence.
mention = mentions[1]
assert mention.get_span() == "is"
# Check if the "is" in the orange sentence can be obtained.
ngrams = list(get_horz_ngrams(mention, from_sentence=False))
assert "is" in ngrams
| fonduer-master | tests/utils/data_model_utils/test_visual.py |
"""Fonduer data model's tabular utils' unit tests."""
import pytest
from fonduer.candidates import MentionNgrams
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser.visual_parser import PdfVisualParser
from fonduer.utils.data_model_utils.tabular import (
get_aligned_ngrams,
get_cell_ngrams,
get_col_ngrams,
get_head_ngrams,
get_max_col_num,
get_max_row_num,
get_min_col_num,
get_min_row_num,
get_neighbor_cell_ngrams,
get_neighbor_sentence_ngrams,
get_row_ngrams,
get_sentence_ngrams,
is_tabular_aligned,
same_cell,
same_col,
same_row,
same_sentence,
)
from tests.parser.test_parser import get_parser_udf
@pytest.fixture()
def mention_setup():
"""Set up mentions."""
docs_path = "tests/data/html_simple/md.html"
pdf_path = "tests/data/pdf_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
    doc = next(iter(preprocessor))
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
language="en",
)
doc = parser_udf.apply(doc)
# Create 1-gram span mentions
space = MentionNgrams(n_min=1, n_max=1)
mentions = [tc for tc in space.apply(doc)]
return mentions
def test_same_row(mention_setup):
"""Test the same_row function."""
mentions = mention_setup
# Same row
assert mentions[51].get_span() == "Joan"
assert mentions[52].get_span() == "saag"
assert same_row((mentions[51], mentions[52]))
# Different row
assert mentions[57].get_span() == "Sally"
assert not same_row((mentions[51], mentions[57]))
def test_same_col(mention_setup):
"""Test the same_col function."""
mentions = mention_setup
# Different column
assert mentions[51].get_span() == "Joan"
assert mentions[52].get_span() == "saag"
assert not same_col((mentions[51], mentions[52]))
# Same column
assert mentions[57].get_span() == "Sally"
assert same_col((mentions[51], mentions[57]))
def test_is_tabular_aligned(mention_setup):
"""Test the is_tabular_aligned function."""
mentions = mention_setup
# tabular_aligned
assert mentions[51].get_span() == "Joan"
assert mentions[52].get_span() == "saag"
assert is_tabular_aligned((mentions[51], mentions[52]))
# not tabular_aligned
assert mentions[58].get_span() == "vindaloo"
assert not is_tabular_aligned((mentions[51], mentions[58]))
def test_same_cell(mention_setup):
"""Test the same_cell function."""
mentions = mention_setup
# Different cell
assert mentions[51].get_span() == "Joan"
assert mentions[52].get_span() == "saag"
assert not same_cell((mentions[51], mentions[52]))
# Same cell
assert mentions[53].get_span() == "paneer"
assert same_cell((mentions[52], mentions[53]))
def test_same_sentence(mention_setup):
"""Test the same_sentence function."""
mentions = mention_setup
# Same sentence
assert mentions[0].get_span() == "Sample"
assert mentions[1].get_span() == "Markdown"
assert same_sentence((mentions[0], mentions[1]))
# Different sentence
assert mentions[2].get_span() == "This"
assert not same_sentence((mentions[0], mentions[2]))
def test_get_min_max_col_num(mention_setup):
"""Test the get_min_col_num and get_max_col_num function."""
mentions = mention_setup
# Non tabular mention
assert mentions[0].get_span() == "Sample"
assert not get_max_col_num(mentions[0])
assert not get_min_col_num(mentions[0])
# Tabular mention
assert mentions[51].get_span() == "Joan"
assert get_min_col_num(mentions[51]) == 0
# TODO: it'd be better to use the mention that spans multiple cols
assert get_max_col_num(mentions[51]) == 0
def test_get_min_max_row_num(mention_setup):
"""Test the get_min_row_num and get_max_row_num function."""
mentions = mention_setup
# Non tabular mention
assert mentions[0].get_span() == "Sample"
assert not get_max_row_num(mentions[0])
assert not get_min_row_num(mentions[0])
# Tabular mention
assert mentions[51].get_span() == "Joan"
assert get_min_row_num(mentions[51]) == 1
assert get_max_row_num(mentions[51]) == 1
def test_get_sentence_ngrams(mention_setup):
"""Test the get_sentence_ngrams function."""
mentions = mention_setup
assert mentions[5].get_span() == "basic"
assert list(get_sentence_ngrams(mentions[5])) == [
"this",
"is",
"some",
",",
"sample",
"markdown",
".",
]
def test_get_neighbor_sentence_ngrams(mention_setup):
"""Test the get_neighbor_sentence_ngrams function."""
mentions = mention_setup
assert mentions[5].get_span() == "basic"
assert list(get_neighbor_sentence_ngrams(mentions[5])) == ["sample", "markdown"] + [
"second",
"heading",
]
def test_get_cell_ngrams(mention_setup):
"""Test the get_cell_ngrams function."""
mentions = mention_setup
assert mentions[52].get_span() == "saag"
assert list(get_cell_ngrams(mentions[52])) == ["paneer"]
# TODO: test get_cell_ngrams when there are other sentences in the cell.
# when a mention is not tabular
assert mentions[0].get_span() == "Sample"
assert list(get_cell_ngrams(mentions[0])) == []
def test_get_neighbor_cell_ngrams(mention_setup):
"""Test the get_neighbor_cell_ngrams function."""
mentions = mention_setup
assert mentions[52].get_span() == "saag"
# No directions
assert list(get_neighbor_cell_ngrams(mentions[52])) == ["paneer"] + ["joan"] + [
"medium"
] + ["lunch", "order"] + ["vindaloo"]
# directions=True
assert list(get_neighbor_cell_ngrams(mentions[52], directions=True)) == [
"paneer",
("joan", "LEFT"),
("medium", "RIGHT"),
("lunch", "UP"),
("order", "UP"),
("vindaloo", "DOWN"),
]
# when a mention is not tabular
assert mentions[0].get_span() == "Sample"
assert list(get_neighbor_cell_ngrams(mentions[0])) == []
def test_get_row_ngrams(mention_setup):
"""Test the get_row_ngrams function."""
mentions = mention_setup
assert mentions[52].get_span() == "saag"
assert list(get_row_ngrams(mentions[52])) == ["paneer"] + ["joan"] + ["medium"] + [
"$",
"11",
]
# when a mention is not tabular
assert mentions[0].get_span() == "Sample"
assert list(get_row_ngrams(mentions[0])) == []
def test_get_col_ngrams(mention_setup):
"""Test the get_col_ngrams function."""
mentions = mention_setup
assert mentions[52].get_span() == "saag"
assert list(get_col_ngrams(mentions[52])) == ["paneer"] + ["lunch", "order"] + [
"vindaloo"
] + ["lamb", "madras"]
# when a mention is not tabular
assert mentions[0].get_span() == "Sample"
assert list(get_col_ngrams(mentions[0])) == []
def test_get_aligned_ngrams(mention_setup):
"""Test the get_aligned_ngrams function."""
mentions = mention_setup
assert mentions[52].get_span() == "saag"
# TODO: ["paneer"] appears twice. Is this expected result?
assert list(get_aligned_ngrams(mentions[52])) == ["paneer"] + ["joan"] + [
"medium"
] + ["$", "11"] + ["paneer"] + ["lunch", "order"] + ["vindaloo"] + [
"lamb",
"madras",
]
# when a mention is not tabular
assert mentions[0].get_span() == "Sample"
assert list(get_aligned_ngrams(mentions[0])) == []
def test_get_head_ngrams(mention_setup):
"""Test the get_head_ngrams function."""
mentions = mention_setup
assert mentions[52].get_span() == "saag"
assert list(get_head_ngrams(mentions[52])) == ["joan"] + ["lunch", "order"]
# when a mention is in the 1st column
assert mentions[51].get_span() == "Joan"
assert list(get_head_ngrams(mentions[51])) == []
# when a mention is in the header row
assert mentions[46].get_span() == "Name"
assert list(get_head_ngrams(mentions[46])) == []
# when a mention is not tabular
assert mentions[0].get_span() == "Sample"
assert list(get_head_ngrams(mentions[0])) == []
| fonduer-master | tests/utils/data_model_utils/test_tabular.py |
"""Fonduer data model's structural utils unit tests."""
import pytest
from fonduer.candidates.mentions import MentionNgrams
from fonduer.parser.models import Document
from fonduer.parser.parser import ParserUDF
from fonduer.utils.data_model_utils import common_ancestor, lowest_common_ancestor_depth
def get_parser_udf(
structural=True, # structural information
blacklist=["style", "script"], # ignore tag types, default: style, script
flatten=["span", "br"], # flatten tag types, default: span, br
language="en",
lingual=True, # lingual information
lingual_parser=None,
strip=True,
replacements=[("[\u2010\u2011\u2012\u2013\u2014\u2212]", "-")],
tabular=True, # tabular information
visual=False, # visual information
visual_parser=None,
pdf_path=None,
):
"""Return an instance of ParserUDF."""
parser_udf = ParserUDF(
structural=structural,
blacklist=blacklist,
flatten=flatten,
lingual=lingual,
lingual_parser=lingual_parser,
strip=strip,
replacements=replacements,
tabular=tabular,
visual=visual,
visual_parser=visual_parser,
pdf_path=pdf_path,
language=language,
)
return parser_udf
@pytest.fixture()
def doc_setup():
"""Set up a document."""
parser_udf = get_parser_udf()
doc = Document(id=1, name="test", stable_id="1::document:0:0")
doc.text = """<html>
<body>
<h1>test1</h1>
<h2>test2</h2>
<div>
<h3>test3</h3>
<table>
<tr>
<td>test4</td>
<td>test5</td>
</tr>
</table>
<table>
<tr>
<td>test6</td>
<td>test7</td>
</tr>
</table>
</div>
<p>test8 test9</p>
</body>
</html>"""
doc = parser_udf.apply(doc)
return doc
@pytest.mark.parametrize(
"mention_ids, output_common_ancestor, output_lcad",
[
([], ["", "html", "body"], 1),
([0, 1], ["", "html", "body"], 1),
([2, 3], ["", "html", "body", "div"], 1),
([3, 4], ["", "html", "body", "div", "table[1]", "tr"], 1),
([4, 5], ["", "html", "body", "div"], 3),
([5, 6], ["", "html", "body", "div", "table[2]", "tr"], 1),
([3, 5], ["", "html", "body", "div"], 3),
([7, 8], ["", "html", "body", "p"], 0),
],
)
def test_ancestors(doc_setup, mention_ids, output_common_ancestor, output_lcad):
"""Test if get_vert_ngrams works."""
doc = doc_setup
# Create 1-gram span mentions
space = MentionNgrams(n_min=1, n_max=1)
mentions = [tc for tc in space.apply(doc)]
assert len(mentions) == len([word for sent in doc.sentences for word in sent.words])
# Test mentions extraction
assert mentions[0].sentence.text == "test1"
assert mentions[1].sentence.text == "test2"
assert mentions[2].sentence.text == "test3"
assert mentions[3].sentence.text == "test4"
assert mentions[4].sentence.text == "test5"
assert mentions[5].sentence.text == "test6"
assert mentions[6].sentence.text == "test7"
assert mentions[7].sentence.text == "test8 test9"
assert mentions[7].get_span() == "test8"
assert mentions[8].get_span() == "test9"
test_mentions = (
[mentions[i] for i in mention_ids] if len(mention_ids) > 0 else mentions
)
    # Test common ancestor calculation
overall_common_ancestor = common_ancestor(test_mentions)
assert overall_common_ancestor == output_common_ancestor
    # Test lowest common ancestor depth calculation
overall_lowest_common_ancestor_depth = lowest_common_ancestor_depth(test_mentions)
assert overall_lowest_common_ancestor_depth == output_lcad
| fonduer-master | tests/utils/data_model_utils/test_structural.py |
"""Hardware labeling functions."""
import re
from itertools import chain
from fonduer.utils.data_model_utils import (
get_aligned_ngrams,
get_left_ngrams,
get_row_ngrams,
overlap,
)
from tests.shared.hardware_utils import ABSTAIN, FALSE, TRUE
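# Each labeling function below votes TRUE (1), FALSE (0), or ABSTAIN (-1),
# following the integer convention defined in tests/shared/hardware_utils.py.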
def LF_storage_row(c):
"""Return True if temp mention's row ngrams contain ``storage''."""
return TRUE if "storage" in get_row_ngrams(c.temp) else ABSTAIN
def LF_temperature_row(c):
"""Return True if temp mention's row ngrams contain ``temperature''."""
return TRUE if "temperature" in get_row_ngrams(c.temp) else ABSTAIN
def LF_operating_row(c):
"""Return True if temp mention's row ngrams contain ``operating''."""
return TRUE if "operating" in get_row_ngrams(c.temp) else ABSTAIN
def LF_tstg_row(c):
"""Return True if temp mention's row ngrams overlap with the following keywords."""
return (
TRUE
if overlap(["tstg", "stg", "ts"], list(get_row_ngrams(c.temp)))
else ABSTAIN
)
def LF_to_left(c):
"""Return True if temp mention's left ngrams contain ``to''."""
return TRUE if "to" in get_left_ngrams(c.temp, window=2) else ABSTAIN
def LF_negative_number_left(c):
"""Return True if temp mention's left ngrams contain negative number."""
return (
TRUE
if any(
[re.match(r"-\s*\d+", ngram) for ngram in get_left_ngrams(c.temp, window=4)]
)
else ABSTAIN
)
def LF_test_condition_aligned(c):
"""Return False if temp mention's ngrams align with ``test'' or ``condition''."""
return (
FALSE
if overlap(["test", "condition"], list(get_aligned_ngrams(c.temp)))
else ABSTAIN
)
def LF_collector_aligned(c):
"""Return False if temp mention's ngrams align with the following keywords."""
return (
FALSE
if overlap(
["collector", "collector-current", "collector-base", "collector-emitter"],
list(get_aligned_ngrams(c.temp)),
)
else ABSTAIN
)
def LF_current_aligned(c):
"""Return False if temp mention's ngrams align with the following keywords."""
return (
FALSE
if overlap(["current", "dc", "ic"], list(get_aligned_ngrams(c.temp)))
else ABSTAIN
)
def LF_voltage_row_temp(c):
"""Return False if temp mention's ngrams align with the following keywords."""
return (
FALSE
if overlap(
["voltage", "cbo", "ceo", "ebo", "v"], list(get_aligned_ngrams(c.temp))
)
else ABSTAIN
)
def LF_voltage_row_part(c):
"""Return False if temp mention's ngrams align with the following keywords."""
return (
FALSE
if overlap(
["voltage", "cbo", "ceo", "ebo", "v"], list(get_aligned_ngrams(c.temp))
)
else ABSTAIN
)
def LF_typ_row(c):
"""Return False if temp mention's ngrams align with the following keywords."""
return FALSE if overlap(["typ", "typ."], list(get_row_ngrams(c.temp))) else ABSTAIN
def LF_complement_left_row(c):
"""Return False if temp mention's ngrams align with the following keywords."""
return (
FALSE
if (
overlap(
["complement", "complementary"],
chain.from_iterable(
[get_row_ngrams(c.part), get_left_ngrams(c.part, window=10)]
),
)
)
else ABSTAIN
)
def LF_too_many_numbers_row(c):
"""Return False if too many numbers in the row."""
num_numbers = list(get_row_ngrams(c.temp, attrib="ner_tags")).count("number")
return FALSE if num_numbers >= 3 else ABSTAIN
def LF_temp_on_high_page_num(c):
"""Return False if temp mention on high page number."""
return FALSE if c.temp.context.get_attrib_tokens("page")[0] > 2 else ABSTAIN
def LF_temp_outside_table(c):
    """Return False if the temp mention's sentence is not tabular."""
    return FALSE if not c.temp.context.sentence.is_tabular() else ABSTAIN
def LF_not_temp_relevant(c):
"""Return False if temp mention's ngrams overlap with the following keywords."""
return (
FALSE
if not overlap(
["storage", "temperature", "tstg", "stg", "ts"],
list(get_aligned_ngrams(c.temp)),
)
else ABSTAIN
)
# Voltage LFS
def LF_bad_keywords_in_row(c):
"""Return False if volt mention's ngrams overlap with the following keywords."""
return (
FALSE
if overlap(
["continuous", "cut-off", "gain", "breakdown"], get_row_ngrams(c.volt)
)
else ABSTAIN
)
def LF_current_in_row(c):
"""Return False if volt mention's ngrams overlap with the following keywords."""
return FALSE if overlap(["i", "ic", "mA"], get_row_ngrams(c.volt)) else ABSTAIN
non_ce_voltage_keywords = set(
[
"collector-base",
"collector - base",
"collector base",
"vcbo",
"cbo",
"vces",
"emitter-base",
"emitter - base",
"emitter base",
"vebo",
"ebo",
"breakdown voltage",
"emitter breakdown",
"emitter breakdown voltage",
"current",
]
)
def LF_non_ce_voltages_in_row(c):
"""Return False if volt mention's ngrams overlap with non_ce_voltage_keywords."""
return (
FALSE
if overlap(non_ce_voltage_keywords, get_row_ngrams(c.volt, n_max=3))
else ABSTAIN
)
| fonduer-master | tests/shared/hardware_lfs.py |
"""Hardware FonduerModel."""
import pickle
import numpy as np
from emmental.data import EmmentalDataLoader
from pandas import DataFrame
from fonduer.learning.dataset import FonduerDataset
from fonduer.packaging import FonduerModel
from fonduer.parser.models import Document
from tests.shared.hardware_lfs import TRUE
from tests.shared.hardware_utils import get_implied_parts
ATTRIBUTE = "stg_temp_max"
class HardwareFonduerModel(FonduerModel):
"""Customized FonduerModel for hardware."""
def _classify(self, doc: Document) -> DataFrame:
# Only one candidate class is used.
candidate_class = self.candidate_extractor.candidate_classes[0]
test_cands = getattr(doc, candidate_class.__tablename__ + "s")
features_list = self.featurizer.apply(doc)
# Convert features into a sparse matrix
F_test = FonduerModel.convert_features_to_matrix(
features_list[0], self.key_names
)
test_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(ATTRIBUTE, test_cands, F_test, self.word2id, 2),
split="test",
batch_size=100,
shuffle=False,
)
test_preds = self.emmental_model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.7)
true_preds = [test_cands[_] for _ in positive[0]]
pickle_file = "tests/data/parts_by_doc_dict.pkl"
with open(pickle_file, "rb") as f:
parts_by_doc = pickle.load(f)
df = DataFrame()
for c in true_preds:
part = c[0].context.get_span()
doc = c[0].context.sentence.document.name.upper()
val = c[1].context.get_span()
for p in get_implied_parts(part, doc, parts_by_doc):
entity_relation = (doc, p, val)
df = df.append(
DataFrame([entity_relation], columns=["doc", "part", "val"])
)
return df
| fonduer-master | tests/shared/hardware_fonduer_model.py |
"""Fonduer shared modules for unit tests."""
| fonduer-master | tests/shared/__init__.py |
"""Hardware throttlers."""
import re
from fonduer.utils.data_model_utils import (
get_horz_ngrams,
get_vert_ngrams,
is_horz_aligned,
is_vert_aligned,
same_table,
)
from tests.shared.hardware_spaces import expand_part_range
def temp_throttler(c):
"""Temperature throttler."""
(part, attr) = c
if same_table((part, attr)):
return is_horz_aligned((part, attr)) or is_vert_aligned((part, attr))
return True
def filter_non_parts(ngrams):
    """Keep only the n-grams that look like transistor part numbers."""
    ret = set()
    for ngram in ngrams:
        for part in expand_part_range(ngram):
            if re.match("^([0-9]+[A-Z]+|[A-Z]+[0-9]+)[0-9A-Z]*$", part) and len(part) > 2:
                ret.add(part)
    return ret
def LF_part_miss_match(c):
"""Return 0 if part mismatch."""
ngrams_part = set(list(get_vert_ngrams(c[1], n_max=1)))
ngrams_part = filter_non_parts(
ngrams_part.union(set(list(get_horz_ngrams(c[1], n_max=1))))
)
return (
0
if len(ngrams_part) == 0
or any([c[0].get_span().lower().startswith(_.lower()) for _ in ngrams_part])
else -1
)
def volt_throttler(c):
"""Voltage throttler."""
(part, attr) = c
if same_table((part, attr)):
return is_horz_aligned((part, attr)) or is_vert_aligned((part, attr))
if LF_part_miss_match((part, attr)) < 0:
return False
return True
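# Note: throttlers return True to keep a candidate pair and False to discard it
# before candidate extraction; LF_part_miss_match above is reused here as a
# hard filter rather than as a labeling function.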
| fonduer-master | tests/shared/hardware_throttlers.py |
"""Hardware mention/candidate subclasses."""
from fonduer.candidates.models import candidate_subclass, mention_subclass
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
Volt = mention_subclass("Volt")
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
PartVolt = candidate_subclass("PartVolt", [Part, Volt])
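# Candidates of these classes are binary tuples; elsewhere in the tests they
# are accessed positionally (c[0] is the Part mention, c[1] the Temp or Volt
# mention) or by attribute (c.part, c.temp, c.volt).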
| fonduer-master | tests/shared/hardware_subclasses.py |
"""Hardware matchers."""
import csv
from fonduer.candidates.matchers import (
DictionaryMatch,
Intersect,
LambdaFunctionMatcher,
RegexMatchSpan,
Union,
)
from fonduer.utils.data_model_utils import get_row_ngrams, overlap
temp_matcher = RegexMatchSpan(rgx=r"(?:[1][5-9]|20)[05]", longest_match_only=False)
# Transistor Naming Conventions as Regular Expressions ###
eeca_rgx = (
r"([ABC][A-Z][WXYZ]?[0-9]{3,5}(?:[A-Z]){0,5}"
r"[0-9]?[A-Z]?(?:-[A-Z0-9]{1,7})?(?:[-][A-Z0-9]{1,2})?(?:\/DG)?)"
)
jedec_rgx = r"(2N\d{3,4}[A-Z]{0,5}[0-9]?[A-Z]?)"
jis_rgx = r"(2S[ABCDEFGHJKMQRSTVZ]{1}[\d]{2,4})"
others_rgx = (
r"((?:NSVBC|SMBT|MJ|MJE|MPS|MRF|RCA|TIP|ZTX|ZT|ZXT|TIS|"
r"TIPL|DTC|MMBT|SMMBT|PZT|FZT|STD|BUV|PBSS|KSC|CXT|FCX|CMPT){1}"
r"[\d]{2,4}[A-Z]{0,5}(?:-[A-Z0-9]{0,6})?(?:[-][A-Z0-9]{0,1})?)"
)
part_rgx = "|".join([eeca_rgx, jedec_rgx, jis_rgx, others_rgx])
part_rgx_matcher = RegexMatchSpan(rgx=part_rgx, longest_match_only=True)
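# Illustrative strings accepted by the patterns above (traced by hand, not
# asserted here): "BC546B" and "BC846AW" (eeca_rgx), "2N3904" (jedec_rgx),
# "2SC1815" (jis_rgx), "TIP120" and "MMBT3904" (others_rgx).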
def get_digikey_parts_set(path):
"""Get all transistor parts from digikey part dictionary."""
all_parts = set()
with open(path, "r") as csvinput:
reader = csv.reader(csvinput)
for line in reader:
(part, url) = line
all_parts.add(part)
return all_parts
# Dictionary of known transistor parts ###
dict_path = "tests/data/digikey_part_dictionary.csv"
part_dict_matcher = DictionaryMatch(d=get_digikey_parts_set(dict_path))
def common_prefix_length_diff(str1, str2):
"""Calculate common prefix length difference."""
for i in range(min(len(str1), len(str2))):
if str1[i] != str2[i]:
return min(len(str1), len(str2)) - i
return 0
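# For illustration: common_prefix_length_diff("BC546", "BC547") == 1, since the
# strings first differ at their final character.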
def part_file_name_conditions(attr):
"""Check part file name conditions."""
file_name = attr.sentence.document.name
if len(file_name.split("_")) != 2:
return False
if attr.get_span()[0] == "-":
return False
name = attr.get_span().replace("-", "")
return (
any(char.isdigit() for char in name)
and any(char.isalpha() for char in name)
and common_prefix_length_diff(file_name.split("_")[1], name) <= 2
)
add_rgx = r"^[A-Z0-9\-]{5,15}$"
part_file_name_lambda_matcher = LambdaFunctionMatcher(func=part_file_name_conditions)
part_file_name_matcher = Intersect(
RegexMatchSpan(rgx=add_rgx, longest_match_only=True), part_file_name_lambda_matcher
)
part_matcher = Union(part_rgx_matcher, part_dict_matcher, part_file_name_matcher)
# CE Voltage Matcher
ce_keywords = set(["collector emitter", "collector-emitter", "collector - emitter"])
ce_abbrevs = set(["ceo", "vceo"])
ce_v_max_rgx_matcher = RegexMatchSpan(rgx=r"\d{1,2}[05]", longest_match_only=False)
def ce_v_max_conditions(attr):
"""Check ce_v_max conditions."""
return overlap(
ce_keywords.union(ce_abbrevs), get_row_ngrams(attr, spread=[0, 3], n_max=3)
)
ce_v_max_row_matcher = LambdaFunctionMatcher(func=ce_v_max_conditions)
def ce_v_max_more_conditions1(attr):
"""Check ce_v_max conditions."""
text = attr.sentence.text
if (
attr.char_start > 1
and text[attr.char_start - 1] == "-"
and text[attr.char_start - 2] not in [" ", "="]
):
return False
return True
def ce_v_max_more_conditions(attr):
"""Check ce_v_max conditions."""
text = attr.sentence.text
if attr.char_start != 0 and text[attr.char_start - 1] == "/":
return False
if (
attr.char_start > 1
and text[attr.char_start - 1] == "-"
and text[attr.char_start - 2] not in [" ", "="]
):
return False
if "vcb" in attr.sentence.text.lower():
return False
for i in range(attr.char_end + 1, len(text)):
if text[i] == " ":
continue
if text[i].isdigit():
break
if text[i].upper() != "V":
return False
else:
break
return True
def attr_in_table(attr):
"""Check attribute is in table."""
return attr.sentence.is_tabular()
attr_in_table_matcher = LambdaFunctionMatcher(func=attr_in_table)
ce_v_whole_number = LambdaFunctionMatcher(func=ce_v_max_more_conditions)
volt_matcher = Intersect(
ce_v_max_rgx_matcher, attr_in_table_matcher, ce_v_max_row_matcher, ce_v_whole_number
)
| fonduer-master | tests/shared/hardware_matchers.py |
"""Hardware mention spaces."""
import logging
import re
from builtins import chr, range, str
from difflib import SequenceMatcher
from fonduer.candidates import MentionNgrams
from fonduer.candidates.models.implicit_span_mention import TemporaryImplicitSpanMention
logger = logging.getLogger(__name__)
def expand_part_range(text):
"""Expand transistor part range.
Given a string, generates strings that are potentially implied by
the original text. Two main operations are performed:
1. Expanding ranges (X to Y; X ~ Y; X -- Y)
2. Expanding suffixes (123X/Y/Z; 123X, Y, Z)
Also yields the original input string.
To get the correct output from complex strings, this function should be fed
many Ngrams from a particular sentence.
"""
# Regex Patterns compile only once per function call.
# This range pattern will find text that "looks like" a range.
range_pattern = re.compile(
r"^(?P<start>[\w\/]+)(?:\s*(\.{3,}|\~|\-+|to|thru|through"
r"|\u2011+|\u2012+|\u2013+|\u2014+|\u2012+|\u2212+)\s*)(?P<end>[\w\/]+)$",
re.IGNORECASE | re.UNICODE,
)
suffix_pattern = re.compile(r"(?P<spacer>(?:,|\/)\s*)(?P<suffix>[\w\-]+)")
base_pattern = re.compile(
r"(?P<base>[\w\-]+)(?P<spacer>(?:,|\/)\s*)(?P<suffix>[\w\-]+)?"
)
logger.debug("Text: " + text)
expanded_parts = set()
final_set = set()
# Step 1: Search and expand ranges
m = re.search(range_pattern, text)
if m:
start = m.group("start")
end = m.group("end")
start_diff = ""
end_diff = ""
logger.debug(f" Start: {start} \t End: {end}")
# Use difflib to find difference. We are interested in 'replace' only
seqm = SequenceMatcher(None, start, end).get_opcodes()
for opcode, a0, a1, b0, b1 in seqm:
if opcode == "equal":
continue
elif opcode == "insert":
break
elif opcode == "delete":
break
elif opcode == "replace":
# NOTE: Potential bug if there is more than 1 replace
start_diff = start[a0:a1]
end_diff = end[b0:b1]
else:
logger.error("Unexpected opcode")
raise RuntimeError("[ERROR] unexpected opcode")
logger.debug(f" start_diff: {start_diff} \t end_diff: {end_diff}")
# First, check for number range
if atoi(start_diff) and atoi(end_diff):
logger.debug(f" Enumerate {atoi(start_diff)} to {atoi(end_diff)}")
# generate a list of the numbers plugged in
for number in range(atoi(start_diff), atoi(end_diff) + 1):
new_part = start.replace(start_diff, str(number))
# Produce the strings with the enumerated ranges
expanded_parts.add(new_part)
# Second, check for single-letter enumeration
if len(start_diff) == 1 and len(end_diff) == 1:
if start_diff.isalpha() and end_diff.isalpha():
logger.debug(f" Enumerate {start_diff} to {end_diff}")
letter_range = char_range(start_diff, end_diff)
for letter in letter_range:
new_part = start.replace(start_diff, letter)
# Produce the strings with the enumerated ranges
expanded_parts.add(new_part)
# If we cannot identify a clear number or letter range, or if there are
# multiple ranges being expressed, just ignore it.
if len(expanded_parts) == 0:
expanded_parts.add(text)
else:
expanded_parts.add(text)
    # Special case is when there is a single slash (e.g. BC337-16/BC338-16)
# and we want to output both halves of the slash, assuming that both
# halves are the same length
if text.count("/") == 1:
split = text.split("/")
if len(split[0]) == len(split[1]):
expanded_parts.add(split[0])
expanded_parts.add(split[1])
logger.debug(" Inferred Text: \n " + str(sorted(expanded_parts)))
# Step 2: Expand suffixes for each of the inferred sentences
# NOTE: this only does the simple case of replacing same-length suffixes.
# we do not handle cases like "BC546A/B/XYZ/QR"
for part in expanded_parts:
first_match = re.search(base_pattern, part)
if first_match:
base = re.search(base_pattern, part).group("base")
final_set.add(base) # add the base (multiple times, but set handles that)
if first_match.group("suffix"):
all_suffix_lengths = set()
# This is a bit inefficient but this first pass just is here
# to make sure that the suffixes are the same length
for m in re.finditer(suffix_pattern, part):
suffix = m.group("suffix")
suffix_len = len(suffix)
all_suffix_lengths.add(suffix_len)
if len(all_suffix_lengths) == 1:
for m in re.finditer(suffix_pattern, part):
suffix = m.group("suffix")
suffix_len = len(suffix)
old_suffix = base[-suffix_len:]
if (suffix.isalpha() and old_suffix.isalpha()) or (
suffix.isdigit() and old_suffix.isdigit()
):
trimmed = base[:-suffix_len]
final_set.add(trimmed + suffix)
else:
if part and (not part.isspace()):
final_set.add(part) # no base was found with suffixes to expand
logger.debug(" Final Set: " + str(sorted(final_set)))
# Also return the original input string
final_set.add(text)
for part in final_set:
yield part
# NOTE: We make a few assumptions (e.g. suffixes must be same length), but
# one important unstated assumption is that if there is a single suffix,
# (e.g. BC546A/B), the single suffix will be swapped in no matter what.
# In this example, it works. But if we had "ABCD/EFG" we would get "ABCD,AEFG"
# Check out UtilsTests.py to see more of our assumptions capture as test
# cases.
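# A brief usage sketch (outputs traced from the logic above, not asserted):
#   sorted(expand_part_range("BC546A/B"))
#     -> ["BC546A", "BC546A/B", "BC546B"]
#   sorted(expand_part_range("BC546-BC548"))
#     -> ["BC546", "BC546-BC548", "BC547", "BC548"]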
def atoi(num_str):
"""Convert a string to an integer, or returns None."""
try:
return int(num_str)
except Exception:
pass
return None
def char_range(a, b):
"""Generate the characters from a to b inclusive."""
for c in range(ord(a), ord(b) + 1):
yield chr(c)
class MentionNgramsPart(MentionNgrams):
"""N-grams mention specifically for transistor parts."""
def __init__(
self, parts_by_doc=None, n_max=3, expand=True, split_tokens=["-", "/"]
):
"""Initialize MentionNgramsPart.
:param parts_by_doc: a dictionary d where d[document_name.upper()] =
[partA, partB, ...]
"""
super().__init__(n_max=n_max, split_tokens=split_tokens)
self.parts_by_doc = parts_by_doc
self.expander = expand_part_range if expand else (lambda x: [x])
    def apply(self, doc):
        """Take a Document and return a mention generator."""
        for ts in MentionNgrams.apply(self, doc):
            enumerated_parts = [
                part.upper() for part in self.expander(ts.get_span())
            ]
            parts = set(enumerated_parts)
            if self.parts_by_doc:
                possible_parts = self.parts_by_doc[ts.sentence.document.name.upper()]
for base_part in enumerated_parts:
for part in possible_parts:
if part.startswith(base_part) and len(base_part) >= 4:
parts.add(part)
for i, part in enumerate(sorted(list(parts))):
if " " in part:
continue # it won't pass the part_matcher
if part == ts.get_span():
yield ts
else:
yield TemporaryImplicitSpanMention(
sentence=ts.sentence,
char_start=ts.char_start,
char_end=ts.char_end,
expander_key="part_expander",
position=i,
text=part,
words=[part],
lemmas=[part],
pos_tags=[ts.get_attrib_tokens("pos_tags")[0]],
ner_tags=[ts.get_attrib_tokens("ner_tags")[0]],
dep_parents=[ts.get_attrib_tokens("dep_parents")[0]],
dep_labels=[ts.get_attrib_tokens("dep_labels")[0]],
page=[min(ts.get_attrib_tokens("page"))]
if ts.sentence.is_visual()
else [None],
top=[min(ts.get_attrib_tokens("top"))]
if ts.sentence.is_visual()
else [None],
left=[max(ts.get_attrib_tokens("left"))]
if ts.sentence.is_visual()
else [None],
bottom=[min(ts.get_attrib_tokens("bottom"))]
if ts.sentence.is_visual()
else [None],
right=[max(ts.get_attrib_tokens("right"))]
if ts.sentence.is_visual()
else [None],
meta=None,
)
class MentionNgramsTemp(MentionNgrams):
"""N-grams mention specifically for temperature."""
def __init__(self, n_max=2, split_tokens=["-", "/"]):
"""Initialize MentionNgramsTemp."""
super().__init__(n_max=n_max, split_tokens=split_tokens)
def apply(self, doc):
"""Apply function takes a Document and return a mention generator."""
for ts in MentionNgrams.apply(self, doc):
m = re.match(
r"^([\+\-\u2010\u2011\u2012\u2013\u2014\u2212\uf02d])?(\s*)(\d+)$",
ts.get_span(),
re.U,
)
if m:
if m.group(1) is None:
temp = ""
elif m.group(1) == "+":
if m.group(2) != "":
# If bigram '+ 150' is seen, accept the unigram '150',
# not both
continue
temp = ""
else: # m.group(1) is a type of negative sign
# A bigram '- 150' is different from unigram '150', so we
# keep the implicit '-150'
temp = "-"
temp += m.group(3)
yield TemporaryImplicitSpanMention(
sentence=ts.sentence,
char_start=ts.char_start,
char_end=ts.char_end,
expander_key="temp_expander",
position=0,
text=temp,
words=[temp],
lemmas=[temp],
pos_tags=[ts.get_attrib_tokens("pos_tags")[-1]],
ner_tags=[ts.get_attrib_tokens("ner_tags")[-1]],
dep_parents=[ts.get_attrib_tokens("dep_parents")[-1]],
dep_labels=[ts.get_attrib_tokens("dep_labels")[-1]],
page=[ts.get_attrib_tokens("page")[-1]]
if ts.sentence.is_visual()
else [None],
top=[ts.get_attrib_tokens("top")[-1]]
if ts.sentence.is_visual()
else [None],
left=[ts.get_attrib_tokens("left")[-1]]
if ts.sentence.is_visual()
else [None],
bottom=[ts.get_attrib_tokens("bottom")[-1]]
if ts.sentence.is_visual()
else [None],
right=[ts.get_attrib_tokens("right")[-1]]
if ts.sentence.is_visual()
else [None],
meta=None,
)
else:
yield ts
class MentionNgramsVolt(MentionNgrams):
"""N-grams mention specifically for voltage."""
def __init__(self, n_max=1, split_tokens=["-", "/"]):
"""Initialize MentionNgramsVolt."""
super().__init__(n_max=n_max, split_tokens=split_tokens)
def apply(self, doc):
"""Apply function takes a Document and return a mention generator."""
for ts in MentionNgrams.apply(self, doc):
if ts.get_span().endswith(".0"):
value = ts.get_span()[:-2]
yield TemporaryImplicitSpanMention(
sentence=ts.sentence,
char_start=ts.char_start,
char_end=ts.char_end,
expander_key="volt_expander",
position=0,
text=value,
words=[value],
lemmas=[value],
pos_tags=[ts.get_attrib_tokens("pos_tags")[-1]],
ner_tags=[ts.get_attrib_tokens("ner_tags")[-1]],
dep_parents=[ts.get_attrib_tokens("dep_parents")[-1]],
dep_labels=[ts.get_attrib_tokens("dep_labels")[-1]],
page=[ts.get_attrib_tokens("page")[-1]]
if ts.sentence.is_visual()
else [None],
top=[ts.get_attrib_tokens("top")[-1]]
if ts.sentence.is_visual()
else [None],
left=[ts.get_attrib_tokens("left")[-1]]
if ts.sentence.is_visual()
else [None],
bottom=[ts.get_attrib_tokens("bottom")[-1]]
if ts.sentence.is_visual()
else [None],
right=[ts.get_attrib_tokens("right")[-1]]
if ts.sentence.is_visual()
else [None],
meta=None,
)
else:
yield ts
| fonduer-master | tests/shared/hardware_spaces.py |
"""Hardware utils."""
import codecs
import csv
import logging
from builtins import range
from fonduer.candidates.models import Candidate
from fonduer.learning.utils import confusion_matrix
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from tqdm import tqdm
else:
from tqdm import tqdm_notebook as tqdm
logger = logging.getLogger(__name__)
ABSTAIN = -1
FALSE = 0
TRUE = 1
def get_gold_dict(
filename, doc_on=True, part_on=True, val_on=True, attribute=None, docs=None
):
"""Get gold dict."""
with codecs.open(filename, encoding="utf-8") as csvfile:
gold_reader = csv.reader(csvfile)
gold_dict = set()
for row in gold_reader:
(doc, part, attr, val) = row
if docs is None or doc.upper() in docs:
if attribute and attr != attribute:
continue
                if not val:  # skip rows without a gold value
                    continue
else:
key = []
if doc_on:
key.append(doc.upper())
if part_on:
key.append(part.upper())
if val_on:
key.append(val.upper())
gold_dict.add(tuple(key))
return gold_dict
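# With the default flags, each entry of the returned set is an uppercased
# (doc, part, val) tuple, e.g. ("BC546", "BC546B", "150") (illustrative values,
# not taken from the actual gold file).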
gold_dict = get_gold_dict(
"tests/data/hardware_tutorial_gold.csv", attribute="stg_temp_max"
)
def gold(c: Candidate) -> int:
"""Check if candidate is in gold dict."""
doc = (c[0].context.sentence.document.name).upper()
part = (c[0].context.get_span()).upper()
val = ("".join(c[1].context.get_span().split())).upper()
if (doc, part, val) in gold_dict:
return TRUE
else:
return FALSE
def entity_level_f1(
candidates, gold_file, attribute=None, corpus=None, parts_by_doc=None
):
"""Check entity-level recall of candidates compared to gold.
Turns a CandidateSet into a normal set of entity-level tuples
(doc, part, [attribute_value])
then compares this to the entity-level tuples found in the gold.
Example Usage:
from hardware_utils import entity_level_total_recall
candidates = # CandidateSet of all candidates you want to consider
gold_file = 'tutorials/tables/data/hardware/hardware_gold.csv'
entity_level_total_recall(candidates, gold_file, 'stg_temp_min')
"""
docs = [(doc.name).upper() for doc in corpus] if corpus else None
val_on = attribute is not None
gold_set = get_gold_dict(
gold_file,
docs=docs,
doc_on=True,
part_on=True,
val_on=val_on,
attribute=attribute,
)
if len(gold_set) == 0:
logger.info(f"Gold File: {gold_file}\n Attribute: {attribute}")
logger.error("Gold set is empty.")
return
# Turn CandidateSet into set of tuples
logger.info("Preparing candidates...")
entities = set()
    for c in tqdm(candidates):
part = c[0].context.get_span()
doc = c[0].context.sentence.document.name.upper()
if attribute:
val = c[1].context.get_span()
for p in get_implied_parts(part, doc, parts_by_doc):
if attribute:
entities.add((doc, p, val))
else:
entities.add((doc, p))
(TP_set, FP_set, FN_set) = confusion_matrix(entities, gold_set)
TP = len(TP_set)
FP = len(FP_set)
FN = len(FN_set)
prec = TP / (TP + FP) if TP + FP > 0 else float("nan")
rec = TP / (TP + FN) if TP + FN > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
logger.info("========================================")
logger.info("Scoring on Entity-Level Gold Data")
logger.info("========================================")
logger.info(f"Corpus Precision {prec:.3}")
logger.info(f"Corpus Recall {rec:.3}")
logger.info(f"Corpus F1 {f1:.3}")
logger.info("----------------------------------------")
logger.info(f"TP: {TP} | FP: {FP} | FN: {FN}")
logger.info("========================================\n")
return [sorted(list(x)) for x in [TP_set, FP_set, FN_set]]
def get_implied_parts(part, doc, parts_by_doc):
"""Get implied parts."""
yield part
if parts_by_doc:
for p in parts_by_doc[doc]:
if p.startswith(part) and len(part) >= 4:
yield p
def entity_to_candidates(entity, candidate_subset):
"""Get corresponding candidates from entity."""
matches = []
for c in candidate_subset:
c_entity = tuple(
[c[0].context.sentence.document.name.upper()]
+ [c[i].context.get_span().upper() for i in range(len(c))]
)
c_entity = tuple([str(x) for x in c_entity])
if c_entity == entity:
matches.append(c)
return matches
| fonduer-master | tests/shared/hardware_utils.py |
"""Fonduer parser unit tests."""
import logging
import os
from typing import List
import pytest
from sqlalchemy.orm import Session
from fonduer.parser import Parser
from fonduer.parser.lingual_parser import SpacyParser
from fonduer.parser.models import Document
from fonduer.parser.parser import ParserUDF, SimpleParser
from fonduer.parser.preprocessors import (
CSVDocPreprocessor,
HOCRDocPreprocessor,
HTMLDocPreprocessor,
TextDocPreprocessor,
TSVDocPreprocessor,
)
from fonduer.parser.visual_parser import HocrVisualParser, PdfVisualParser
def get_parser_udf(
structural=True, # structural information
blacklist=["style", "script"], # ignore tag types, default: style, script
flatten=["span", "br"], # flatten tag types, default: span, br
language="en",
lingual=True, # lingual information
lingual_parser=None,
strip=True,
replacements=[("[\u2010\u2011\u2012\u2013\u2014\u2212]", "-")],
tabular=True, # tabular information
visual=False, # visual information
visual_parser=None,
):
"""Return an instance of ParserUDF."""
parser_udf = ParserUDF(
structural=structural,
blacklist=blacklist,
flatten=flatten,
lingual=lingual,
lingual_parser=lingual_parser,
strip=strip,
replacements=replacements,
tabular=tabular,
visual=visual,
visual_parser=visual_parser,
language=language,
)
return parser_udf
def parse(session: Session, docs_path: str, pdf_path: str) -> List[Document]:
"""Parse documents using Parser UDF Runner."""
# Preprocessor for the Docs
doc_preprocessor = HTMLDocPreprocessor(docs_path)
# Create an Parser and parse the documents
corpus_parser = Parser(
session,
parallelism=1,
structural=True,
lingual=True,
visual_parser=PdfVisualParser(pdf_path),
)
corpus_parser.clear()
corpus_parser.apply(doc_preprocessor)
return corpus_parser.get_documents()
def test_parse_md_details():
"""Test the parser with the md document."""
logger = logging.getLogger(__name__)
docs_path = "tests/data/html_simple/md.html"
pdf_path = "tests/data/pdf_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "md"))
# Check that doc has a name
assert doc.name == "md"
# Check that doc does not have any of these
assert len(doc.figures) == 0
assert len(doc.tables) == 0
assert len(doc.cells) == 0
assert len(doc.sentences) == 0
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
language="en",
)
doc = parser_udf.apply(doc)
# Check that doc has a figure
assert len(doc.figures) == 1
assert doc.figures[0].url == "http://placebear.com/200/200"
assert doc.figures[0].position == 0
assert doc.figures[0].section.position == 0
assert doc.figures[0].stable_id == "md::figure:0"
# Check that doc has a table
assert len(doc.tables) == 1
assert doc.tables[0].position == 0
assert doc.tables[0].section.position == 0
assert doc.tables[0].document.name == "md"
# Check that doc has cells
assert len(doc.cells) == 16
cells = list(doc.cells)
assert cells[0].row_start == 0
assert cells[0].col_start == 0
assert cells[0].position == 0
assert cells[0].document.name == "md"
assert cells[0].table.position == 0
assert cells[10].row_start == 2
assert cells[10].col_start == 2
assert cells[10].position == 10
assert cells[10].document.name == "md"
assert cells[10].table.position == 0
# Check that doc has sentences
assert len(doc.sentences) == 45
sent = sorted(doc.sentences, key=lambda x: x.position)[25]
assert sent.text == "Spicy"
assert sent.table.position == 0
assert sent.table.section.position == 0
assert sent.cell.row_start == 0
assert sent.cell.col_start == 2
# Check if the tail text is processed after inner elements (#333)
assert [sent.text for sent in doc.sentences[14:18]] == [
"italics and later",
"bold",
".",
"Even",
]
# Check abs_char_offsets (#332)
text = "".join([sent.text for sent in doc.sentences])
for sent in doc.sentences:
for abs_char_offset, word in zip(sent.abs_char_offsets, sent.words):
assert text[abs_char_offset] == word[0]
logger.info(f"Doc: {doc}")
for i, sentence in enumerate(doc.sentences):
logger.info(f" Sentence[{i}]: {sentence.text}")
header = sorted(doc.sentences, key=lambda x: x.position)[0]
# Test structural attributes
assert header.xpath == "/html/body/h1"
assert header.html_tag == "h1"
assert header.html_attrs == ["id=sample-markdown"]
# Test visual attributes
assert header.page == [1, 1]
assert header.top == [35, 35]
assert header.bottom == [61, 61]
assert header.right == [111, 231]
assert header.left == [35, 117]
# Test lingual attributes
# when lingual=True, some value other than "" should be filled-in.
assert all(sent.ner_tags)
assert all(sent.dep_labels)
# Test whether nlp information corresponds to sentence words
for sent in doc.sentences:
assert len(sent.words) == len(sent.lemmas)
assert len(sent.words) == len(sent.pos_tags)
assert len(sent.words) == len(sent.ner_tags)
assert len(sent.words) == len(sent.dep_parents)
assert len(sent.words) == len(sent.dep_labels)
def test_parse_wo_tabular():
"""Test the parser without extracting tabular information."""
docs_path = "tests/data/html_simple/md.html"
pdf_path = "tests/data/pdf_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "md"))
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
tabular=False,
lingual=True,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
language="en",
)
doc = parser_udf.apply(doc)
# Check that doc has neither table nor cell
assert len(doc.sections) == 1
assert len(doc.paragraphs) == 44
assert len(doc.figures) == 1
assert len(doc.tables) == 0
assert len(doc.cells) == 0
assert len(doc.sentences) == 45
# Check that sentences are associated with both section and paragraph.
assert all([sent.section is not None for sent in doc.sentences])
assert all([sent.paragraph is not None for sent in doc.sentences])
# Check that sentences are NOT associated with cell
assert all([sent.cell is None for sent in doc.sentences])
@pytest.mark.skipif(
"CI" not in os.environ, reason="Only run spacy non English test on GitHub Actions"
)
def test_spacy_german():
"""Test the parser with the md document."""
docs_path = "tests/data/pure_html/brot.html"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "md"))
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True, tabular=True, lingual=True, visual=False, language="de"
)
doc = parser_udf.apply(doc)
# Check that doc has sentences
assert len(doc.sentences) == 824
sent = sorted(doc.sentences, key=lambda x: x.position)[143]
assert sent.ner_tags == [
"O",
"O",
"LOC",
"O",
"O",
"LOC",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
] # inaccurate
assert sent.dep_labels == [
"mo",
"ROOT",
"sb",
"mo",
"nk",
"nk",
"punct",
"mo",
"nk",
"nk",
"nk",
"sb",
"oc",
"rc",
"punct",
]
@pytest.mark.skipif(
"CI" not in os.environ, reason="Only run spacy non English test on GitHub Actions"
)
def test_spacy_japanese():
"""Test the parser with the md document."""
# Test Japanese alpha tokenization
docs_path = "tests/data/pure_html/japan.html"
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "md"))
parser_udf = get_parser_udf(
structural=True, tabular=True, lingual=True, visual=False, language="ja"
)
doc = parser_udf.apply(doc)
assert len(doc.sentences) == 308
sent = doc.sentences[45]
assert sent.text == "当時マルコ・ポーロが辿り着いたと言われる"
assert sent.words == [
"当時",
"マルコ",
"・",
"ポーロ",
"が",
"辿り",
"着い",
"た",
"と",
"言わ",
"れる",
]
assert sent.lemmas == [
"当時",
"マルコ",
"・",
"ポーロ",
"が",
"辿る",
"着く",
"た",
"と",
"言う",
"れる",
]
# These tags are less stable (ie they change when a spacy model changes)
# So just check that values other than "" are assigned.
assert all(sent.pos_tags)
assert all(sent.ner_tags)
assert all(sent.dep_labels)
@pytest.mark.skipif(
"CI" not in os.environ, reason="Only run spacy non English test on GitHub Actions"
)
def test_spacy_chinese():
"""Test the parser with the md document."""
# Test Chinese alpha tokenization
docs_path = "tests/data/pure_html/chinese.html"
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "md"))
parser_udf = get_parser_udf(
structural=True, tabular=True, lingual=True, visual=False, language="zh"
)
doc = parser_udf.apply(doc)
assert len(doc.sentences) == 8
sent = doc.sentences[1]
assert sent.text == "我们和他对比谁更厉害!"
assert sent.words == ["我们", "和", "他", "对比", "谁", "更", "厉害", "!"]
# These tags are less stable (ie they change when a spacy model changes)
# So just check that values other than "" are assigned.
assert all(sent.ner_tags)
assert all(sent.dep_labels)
def test_warning_on_missing_pdf():
"""Test that a warning is issued on invalid pdf."""
docs_path = "tests/data/html_simple/table_span.html"
pdf_path = "tests/data/pdf_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(iter(preprocessor))
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
)
with pytest.warns(RuntimeWarning) as record:
doc = parser_udf.apply(doc)
assert isinstance(record, type(pytest.warns(RuntimeWarning)))
def test_warning_on_incorrect_filename():
"""Test that a warning is issued on invalid pdf."""
docs_path = "tests/data/html_simple/md_para.html"
pdf_path = "tests/data/html_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "md_para"))
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
)
with pytest.warns(RuntimeWarning) as record:
doc = parser_udf.apply(doc)
assert isinstance(record, type(pytest.warns(RuntimeWarning)))
def test_parse_md_paragraphs():
"""Unit test of Paragraph parsing."""
docs_path = "tests/data/html_simple/md_para.html"
pdf_path = "tests/data/pdf_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "md_para"))
# Check that doc has a name
assert doc.name == "md_para"
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
)
doc = parser_udf.apply(doc)
# Check that doc has a figure
assert len(doc.figures) == 6
assert doc.figures[0].url == "http://placebear.com/200/200"
assert doc.figures[0].position == 0
assert doc.figures[0].section.position == 0
assert len(doc.figures[0].captions) == 0
assert doc.figures[0].stable_id == "md_para::figure:0"
assert doc.figures[0].cell.position == 13
assert (
doc.figures[2].url
== "http://html5doctor.com/wp-content/uploads/2010/03/kookaburra.jpg"
)
assert doc.figures[2].position == 2
assert len(doc.figures[2].captions) == 1
assert len(doc.figures[2].captions[0].paragraphs[0].sentences) == 3
assert (
doc.figures[2].captions[0].paragraphs[0].sentences[0].text
== "Australian Birds."
)
assert len(doc.figures[4].captions) == 0
assert (
doc.figures[4].url
== "http://html5doctor.com/wp-content/uploads/2010/03/pelican.jpg"
)
# Check that doc has a table
assert len(doc.tables) == 1
assert doc.tables[0].position == 0
assert doc.tables[0].section.position == 0
# Check that doc has cells
assert len(doc.cells) == 16
cells = list(doc.cells)
assert cells[0].row_start == 0
assert cells[0].col_start == 0
assert cells[0].position == 0
assert cells[0].table.position == 0
assert cells[10].row_start == 2
assert cells[10].col_start == 2
assert cells[10].position == 10
assert cells[10].table.position == 0
# Check that doc has sentences
assert len(doc.sentences) == 51
sentences = sorted(doc.sentences, key=lambda x: x.position)
sent1 = sentences[1]
sent2 = sentences[2]
sent3 = sentences[3]
assert sent1.text == "This is some basic, sample markdown."
assert sent2.text == (
"Unlike the other markdown document, however, "
"this document actually contains paragraphs of text."
)
assert sent1.paragraph.position == 1
assert sent1.section.position == 0
assert sent2.paragraph.position == 1
assert sent2.section.position == 0
assert sent3.paragraph.position == 1
assert sent3.section.position == 0
assert len(doc.paragraphs) == 46
assert len(doc.paragraphs[1].sentences) == 3
assert len(doc.paragraphs[2].sentences) == 1
def test_simple_parser():
"""Unit test of Parser on a single document with lingual features off."""
logger = logging.getLogger(__name__)
docs_path = "tests/data/html_simple/md.html"
pdf_path = "tests/data/pdf_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "md"))
# Check that doc has a name
assert doc.name == "md"
# Create an Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
lingual=False,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
lingual_parser=SimpleParser(delim="NoDelim"),
)
doc = parser_udf.apply(doc)
logger.info(f"Doc: {doc}")
for i, sentence in enumerate(doc.sentences):
logger.info(f" Sentence[{i}]: {sentence.text}")
header = sorted(doc.sentences, key=lambda x: x.position)[0]
# Test structural attributes
assert header.xpath == "/html/body/h1"
assert header.html_tag == "h1"
assert header.html_attrs == ["id=sample-markdown"]
# Test lingual attributes
assert header.ner_tags == ["", ""]
assert header.dep_labels == ["", ""]
assert header.dep_parents == [0, 0]
assert header.lemmas == ["", ""]
assert header.pos_tags == ["", ""]
assert len(doc.sentences) == 44
def test_custom_parser():
"""Unit test of customized parser."""
lingual_parser = SpacyParser("en")
parser_udf = get_parser_udf(
language="de", lingual=True, lingual_parser=lingual_parser
)
# The lingual_parser is prioritized over language
assert parser_udf.lingual_parser == lingual_parser
assert parser_udf.lingual_parser.lang == "en"
def test_parse_table_span():
"""Unit test of parse table span."""
logger = logging.getLogger(__name__)
docs_path = "tests/data/html_simple/table_span.html"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "table_span"))
# Check that doc has a name
assert doc.name == "table_span"
# Create an Parser and parse the document
parser_udf = get_parser_udf(structural=True, lingual=True, visual=False)
doc = parser_udf.apply(doc)
logger.info(f"Doc: {doc}")
assert len(doc.sentences) == 1
for sentence in doc.sentences:
logger.info(f" Sentence: {sentence.text}")
def test_parse_document_diseases():
"""Unit test of Parser on a single document.
This tests both the structural and visual parse of the document.
"""
logger = logging.getLogger(__name__)
docs_path = "tests/data/html_simple/diseases.html"
pdf_path = "tests/data/pdf_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "diseases"))
# Check that doc has a name
assert doc.name == "diseases"
# Create an Parser and parse the diseases document
parser_udf = get_parser_udf(
structural=True,
lingual=True,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
)
doc = parser_udf.apply(doc)
logger.info(f"Doc: {doc}")
for sentence in doc.sentences:
logger.info(f" Sentence: {sentence.text}")
# Check captions
assert len(doc.captions) == 2
caption = sorted(doc.sentences, key=lambda x: x.position)[20]
assert caption.paragraph.caption.position == 0
assert caption.paragraph.caption.table.position == 0
assert caption.text == "Table 1: Infectious diseases and where to find them."
assert caption.paragraph.position == 18
# Check figures
assert len(doc.figures) == 0
# Check that doc has a table
assert len(doc.tables) == 3
assert doc.tables[0].position == 0
assert doc.tables[0].document.name == "diseases"
# Check that doc has cells
assert len(doc.cells) == 25
sentence = sorted(doc.sentences, key=lambda x: x.position)[10]
logger.info(f" {sentence}")
# Check the sentence's cell
assert sentence.table.position == 0
assert sentence.cell.row_start == 2
assert sentence.cell.col_start == 1
assert sentence.cell.position == 4
# Test structural attributes
assert sentence.xpath == "/html/body/table[1]/tbody/tr[3]/td[1]/p"
assert sentence.html_tag == "p"
assert sentence.html_attrs == ["class=s6", "style=padding-top: 1pt"]
# Test visual attributes
assert sentence.page == [1, 1, 1]
assert sentence.top == [342, 296, 356]
assert sentence.left == [318, 369, 318]
# Test lingual attributes
# When lingual=True, some value other than "" should be filled in.
assert all(sentence.ner_tags)
assert all(sentence.dep_labels)
assert len(doc.sentences) == 37
def test_parse_style():
"""Test style tag parsing."""
logger = logging.getLogger(__name__)
docs_path = "tests/data/html_extended/ext_diseases.html"
pdf_path = "tests/data/pdf_extended/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "ext_diseases"))
# Create a Parser and parse the diseases document
parser_udf = get_parser_udf(
structural=True,
lingual=True,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
)
doc = parser_udf.apply(doc)
# Grab the sentences parsed by the Parser
sentences = doc.sentences
logger.warning(f"Doc: {doc}")
for i, sentence in enumerate(sentences):
logger.warning(f" Sentence[{i}]: {sentence.html_attrs}")
# sentences for testing
sub_sentences = [
{
"index": 6,
"attr": [
"class=col-header",
"hobbies=work:hard;play:harder",
"type=phenotype",
"style=background: #f1f1f1; color: aquamarine; font-size: 18px;",
],
},
{"index": 9, "attr": ["class=row-header", "style=background: #f1f1f1;"]},
{"index": 11, "attr": ["class=cell", "style=text-align: center;"]},
]
# Assertions
assert all(sentences[p["index"]].html_attrs == p["attr"] for p in sub_sentences)
def test_parse_error_doc_skipping():
"""Test skipping of faulty htmls."""
faulty_doc_path = "tests/data/html_faulty/ext_diseases_missing_table_tag.html"
preprocessor = HTMLDocPreprocessor(faulty_doc_path)
doc = next(
preprocessor._parse_file(faulty_doc_path, "ext_diseases_missing_table_tag")
)
parser_udf = get_parser_udf(structural=True, lingual=True)
doc = parser_udf.apply(doc)
# No document is returned for faulty document
assert doc is None
valid_doc_path = "tests/data/html_extended/ext_diseases.html"
preprocessor = HTMLDocPreprocessor(valid_doc_path)
doc = next(preprocessor._parse_file(valid_doc_path, "ext_diseases"))
parser_udf = get_parser_udf(structural=True, lingual=True)
doc = parser_udf.apply(doc)
assert len(doc.sentences) == 37
def test_parse_multi_sections():
"""Test the parser with the radiology document."""
# Test multi-section html
docs_path = "tests/data/pure_html/radiology.html"
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "radiology"))
parser_udf = get_parser_udf(
structural=True, tabular=True, lingual=True, visual=False
)
doc = parser_udf.apply(doc)
assert len(doc.sections) == 5
assert len(doc.paragraphs) == 30
assert len(doc.sentences) == 35
assert len(doc.figures) == 2
assert doc.sections[0].name is None
assert doc.sections[1].name == "label"
assert doc.sections[2].name == "content"
assert doc.sections[3].name == "image"
assert doc.sections[2].paragraphs[0].name == "COMPARISON"
assert doc.sections[2].paragraphs[1].name == "INDICATION"
assert doc.sections[2].paragraphs[2].name == "FINDINGS"
assert doc.sections[2].paragraphs[3].name == "IMPRESSION"
def test_text_doc_preprocessor():
"""Test ``TextDocPreprocessor`` with text document."""
# Test text document
docs_path = "tests/data/various_format/text_format.txt"
preprocessor = TextDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "plain_text_format"))
parser_udf = get_parser_udf(
structural=True, tabular=True, lingual=True, visual=False
)
doc = parser_udf.apply(doc)
assert len(preprocessor) == 1
assert len(doc.sections) == 1
assert len(doc.paragraphs) == 1
assert len(doc.sentences) == 57
def test_tsv_doc_preprocessor():
"""Test ``TSVDocPreprocessor`` with tsv document."""
# Test tsv document
docs_path = "tests/data/various_format/tsv_format.tsv"
preprocessor = TSVDocPreprocessor(docs_path, header=True)
assert len(preprocessor) == 2
preprocessor = TSVDocPreprocessor(docs_path, max_docs=1, header=True)
doc = next(preprocessor._parse_file(docs_path, "tsv_format"))
parser_udf = get_parser_udf(
structural=True, tabular=True, lingual=True, visual=False
)
doc = parser_udf.apply(doc)
assert len(preprocessor) == 1
assert doc.name == "9b28e780-ba48-4a53-8682-7c58c141a1b6"
assert len(doc.sections) == 1
assert len(doc.paragraphs) == 1
assert len(doc.sentences) == 33
def test_csv_doc_preprocessor():
"""Test ``CSVDocPreprocessor`` with csv document."""
# Test csv document
docs_path = "tests/data/various_format/csv_format.csv"
preprocessor = CSVDocPreprocessor(docs_path, header=True)
assert len(preprocessor) == 10
preprocessor = CSVDocPreprocessor(docs_path, max_docs=1, header=True)
doc = next(preprocessor._parse_file(docs_path, "csv_format"))
parser_udf = get_parser_udf(
structural=True, tabular=True, lingual=True, visual=False
)
doc = parser_udf.apply(doc)
assert len(preprocessor) == 1
assert len(doc.sections) == 12
assert len(doc.paragraphs) == 10
assert len(doc.sentences) == 17
def test_parser_skips_and_flattens():
"""Test if ``Parser`` skips/flattens elements."""
parser_udf = get_parser_udf()
# Test if a parser skips comments
doc = Document(id=1, name="test", stable_id="1::document:0:0")
doc.text = "<html><body>Hello!<!-- comment --></body></html>"
doc = parser_udf.apply(doc)
assert doc.sentences[0].text == "Hello!"
# Test if a parser skips blacklisted elements
doc = Document(id=2, name="test2", stable_id="2::document:0:0")
doc.text = "<html><body><script>alert('Hello');</script><p>Hello!</p></body></html>"
doc = parser_udf.apply(doc)
assert doc.sentences[0].text == "Hello!"
# Test if a parser flattens elements
doc = Document(id=3, name="test3", stable_id="3::document:0:0")
doc.text = "<html><body><span>Hello, <br>world!</span></body></html>"
doc = parser_udf.apply(doc)
assert doc.sentences[0].text == "Hello, world!"
# Now with different blacklist and flatten
parser_udf = get_parser_udf(blacklist=["meta"], flatten=["word"])
# Test if a parser does not skip non-blacklisted element
doc = Document(id=4, name="test4", stable_id="4::document:0:0")
doc.text = "<html><body><script>alert('Hello');</script><p>Hello!</p></body></html>"
doc = parser_udf.apply(doc)
assert doc.sentences[0].text == "alert('Hello');"
assert doc.sentences[1].text == "Hello!"
# Test if a parser skips blacklisted elements
doc = Document(id=5, name="test5", stable_id="5::document:0:0")
doc.text = "<html><head><meta name='keywords'></head><body>Hello!</body></html>"
doc = parser_udf.apply(doc)
assert doc.sentences[0].text == "Hello!"
# Test if a parser does not flatten elements
doc = Document(id=6, name="test6", stable_id="6::document:0:0")
doc.text = "<html><body><span>Hello, <br>world!</span></body></html>"
doc = parser_udf.apply(doc)
assert doc.sentences[0].text == "Hello,"
assert doc.sentences[1].text == "world!"
# Test if a parser flattens elements
doc = Document(id=7, name="test7", stable_id="7::document:0:0")
doc.text = "<html><body><word>Hello, </word><word>world!</word></body></html>"
doc = parser_udf.apply(doc)
assert doc.sentences[0].text == "Hello, world!"
def test_parser_no_image():
"""Unit test of Parser on a single document that has a figure without image."""
docs_path = "tests/data/html_simple/no_image.html"
pdf_path = "tests/data/pdf_simple/"
# Preprocessor for the Docs
preprocessor = HTMLDocPreprocessor(docs_path)
doc = next(preprocessor._parse_file(docs_path, "no_image"))
# Check that doc has a name
assert doc.name == "no_image"
# Create an Parser and parse the no_image document
parser_udf = get_parser_udf(
structural=True,
lingual=False,
visual=True,
visual_parser=PdfVisualParser(pdf_path),
)
doc = parser_udf.apply(doc)
# Check that doc has no figures
assert len(doc.figures) == 0
@pytest.mark.parametrize(
"docs_path, pdf_path",
[
("tests/data/html_simple/", "tests/data/pdf_simple/"),
("tests/data/html_simple/", "tests/data/pdf_simple"),
("tests/data/html_simple", "tests/data/pdf_simple/"),
("tests/data/html_simple", "tests/data/pdf_simple"),
],
)
def test_various_dir_path_formats(database_session, docs_path, pdf_path):
"""Test the parser with various directory path formats."""
docs = parse(database_session, docs_path, pdf_path)
assert len(docs) == 6
for doc in docs:
# table_span.pdf and no_image_unsorted.pdf do not exist, so no
# coordinate info is available
if doc.name in ["table_span", "no_image_unsorted"]:
assert doc.sentences[0].top is None
else:
assert doc.sentences[0].top is not None
@pytest.mark.parametrize(
"pdf_path",
[
("tests/data/pdf_extended/"),
("tests/data/pdf_extended"),
],
)
def test_various_pdf_path_formats(database_session, pdf_path):
"""Test the parser with various pdf_path formats."""
docs_path = "tests/data/html_extended/ext_diseases.html"
docs = parse(database_session, docs_path, pdf_path)
assert len(docs) == 1
assert docs[0].sentences[0].top is not None
def test_parse_hocr():
"""Test the parser with hOCR documents."""
docs_path = "tests/data/hocr_simple/md.hocr"
# Preprocessor for the Docs
preprocessor = HOCRDocPreprocessor(docs_path)
doc = next(iter(preprocessor))
# Create a Parser and parse the md document
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
visual_parser=HocrVisualParser(),
)
doc = parser_udf.apply(doc)
assert doc.name == "md"
assert doc.sentences[12].left == [372, 384, 405, 418, 418]
assert doc.sentences[12].page == [1] * len(doc.sentences[12].words)
docs_path = "tests/data/hocr_simple/121.hocr"
preprocessor = HOCRDocPreprocessor(docs_path)
doc = next(iter(preprocessor))
doc = parser_udf.apply(doc)
# The double spaces between "This" and "Consumer" should be replaced with a single
# space by HOCRDocPreprocessor.
assert doc.sentences[0].words[:3] == ["This", "Consumer", "Credit"]
docs_path = "tests/data/hocr_simple/japan.hocr"
preprocessor = HOCRDocPreprocessor(docs_path, space=False)
doc = next(iter(preprocessor))
# Create a Parser and parse the japan document
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
language="ja",
visual_parser=HocrVisualParser(),
)
doc = parser_udf.apply(doc)
assert doc.name == "japan"
sent = doc.sentences[0]
assert len(sent.words) == len(sent.left)
# "にっぽん" is tokenized into three: "に", "っ", "ぽん" in hOCR,
# but it is tokenized as one token by spaCy.
assert sent.words[1] == "にっぽん"
assert sent.left[1] == 150 # this left comes from "に" in hOCR
assert sent.right[1] == 249 # this right comes from "ぽん" in hOCR
sent = doc.sentences[2]
assert len(sent.words) == len(sent.left)
# "チェーン店\n本・支店" is tokenized into three: "チェーン店", "本・支店" in hOCR,
# but it is tokenized as "チェーン", "店本", "・", "支店" by spaCy.
assert sent.words[1] == "店本"
assert sent.left[1] == 145 # comes from left min of "チェーン店\n本・支店" in hOCR
assert sent.right[1] == 245 # comes from right min of "チェーン店\n本・支店" in hOCR
def test_parse_hocr_with_tables():
"""Test the parser with hOCR documents that have tables."""
docs_path = "tests/data/hocr_simple/1st.hocr"
preprocessor = HOCRDocPreprocessor(docs_path)
doc = next(iter(preprocessor))
parser_udf = get_parser_udf(
structural=True,
tabular=True,
lingual=True,
visual_parser=HocrVisualParser(),
)
doc = parser_udf.apply(doc)
assert doc.name == "1st"
| fonduer-master | tests/parser/test_parser.py |
"""Fonduer simple parser unit tests."""
from fonduer.parser.lingual_parser import SimpleParser
def test_simple_parser_support():
"""Unit test of simple parser support."""
lingual_parser = SimpleParser()
assert lingual_parser.has_tokenizer_support()
assert not lingual_parser.has_NLP_support()
def test_simple_split_sentences():
"""Unit test of splitting sentences."""
tokenize_and_split_sentences = SimpleParser().split_sentences
text = "This is a text. This is another text."
iterator = tokenize_and_split_sentences(text)
assert len(list(iterator)) == 2
| fonduer-master | tests/parser/test_simple_parser.py |
"""Fonduer spacy parser unit tests."""
import os
import pytest
from fonduer.parser.lingual_parser.spacy_parser import (
SpacyParser,
TokenPreservingTokenizer,
set_custom_boundary,
)
from fonduer.parser.models import Sentence
@pytest.mark.skipif(
"CI" not in os.environ, reason="Only run non-English tests on GitHub Actions"
)
def test_spacy_support():
"""Unit test of spacy support."""
# Supported language
lingual_parser = SpacyParser("en")
assert lingual_parser.has_tokenizer_support()
assert lingual_parser.has_NLP_support()
lingual_parser = SpacyParser("ja")
assert lingual_parser.has_tokenizer_support()
assert lingual_parser.has_NLP_support()
# Non supported language
lingual_parser = SpacyParser("non-supported-lang")
assert not lingual_parser.has_tokenizer_support()
assert not lingual_parser.has_NLP_support()
# Language not specified
with pytest.raises(TypeError):
lingual_parser = SpacyParser()
def test_spacy_split_sentences():
"""Unit test of splitting sentences."""
lingual_parser = SpacyParser("en")
tokenize_and_split_sentences = lingual_parser.split_sentences
text = "This is a text. This is another text."
iterator = tokenize_and_split_sentences(text)
assert len(list(iterator)) == 2
def test_split_sentences_by_char_limit():
"""Unit test of splitting sentences by char limit."""
lingual_parser = SpacyParser("en")
text = "This is a text. This is another text."
all_sentences = [
Sentence(**parts) for parts in lingual_parser.split_sentences(text)
]
assert len(all_sentences) == 2
assert [len(sentence.text) for sentence in all_sentences] == [15, 21]
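# Replace the default sentencizer with a custom boundary detector so that (as
# the helper's name suggests) the sentence boundaries assigned above are kept.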
lingual_parser.model.remove_pipe("sentencizer")
lingual_parser.model.add_pipe(
set_custom_boundary, before="parser", name="sentence_boundary_detector"
)
sentence_batches = lingual_parser._split_sentences_by_char_limit(all_sentences, 20)
assert len(sentence_batches) == 2
sentence_batches = lingual_parser._split_sentences_by_char_limit(all_sentences, 100)
assert len(sentence_batches) == 1
sentence_batch = sentence_batches[0]
custom_tokenizer = TokenPreservingTokenizer(lingual_parser.model.vocab)
doc = custom_tokenizer(sentence_batch)
doc.user_data = sentence_batch
for name, proc in lingual_parser.model.pipeline: # iterate over components in order
doc = proc(doc)
assert doc.is_parsed
# See if the number of parsed spaCy sentences matches that of input sentences
assert len(list(doc.sents)) == len(sentence_batch)
| fonduer-master | tests/parser/test_spacy_parser.py |
"""Fonduer parser unit tests."""
| fonduer-master | tests/parser/__init__.py |
"""Unit tests for preprocessors."""
from bs4 import BeautifulSoup
from fonduer.parser.preprocessors.hocr_doc_preprocessor import HOCRDocPreprocessor
def test_hocrpreprocessor():
"""Test hOCRDocPreprocessor with a simple hOCR."""
path = "tests/data/hocr_simple/md.hocr"
preprocessor = HOCRDocPreprocessor(path=path)
doc = next(iter(preprocessor))
assert doc.name == "md"
# The intermediate attribute "fonduer" should be removed.
assert "fonduer" not in doc.text
# number of "left" attribute is equal to that of "ppageno" - 1 (at ocr_page)
assert doc.text.count("left") == doc.text.count("ppageno") - 1 == 24
def test_hocrpreprocessor_space_false():
"""Test hOCRDocPreprocessor with space=False."""
path = "tests/data/hocr_simple/japan.hocr"
preprocessor = HOCRDocPreprocessor(path=path, space=False)
doc = next(iter(preprocessor))
assert doc.name == "japan"
# The intermediate attribute "fonduer" should be removed.
assert "fonduer" not in doc.text
soup = BeautifulSoup(doc.text, "lxml")
element = soup.find(id="par_1_1")
# A token cannot contain " " (whitespace) as "tokens" are deliminated by a " ".
assert len(element.get("left").split()) == len(element.get("tokens").split()) == 59
| fonduer-master | tests/parser/test_preprocessor.py |
"""Fonduer visual_parser unit tests."""
import random
from operator import attrgetter
import pytest
from bs4 import BeautifulSoup
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser.visual_parser import PdfVisualParser
from tests.parser.test_parser import get_parser_udf
def test_visual_parser_not_affected_by_order_of_sentences():
"""Test if visual_parser result is not affected by the order of sentences."""
docs_path = "tests/data/html/2N6427.html"
pdf_path = "tests/data/pdf/"
# Initialize preprocessor, parser, visual_parser.
# Note that parser is initialized with `visual=False` and that visual_parser
# will be used to attach "visual" information to sentences after parsing.
preprocessor = HTMLDocPreprocessor(docs_path)
parser_udf = get_parser_udf(
structural=True, lingual=False, tabular=True, visual=False
)
visual_parser = PdfVisualParser(pdf_path=pdf_path)
doc = parser_udf.apply(next(preprocessor.__iter__()))
# Sort sentences by sentence.position
doc.sentences = sorted(doc.sentences, key=attrgetter("position"))
sentences0 = [sent for sent in visual_parser.parse(doc.name, doc.sentences)]
# Sort again in case visual_parser.link changes the order
sentences0 = sorted(sentences0, key=attrgetter("position"))
doc = parser_udf.apply(next(preprocessor.__iter__()))
# Shuffle
random.shuffle(doc.sentences)
sentences1 = [sent for sent in visual_parser.parse(doc.name, doc.sentences)]
# Sort sentences by sentence.position
sentences1 = sorted(sentences1, key=attrgetter("position"))
# This should hold as both sentences are sorted by their position
assert all(
[
sent0.position == sent1.position
for (sent0, sent1) in zip(sentences0, sentences1)
]
)
# The following assertion should hold if the visual_parser result is not affected
# by the order of sentences.
assert all(
[sent0.left == sent1.left for (sent0, sent1) in zip(sentences0, sentences1)]
)
def test_non_existent_pdf_path_should_fail():
"""Test if a non-existent raises an error."""
pdf_path = "dummy_path"
with pytest.raises(ValueError):
PdfVisualParser(pdf_path=pdf_path)
def test_pdf_word_list_is_sorted():
"""Test if pdf_word_list is sorted as expected.
no_image_unsorted.html was originally created from pdf_simple/no_image.pdf,
but the order of html elements like block and word has been changed to see if
pdf_word_list is sorted as expected.
"""
docs_path = "tests/data/html_simple/no_image_unsorted.html"
pdf_path = "tests/data/pdf_simple"
visual_parser = PdfVisualParser(pdf_path=pdf_path)
with open(docs_path) as f:
soup = BeautifulSoup(f, "html.parser")
page = soup.find_all("page")[0]
pdf_word_list, coordinate_map = visual_parser._coordinates_from_HTML(page, 1)
# Check if words are sorted by block top
assert set([content for (_, content) in pdf_word_list[:2]]) == {"Sample", "HTML"}
# Check if words are sorted by top
assert [content for (_, content) in pdf_word_list[2:7]] == [
"This",
"is",
"an",
"html",
"that",
]
# Check if words are sorted by left (#449)
assert [content for (_, content) in pdf_word_list[:2]] == ["Sample", "HTML"]
| fonduer-master | tests/parser/test_visual_linker.py |
"""Fonduer e2e tests."""
| fonduer-master | tests/e2e/__init__.py |
"""Fonduer incremental e2e test."""
import logging
import os
import pytest
from snorkel.labeling import labeling_function
from fonduer.candidates import CandidateExtractor, MentionExtractor
from fonduer.candidates.models import Candidate
from fonduer.features import Featurizer
from fonduer.features.models import Feature, FeatureKey
from fonduer.parser import Parser
from fonduer.parser.models import Document
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser.visual_parser import PdfVisualParser
from fonduer.supervision import Labeler
from fonduer.supervision.labeler import ABSTAIN
from fonduer.supervision.models import Label, LabelKey
from tests.shared.hardware_lfs import (
LF_negative_number_left,
LF_operating_row,
LF_storage_row,
LF_temperature_row,
LF_to_left,
LF_tstg_row,
)
from tests.shared.hardware_matchers import part_matcher, temp_matcher
from tests.shared.hardware_spaces import MentionNgramsPart, MentionNgramsTemp
from tests.shared.hardware_subclasses import Part, PartTemp, Temp
from tests.shared.hardware_throttlers import temp_throttler
logger = logging.getLogger(__name__)
ATTRIBUTE = "stg_temp_max"
@pytest.mark.skipif(
"CI" not in os.environ, reason="Only run incremental on GitHub Actions"
)
def test_incremental(database_session):
"""Run an end-to-end test on incremental additions."""
# GitHub Actions gives 2 cores
# help.github.com/en/actions/reference/virtual-environments-for-github-hosted-runners
PARALLEL = 2
max_docs = 1
session = database_session
docs_path = "tests/data/html/dtc114w.html"
pdf_path = "tests/data/pdf/"
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
corpus_parser = Parser(
session,
parallelism=PARALLEL,
structural=True,
lingual=True,
visual_parser=PdfVisualParser(pdf_path),
)
corpus_parser.apply(doc_preprocessor)
num_docs = session.query(Document).count()
logger.info(f"Docs: {num_docs}")
assert num_docs == max_docs
docs = corpus_parser.get_documents()
last_docs = corpus_parser.get_documents()
assert len(docs[0].sentences) == len(last_docs[0].sentences)
# Mention Extraction
part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
temp_ngrams = MentionNgramsTemp(n_max=2)
mention_extractor = MentionExtractor(
session, [Part, Temp], [part_ngrams, temp_ngrams], [part_matcher, temp_matcher]
)
mention_extractor.apply(docs, parallelism=PARALLEL)
assert session.query(Part).count() == 11
assert session.query(Temp).count() == 8
# Test if clear=True works
mention_extractor.apply(docs, parallelism=PARALLEL, clear=True)
assert session.query(Part).count() == 11
assert session.query(Temp).count() == 8
# Candidate Extraction
candidate_extractor = CandidateExtractor(
session, [PartTemp], throttlers=[temp_throttler]
)
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
assert session.query(PartTemp).filter(PartTemp.split == 0).count() == 70
assert session.query(Candidate).count() == 70
# Grab candidate lists
train_cands = candidate_extractor.get_candidates(split=0)
assert len(train_cands) == 1
assert len(train_cands[0]) == 70
# Featurization
featurizer = Featurizer(session, [PartTemp])
featurizer.apply(split=0, train=True, parallelism=PARALLEL)
assert session.query(Feature).count() == len(train_cands[0])
num_feature_keys = session.query(FeatureKey).count()
assert num_feature_keys == 514
F_train = featurizer.get_feature_matrices(train_cands)
assert F_train[0].shape == (len(train_cands[0]), num_feature_keys)
assert len(featurizer.get_keys()) == num_feature_keys
# Test Dropping FeatureKey
featurizer.drop_keys(["BASIC_e1_LENGTH_1"])
assert session.query(FeatureKey).count() == num_feature_keys - 1
stg_temp_lfs = [
LF_storage_row,
LF_operating_row,
LF_temperature_row,
LF_tstg_row,
LF_to_left,
LF_negative_number_left,
]
labeler = Labeler(session, [PartTemp])
labeler.apply(split=0, lfs=[stg_temp_lfs], train=True, parallelism=PARALLEL)
assert session.query(Label).count() == len(train_cands[0])
# Only 5 because LF_operating_row doesn't apply to the first test doc
assert session.query(LabelKey).count() == 5
L_train = labeler.get_label_matrices(train_cands)
assert L_train[0].shape == (len(train_cands[0]), 5)
assert len(labeler.get_keys()) == 5
docs_path = "tests/data/html/112823.html"
pdf_path = "tests/data/pdf/"
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
corpus_parser.apply(doc_preprocessor, clear=False)
assert len(corpus_parser.get_documents()) == 2
new_docs = corpus_parser.get_last_documents()
assert len(new_docs) == 1
assert new_docs[0].name == "112823"
# Get mentions from just the new docs
mention_extractor.apply(new_docs, parallelism=PARALLEL, clear=False)
assert session.query(Part).count() == 81
assert session.query(Temp).count() == 31
# Test if existing mentions are skipped.
mention_extractor.apply(new_docs, parallelism=PARALLEL, clear=False)
assert session.query(Part).count() == 81
assert session.query(Temp).count() == 31
# Just run candidate extraction and assign to split 0
candidate_extractor.apply(new_docs, split=0, parallelism=PARALLEL, clear=False)
# Grab candidate lists
train_cands = candidate_extractor.get_candidates(split=0)
assert len(train_cands) == 1
assert len(train_cands[0]) == 1501
# Test if existing candidates are skipped.
candidate_extractor.apply(new_docs, split=0, parallelism=PARALLEL, clear=False)
train_cands = candidate_extractor.get_candidates(split=0)
assert len(train_cands) == 1
assert len(train_cands[0]) == 1501
# Update features
featurizer.update(new_docs, parallelism=PARALLEL)
assert session.query(Feature).count() == len(train_cands[0])
num_feature_keys = session.query(FeatureKey).count()
assert num_feature_keys == 2608
F_train = featurizer.get_feature_matrices(train_cands)
assert F_train[0].shape == (len(train_cands[0]), num_feature_keys)
assert len(featurizer.get_keys()) == num_feature_keys
# Update LF_storage_row. Now it always returns ABSTAIN.
@labeling_function(name="LF_storage_row")
def LF_storage_row_updated(c):
return ABSTAIN
stg_temp_lfs = [
LF_storage_row_updated,
LF_operating_row,
LF_temperature_row,
LF_tstg_row,
LF_to_left,
LF_negative_number_left,
]
# Update Labels
labeler.update(docs, lfs=[stg_temp_lfs], parallelism=PARALLEL)
labeler.update(new_docs, lfs=[stg_temp_lfs], parallelism=PARALLEL)
assert session.query(Label).count() == len(train_cands[0])
# Only 5 because LF_storage_row doesn't apply to any doc (always ABSTAIN)
num_label_keys = session.query(LabelKey).count()
assert num_label_keys == 5
L_train = labeler.get_label_matrices(train_cands)
assert L_train[0].shape == (len(train_cands[0]), num_label_keys)
# Test clear
featurizer.clear(train=True)
assert session.query(FeatureKey).count() == 0
| fonduer-master | tests/e2e/test_incremental.py |
"""Fonduer e2e test."""
import logging
import os
import pickle
import emmental
import numpy as np
import pytest
from emmental.data import EmmentalDataLoader
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from emmental.modules.embedding_module import EmbeddingModule
from snorkel.labeling.model import LabelModel
import fonduer
from fonduer.candidates import CandidateExtractor, MentionExtractor
from fonduer.candidates.models import Candidate
from fonduer.features import Featurizer
from fonduer.features.models import Feature, FeatureKey
from fonduer.learning.dataset import FonduerDataset
from fonduer.learning.task import create_task
from fonduer.learning.utils import collect_word_counter
from fonduer.parser import Parser
from fonduer.parser.models import Document, Sentence
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser.visual_parser import PdfVisualParser
from fonduer.supervision import Labeler
from fonduer.supervision.models import GoldLabel, Label, LabelKey
from tests.shared.hardware_lfs import (
TRUE,
LF_bad_keywords_in_row,
LF_collector_aligned,
LF_complement_left_row,
LF_current_aligned,
LF_current_in_row,
LF_negative_number_left,
LF_non_ce_voltages_in_row,
LF_not_temp_relevant,
LF_operating_row,
LF_storage_row,
LF_temp_on_high_page_num,
LF_temp_outside_table,
LF_temperature_row,
LF_test_condition_aligned,
LF_to_left,
LF_too_many_numbers_row,
LF_tstg_row,
LF_typ_row,
LF_voltage_row_part,
LF_voltage_row_temp,
)
from tests.shared.hardware_matchers import part_matcher, temp_matcher, volt_matcher
from tests.shared.hardware_spaces import (
MentionNgramsPart,
MentionNgramsTemp,
MentionNgramsVolt,
)
from tests.shared.hardware_subclasses import Part, PartTemp, PartVolt, Temp, Volt
from tests.shared.hardware_throttlers import temp_throttler, volt_throttler
from tests.shared.hardware_utils import entity_level_f1, gold
logger = logging.getLogger(__name__)
ATTRIBUTE = "stg_temp_max"
@pytest.mark.skipif("CI" not in os.environ, reason="Only run e2e on GitHub Actions")
def test_e2e(database_session):
"""Run an end-to-end test on documents of the hardware domain."""
# GitHub Actions gives 2 cores
# help.github.com/en/actions/reference/virtual-environments-for-github-hosted-runners
PARALLEL = 2
max_docs = 12
fonduer.init_logging(
format="[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s",
level=logging.INFO,
)
session = database_session
docs_path = "tests/data/html/"
pdf_path = "tests/data/pdf/"
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
corpus_parser = Parser(
session,
parallelism=PARALLEL,
structural=True,
lingual=True,
visual_parser=PdfVisualParser(pdf_path),
)
corpus_parser.apply(doc_preprocessor)
assert session.query(Document).count() == max_docs
num_docs = session.query(Document).count()
logger.info(f"Docs: {num_docs}")
assert num_docs == max_docs
num_sentences = session.query(Sentence).count()
logger.info(f"Sentences: {num_sentences}")
# Divide into test and train
docs = sorted(corpus_parser.get_documents())
last_docs = sorted(corpus_parser.get_last_documents())
ld = len(docs)
assert ld == len(last_docs)
assert len(docs[0].sentences) == len(last_docs[0].sentences)
train_docs = set()
dev_docs = set()
test_docs = set()
splits = (0.5, 0.75)
data = [(doc.name, doc) for doc in docs]
data.sort(key=lambda x: x[0])
for i, (doc_name, doc) in enumerate(data):
if i < splits[0] * ld:
train_docs.add(doc)
elif i < splits[1] * ld:
dev_docs.add(doc)
else:
test_docs.add(doc)
logger.info([x.name for x in train_docs])
# NOTE: With multi-relation support, return values of getting candidates,
# mentions, or sparse matrices are formatted as a list of lists. This means
# that with a single relation, we need to index into the list of lists to
# get the candidates/mentions/sparse matrix for a particular relation or
# mention.
# Mention Extraction
part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
temp_ngrams = MentionNgramsTemp(n_max=2)
volt_ngrams = MentionNgramsVolt(n_max=1)
mention_extractor = MentionExtractor(
session,
[Part, Temp, Volt],
[part_ngrams, temp_ngrams, volt_ngrams],
[part_matcher, temp_matcher, volt_matcher],
)
mention_extractor.apply(docs, parallelism=PARALLEL)
assert len(mention_extractor.get_mentions()) == 3
assert len(mention_extractor.get_mentions(docs)) == 3
# Candidate Extraction
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt], throttlers=[temp_throttler, volt_throttler]
)
for i, docs in enumerate([train_docs, dev_docs, test_docs]):
candidate_extractor.apply(docs, split=i, parallelism=PARALLEL)
# Grab candidate lists
train_cands = candidate_extractor.get_candidates(split=0, sort=True)
dev_cands = candidate_extractor.get_candidates(split=1, sort=True)
test_cands = candidate_extractor.get_candidates(split=2, sort=True)
# Candidate lists should be deterministically sorted.
assert (
"112823::implicit_span_mention:11059:11065:part_expander:0"
== train_cands[0][0][0].context.get_stable_id()
)
assert (
"112823::implicit_span_mention:2752:2754:temp_expander:0"
== train_cands[0][0][1].context.get_stable_id()
)
assert len(train_cands) == 2
assert len(candidate_extractor.get_candidates(docs)) == 2
# Featurization
featurizer = Featurizer(session, [PartTemp, PartVolt])
# Test that FeatureKey is properly reset
featurizer.apply(split=1, train=True, parallelism=PARALLEL)
assert session.query(Feature).count() == 214
num_feature_keys = session.query(FeatureKey).count()
assert num_feature_keys == 1278
# Test Dropping FeatureKey
# Should force a row deletion
featurizer.drop_keys(["BASIC_e0_CONTAINS_WORDS_[BC182]"])
assert session.query(FeatureKey).count() == num_feature_keys - 1
# Should only remove the part_volt as a relation and leave part_temp
assert set(
session.query(FeatureKey)
.filter(FeatureKey.name == "DDL_e0_LEMMA_SEQ_[bc182]")
.one()
.candidate_classes
) == {"part_temp", "part_volt"}
featurizer.drop_keys(["DDL_e0_LEMMA_SEQ_[bc182]"], candidate_classes=[PartVolt])
assert session.query(FeatureKey).filter(
FeatureKey.name == "DDL_e0_LEMMA_SEQ_[bc182]"
).one().candidate_classes == ["part_temp"]
assert session.query(FeatureKey).count() == num_feature_keys - 1
# Inserting the removed key
featurizer.upsert_keys(
["DDL_e0_LEMMA_SEQ_[bc182]"], candidate_classes=[PartTemp, PartVolt]
)
assert set(
session.query(FeatureKey)
.filter(FeatureKey.name == "DDL_e0_LEMMA_SEQ_[bc182]")
.one()
.candidate_classes
) == {"part_temp", "part_volt"}
assert session.query(FeatureKey).count() == num_feature_keys - 1
# Removing the key again
featurizer.drop_keys(["DDL_e0_LEMMA_SEQ_[bc182]"], candidate_classes=[PartVolt])
# Removing the last relation from a key should delete the row
featurizer.drop_keys(["DDL_e0_LEMMA_SEQ_[bc182]"], candidate_classes=[PartTemp])
assert session.query(FeatureKey).count() == num_feature_keys - 2
session.query(Feature).delete(synchronize_session="fetch")
session.query(FeatureKey).delete(synchronize_session="fetch")
featurizer.apply(split=0, train=True, parallelism=PARALLEL)
# The number of Features should equal the total number of train candidates
num_features = session.query(Feature).count()
assert num_features == len(train_cands[0]) + len(train_cands[1])
num_feature_keys = session.query(FeatureKey).count()
assert num_feature_keys == 4629
F_train = featurizer.get_feature_matrices(train_cands)
assert F_train[0].shape == (len(train_cands[0]), num_feature_keys)
assert F_train[1].shape == (len(train_cands[1]), num_feature_keys)
assert len(featurizer.get_keys()) == num_feature_keys
featurizer.apply(split=1, parallelism=PARALLEL)
# The number of Features should increase by the total number of dev candidates
num_features += len(dev_cands[0]) + len(dev_cands[1])
assert session.query(Feature).count() == num_features
assert session.query(FeatureKey).count() == num_feature_keys
F_dev = featurizer.get_feature_matrices(dev_cands)
assert F_dev[0].shape == (len(dev_cands[0]), num_feature_keys)
assert F_dev[1].shape == (len(dev_cands[1]), num_feature_keys)
featurizer.apply(split=2, parallelism=PARALLEL)
# The number of Features should increase by the total number of test candidates
num_features += len(test_cands[0]) + len(test_cands[1])
assert session.query(Feature).count() == num_features
assert session.query(FeatureKey).count() == num_feature_keys
F_test = featurizer.get_feature_matrices(test_cands)
assert F_test[0].shape == (len(test_cands[0]), num_feature_keys)
assert F_test[1].shape == (len(test_cands[1]), num_feature_keys)
gold_file = "tests/data/hardware_tutorial_gold.csv"
labeler = Labeler(session, [PartTemp, PartVolt])
# This should raise an error, since gold labels are not yet loaded.
with pytest.raises(ValueError):
_ = labeler.get_gold_labels(train_cands, annotator="gold")
labeler.apply(
docs=last_docs,
lfs=[[gold], [gold]],
table=GoldLabel,
train=True,
parallelism=PARALLEL,
)
# All candidates should now be gold-labeled.
assert session.query(GoldLabel).count() == session.query(Candidate).count()
stg_temp_lfs = [
LF_storage_row,
LF_operating_row,
LF_temperature_row,
LF_tstg_row,
LF_to_left,
LF_negative_number_left,
]
ce_v_max_lfs = [
LF_bad_keywords_in_row,
LF_current_in_row,
LF_non_ce_voltages_in_row,
]
with pytest.raises(ValueError):
labeler.apply(split=0, lfs=stg_temp_lfs, train=True, parallelism=PARALLEL)
labeler.apply(
docs=train_docs,
lfs=[stg_temp_lfs, ce_v_max_lfs],
train=True,
parallelism=PARALLEL,
)
assert session.query(Label).count() == len(train_cands[0]) + len(train_cands[1])
num_label_keys = session.query(LabelKey).count()
assert num_label_keys == 9
L_train = labeler.get_label_matrices(train_cands)
assert L_train[0].shape == (len(train_cands[0]), num_label_keys)
assert L_train[1].shape == (len(train_cands[1]), num_label_keys)
assert len(labeler.get_keys()) == num_label_keys
# Test Dropping LabelerKey
labeler.drop_keys(["LF_storage_row"])
assert len(labeler.get_keys()) == num_label_keys - 1
# Test Upserting LabelerKey
labeler.upsert_keys(["LF_storage_row"])
assert "LF_storage_row" in [label.name for label in labeler.get_keys()]
L_train_gold = labeler.get_gold_labels(train_cands)
assert L_train_gold[0].shape == (len(train_cands[0]), 1)
L_train_gold = labeler.get_gold_labels(train_cands, annotator="gold")
assert L_train_gold[0].shape == (len(train_cands[0]), 1)
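# Fit Snorkel's LabelModel on the LF label matrix and use its predicted
# marginals as probabilistic training labels for the discriminative model.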
label_model = LabelModel(cardinality=2)
label_model.fit(L_train=L_train[0], n_epochs=500, seed=1234, log_freq=100)
train_marginals = label_model.predict_proba(L_train[0])
# Collect word counter
word_counter = collect_word_counter(train_cands)
emmental.init(fonduer.Meta.log_path)
# Training config
config = {
"meta_config": {"verbose": False},
"model_config": {"model_path": None, "device": 0, "dataparallel": False},
"learner_config": {
"n_epochs": 5,
"optimizer_config": {"lr": 0.001, "l2": 0.0},
"task_scheduler": "round_robin",
},
"logging_config": {
"evaluation_freq": 1,
"counter_unit": "epoch",
"checkpointing": False,
"checkpointer_config": {
"checkpoint_metric": {f"{ATTRIBUTE}/{ATTRIBUTE}/train/loss": "min"},
"checkpoint_freq": 1,
"checkpoint_runway": 2,
"clear_intermediate_checkpoints": True,
"clear_all_checkpoints": True,
},
},
}
emmental.Meta.update_config(config=config)
# Generate word embedding module
arity = 2
# Generate special tokens (used to mark each mention span in the input)
specials = []
for i in range(arity):
specials += [f"~~[[{i}", f"{i}]]~~"]
emb_layer = EmbeddingModule(
word_counter=word_counter, word_dim=300, specials=specials
)
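# Keep only candidates whose marginals are informative, i.e., drop rows where
# the max and min class probabilities are (numerically) identical.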
diffs = train_marginals.max(axis=1) - train_marginals.min(axis=1)
train_idxs = np.where(diffs > 1e-6)[0]
train_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(
ATTRIBUTE,
train_cands[0],
F_train[0],
emb_layer.word2id,
train_marginals,
train_idxs,
),
split="train",
batch_size=100,
shuffle=True,
)
tasks = create_task(
ATTRIBUTE, 2, F_train[0].shape[1], 2, emb_layer, model="LogisticRegression"
)
model = EmmentalModel(name=f"{ATTRIBUTE}_task")
for task in tasks:
model.add_task(task)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, [train_dataloader])
test_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(
ATTRIBUTE, test_cands[0], F_test[0], emb_layer.word2id, 2
),
split="test",
batch_size=100,
shuffle=False,
)
test_preds = model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.6)
true_pred = [test_cands[0][_] for _ in positive[0]]
pickle_file = "tests/data/parts_by_doc_dict.pkl"
with open(pickle_file, "rb") as f:
parts_by_doc = pickle.load(f)
(TP, FP, FN) = entity_level_f1(
true_pred, gold_file, ATTRIBUTE, test_docs, parts_by_doc=parts_by_doc
)
tp_len = len(TP)
fp_len = len(FP)
fn_len = len(FN)
prec = tp_len / (tp_len + fp_len) if tp_len + fp_len > 0 else float("nan")
rec = tp_len / (tp_len + fn_len) if tp_len + fn_len > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
logger.info(f"prec: {prec}")
logger.info(f"rec: {rec}")
logger.info(f"f1: {f1}")
assert f1 < 0.7 and f1 > 0.3
stg_temp_lfs_2 = [
LF_to_left,
LF_test_condition_aligned,
LF_collector_aligned,
LF_current_aligned,
LF_voltage_row_temp,
LF_voltage_row_part,
LF_typ_row,
LF_complement_left_row,
LF_too_many_numbers_row,
LF_temp_on_high_page_num,
LF_temp_outside_table,
LF_not_temp_relevant,
]
labeler.update(split=0, lfs=[stg_temp_lfs_2, ce_v_max_lfs], parallelism=PARALLEL)
assert session.query(Label).count() == len(train_cands[0]) + len(train_cands[1])
num_label_keys = session.query(LabelKey).count()
assert num_label_keys == 16
L_train = labeler.get_label_matrices(train_cands)
assert L_train[0].shape == (len(train_cands[0]), num_label_keys)
label_model = LabelModel(cardinality=2)
label_model.fit(L_train=L_train[0], n_epochs=500, seed=1234, log_freq=100)
train_marginals = label_model.predict_proba(L_train[0])
diffs = train_marginals.max(axis=1) - train_marginals.min(axis=1)
train_idxs = np.where(diffs > 1e-6)[0]
train_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(
ATTRIBUTE,
train_cands[0],
F_train[0],
emb_layer.word2id,
train_marginals,
train_idxs,
),
split="train",
batch_size=100,
shuffle=True,
)
valid_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(
ATTRIBUTE,
train_cands[0],
F_train[0],
emb_layer.word2id,
np.argmax(train_marginals, axis=1),
train_idxs,
),
split="valid",
batch_size=100,
shuffle=False,
)
# Testing STL LogisticRegression
emmental.Meta.reset()
emmental.init(fonduer.Meta.log_path)
emmental.Meta.update_config(config=config)
tasks = create_task(
ATTRIBUTE,
2,
F_train[0].shape[1],
2,
emb_layer,
model="LogisticRegression",
mode="STL",
)
model = EmmentalModel(name=f"{ATTRIBUTE}_task")
for task in tasks:
model.add_task(task)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, [train_dataloader, valid_dataloader])
test_preds = model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.7)
true_pred = [test_cands[0][_] for _ in positive[0]]
(TP, FP, FN) = entity_level_f1(
true_pred, gold_file, ATTRIBUTE, test_docs, parts_by_doc=parts_by_doc
)
tp_len = len(TP)
fp_len = len(FP)
fn_len = len(FN)
prec = tp_len / (tp_len + fp_len) if tp_len + fp_len > 0 else float("nan")
rec = tp_len / (tp_len + fn_len) if tp_len + fn_len > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
logger.info(f"prec: {prec}")
logger.info(f"rec: {rec}")
logger.info(f"f1: {f1}")
assert f1 > 0.7
# Testing STL LSTM
emmental.Meta.reset()
emmental.init(fonduer.Meta.log_path)
emmental.Meta.update_config(config=config)
tasks = create_task(
ATTRIBUTE, 2, F_train[0].shape[1], 2, emb_layer, model="LSTM", mode="STL"
)
model = EmmentalModel(name=f"{ATTRIBUTE}_task")
for task in tasks:
model.add_task(task)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, [train_dataloader])
test_preds = model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.7)
true_pred = [test_cands[0][_] for _ in positive[0]]
(TP, FP, FN) = entity_level_f1(
true_pred, gold_file, ATTRIBUTE, test_docs, parts_by_doc=parts_by_doc
)
tp_len = len(TP)
fp_len = len(FP)
fn_len = len(FN)
prec = tp_len / (tp_len + fp_len) if tp_len + fp_len > 0 else float("nan")
rec = tp_len / (tp_len + fn_len) if tp_len + fn_len > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
logger.info(f"prec: {prec}")
logger.info(f"rec: {rec}")
logger.info(f"f1: {f1}")
assert f1 > 0.7
# Testing MTL LogisticRegression
emmental.Meta.reset()
emmental.init(fonduer.Meta.log_path)
emmental.Meta.update_config(config=config)
tasks = create_task(
ATTRIBUTE,
2,
F_train[0].shape[1],
2,
emb_layer,
model="LogisticRegression",
mode="MTL",
)
model = EmmentalModel(name=f"{ATTRIBUTE}_task")
for task in tasks:
model.add_task(task)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, [train_dataloader, valid_dataloader])
test_preds = model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.7)
true_pred = [test_cands[0][_] for _ in positive[0]]
(TP, FP, FN) = entity_level_f1(
true_pred, gold_file, ATTRIBUTE, test_docs, parts_by_doc=parts_by_doc
)
tp_len = len(TP)
fp_len = len(FP)
fn_len = len(FN)
prec = tp_len / (tp_len + fp_len) if tp_len + fp_len > 0 else float("nan")
rec = tp_len / (tp_len + fn_len) if tp_len + fn_len > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
logger.info(f"prec: {prec}")
logger.info(f"rec: {rec}")
logger.info(f"f1: {f1}")
assert f1 > 0.7
# Testing MTL LSTM
emmental.Meta.reset()
emmental.init(fonduer.Meta.log_path)
emmental.Meta.update_config(config=config)
tasks = create_task(
ATTRIBUTE, 2, F_train[0].shape[1], 2, emb_layer, model="LSTM", mode="MTL"
)
model = EmmentalModel(name=f"{ATTRIBUTE}_task")
for task in tasks:
model.add_task(task)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, [train_dataloader])
test_preds = model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.7)
true_pred = [test_cands[0][_] for _ in positive[0]]
(TP, FP, FN) = entity_level_f1(
true_pred, gold_file, ATTRIBUTE, test_docs, parts_by_doc=parts_by_doc
)
tp_len = len(TP)
fp_len = len(FP)
fn_len = len(FN)
prec = tp_len / (tp_len + fp_len) if tp_len + fp_len > 0 else float("nan")
rec = tp_len / (tp_len + fn_len) if tp_len + fn_len > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
logger.info(f"prec: {prec}")
logger.info(f"rec: {rec}")
logger.info(f"f1: {f1}")
assert f1 > 0.7
| fonduer-master | tests/e2e/test_e2e.py |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
exec(open("../src/fonduer/_version.py").read())
sys.path.insert(0, os.path.abspath("../src/"))
autodoc_mock_imports = [
"bs4",
"cloudpickle",
"editdistance",
"emmental",
"lxml",
"mlflow",
"numpy",
"pandas",
"scipy",
"spacy",
"snorkel",
"torch",
"tqdm",
"treedlib",
"wand",
"yaml",
]
# -- Project information -----------------------------------------------------
project = "Fonduer"
copyright = "2018, HazyResearch"
author = "HazyResearch"
# The short X.Y version
version = __version__.split("+")[0]
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx_autodoc_typehints",
"sphinxcontrib.mermaid",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["static"]
html_logo = "static/img/fonduer.svg"
html_theme_options = {"logo_only": True, "display_version": False}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "Fonduerdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "Fonduer.tex", "Fonduer Documentation", "HazyResearch", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "fonduer", "Fonduer Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Fonduer",
"Fonduer Documentation",
author,
"Fonduer",
"One line description of project.",
"Miscellaneous",
)
]
# -- Extension configuration -------------------------------------------------
autoclass_content = "both"
| fonduer-master | docs/conf.py |
"""Fonduer version."""
__version__ = "0.9.0+dev"
| fonduer-master | src/fonduer/_version.py |
"""Fonduer package."""
from fonduer._version import __version__
from fonduer.meta import Meta, init_logging
__all__ = ["__version__", "Meta", "init_logging"]
| fonduer-master | src/fonduer/__init__.py |
"""Fonduer meta class."""
import logging
import os
import tempfile
from builtins import object
from datetime import datetime
from typing import Any, Optional, Type
from urllib.parse import urlparse
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
logger = logging.getLogger(__name__)
def init_logging(
log_dir: str = tempfile.gettempdir(),
format: str = "[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s",
level: int = logging.INFO,
) -> None:
"""Configure logging to output to the provided log_dir.
Will use a nested directory whose name is the current timestamp.
:param log_dir: The directory to store logs in.
:param format: The logging format string to use.
:param level: The logging level to use, e.g., logging.INFO.
"""
if not Meta.log_path:
# Generate a new directory using the log_dir, if it doesn't exist
dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
log_path = os.path.join(log_dir, dt)
if not os.path.exists(log_path):
os.makedirs(log_path)
# Configure the logger using the provided path
logging.basicConfig(
format=format,
level=level,
handlers=[
logging.FileHandler(os.path.join(log_path, "fonduer.log")),
logging.StreamHandler(),
],
)
# Notify user of log location
logger.info(f"Setting logging directory to: {log_path}")
Meta.log_path = log_path
else:
logger.info(
f"Logging was already initialized to use {Meta.log_path}. "
"To configure logging manually, call fonduer.init_logging before "
"initialiting Meta."
)
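# A minimal usage sketch (illustrative only, not part of the module):
#
#   import logging
#   import fonduer
#
#   fonduer.init_logging(log_dir="logs", level=logging.DEBUG)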
# Defines procedure for setting up a sessionmaker
def new_sessionmaker() -> sessionmaker:
"""Create new sessionmaker.
Turning on autocommit for Postgres, see
http://oddbird.net/2014/06/14/sqlalchemy-postgres-autocommit/.
Otherwise performance suffers with multiple notebooks/processes/etc due to lock
contention on the tables.
"""
try:
engine = create_engine(
Meta.conn_string,
client_encoding="utf8",
executemany_mode="batch",
isolation_level="AUTOCOMMIT",
)
except sqlalchemy.exc.OperationalError as e:
raise ValueError(
f"{e}\n"
f"To resolve this error, check our FAQs at: "
f"https://fonduer.readthedocs.io/en/latest/user/faqs.html"
)
except Exception as e:
raise ValueError(
f"{e}\n"
f"Meta variables have not been initialized with "
f"a valid postgres connection string.\n"
f"Use the form: "
f"postgresql://<user>:<pw>@<host>:<port>/<database_name>"
)
# New sessionmaker
return sessionmaker(bind=engine)
def _update_meta(conn_string: str) -> None:
"""Update Meta class."""
url = urlparse(conn_string)
Meta.conn_string = conn_string
Meta.DBNAME = url.path[1:]
Meta.DBUSER = url.username
Meta.DBPWD = url.password
Meta.DBHOST = url.hostname
Meta.DBPORT = url.port
Meta.postgres = url.scheme.startswith("postgresql")
class Meta(object):
"""Singleton-like metadata class for all global variables.
Adapted from the Unique Design Pattern:
https://stackoverflow.com/questions/1318406/why-is-the-borg-pattern-better-than-the-singleton-pattern-in-python
"""
# Static class variables
conn_string: Optional[str] = None
DBNAME: Optional[str] = None
DBUSER: Optional[str] = None
DBHOST: Optional[str] = None
DBPORT: Optional[int] = None
DBPWD: Optional[str] = None
Session = None
engine = None
Base: Any = declarative_base(name="Base", cls=object)
postgres = False
log_path: Optional[str] = None
@classmethod
def init(cls, conn_string: Optional[str] = None) -> Type["Meta"]:
"""Return the unique Meta class."""
if conn_string:
_update_meta(conn_string)
# We initialize the engine within the models module because models'
# schema can depend on which data types are supported by the engine
Meta.Session = new_sessionmaker()
Meta.engine = Meta.Session.kw["bind"]
logger.info(
f"Connecting user:{Meta.DBUSER} "
f"to {Meta.DBHOST}:{Meta.DBPORT}/{Meta.DBNAME}"
)
Meta._init_db()
if not Meta.log_path:
init_logging()
return cls
@classmethod
def _init_db(cls) -> None:
"""Initialize the storage schema.
This call must be performed after all classes that extend
Base are declared to ensure the storage schema is initialized.
"""
# This list of imports defines which SQLAlchemy classes will be
# initialized when Meta.init() is called. If a sqlalchemy class is not
# imported before the call to create_all(), it will not be created.
import fonduer.candidates.models # noqa
import fonduer.features.models # noqa
import fonduer.learning.models # noqa
import fonduer.parser.models # noqa
import fonduer.supervision.models # noqa
import fonduer.utils.models # noqa
logger.info("Initializing the storage schema")
Meta.Base.metadata.create_all(Meta.engine)
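# A minimal usage sketch (illustrative only; user/host/database are made up):
#
#   from fonduer import Meta
#
#   session = Meta.init("postgresql://user:pw@localhost:5432/mydb").Session()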
| fonduer-master | src/fonduer/meta.py |
"""Fonduer's candidate module."""
from fonduer.candidates.candidates import CandidateExtractor
from fonduer.candidates.mentions import (
MentionCaptions,
MentionCells,
MentionDocuments,
MentionExtractor,
MentionFigures,
MentionNgrams,
MentionParagraphs,
MentionSections,
MentionSentences,
MentionTables,
)
__all__ = [
"CandidateExtractor",
"MentionCaptions",
"MentionCells",
"MentionDocuments",
"MentionExtractor",
"MentionFigures",
"MentionNgrams",
"MentionParagraphs",
"MentionSections",
"MentionSentences",
"MentionTables",
]
| fonduer-master | src/fonduer/candidates/__init__.py |
"""Fonduer mention."""
import logging
import re
from builtins import map, range
from typing import Any, Collection, Dict, Iterable, Iterator, List, Optional, Set, Union
from sqlalchemy.orm import Session
from fonduer.candidates.matchers import _Matcher
from fonduer.candidates.models import Candidate, Mention
from fonduer.candidates.models.candidate import candidate_subclasses
from fonduer.candidates.models.caption_mention import TemporaryCaptionMention
from fonduer.candidates.models.cell_mention import TemporaryCellMention
from fonduer.candidates.models.document_mention import TemporaryDocumentMention
from fonduer.candidates.models.figure_mention import TemporaryFigureMention
from fonduer.candidates.models.paragraph_mention import TemporaryParagraphMention
from fonduer.candidates.models.section_mention import TemporarySectionMention
from fonduer.candidates.models.span_mention import TemporarySpanMention
from fonduer.candidates.models.table_mention import TemporaryTableMention
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models import Context, Document, Sentence
from fonduer.utils.udf import UDF, UDFRunner
from fonduer.utils.utils import get_dict_of_stable_id
logger = logging.getLogger(__name__)
class MentionSpace(object):
"""Define the **space** of Mention objects.
Calling *apply(x)* given an object *x* returns a generator over mentions in
*x*.
"""
def __init__(self) -> None:
"""Initialize mention space."""
pass
def apply(self, x: Context) -> Iterator[TemporaryContext]:
"""Apply function takes a Context and return a mention generator.
:param x: The input Context.
:yield: The mention generator.
"""
raise NotImplementedError()
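# A minimal sketch (illustrative only, not part of the module): a custom
# MentionSpace just implements apply() as a generator over temporary contexts.
# For example, a space that yields each whole Sentence as one span mention:
#
#   class WholeSentenceMentions(MentionSpace):
#       def apply(self, context: Sentence) -> Iterator[TemporarySpanMention]:
#           yield TemporarySpanMention(
#               char_start=context.char_offsets[0],
#               char_end=context.char_offsets[-1] + len(context.words[-1]) - 1,
#               sentence=context,
#           )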
class Ngrams(MentionSpace):
"""Define the space of Mentions as all n-grams in a Sentence.
Define the space of Mentions as all n-grams (n_min <= n <= n_max) in a
Sentence *x*, indexing by **character offset**.
:param n_min: Lower limit for the generated n_grams.
:param n_max: Upper limit for the generated n_grams.
:param split_tokens: Tokens, on which unigrams are split into two separate
unigrams.
:type split_tokens: tuple, list of str.
"""
def __init__(
self, n_min: int = 1, n_max: int = 5, split_tokens: Collection[str] = []
) -> None:
"""Initialize Ngrams."""
MentionSpace.__init__(self)
self.n_min = n_min
self.n_max = n_max
self.split_rgx = (
r"(" + r"|".join(map(re.escape, sorted(split_tokens, reverse=True))) + r")"
if split_tokens and len(split_tokens) > 0
else None
)
def apply(self, context: Sentence) -> Iterator[TemporarySpanMention]:
"""Apply function takes a Sentence and return a mention generator.
:param x: The input Sentence.
:yield: The mention generator.
"""
# These are the character offset--**relative to the sentence
# start**--for each _token_
offsets = context.char_offsets
# Loop over all n-grams in **reverse** order (to facilitate
# longest-match semantics)
L = len(offsets)
seen: Set[TemporarySpanMention] = set()
for j in range(self.n_min, self.n_max + 1)[::-1]:
for i in range(L - j + 1):
w = context.words[i + j - 1]
start = offsets[i]
end = offsets[i + j - 1] + len(w) - 1
ts = TemporarySpanMention(
char_start=start, char_end=end, sentence=context
)
if ts not in seen:
seen.add(ts)
yield ts
# Check for split
if (
j == 1
and self.n_max >= 1
and self.n_min <= 1
and self.split_rgx is not None
and end - start > 0
):
text = context.text[start - offsets[0] : end - offsets[0] + 1]
start_idxs = [0]
end_idxs = []
for m in re.finditer(self.split_rgx, text):
start_idxs.append(m.end())
end_idxs.append(m.start())
end_idxs.append(len(text))
for start_idx in start_idxs:
for end_idx in end_idxs:
if start_idx < end_idx:
ts = TemporarySpanMention(
char_start=start_idx,
char_end=end_idx - 1,
sentence=context,
)
if ts not in seen and ts.get_span():
seen.add(ts)
yield ts
class MentionNgrams(Ngrams):
"""Defines the **space** of Mentions as n-grams in a Document.
Defines the space of Mentions as all n-grams (n_min <= n <= n_max) in a
Document *x*, divided into Sentences inside of html elements (such as table
cells).
:param n_min: Lower limit for the generated n_grams.
:param n_max: Upper limit for the generated n_grams.
:param split_tokens: Tokens, on which unigrams are split into two separate
unigrams.
:type split_tokens: tuple, list of str.
"""
def __init__(
self, n_min: int = 1, n_max: int = 5, split_tokens: Collection[str] = []
) -> None:
"""Initialize MentionNgrams."""
Ngrams.__init__(self, n_min=n_min, n_max=n_max, split_tokens=split_tokens)
def apply(self, doc: Document) -> Iterator[TemporarySpanMention]:
"""Generate MentionNgrams from a Document by parsing all of its Sentences.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionNgrams.apply() must be of type Document"
)
for sentence in doc.sentences:
for ts in Ngrams.apply(self, sentence):
yield ts
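# A minimal usage sketch, not part of the original module: `doc` is assumed
# to be a parsed fonduer ``Document``, and the n-gram limit and split tokens
# are arbitrary example values.
#
#     ngrams = MentionNgrams(n_max=3, split_tokens=["-", "/"])
#     for span in ngrams.apply(doc):
#         print(span.get_span())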
class MentionFigures(MentionSpace):
"""Defines the space of Mentions as all figures in a Document *x*.
:param types: If specified, only yield TemporaryFigureMentions whose url ends in
one of the specified types. Example: types=["png", "jpg", "jpeg"].
:type types: list, tuple of str
"""
    def __init__(self, types: Optional[List[str]] = None) -> None:
"""Initialize MentionFigures."""
MentionSpace.__init__(self)
if types is not None:
self.types = [t.strip().lower() for t in types]
else:
self.types = None
def apply(self, doc: Document) -> Iterator[TemporaryFigureMention]:
"""
Generate MentionFigures from a Document by parsing all of its Figures.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionFigures.apply() must be of type Document"
)
for figure in doc.figures:
if self.types is None or any(
figure.url.lower().endswith(type) for type in self.types
):
yield TemporaryFigureMention(figure)
class MentionSentences(MentionSpace):
"""Defines the space of Mentions as all sentences in a Document *x*."""
def __init__(self) -> None:
"""Initialize MentionSentences."""
MentionSpace.__init__(self)
def apply(self, doc: Document) -> Iterator[TemporarySpanMention]:
"""
Generate MentionSentences from a Document by parsing all of its Sentences.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionSentences.apply() must be of type Document"
)
for sentence in doc.sentences:
yield TemporarySpanMention(
char_start=0, char_end=len(sentence.text) - 1, sentence=sentence
)
class MentionParagraphs(MentionSpace):
"""Defines the space of Mentions as all paragraphs in a Document *x*."""
def __init__(self) -> None:
"""Initialize MentionParagraphs."""
MentionSpace.__init__(self)
def apply(self, doc: Document) -> Iterator[TemporaryParagraphMention]:
"""
Generate MentionParagraphs from a Document by parsing all of its Paragraphs.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionParagraphs.apply() must be of type Document"
)
for paragraph in doc.paragraphs:
yield TemporaryParagraphMention(paragraph)
class MentionCaptions(MentionSpace):
"""Defines the space of Mentions as all captions in a Document *x*."""
def __init__(self) -> None:
"""Initialize MentionCaptions."""
MentionSpace.__init__(self)
def apply(self, doc: Document) -> Iterator[TemporaryCaptionMention]:
"""
Generate MentionCaptions from a Document by parsing all of its Captions.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionCaptions.apply() must be of type Document"
)
for caption in doc.captions:
yield TemporaryCaptionMention(caption)
class MentionCells(MentionSpace):
"""Defines the space of Mentions as all cells in a Document *x*."""
def __init__(self) -> None:
"""Initialize MentionCells."""
MentionSpace.__init__(self)
def apply(self, doc: Document) -> Iterator[TemporaryCellMention]:
"""
Generate MentionCells from a Document by parsing all of its Cells.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionCells.apply() must be of type Document"
)
for cell in doc.cells:
yield TemporaryCellMention(cell)
class MentionTables(MentionSpace):
"""Defines the space of Mentions as all tables in a Document *x*."""
def __init__(self) -> None:
"""Initialize MentionTables."""
MentionSpace.__init__(self)
def apply(self, doc: Document) -> Iterator[TemporaryTableMention]:
"""
Generate MentionTables from a Document by parsing all of its Tables.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionTables.apply() must be of type Document"
)
for table in doc.tables:
yield TemporaryTableMention(table)
class MentionSections(MentionSpace):
"""Defines the space of Mentions as all sections in a Document *x*."""
def __init__(self) -> None:
"""Initialize MentionSections."""
MentionSpace.__init__(self)
def apply(self, doc: Document) -> Iterator[TemporarySectionMention]:
"""
Generate MentionSections from a Document by parsing all of its Sections.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionSections.apply() must be of type Document"
)
for section in doc.sections:
yield TemporarySectionMention(section)
class MentionDocuments(MentionSpace):
"""Defines the space of Mentions as a document in a Document *x*."""
def __init__(self) -> None:
"""Initialize MentionDocuments."""
MentionSpace.__init__(self)
def apply(self, doc: Document) -> Iterator[TemporaryDocumentMention]:
"""
Generate MentionDocuments from a Document by using document.
:param doc: The ``Document`` to parse.
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionDocuments.apply() must be of type Document"
)
yield TemporaryDocumentMention(doc)
class MentionExtractor(UDFRunner):
"""An operator to extract Mention objects from a Context.
:Example:
Assuming we want to extract two types of ``Mentions``, a Part and a
Temperature, and we have already defined Matchers to use::
part_ngrams = MentionNgrams(n_max=3)
temp_ngrams = MentionNgrams(n_max=2)
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
mention_extractor = MentionExtractor(
session,
[Part, Temp],
[part_ngrams, temp_ngrams],
[part_matcher, temp_matcher]
)
:param session: An initialized database session.
    :param mention_classes: The type of relation to extract, defined using
        :func:`fonduer.mentions.mention_subclass`.
:param mention_spaces: one or list of :class:`MentionSpace` objects, one for
each relation argument. Defines space of Contexts to consider
:param matchers: one or list of :class:`fonduer.matchers.Matcher` objects,
one for each relation argument. Only tuples of Contexts for which each
element is accepted by the corresponding Matcher will be returned as
Mentions
:param parallelism: The number of processes to use in parallel for calls
to apply().
:raises ValueError: If mention classes, spaces, and matchers are not the
same length.
"""
def __init__(
self,
session: Session,
mention_classes: List[Mention],
mention_spaces: List[MentionSpace],
matchers: List[_Matcher],
parallelism: int = 1,
):
"""Initialize the MentionExtractor."""
super().__init__(
session,
MentionExtractorUDF,
parallelism=parallelism,
mention_classes=mention_classes,
mention_spaces=mention_spaces,
matchers=matchers,
)
        # Check that all arities are the same
arity = len(mention_classes)
if not all(
len(x) == arity # type: ignore
for x in [mention_classes, mention_spaces, matchers]
):
raise ValueError(
"Mismatched arity of mention classes, spaces, and matchers."
)
self.mention_classes = mention_classes
def apply( # type: ignore
self,
docs: Collection[Document],
clear: bool = True,
parallelism: Optional[int] = None,
progress_bar: bool = True,
) -> None:
"""Run the MentionExtractor.
:Example: To extract mentions from a set of training documents using
4 cores::
mention_extractor.apply(train_docs, parallelism=4)
:param docs: Set of documents to extract from.
:param clear: Whether or not to clear the existing Mentions
beforehand.
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the
MentionExtractor if it is provided.
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
"""
super().apply(
docs, clear=clear, parallelism=parallelism, progress_bar=progress_bar
)
def clear(self) -> None: # type: ignore
"""Delete Mentions of each class in the extractor from the given split."""
# Create set of candidate_subclasses associated with each mention_subclass
cand_subclasses = set()
for mentions, tablename in [
(_[1][0], _[1][1]) for _ in candidate_subclasses.values()
]:
for mention in mentions:
if mention in self.mention_classes:
cand_subclasses.add(tablename)
# First, clear all the Mentions. This will cascade and remove the
# mention_subclasses and corresponding candidate_subclasses.
for mention_class in self.mention_classes:
logger.info(f"Clearing table: {mention_class.__tablename__}")
self.session.query(Mention).filter_by(
type=mention_class.__tablename__
).delete(synchronize_session="fetch")
# Next, clear the Candidates. This is done manually because we have
# no cascading relationship from candidate_subclass to Candidate.
for cand_subclass in cand_subclasses:
logger.info(f"Cascading to clear table: {cand_subclass}")
self.session.query(Candidate).filter_by(type=cand_subclass).delete(
synchronize_session="fetch"
)
def clear_all(self) -> None:
"""Delete all Mentions from given split the database."""
logger.info("Clearing ALL Mentions.")
self.session.query(Mention).delete(synchronize_session="fetch")
# With no Mentions, there should be no Candidates also
self.session.query(Candidate).delete(synchronize_session="fetch")
logger.info("Cleared ALL Mentions (and Candidates).")
def get_mentions(
self, docs: Union[Document, Iterable[Document], None] = None, sort: bool = False
) -> List[List[Mention]]:
"""Return a list of lists of the mentions associated with this extractor.
Each list of the return will contain the Mentions for one of the
mention classes associated with the MentionExtractor.
:param docs: If provided, return Mentions from these documents. Else,
return all Mentions.
:param sort: If sort is True, then return all Mentions sorted by stable_id.
:return: Mentions for each mention_class.
"""
result = []
if docs:
docs = docs if isinstance(docs, Iterable) else [docs]
# Get cands from all splits
for mention_class in self.mention_classes:
mentions = (
self.session.query(mention_class)
.filter(mention_class.document_id.in_([doc.id for doc in docs]))
.order_by(mention_class.id)
.all()
)
if sort:
mentions = sorted(mentions, key=lambda x: x[0].get_stable_id())
result.append(mentions)
else:
for mention_class in self.mention_classes:
mentions = (
self.session.query(mention_class).order_by(mention_class.id).all()
)
if sort:
mentions = sorted(mentions, key=lambda x: x[0].get_stable_id())
result.append(mentions)
return result
class MentionExtractorUDF(UDF):
"""UDF for performing mention extraction."""
def __init__(
self,
mention_classes: Union[Mention, List[Mention]],
mention_spaces: Union[MentionSpace, List[MentionSpace]],
matchers: Union[_Matcher, List[_Matcher]],
**kwargs: Any,
):
"""Initialize the MentionExtractorUDF."""
self.mention_classes = (
mention_classes
if isinstance(mention_classes, (list, tuple))
else [mention_classes]
)
self.mention_spaces = (
mention_spaces
if isinstance(mention_spaces, (list, tuple))
else [mention_spaces]
)
self.matchers = matchers if isinstance(matchers, (list, tuple)) else [matchers]
# Preallocates internal data structure
self.child_context_set: Set[TemporaryContext] = set()
super().__init__(**kwargs)
def apply(self, doc: Document, **kwargs: Any) -> Document:
"""Extract mentions from the given Document.
:param doc: A document to process.
"""
# Get a dict of stable_id of contexts.
dict_of_stable_id: Dict[str, Context] = get_dict_of_stable_id(doc)
# Iterate over each mention class
for i, mention_class in enumerate(self.mention_classes):
# Generate TemporaryContexts that are children of the context using
# the mention_space and filtered by the Matcher
for child_context in self.matchers[i].apply(
self.mention_spaces[i].apply(doc)
):
# Skip if this temporary context is used by this mention class.
stable_id = child_context.get_stable_id()
if hasattr(doc, mention_class.__tablename__ + "s") and any(
[
m.context.stable_id == stable_id
for m in getattr(doc, mention_class.__tablename__ + "s")
]
):
continue
# Re-use a persisted context if exists.
if stable_id in dict_of_stable_id:
context = dict_of_stable_id[stable_id]
# Persist a temporary context.
else:
context_type = child_context._get_table()
context = context_type(child_context)
dict_of_stable_id[stable_id] = context
mention_args = {"document": doc, "context": context}
# Add Mention to session
mention_class(**mention_args)
return doc
| fonduer-master | src/fonduer/candidates/mentions.py |
"""Fonduer matcher."""
import re
from typing import Iterator, Set
from fonduer.candidates.models.figure_mention import TemporaryFigureMention
from fonduer.candidates.models.span_mention import TemporarySpanMention
from fonduer.candidates.models.temporary_context import TemporaryContext
WORDS = "words"
class _Matcher(object):
"""Matcher class.
Apply a function ``f : m -> {True, False}`` to a generator of mentions,
returning only mentions *m* s.t. *f(m) == True*,
where f can be compositionally defined.
"""
def __init__(self, *children, **opts): # type: ignore
self.children = children
self.opts = opts
self.longest_match_only = self.opts.get("longest_match_only", True)
self.init()
self._check_opts()
def init(self) -> None:
pass
def _check_opts(self) -> None:
"""Check for unsupported opts, throws error if found.
NOTE: Must be called _after_ init()
"""
for opt in self.opts.keys():
if opt not in self.__dict__:
raise Exception(f"Unsupported option: {opt}")
def _f(self, m: TemporaryContext) -> bool:
"""Non-composed version of filter function f."""
return True
def f(self, m: TemporaryContext) -> bool:
"""(Recursively) Composed version of filter function f.
By default, returns logical **conjunction** of operator and single
child operator
"""
if len(self.children) == 0:
return self._f(m)
elif len(self.children) == 1:
return self._f(m) and self.children[0].f(m)
else:
raise Exception(
f"{self.__class__.__name__} does not support two or more child Matcher"
)
def apply(self, mentions: Iterator[TemporaryContext]) -> Iterator[TemporaryContext]:
"""Apply the Matcher to a **generator** of mentions.
Optionally only takes the longest match (NOTE: assumes this is the
*first* match)
"""
seen_mentions: Set[TemporaryContext] = set()
for m in mentions:
if self.f(m) and (
not self.longest_match_only or not any([m in s for s in seen_mentions])
):
if self.longest_match_only:
seen_mentions.add(m)
yield m
class DictionaryMatch(_Matcher):
"""Select mention Ngrams that match against a given list *d*.
:param d: A list of strings representing a dictionary.
:type d: list of str
:param ignore_case: Whether to ignore the case when matching. Default True.
:type ignore_case: bool
:param inverse: Whether to invert the results (e.g., return those which are
not in the list). Default False.
:type inverse: bool
:param stemmer: Optionally provide a stemmer to preprocess the dictionary.
Can be any object which has a ``stem(str) -> str`` method
like ``PorterStemmer()``. Default None.
"""
def init(self) -> None:
"""Initialize the dictionary matcher."""
self.ignore_case = self.opts.get("ignore_case", True)
self.attrib = self.opts.get("attrib", WORDS)
self.inverse = self.opts.get("inverse", False)
try:
self.d = frozenset(
w.lower() if self.ignore_case else w for w in self.opts["d"]
)
except KeyError:
raise Exception("Please supply a dictionary (list of strings) d as d=d.")
# Optionally use a stemmer, preprocess the dictionary
# Note that user can provide *an object having a stem() method*
self.stemmer = self.opts.get("stemmer", None)
if self.stemmer:
self.d = frozenset(self._stem(w) for w in list(self.d))
def _stem(self, w: str) -> str:
"""Apply stemmer, handling encoding errors."""
try:
return self.stemmer.stem(w)
except UnicodeDecodeError:
return w
def _f(self, m: TemporaryContext) -> bool:
if not isinstance(m, TemporarySpanMention):
raise ValueError(
f"{self.__class__.__name__} only supports TemporarySpanMention"
)
p = m.get_attrib_span(self.attrib)
p = p.lower() if self.ignore_case else p
p = self._stem(p) if self.stemmer is not None else p
return (not self.inverse) if p in self.d else self.inverse
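# A minimal usage sketch, not part of the original module; the dictionary
# contents and the `spans` iterator (e.g., produced by Ngrams.apply()) are
# assumptions. Only spans whose text appears in the list survive:
#
#     parts = DictionaryMatch(d=["resistor", "capacitor"], ignore_case=True)
#     matched = list(parts.apply(spans))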
class LambdaFunctionMatcher(_Matcher):
"""Select ``Ngrams`` that return True when fed to a function f.
:param func: The function to evaluate with a signature of ``f: m -> {True, False}``,
where ``m`` denotes a mention. More precisely, ``m`` is an instance of child
class of :class:`TemporaryContext`, depending on which :class:`MentionSpace` is
used. E.g., :class:`TemporarySpanMention` when :class:`MentionNgrams` is used.
:type func: function
:param longest_match_only: Whether to only return the longest span matched,
rather than all spans. Default False.
:type longest_match_only: bool
"""
def init(self) -> None:
"""Initialize the lambda function matcher."""
self.attrib = self.opts.get("attrib", WORDS)
# Set longest match only to False by default.
self.longest_match_only = self.opts.get("longest_match_only", False)
try:
self.func = self.opts["func"]
except KeyError:
raise Exception("Please supply a function f as func=f.")
def _f(self, m: TemporaryContext) -> bool:
"""Non-composed version of filter function f."""
if not isinstance(m, TemporarySpanMention):
raise ValueError(
f"{self.__class__.__name__} only supports TemporarySpanMention"
)
return self.func(m)
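# A minimal usage sketch, not part of the original module. Any boolean
# predicate over a TemporarySpanMention can be wrapped; this example keeps
# only spans of at most two tokens:
#
#     short_spans = LambdaFunctionMatcher(
#         func=lambda m: len(m.get_attrib_tokens("words")) <= 2
#     )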
class Union(_Matcher):
"""Take the union of mention sets returned by the provided ``Matchers``.
:param longest_match_only: If True, only return the longest match. Default True.
Overrides longest_match_only of its child ``Matchers``.
:type longest_match_only: bool
"""
def f(self, m: TemporaryContext) -> bool:
"""Non-composed version of filter function f."""
for child in self.children:
if child.f(m):
return True
return False
class Intersect(_Matcher):
"""Take the intersection of mention sets returned by the provided ``Matchers``.
:param longest_match_only: If True, only return the longest match. Default True.
Overrides longest_match_only of its child ``Matchers``.
:type longest_match_only: bool
"""
def f(self, m: TemporaryContext) -> bool:
"""Non-composed version of filter function f."""
for child in self.children:
if not child.f(m):
return False
return True
class Inverse(_Matcher):
"""Return the opposite result of ifs child ``Matcher``.
:raises ValueError: If more than one Matcher is provided.
:param longest_match_only: If True, only return the longest match. Default True.
Overrides longest_match_only of its child ``Matchers``.
:type longest_match_only: bool
"""
def __init__(self, *children, **opts): # type: ignore
"""Initialize inverse matcher."""
if not len(children) == 1:
raise ValueError("Provide a single Matcher.")
super().__init__(*children, **opts)
def f(self, m: TemporaryContext) -> bool:
"""Non-composed version of filter function f."""
child = self.children[0]
return not child.f(m)
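# A minimal composition sketch, not part of the original module; the child
# matchers `date_matcher` and `year_only_matcher` are hypothetical. Matchers
# compose like boolean predicates, so a span can be required to match one
# matcher but not another:
#
#     keep = Intersect(date_matcher, Inverse(year_only_matcher))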
class Concat(_Matcher):
"""Concatenate mentions generated by Matchers.
Select mentions which are the concatenation of adjacent matches from child
operators.
:Example:
A concatenation of a NumberMatcher and PersonMatcher could match on
a span of text like "10 Obama".
    :param permutations: Whether to also accept the two parts in swapped
        order (right child matching the left part and vice versa). Default
        False.
    :type permutations: bool
:param left_required: Whether or not to require the left child to match.
Default True.
:type left_required: bool
:param right_required: Whether or not to require the right child to match.
Default True.
:type right_required: bool
:param ignore_sep: Whether or not to ignore the separator. Default True.
:type ignore_sep: bool
:param sep: If not ignoring the separator, specify which separator to look
for. Default sep=" ".
    :type sep: str
:raises ValueError: If Concat is not provided with two child matcher
objects.
.. note:: Currently slices on **word index** and considers concatenation
along these divisions only.
"""
def init(self) -> None:
"""Initialize concatenate matcher."""
self.permutations = self.opts.get("permutations", False)
self.left_required = self.opts.get("left_required", True)
self.right_required = self.opts.get("right_required", True)
self.ignore_sep = self.opts.get("ignore_sep", True)
self.sep = self.opts.get("sep", " ")
def f(self, m: TemporaryContext) -> bool:
"""Non-composed version of filter function f."""
if not isinstance(m, TemporarySpanMention):
raise ValueError(
f"{self.__class__.__name__} only supports TemporarySpanMention"
)
if len(self.children) != 2:
raise ValueError("Concat takes two child Matcher objects as arguments.")
if not self.left_required and self.children[1].f(m):
return True
if not self.right_required and self.children[0].f(m):
return True
# Iterate over mention splits **at the word boundaries**
for wsplit in range(m.get_word_start_index() + 1, m.get_word_end_index() + 1):
csplit = (
m._word_to_char_index(wsplit) - m.char_start
) # NOTE the switch to **mention-relative** char index
# Optionally check for specific separator
if self.ignore_sep or m.get_span()[csplit - 1] == self.sep:
m1 = m[: csplit - len(self.sep)]
m2 = m[csplit:]
if self.children[0].f(m1) and self.children[1].f(m2):
return True
if (
self.permutations
and self.children[1].f(m1)
and self.children[0].f(m2)
):
return True
return False
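# A minimal usage sketch, not part of the original module; the child matchers
# are hypothetical. Concat accepts a span only if it splits at a word boundary
# into a left part and a right part accepted by its two children, e.g. a span
# like "10 Obama" for:
#
#     number_person = Concat(number_matcher, person_matcher)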
class _RegexMatch(_Matcher):
"""Regex matcher class.
Select mentions using a regular expression. Does not specify the
semantics of *what* is being matched yet.
"""
def init(self) -> None:
try:
self.rgx = self.opts["rgx"]
except KeyError:
raise Exception("Please supply a regular expression string r as rgx=r.")
self.ignore_case = self.opts.get("ignore_case", True)
self.attrib = self.opts.get("attrib", WORDS)
self.sep = self.opts.get("sep", "")
# Extending the _RegexMatch to handle search(instead of only match)
# and adding a toggle for full span match.
# Default values are set to False and True for search flag and full
# span matching flag respectively.
self.search = self.opts.get("search", False)
self.full_match = self.opts.get("full_match", True)
# Compile regex matcher
# NOTE: Enforce full span matching by ensuring that regex ends with $.
# Group self.rgx first so that $ applies to all components of an 'OR'
# expression. (e.g., we want r'(a|b)$' instead of r'a|b$')
self.rgx = (
self.rgx
if self.rgx.endswith("$") or not self.full_match
else ("(" + self.rgx + ")$")
)
self.r = re.compile(
self.rgx, flags=(re.I if self.ignore_case else 0) | re.UNICODE
)
def _f(self, m: TemporaryContext) -> bool:
raise NotImplementedError()
class RegexMatchSpan(_RegexMatch):
"""Match regex pattern on **full concatenated span**.
:param rgx: The RegEx pattern to use.
:type rgx: str
:param ignore_case: Whether or not to ignore case in the RegEx. Default
True.
:type ignore_case: bool
:param search: If True, *search* the regex pattern through the concatenated span.
If False, try to *match* the regex patten only at its beginning. Default False.
:type search: bool
:param full_match: If True, wrap the provided rgx with ``(<rgx>)$``.
Default True.
:type full_match: bool
:param longest_match_only: If True, only return the longest match. Default True.
Will be overridden by the parent matcher like :class:`Union` when it is wrapped
by :class:`Union`, :class:`Intersect`, or :class:`Inverse`.
:type longest_match_only: bool
"""
    def _f(self, m: TemporaryContext) -> bool:
        if not isinstance(m, TemporarySpanMention):
            raise ValueError(
                f"{self.__class__.__name__} only supports TemporarySpanMention"
            )
        span = m.get_attrib_span(self.attrib, sep=self.sep)
        if self.search:
            return self.r.search(span) is not None
        return self.r.match(span) is not None
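# A minimal usage sketch, not part of the original module; the pattern is an
# arbitrary example. With full_match=True (the default), the regex is wrapped
# as ``(<rgx>)$`` so it must cover the entire concatenated span:
#
#     part_rgx = RegexMatchSpan(rgx=r"lm\d{3}[a-z]?", longest_match_only=True)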
class RegexMatchEach(_RegexMatch):
"""Match regex pattern on **each token**.
:param rgx: The RegEx pattern to use.
:type rgx: str
:param ignore_case: Whether or not to ignore case in the RegEx. Default
True.
:type ignore_case: bool
:param full_match: If True, wrap the provided rgx with ``(<rgx>)$``.
Default True.
:type full_match: bool
:param longest_match_only: If True, only return the longest match. Default
True.
:type longest_match_only: bool
"""
    def _f(self, m: TemporaryContext) -> bool:
        if not isinstance(m, TemporarySpanMention):
            raise ValueError(
                f"{self.__class__.__name__} only supports TemporarySpanMention"
            )
        tokens = m.get_attrib_tokens(self.attrib)
        return bool(tokens) and all(self.r.match(t) is not None for t in tokens)
class PersonMatcher(RegexMatchEach):
"""
Match Spans that are the names of people, as identified by spaCy.
A convenience class for setting up a RegexMatchEach to match spans
for which each token was tagged as a person (PERSON).
"""
def __init__(self, *children, **kwargs): # type: ignore
"""Initialize person matcher."""
kwargs["attrib"] = "ner_tags"
kwargs["rgx"] = "PERSON"
super().__init__(*children, **kwargs)
class LocationMatcher(RegexMatchEach):
"""
Match Spans that are the names of locations, as identified by spaCy.
A convenience class for setting up a RegexMatchEach to match spans
for which each token was tagged as a location (GPE or LOC).
"""
def __init__(self, *children, **kwargs): # type: ignore
"""Initialize location matcher."""
kwargs["attrib"] = "ner_tags"
kwargs["rgx"] = "GPE|LOC"
super().__init__(*children, **kwargs)
class OrganizationMatcher(RegexMatchEach):
"""
Match Spans that are the names of organizations, as identified by spaCy.
A convenience class for setting up a RegexMatchEach to match spans
for which each token was tagged as an organization (NORG or ORG).
"""
def __init__(self, *children, **kwargs): # type: ignore
"""Initialize organization matcher."""
kwargs["attrib"] = "ner_tags"
kwargs["rgx"] = "NORG|ORG"
super().__init__(*children, **kwargs)
class DateMatcher(RegexMatchEach):
"""
Match Spans that are dates, as identified by spaCy.
A convenience class for setting up a RegexMatchEach to match spans
for which each token was tagged as a date (DATE).
"""
def __init__(self, *children, **kwargs): # type: ignore
"""Initialize date matcher."""
kwargs["attrib"] = "ner_tags"
kwargs["rgx"] = "DATE"
super().__init__(*children, **kwargs)
class NumberMatcher(RegexMatchEach):
"""
Match Spans that are numbers, as identified by spaCy.
A convenience class for setting up a RegexMatchEach to match spans
for which each token was tagged as a number (CARDINAL or QUANTITY).
"""
def __init__(self, *children, **kwargs): # type: ignore
"""Initialize number matcher."""
kwargs["attrib"] = "ner_tags"
kwargs["rgx"] = "CARDINAL|QUANTITY"
super().__init__(*children, **kwargs)
class MiscMatcher(RegexMatchEach):
"""
Match Spans that are miscellaneous named entities, as identified by spaCy.
A convenience class for setting up a RegexMatchEach to match spans
for which each token was tagged as miscellaneous (MISC).
"""
def __init__(self, *children, **kwargs): # type: ignore
"""Initialize miscellaneous matcher."""
kwargs["attrib"] = "ner_tags"
kwargs["rgx"] = "MISC"
super().__init__(*children, **kwargs)
class LambdaFunctionFigureMatcher(_Matcher):
"""Select Figures that return True when fed to a function f.
:param func: The function to evaluate. See :class:`LambdaFunctionMatcher` for
details.
:type func: function
"""
def init(self) -> None:
"""Initialize lambda function figure matcher."""
# Set longest match only to False
self.longest_match_only = False
try:
self.func = self.opts["func"]
except KeyError:
raise Exception("Please supply a function f as func=f.")
def _f(self, m: TemporaryContext) -> bool:
"""Non-composed version of filter function f."""
if not isinstance(m, TemporaryFigureMention):
raise ValueError(
f"{self.__class__.__name__} only supports TemporaryFigureMention"
)
return self.func(m)
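# A minimal usage sketch, not part of the original module: filter figures by
# file type with a boolean predicate over a TemporaryFigureMention:
#
#     png_figs = LambdaFunctionFigureMatcher(
#         func=lambda f: f.figure.url.lower().endswith("png")
#     )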
class DoNothingMatcher(_Matcher):
"""Matcher class for doing nothing."""
pass
| fonduer-master | src/fonduer/candidates/matchers.py |
"""Fonduer candidate."""
import logging
from builtins import range
from itertools import product
from typing import (
Any,
Callable,
Collection,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
from sqlalchemy.orm import Session
from fonduer.candidates.models import Candidate, Mention
from fonduer.parser.models.document import Document
from fonduer.utils.udf import UDF, UDFRunner
from fonduer.utils.utils import get_set_of_stable_ids
logger = logging.getLogger(__name__)
class CandidateExtractor(UDFRunner):
"""An operator to extract Candidate objects from a Context.
:Example:
Assuming we have already defined a Part and Temp ``Mention`` subclass,
        and a throttler called temp_throttler, we can create a candidate
extractor as follows::
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
candidate_extractor = CandidateExtractor(
session, [PartTemp], throttlers=[temp_throttler]
)
:param session: An initialized database session.
:param candidate_classes: The types of relation to extract, defined using
:func:`fonduer.candidates.candidate_subclass`.
:param throttlers: optional functions for filtering out candidates
which returns a Boolean expressing whether or not the candidate should
be instantiated.
:type throttlers: list of throttlers.
:param self_relations: Boolean indicating whether to extract Candidates
that relate the same context. Only applies to binary relations.
:param nested_relations: Boolean indicating whether to extract Candidates
that relate one Context with another that contains it. Only applies to
binary relations.
:param symmetric_relations: Boolean indicating whether to extract symmetric
Candidates, i.e., rel(A,B) and rel(B,A), where A and B are Contexts.
Only applies to binary relations.
:param parallelism: The number of processes to use in parallel for calls
to apply().
    :raises ValueError: If throttlers are provided but their number does not
        match the number of candidate classes.
"""
def __init__(
self,
session: Session,
candidate_classes: List[Type[Candidate]],
throttlers: Optional[List[Callable[[Tuple[Mention, ...]], bool]]] = None,
self_relations: bool = False,
nested_relations: bool = False,
symmetric_relations: bool = True,
parallelism: int = 1,
) -> None:
"""Set throttlers match candidate_classes if not provide."""
if throttlers is None:
throttlers = [None] * len(candidate_classes)
"""Initialize the CandidateExtractor."""
super().__init__(
session,
CandidateExtractorUDF,
parallelism=parallelism,
candidate_classes=candidate_classes,
throttlers=throttlers,
self_relations=self_relations,
nested_relations=nested_relations,
symmetric_relations=symmetric_relations,
)
# Check that arity is sensible
if len(candidate_classes) != len(throttlers):
raise ValueError(
"Provided different number of throttlers and candidate classes."
)
self.candidate_classes = candidate_classes
def apply( # type: ignore
self,
docs: Collection[Document],
split: int = 0,
clear: bool = True,
parallelism: Optional[int] = None,
progress_bar: bool = True,
) -> None:
"""Run the CandidateExtractor.
:Example: To extract candidates from a set of training documents using
4 cores::
candidate_extractor.apply(train_docs, split=0, parallelism=4)
:param docs: Set of documents to extract from.
:param split: Which split to assign the extracted Candidates to.
:param clear: Whether or not to clear the existing Candidates
beforehand.
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the
CandidateExtractor if it is provided.
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
"""
super().apply(
docs,
split=split,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
def clear(self, split: int) -> None: # type: ignore
"""Clear Candidates of each class.
Delete Candidates of each class initialized with the CandidateExtractor
from the given split in the database.
:param split: Which split to clear.
"""
for candidate_class in self.candidate_classes:
logger.info(
f"Clearing table {candidate_class.__tablename__} (split {split})"
)
self.session.query(Candidate).filter(
Candidate.type == candidate_class.__tablename__
).filter(Candidate.split == split).delete(synchronize_session="fetch")
def clear_all(self, split: int) -> None:
"""Delete ALL Candidates from given split the database.
:param split: Which split to clear.
"""
logger.info("Clearing ALL Candidates.")
self.session.query(Candidate).filter(Candidate.split == split).delete(
synchronize_session="fetch"
)
def get_candidates(
self,
docs: Union[Document, Iterable[Document], None] = None,
split: int = 0,
sort: bool = False,
) -> List[List[Candidate]]:
"""Return a list of lists of the candidates associated with this extractor.
Each list of the return will contain the candidates for one of the
candidate classes associated with the CandidateExtractor.
:param docs: If provided, return candidates from these documents from
all splits.
:param split: If docs is None, then return all the candidates from this
split.
:param sort: If sort is True, then return all candidates sorted by stable_id.
:return: Candidates for each candidate_class.
"""
result = []
if docs:
docs = docs if isinstance(docs, Iterable) else [docs]
# Get cands from all splits
for candidate_class in self.candidate_classes:
cands = (
self.session.query(candidate_class)
.filter(candidate_class.document_id.in_([doc.id for doc in docs]))
.order_by(candidate_class.id)
.all()
)
if sort:
cands = sorted(
cands,
key=lambda x: "_".join(
[x[i].context.get_stable_id() for i in range(len(x))]
),
)
result.append(cands)
else:
for candidate_class in self.candidate_classes:
# Filter by candidate_ids in a particular split
sub_query = (
self.session.query(Candidate.id)
.filter(Candidate.split == split)
.subquery()
)
cands = (
self.session.query(candidate_class)
.filter(candidate_class.id.in_(sub_query))
.order_by(candidate_class.id)
.all()
)
if sort:
cands = sorted(
cands,
key=lambda x: "_".join(
[x[i].context.get_stable_id() for i in range(len(x))]
),
)
result.append(cands)
return result
# Type alias for throttler
Throttler = Callable[[Tuple[Mention, ...]], bool]
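# A minimal throttler sketch, not part of the original module. A throttler
# receives the proposed tuple of Mentions and returns False to discard the
# candidate. This hypothetical example keeps only same-sentence pairs,
# assuming both mentions are span mentions whose context has a `sentence`:
#
#     def same_sentence_throttler(mentions: Tuple[Mention, ...]) -> bool:
#         a, b = mentions
#         return a.context.sentence == b.context.sentence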
class CandidateExtractorUDF(UDF):
"""UDF for performing candidate extraction."""
def __init__(
self,
candidate_classes: Union[Type[Candidate], List[Type[Candidate]]],
throttlers: Union[Throttler, List[Throttler]],
self_relations: bool,
nested_relations: bool,
symmetric_relations: bool,
**kwargs: Any,
) -> None:
"""Initialize the CandidateExtractorUDF."""
self.candidate_classes = (
candidate_classes
if isinstance(candidate_classes, (list, tuple))
else [candidate_classes]
)
self.throttlers = (
throttlers if isinstance(throttlers, (list, tuple)) else [throttlers]
)
self.nested_relations = nested_relations
self.self_relations = self_relations
self.symmetric_relations = symmetric_relations
self.arities = [len(cclass.__argnames__) for cclass in self.candidate_classes]
super().__init__(**kwargs)
def apply( # type: ignore
self, doc: Document, split: int, **kwargs: Any
) -> Document:
"""Extract candidates from the given Context.
:param doc: A document to process.
:param split: Which split to use.
"""
logger.debug(f"Document: {doc}")
# Iterate over each candidate class
for i, candidate_class in enumerate(self.candidate_classes):
logger.debug(f" Relation: {candidate_class.__name__}")
# Generates and persists candidates
candidate_args = {"split": split}
candidate_args["document"] = doc
cands = product(
*[
enumerate(
# a list of mentions for each mention subclass within a doc
getattr(doc, mention.__tablename__ + "s")
+ ([None] if nullable else [])
)
for mention, nullable in zip(
candidate_class.mentions, candidate_class.nullables
)
]
)
# Get a set of stable_ids of candidates.
set_of_stable_ids = get_set_of_stable_ids(doc, candidate_class)
for cand in cands:
# Apply throttler if one was given.
# Accepts a tuple of Mention objects
# (throttler returns whether or not proposed candidate
# passes throttling condition)
if self.throttlers[i]:
if not self.throttlers[i](
tuple(cand[j][1] for j in range(self.arities[i]))
):
continue
# TODO: Make this work for higher-order relations
if self.arities[i] == 2:
ai, a = (cand[0][0], cand[0][1].context if cand[0][1] else None)
bi, b = (cand[1][0], cand[1][1].context if cand[1][1] else None)
# Check for self-joins, "nested" joins (joins from context to
# its subcontext), and flipped duplicate "symmetric" relations
if not self.self_relations and a == b:
logger.debug(f"Skipping self-joined candidate {cand}")
continue
# Skip the check if either is None as None is not iterable.
if not self.nested_relations and (a and b) and (a in b or b in a):
logger.debug(f"Skipping nested candidate {cand}")
continue
if not self.symmetric_relations and ai > bi:
logger.debug(f"Skipping symmetric candidate {cand}")
continue
# Assemble candidate arguments
for j, arg_name in enumerate(candidate_class.__argnames__):
candidate_args[arg_name] = cand[j][1]
stable_ids = tuple(
cand[j][1].context.get_stable_id() if cand[j][1] else None
for j in range(self.arities[i])
)
# Skip if this (temporary) candidate is used by this candidate class.
if (
hasattr(doc, candidate_class.__tablename__ + "s")
and stable_ids in set_of_stable_ids
):
continue
# Add Candidate to session
candidate_class(**candidate_args)
return doc
| fonduer-master | src/fonduer/candidates/candidates.py |
"""Fonduer figure mention model."""
from typing import Any, Dict, Type
from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import relationship
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models import Figure
from fonduer.parser.models.context import Context
from fonduer.parser.models.utils import construct_stable_id
class TemporaryFigureMention(TemporaryContext):
"""The TemporaryContext version of FigureMention."""
def __init__(self, figure: Figure) -> None:
"""Initialize TemporaryFigureMention."""
super().__init__()
self.figure = figure # The figure Context
def __len__(self) -> int:
"""Get the length of the mention."""
return 1
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporaryFigureMention):
return NotImplemented
return self.figure == other.figure
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporaryFigureMention):
return NotImplemented
return self.figure != other.figure
def __gt__(self, other: object) -> bool:
"""Check if the mention is greater than another mention."""
if not isinstance(other, TemporaryFigureMention):
return NotImplemented
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
def __contains__(self, other: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other, TemporaryFigureMention):
return NotImplemented
return self.__eq__(other)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return hash(self.figure)
def get_stable_id(self) -> str:
"""Return a stable id."""
return construct_stable_id(self.figure, self._get_polymorphic_identity(), 0, 0)
def _get_table(self) -> Type["FigureMention"]:
return FigureMention
def _get_polymorphic_identity(self) -> str:
return "figure_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {"figure_id": self.figure.id}
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}"
f"("
f"document={self.figure.document.name}, "
f"position={self.figure.position}, "
f"url={self.figure.url}"
f")"
)
def _get_instance(self, **kwargs: Any) -> "TemporaryFigureMention":
return TemporaryFigureMention(**kwargs)
class FigureMention(Context, TemporaryFigureMention):
"""A figure ``Mention``."""
__tablename__ = "figure_mention"
#: The unique id of the ``FigureMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Figure``.
figure_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
#: The parent ``Figure``.
figure = relationship("Context", foreign_keys=figure_id)
__table_args__ = (UniqueConstraint(figure_id),)
__mapper_args__ = {
"polymorphic_identity": "figure_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporaryFigureMention):
"""Initialize FigureMention."""
self.stable_id = tc.get_stable_id()
self.figure = tc.figure
| fonduer-master | src/fonduer/candidates/models/figure_mention.py |
"""Fonduer table mention model."""
from typing import Any, Dict, Type
from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import relationship
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models import Table
from fonduer.parser.models.context import Context
from fonduer.parser.models.utils import construct_stable_id
class TemporaryTableMention(TemporaryContext):
"""The TemporaryContext version of TableMention."""
def __init__(self, table: Table) -> None:
"""Initialize TemporaryTableMention."""
super().__init__()
self.table = table # The table Context
def __len__(self) -> int:
"""Get the length of the mention."""
return 1
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporaryTableMention):
return NotImplemented
return self.table == other.table
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporaryTableMention):
return NotImplemented
return self.table != other.table
def __gt__(self, other: object) -> bool:
"""Check if the mention is greater than another mention."""
if not isinstance(other, TemporaryTableMention):
return NotImplemented
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
def __contains__(self, other: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other, TemporaryTableMention):
return NotImplemented
return self.__eq__(other)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return hash(self.table)
def get_stable_id(self) -> str:
"""Return a stable id."""
return construct_stable_id(self.table, self._get_polymorphic_identity(), 0, 0)
def _get_table(self) -> Type["TableMention"]:
return TableMention
def _get_polymorphic_identity(self) -> str:
return "table_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {"table_id": self.table.id}
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}"
f"("
f"document={self.table.document.name}, "
f"position={self.table.position}"
f")"
)
def _get_instance(self, **kwargs: Any) -> "TemporaryTableMention":
return TemporaryTableMention(**kwargs)
class TableMention(Context, TemporaryTableMention):
"""A table ``Mention``."""
__tablename__ = "table_mention"
#: The unique id of the ``TableMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Table``.
table_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
#: The parent ``Table``.
table = relationship("Context", foreign_keys=table_id)
__table_args__ = (UniqueConstraint(table_id),)
__mapper_args__ = {
"polymorphic_identity": "table_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporaryTableMention):
"""Initialize TableMention."""
self.stable_id = tc.get_stable_id()
self.table = tc.table
| fonduer-master | src/fonduer/candidates/models/table_mention.py |