# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
""" Some utilities """
import os
import math
import warnings
import configargparse
import torch
from nets import ConvNet
def argument_parser():
parser = configargparse.ArgParser(
description='First-order vulnerability and input dimension')
parser.add(
'--config', required=True, is_config_file=True,
help='configuration file path')
parser.add_argument(
'--name', type=str,
help='Experiment name. Results will be saved/loaded from directory '
'./results/name (which will be created if needed).')
parser.add_argument(
'--datapath', type=str, default=None,
help="Data location. Default: '~/datasets/' + `dataset`")
parser.add_argument(
'--dataset', type=str, default='cifar',
help='mnist, cifar, imgnet12 (default: cifar)')
parser.add_argument(
'--img_size', type=int, default=None,
help='only for imgnet. Resize img to 32, 64, 128 or 256.')
parser.add_argument(
'--n_layers', type=int, default=5,
help='number of hidden layers')
parser.add_argument(
'--bs', type=int, default=128,
help='batch size')
parser.add_argument(
'--epochs', type=int, default=200,
help='number of training epochs')
parser.add_argument(
'--no_BN', action='store_true',
help='Do not use batch norms (except before the very 1st layer)')
parser.add_argument(
'--no_act', action='store_true',
help='No activation functions (f.ex. no ReLUs)')
parser.add_argument(
'--raw_inputs', action='store_true',
help='Do not normalize inputs (hence no bn as first network layer)')
parser.add_argument(
'--log_step', type=int, default=None,
help='print training info every log_step batches (default: None)')
# training
parser.add_argument(
'--lr', type=float, default=.01,
help='Initial learning rate')
parser.add_argument(
'--no_training', action='store_true',
help='Do not train the network')
parser.add_argument(
'--crop', action='store_true',
help='Use cropping instead of resizing image.')
# Penalties/Regularizers
penalties = ['grad', 'adv', 'pgd', 'crossLip']
parser.add_argument(
'--lam', type=float, default=0.,
help='global regularization weight')
parser.add_argument(
'--penalty', type=str, choices=penalties, default=None,
help='penalty type:' + ' | '.join(penalties))
parser.add_argument(
'--q', type=int, default=None,
help="defense-norm q; dual of attack-norm p. "
"For FGSM, use penalty='adv' and 'q=1'")
parser.add_argument(
'--steps', type=int, default=None,
help='number of optimization steps per attack when using PGD')
# Vulnerability.py specific
parser.add_argument(
'--n_attacks', type=int, default=-1,
help='number of attack iterations; -1 for whole dataset')
parser.add_argument(
'--log_vul', action='store_true',
help='Print detailed logs of vulnerability computation')
# ConvNet specific
pooltypes = ['avgpool', 'maxpool', 'weightpool', 'subsamp']
last_layers = ['maxpool', 'avgpool', 'fc', 'weightpool']
parser.add_argument(
'--poolings', nargs='*', type=int, default=[],
help='Where to do poolings. Should be a list of '
'integers smaller than n_layers. Defaults to None. (ConvNet)')
parser.add_argument(
'--pooltype', type=str,
choices=pooltypes, default='subsamp',
        help='pooling type: ' + ' | '.join(pooltypes) + ' (default: subsamp)')
parser.add_argument(
'--dilations', nargs='*', type=int, default=None,
help='Dilations to use for each layer. List of n_layers int. '
'Defaults to 1 for all layers. (ConvNet)')
parser.add_argument(
'--last_layers', type=str, choices=last_layers,
        default='avgpool', help='last-layer type: ' + ' | '.join(last_layers))
args = parser.parse_args()
if args.datapath is None:
args.datapath = os.path.join('~/datasets/', args.dataset)
args.datapath = os.path.expanduser(args.datapath)
# DATASET SPECIFIC SETTINGS
if args.dataset == 'mnist':
if args.img_size is None:
args.img_size = 32
elif args.img_size not in {32, 64, 128, 256, 512}:
raise Exception(
"img_size must be 32, 64, 128, 256. "
"But provided %r" % args.img_size)
args.categories = 10
args.in_planes = 1
elif args.dataset == 'cifar':
if args.img_size is None:
args.img_size = 32
elif args.img_size not in {32, 64, 128, 256, 512}:
raise Exception(
"img_size must be 32, 64, 128, 256, or 512. "
"But provided %r" % args.img_size)
args.categories = 10
args.in_planes = 3
elif args.dataset == 'imgnet12':
if args.img_size is None:
args.img_size = 256
elif args.img_size not in {32, 64, 128, 256}:
raise Exception(
"img_size must be 32, 64, 128, or 256. "
"But provided %r" % args.img_size)
if args.bs > 32:
raise Exception(
"With imgnet12, Batchsize bs should be <= 32. "
"Otherwise, you'll probably run out of GPU memory")
args.categories = 12
args.in_planes = 3
else:
raise NotImplementedError("Dataset unknown")
# NETWORK DOUBLE-CHECKS/WARNINGS
if args.no_BN and args.raw_inputs:
warnings.warn(
"no_BN also removes the first BN layer before the net "
"which serves as normalization of data when using raw_inputs. "
"Thus data input data stays unnormalized between 0 and 1")
if args.dilations is None:
dilation = 1 if args.crop else int(args.img_size / 32)
args.dilations = [dilation] * args.n_layers
elif len(args.dilations) == 1:
args.dilations = args.dilations * args.n_layers
elif len(args.dilations) != args.n_layers:
raise Exception(
'Argument dilations must be single integer, or a list of '
'integers of length n_layers')
# PENALTY/REGULARIZATION WARNINGS
if (args.lam, args.penalty, args.q) != (0., None, None):
if args.lam == 0.:
warnings.warn(
"Arguments penalty and/or q are given, but lam = 0. "
"Set lam > 0., otherwise not penalty is used")
elif args.penalty is None:
raise Exception("Argument lam > 0., but no penalty is defined.")
elif (args.penalty in {'adv', 'grad'}) and (args.q is None):
raise Exception(
"If argument penalty is 'adv' or 'grad', q must be in "
"[1, infty]")
if (args.penalty == 'pgd') and (args.steps is None):
raise Exception(
"Arguments steps must be specified with "
"penalty-option pgd")
return parser, args
def create_net(args):
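    """Build the ConvNet specified by the parsed command-line arguments."""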
net = ConvNet(
args.categories, args.n_layers, args.img_size, args.poolings,
args.pooltype, args.no_BN, args.no_act, args.dilations,
normalize_inputs=(not args.raw_inputs),
last_layers=args.last_layers, in_planes=args.in_planes)
return net
def initialize_params(m, no_act=False, distribution='normal'):
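    """Fan-in initialization for Conv2d/Linear (He init with ReLUs, gain 1 otherwise
    or for modules flagged with `unit_gain`) and standard reset of BatchNorm2d."""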
# gain = sqrt 2 for ReLU
gain = 1. if no_act else math.sqrt(2)
try: # if last layer, then gain = 1.
        if m.unit_gain:  # test if module has attribute 'unit_gain'
gain = 1.
except AttributeError:
pass
if type(m) in {torch.nn.Conv2d, torch.nn.Linear}:
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0.)
out_ = m.weight.data.size(0)
in_ = m.weight.data.view(out_, -1).size(1)
sigma = gain / math.sqrt(in_)
        if distribution == 'uniform':
xmax = math.sqrt(3) * sigma
torch.nn.init.uniform_(m.weight, a=-xmax, b=xmax)
        elif distribution == 'normal':
torch.nn.init.normal_(m.weight, std=sigma)
else:
raise NotImplementedError(
"Argument distribution must be 'uniform' or 'normal'. "
"Got: '%r'" % distribution)
elif type(m) == torch.nn.BatchNorm2d:
if m.affine:
torch.nn.init.constant_(m.bias, 0.)
torch.nn.init.constant_(m.weight, 1.)
if m.track_running_stats:
torch.nn.init.constant_(m.running_mean, 0.)
torch.nn.init.constant_(m.running_var, 1.)
# ---- end of AdversarialAndDimensionality-master/utils.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import time
import torch
import torch.nn.functional as F
from torch.autograd import grad
from data import CIFAR10, IMGNET12, MNIST
from vulnerability import compute_vulnerability
from utils import argument_parser, create_net, initialize_params
from penalties import addPenalty, pgd
# NB: Logger cannot be pushed to utils.py, because of eval(name)
class Logger(object):
def __init__(self):
self.logs = dict()
def log(self, step, delta_time, *args):
for name in args:
if type(name) != str:
raise Exception(
"Logger takes strings as inputs. "
"But got %s" % type(name))
if name not in self.logs:
self.logs[name] = []
self.logs[name].append([eval(name), step, delta_time])
def get_logs(self):
return self.logs
def set_logs(self, logs):
self.logs = logs # logs : dict
return
def grad_norms(loss, inputs, train=False):
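    """Batch-averaged l1 and l2 norms of d(loss)/d(inputs); the factor bs undoes the mean reduction."""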
bs = inputs.size(0)
g = grad(loss, inputs, retain_graph=train)[0] * bs
g = g.view(bs, -1)
norm1, norm2 = g.norm(1, 1).mean(), g.norm(2, 1).mean()
return norm1.item(), norm2.item()
def do_epoch(epoch, net, optimizer, loader, mode, args):
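    """Run one pass over `loader` in the given mode and return a dict of averaged metrics."""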
if mode not in {'train', 'eval', 'test', 'init'}:
# 'init' -> for initialization of batchnorms
# 'train' -> training (but no logging of vul & dam)
# 'eval' -> compute acc & gnorms but not vul & dam on validation
# 'test' -> compute all logged values on test set
        raise Exception('Argument mode must be train, eval, test, or init')
net.eval() if mode in {'eval', 'test'} else net.train()
device = next(net.parameters()).device
cum_loss = cum_pen = cum_norm1 = cum_norm2 = total = correct = 0.
advVul = advCorrect = cum_dam = 0.
predictedAdv = None
for i, (inputs, targets) in enumerate(loader):
optimizer.zero_grad()
inputs, targets = inputs.to(device), targets.to(device)
inputs.requires_grad = True
outputs = net(inputs)
loss = F.cross_entropy(outputs, targets)
norm1, norm2 = grad_norms(loss, inputs, mode == 'train')
if mode == 'train':
if args.lam > 0.:
penalty = addPenalty(net, inputs, outputs, targets, loss, args)
loss += penalty
cum_pen += penalty.item()
cum_loss += loss.item()
loss.backward()
optimizer.step()
elif mode == 'test': # compute adv vul & damage using custom PGD
eps = .004
advDam, advOutputs = pgd(
net, inputs, targets, loss, lam=eps, steps=10,
step_size=eps / (.75 * 10), random_start=False, train=False)
# Compute logging info
cum_norm1 += norm1
cum_norm2 += norm2
cum_loss += loss.item()
total += targets.size(0)
_, predicted = torch.max(outputs.data, 1)
correct += predicted.eq(targets.data).float().cpu().sum().item()
if mode == 'test':
cum_dam += advDam.item() / eps
_, predictedAdv = torch.max(advOutputs.data, 1)
advVul += predicted.size(0) - (
predictedAdv.eq(predicted.data).float().cpu().sum().item())
advCorrect += predictedAdv.eq(
targets.data).float().cpu().sum().item()
results = {
'acc': 100 * correct / total, # accuracy
'loss': cum_loss / (i + 1), # loss
'pen': cum_pen / (i + 1), # penalty
'norm1': cum_norm1 / (i + 1), # avg l1-gradient norm
'norm2': cum_norm2 / (i + 1), # avg l2-gradient norm
'av': 100 * advVul / total, # adversarial vulnerability
'da': cum_dam / (i + 1), # adversarial damage
'aa': 100 * advCorrect / total # adversarial accuracy
}
if args.log_step is not None and i % args.log_step == 0:
print("Epoch: %03d Batch: %04d Mode: %-5s Acc: %4.1f Loss: %4.2f "
"Pen: %5.3f gNorm1: %6.2f gNorm2: %6.3f Vul: %4.1f "
"Dam: %6.2f AdAcc %4.1f" % (
epoch, i, mode, *[
results[i] for i in ['acc', 'loss', 'pen', 'norm1',
'norm2', 'av', 'da', 'aa']]))
return results
if __name__ == '__main__':
parser, args = argument_parser()
logger = Logger()
args.path = os.path.join('results', args.name)
net = create_net(args)
# print(net)
if not os.path.exists(args.path):
os.makedirs(args.path, exist_ok=True) # requires Python >= 3.2
if os.path.isfile(os.path.join(args.path, 'last.pt')):
print('> Loading last saved state/network...')
state = torch.load(os.path.join(args.path, 'last.pt'))
net.load_state_dict(state['state_dict'])
lr = state['lr']
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
optimizer.load_state_dict(state['optimizer'])
best_va_acc = state['best_va_acc']
start_ep = state['epoch'] + 1
logger.set_logs(state['logs'])
else: # initialize new net
print('> Initializing new network...')
net.apply(lambda m: initialize_params(m, args.no_act, 'normal'))
lr = args.lr
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
best_va_acc = -1.
start_ep = -1
print('> Done.')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
torch.backends.cudnn.benchmark = True
print('> Loading dataset...')
if args.dataset == 'mnist':
tr_loader, va_loader, te_loader = MNIST(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
elif args.dataset == 'cifar':
tr_loader, va_loader, te_loader = CIFAR10(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
elif args.dataset == 'imgnet12':
tr_loader, va_loader, te_loader = IMGNET12(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
else:
raise NotImplementedError
print('> Done.')
print('> Starting training.')
time_start = time.time()
epochs = 0 if args.no_training else args.epochs
for epoch in range(start_ep, epochs):
time_start = time.time()
if epoch % 30 == 0 and epoch > 0:
# reload best parameters on validation set
net.load_state_dict(
torch.load(os.path.join(
args.path, 'best.pt'))['state_dict'])
# update learning rate
lr *= .5
for param_group in optimizer.param_groups:
param_group['lr'] = lr
mode = 'init' if epoch < 0 else 'train'
tr_res = do_epoch(epoch, net, optimizer, tr_loader, mode, args)
va_res = do_epoch(epoch, net, optimizer, va_loader, 'eval', args)
te_res = do_epoch(epoch, net, optimizer, te_loader, 'test', args)
time_per_epoch = time.time() - time_start
print("epoch %3d lr %.1e te_norm1 %7.3f te_norm2 %6.4f tr_loss %6.3f "
"tr_acc %5.2f te_acc %5.2f te_aa %5.2f te_av %5.2f te_da %6.3f "
"va_acc %5.2f be_va_acc %5.2f time %d" % (
epoch, lr, te_res['norm1'], te_res['norm2'], tr_res['loss'],
tr_res['acc'], te_res['acc'], te_res['aa'], te_res['av'],
te_res['da'], va_res['acc'], best_va_acc,
time_per_epoch))
# Log and save results
logger.log(epoch, time_per_epoch, 'lr', 'tr_res', 'va_res', 'te_res')
state = {
'lr': lr,
'epoch': epoch,
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'args': args,
'logs': logger.get_logs(),
'best_va_acc': best_va_acc
}
torch.save(state, os.path.join(args.path, 'last.pt'))
if va_res['acc'] > best_va_acc:
best_va_acc = va_res['acc']
torch.save(state, os.path.join(args.path, 'best.pt'))
print('> Finished Training')
# Compute adversarial vulnerability with foolbox
print('\n> Starting attacks.')
attacks = {'l1'}
# attacks = {'l1', 'l2', 'itl1', 'itl2', 'deepFool', 'pgd', 'boundary'}
for attack in attacks:
vulnerability = compute_vulnerability(
args, attack, net, args.n_attacks)
torch.save(vulnerability,
os.path.join(args.path, 'vulnerability_%s.pt' % attack))
# ---- end of AdversarialAndDimensionality-master/main.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import time
import numpy as np
import scipy.stats as st
from functools import partial
import torch
from torch.autograd import grad
import foolbox
from foolbox.distances import Linfinity, MSE
from data import CIFAR10, IMGNET12, MNIST
def do_pass(net, loader, args, means, stds):
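    """Accuracy of `net` on `loader`, normalizing inputs with `means`/`stds` on the fly."""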
correct = total = 0.
device = next(net.parameters()).device
means = torch.FloatTensor(means).to(device)
stds = torch.FloatTensor(stds).to(device)
for i, (inputs, targets) in enumerate(loader):
inputs, targets = inputs.to(device), targets.to(device)
inputs = (inputs - means) / stds
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).float().sum().item()
if args.log_step is not None and i % args.log_step == 0:
print("Batch: %03d Acc: %4.1f" % (i, 100 * correct / total))
return 100 * correct / total
def classify(net, x, args, means, stds):
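    """Classify one unnormalized image; return (predicted class, l2 norm of the input gradient)."""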
device = next(net.parameters()).device
x = x.to(device).view(1, 3, args.img_size, args.img_size)
means = torch.FloatTensor(means).to(device)
stds = torch.FloatTensor(stds).to(device)
x = ((x - means) / stds).detach()
x.requires_grad = True
y = net(x)
g = grad(y.sum(), x)[0].view(x.size(0), -1).norm().item()
_, top_indices = y.data.cpu().view(-1).topk(2)
return top_indices[0].item(), g
def myPrint(string, args):
if args.log_vul:
print(string)
def conf95(a):
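    """95% confidence interval for the mean of `a` (Student's t, ignoring NaNs)."""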
return st.t.interval(
0.95, len(a) - 1, loc=np.nanmean(a),
scale=st.sem(a, nan_policy='omit'))
def compute_vulnerability(args, attack_name, net, n_attacks=-1):
"""
Computes vulnerability using foolbox package of net
Parameters
----------
args : :class:`argparse.ArgumentParser`
The arguments passed to main.py
attack_name : string
The attack type. Must be one of
{'l1', 'l2', 'itl1', 'itl2', 'pgd', 'deepfool'}
net : :class:`torch.nn.Module`
The network whose vulnerability is computed.
n_attacks : int
        The number of attacks to use for the computation of vulnerability.
If -1 or greater than dataset-size, uses the entire dataset.
Default: -1.
"""
print('\nStarting attacks of type ' + attack_name)
# Reload data without normalizing it
print('> Loading dataset %s...' % args.dataset)
if args.dataset == 'mnist':
_, loader = MNIST(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
elif args.dataset == 'cifar':
_, loader = CIFAR10(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
elif args.dataset == 'imgnet12':
_, loader = IMGNET12(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
else:
raise NotImplementedError
print('> Done.')
# Image-normalizations (must be same as in data.py)
if args.raw_inputs:
means = [0., 0., 0.]
stds = [1., 1., 1.]
elif args.dataset == "mnist":
means = [0.1307]
stds = [0.3081]
elif args.dataset == "cifar":
means = [0.4914, 0.4822, 0.4465]
stds = [0.2023, 0.1994, 0.2010]
elif args.dataset == "imgnet12":
means = [.453, .443, .403]
stds = {
256: [.232, .226, .225],
128: [.225, .218, .218],
64: [.218, .211, .211],
32: [.206, .200, .200]
}[args.img_size]
else:
raise NotImplementedError
means = np.array(means).reshape(-1, 1, 1)
stds = np.array(stds).reshape(-1, 1, 1)
net.eval()
print('> Computing test accuracy...')
te_acc = do_pass(net, loader, args, means, stds)
print('> Done. Computed test accuracy: %5.2f' % te_acc)
# construct attack
bounds = (0, 1)
model = foolbox.models.PyTorchModel(net, bounds=bounds,
preprocessing=(means, stds),
num_classes=args.categories)
# Choosing attack type
if attack_name == 'l1':
# vulnerability increases like sqrt(d) \propto img_size
# therefore, we divide the linfty-threshold by img_size
attack = partial(foolbox.attacks.FGSM(model, distance=Linfinity),
epsilons=1000, max_epsilon=1. / args.img_size)
elif attack_name == 'l2':
# to be visually constant, the l2-threshold increases like sqrt d;
# but vulnerability also increases like sqrt d;
        # therefore, use constant max_epsilon across dimension d
attack = partial(foolbox.attacks.GradientAttack(model, distance=MSE),
epsilons=1000, max_epsilon=1.)
elif attack_name == 'itl1':
it, eps = 10, 1. / args.img_size
attack = partial(
foolbox.attacks.LinfinityBasicIterativeAttack(
model, distance=Linfinity),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'itl2':
it, eps = 10, 1.
attack = partial(
foolbox.attacks.L2BasicIterativeAttack(
model, distance=MSE),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'pgd':
it, eps = 10, 1. / args.img_size
attack = partial(foolbox.attacks.RandomPGD(model, distance=Linfinity),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'deepFool':
attack = foolbox.attacks.DeepFoolAttack(model, distance=MSE)
elif attack_name == 'boundary':
attack = partial(foolbox.attacks.BoundaryAttack(model, distance=MSE),
iterations=2000, log_every_n_steps=np.Infinity,
verbose=False)
else:
raise NotImplementedError(
"attack_name must be 'l1', 'l2', 'itl1', 'itl2', "
"'deepFool' or 'boundary'")
n_iterations = 0
results = {}
results['l2_snr'] = []
results['clean_grad'] = []
results['dirty_grad'] = []
results['l2_norm'] = []
results['linf_norm'] = []
n_fooled = 0
print('> Creating empty image-tensors')
n_saved = 64 if (n_attacks == -1) else min(n_attacks, 64)
clean_images = torch.zeros(n_saved, 3, args.img_size, args.img_size)
dirty_images = torch.zeros(n_saved, 3, args.img_size, args.img_size)
print('> Done.')
myPrint(("{:>15} " * 5).format(
"clean_grad", "dirty_grad", "linf_norm", "l2_norm", "l2_snr"), args)
t0 = time.time()
for i, (images, labels) in enumerate(loader):
if n_iterations == n_attacks:
break
        for j, clean_image in enumerate(images):  # avoid shadowing the outer loop index i
clean_label, clean_grad = classify(net, clean_image,
args, means, stds)
dirty_image_np = attack(clean_image.numpy(), clean_label)
if dirty_image_np is not None: # i.e. if adversarial was found
dirty_image = torch.Tensor(dirty_image_np)
_, dirty_grad = classify(net, dirty_image,
args, means, stds)
                if j < n_saved:  # only save n_saved first images
                    dirty_images[j] = dirty_image.clone()
                    clean_images[j] = clean_image.clone()
l2_norm = (clean_image - dirty_image).norm().item()
linf_norm = (clean_image - dirty_image).abs().max().item()
l2_snr = 20. * math.log10(
clean_image.norm().item() / (l2_norm + 1e-6))
else:
l2_snr = dirty_grad = l2_norm = linf_norm = np.NaN
results['l2_snr'].append(l2_snr)
results['clean_grad'].append(clean_grad)
results['dirty_grad'].append(dirty_grad)
results['l2_norm'].append(l2_norm)
results['linf_norm'].append(linf_norm)
fmt_str = "{:>15.6f} " * 5
if ((attack.func._default_distance == MSE and
l2_norm < .005 * np.sqrt(args.img_size)) or
(attack.func._default_distance == Linfinity and
linf_norm < .005)):
fmt_str += " * fooled!"
n_fooled += 1
myPrint(fmt_str.format(clean_grad, dirty_grad, linf_norm,
l2_norm, l2_snr),
args)
n_iterations += 1
if n_iterations == n_attacks:
break
# Printing summary
summary = {}
print("\n Summary for network in '{}' of test accuracy {}".format(
args.path, te_acc))
for key, value in results.items():
low95, high95 = conf95(value)
print("{:>10} mean:{:>10.5f} std:{:>10.5f} conf95:({:>10.5f}, "
"{:>10.5f}) minmax:({:>10.5f}, {:>10.5f})".format(
key, np.nanmean(value), np.nanstd(value), low95, high95,
np.nanmin(value), np.nanmax(value)))
summary[key] = [np.nanmean(value), np.nanstd(value), low95, high95]
percent = 100 * n_fooled / float(n_iterations)
print("{:>10} {:10d}s".format("Time", int(time.time() - t0)))
print("{:>10} {:10.1f}%".format("percent", percent))
# Preparing the output
output = dict()
output['summary'] = summary
output['results'] = results
output['clean_images'] = clean_images
output['dirty_images'] = dirty_images
output['percent'] = percent
output['te_acc'] = te_acc
return output
# ---- end of AdversarialAndDimensionality-master/vulnerability.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
from PIL import Image
import torch
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def IMGNET12(root='~/datasets/imgnet12/', bs=32, bs_test=None, num_workers=32,
valid_size=.1, size=256, crop=False, normalize=False):
# Datafolder '~/datasets/imgnet12/' should contain folders train/ and val/,
    # each of which should contain 12 subfolders (1 per class) with .jpg files
root = os.path.expanduser(root)
# original means = [.485, .456, .406]
# original stds = [0.229, 0.224, 0.225]
means = [.453, .443, .403]
stds = {
256: [.232, .226, .225],
128: [.225, .218, .218],
64: [.218, .211, .211],
32: [.206, .200, .200]
}
if normalize:
normalize = transforms.Normalize(mean=means,
std=stds[size])
else:
normalize = transforms.Normalize((0., 0., 0),
(1., 1., 1.))
if bs_test is None:
bs_test = bs
if crop:
tr_downsamplingOp = transforms.RandomCrop(size)
te_downsamplingOp = transforms.CenterCrop(size)
else:
tr_downsamplingOp = transforms.Resize(size)
te_downsamplingOp = transforms.Resize(size)
preprocess = [transforms.Resize(256), transforms.CenterCrop(256)]
tr_transforms = transforms.Compose([
*preprocess,
tr_downsamplingOp,
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize, ])
te_transforms = transforms.Compose([
*preprocess,
te_downsamplingOp,
transforms.ToTensor(),
normalize, ])
tr_dataset = datasets.ImageFolder(root + '/train', transform=tr_transforms)
te_dataset = datasets.ImageFolder(root + '/val', transform=te_transforms)
# Split training in train and valid set
num_train = len(tr_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
tr_idx, va_idx = indices[split:], indices[:split]
tr_sampler = SubsetRandomSampler(tr_idx)
va_sampler = SubsetRandomSampler(va_idx)
tr_loader = torch.utils.data.DataLoader(
tr_dataset, batch_size=bs,
num_workers=num_workers, pin_memory=True, sampler=tr_sampler)
va_loader = torch.utils.data.DataLoader(
tr_dataset, batch_size=bs_test,
num_workers=num_workers, pin_memory=True, sampler=va_sampler)
te_loader = torch.utils.data.DataLoader(
te_dataset, batch_size=bs_test, shuffle=False,
num_workers=num_workers, pin_memory=True)
if valid_size > 0.:
return tr_loader, va_loader, te_loader
else:
return tr_loader, te_loader
def CIFAR10(root='~/datasets/cifar10/', bs=128, bs_test=None,
augment_training=True, valid_size=0., size=32, num_workers=1,
normalize=False):
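    """CIFAR-10 loaders: (train, valid, test) if valid_size > 0, else (train, test)."""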
root = os.path.expanduser(root)
if bs_test is None:
bs_test = bs
if normalize:
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))
else:
normalize = transforms.Normalize((0., 0., 0),
(1., 1., 1.))
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.Resize(size, Image.NEAREST),
transforms.ToTensor(),
normalize
])
transform_test = transforms.Compose([
transforms.Resize(size, Image.NEAREST),
transforms.ToTensor(),
normalize
])
transform_valid = transform_test
if augment_training is False:
transform_train = transform_test
dataset_tr = datasets.CIFAR10(root=root,
train=True,
transform=transform_train)
dataset_va = datasets.CIFAR10(root=root,
train=True,
transform=transform_valid)
dataset_te = datasets.CIFAR10(root=root,
train=False,
transform=transform_test)
# Split training in train and valid set
num_train = len(dataset_tr)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
loader_tr = torch.utils.data.DataLoader(dataset_tr,
batch_size=bs,
sampler=train_sampler,
num_workers=num_workers)
loader_va = torch.utils.data.DataLoader(dataset_va,
batch_size=bs,
sampler=valid_sampler,
num_workers=num_workers)
# add pin_memory
loader_te = torch.utils.data.DataLoader(dataset_te,
batch_size=bs_test,
shuffle=False,
num_workers=num_workers)
if valid_size > 0:
return loader_tr, loader_va, loader_te
else:
return loader_tr, loader_te
def MNIST(root='~/datasets/mnist/', bs=128, bs_test=None,
augment_training=True, valid_size=0., size=32, num_workers=1,
normalize=False):
root = os.path.expanduser(root)
if bs_test is None:
bs_test = bs
if normalize:
normalize = transforms.Normalize((0.1307,), (0.3081,))
else:
normalize = transforms.Normalize((0.,), (1.,))
transform = transforms.Compose([
transforms.Resize(32, Image.BILINEAR),
transforms.Resize(size, Image.NEAREST),
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
normalize
])
dataset_tr = datasets.MNIST(root=root,
train=True,
transform=transform)
dataset_va = datasets.MNIST(root=root,
train=True,
transform=transform)
dataset_te = datasets.MNIST(root=root,
train=False,
transform=transform)
# Split training in train and valid set
num_train = len(dataset_tr)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
loader_tr = torch.utils.data.DataLoader(dataset_tr,
batch_size=bs,
sampler=train_sampler,
num_workers=num_workers)
loader_va = torch.utils.data.DataLoader(dataset_va,
batch_size=bs,
sampler=valid_sampler,
num_workers=num_workers)
# add pin_memory
loader_te = torch.utils.data.DataLoader(dataset_te,
batch_size=bs_test,
shuffle=False,
num_workers=num_workers)
if valid_size > 0:
return loader_tr, loader_va, loader_te
else:
return loader_tr, loader_te
# ---- end of AdversarialAndDimensionality-master/data.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import reduce
import torch.nn as nn
import torch.nn.functional as F
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class FlexibleAvgPool2d(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs):
return F.avg_pool2d(inputs, kernel_size=inputs.size(2))
class WeightPool(nn.Module):
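    """Learned pooling: a depthwise (grouped) convolution whose stride equals its kernel size."""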
def __init__(self, in_planes, kernel_size):
super(WeightPool, self).__init__()
self.conv = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size,
stride=kernel_size, groups=in_planes, bias=False)
self.conv.unit_gain = True
def forward(self, x):
return self.conv(x)
class WeightPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(WeightPoolOut, self).__init__()
self.in_planes = in_planes
self.conv = nn.Conv2d(in_planes, in_planes, kernel_size=plane_size,
groups=in_planes, bias=False)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.conv(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class MaxPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(MaxPoolOut, self).__init__()
self.in_planes = in_planes
self.maxpool = nn.MaxPool2d(kernel_size=plane_size)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.maxpool(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class AvgPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(AvgPoolOut, self).__init__()
self.in_planes = in_planes
self.avgpool = nn.AvgPool2d(kernel_size=plane_size)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.avgpool(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class FCout(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(FCout, self).__init__()
if type(plane_size) == tuple and len(plane_size) == 2:
plane_size = reduce(lambda x, y: x * y, plane_size)
else:
plane_size = plane_size ** 2
print('Plane size = ', plane_size)
self.in_planes = in_planes
self.plane_size = plane_size
self.linear = nn.Linear(in_planes * plane_size, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = x.view(-1, self.in_planes * self.plane_size)
return self.linear(out)
class ConvLayer(nn.Module):
def __init__(self, in_planes, planes, pooltype=None, no_BN=False,
no_act=False, dilation=1):
super(ConvLayer, self).__init__()
self.pad = nn.ReflectionPad2d(dilation)
if pooltype is None: # Usual conv
self.conv = nn.Conv2d(in_planes, planes, 3, padding=0,
stride=1, dilation=dilation)
elif pooltype == 'avgpool': # Average Pool
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
nn.AvgPool2d(2))
elif pooltype == 'subsamp': # Strided Conv
self.conv = nn.Conv2d(
in_planes, planes, 3, stride=2, dilation=dilation)
elif pooltype == 'maxpool': # Max Pool
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
nn.MaxPool2d(2))
elif pooltype == 'weightpool':
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
WeightPool(planes, 2))
else:
raise NotImplementedError
if no_act:
self.act = lambda x: x
else:
self.act = nn.ReLU()
if no_BN:
self.bn = lambda x: x # Identity()
else:
self.bn = nn.BatchNorm2d(planes)
def forward(self, x):
out = self.act(self.bn(self.conv(self.pad(x))))
return out
class ConvNet(nn.Module):
def __init__(
self, categories=10, n_layers=3, in_size=32, poolings=None,
pooltype='avgpool', no_BN=False, no_act=False, dilations=1,
normalize_inputs=False, last_layers='maxpool', in_planes=3):
        # last_layers in {'maxpool', 'avgpool', 'fc', 'weightpool'}
super(ConvNet, self).__init__()
poolings = [] if poolings is None else poolings
if type(dilations) != list:
dilations = [dilations] * n_layers
self.in_planes = in_planes
if normalize_inputs or no_BN:
self.bn = (lambda x: x)
else:
self.bn = nn.BatchNorm2d(self.in_planes)
self.layers = self._make_layers(
ConvLayer, 64, n_layers, poolings, pooltype,
no_BN, no_act, dilations)
# compute input-size to last layers from input-size of the net
# self.in_planes is changed by _make_layers to the nbr of out-planes
out_size = int(in_size / (2 ** (len(poolings))))
self.last_layers = self._make_last_layers(
out_size, categories, last_layers)
def _make_layers(self, block, planes, num_blocks, poolings,
pooltype, no_BN, no_act, dilations):
# pooltypes = [0] + [0] * (num_blocks - 1)
pooltypes = [None] * num_blocks
for pool in poolings:
pooltypes[pool] = pooltype
layers = []
for pool, dilation in zip(pooltypes, dilations):
layers.append(block(self.in_planes, planes, pool, no_BN, no_act,
dilation))
self.in_planes = planes
return nn.Sequential(*layers)
def _make_last_layers(self, in_size, categories, last_layers):
if last_layers == 'maxpool':
last_layers = MaxPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'avgpool':
last_layers = AvgPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'weightpool':
last_layers = WeightPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'fc':
last_layers = FCout(
self.in_planes, in_size, categories, unit_gain=True)
else:
raise NotImplementedError(
                'Argument last_layers must be maxpool, avgpool, fc, or weightpool. '
'But got: %s' % last_layers)
return last_layers
def forward(self, x):
out = self.layers(self.bn(x))
out = self.last_layers(out)
return out
# ---- end of AdversarialAndDimensionality-master/nets.py ----
#!/usr/bin/env python3
import argparse
import json
import logging
import os
import pickle
import random
import re
from collections import Counter, OrderedDict
from sklearn.cluster import DBSCAN, AffinityPropagation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import linear_kernel
working_dir = None
config = None
vocab = None
options = None
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c",
"--corpus",
action="store",
dest="corpus",
default=None,
help="Process raw ASTs, featurize, and store in the working directory.",
)
parser.add_argument(
"-d",
"--working-dir",
action="store",
dest="working_dir",
help="Working directory.",
required=True,
)
parser.add_argument(
"-f",
"--file-query",
action="append",
dest="file_query",
default=[],
help="File containing the query AST of a query code as JSON.",
)
parser.add_argument(
"-k",
"--keywords",
action="append",
dest="keywords",
default=[],
help="Keywords to search for.",
)
parser.add_argument(
"-i",
"--index-query",
type=int,
action="store",
dest="index_query",
default=None,
help="Index of the query AST in the corpus.",
)
parser.add_argument(
"-t",
"--testall",
dest="testall",
action="store_true",
default=False,
help="Sample config.N_SAMPLES snippets and search.",
)
options = parser.parse_args()
logging.info(options)
return options
class Config:
def __init__(self):
self.MIN_MERGED_CODE = 3
self.MIN_PRUNED_SCORE = 0.65
self.N_PARENTS = 3
self.N_SIBLINGS = 1
self.N_VAR_SIBLINGS = 2
self.NUM_SIMILARS = 100
self.MIN_SIMILARITY_SCORE = 0.4
self.VOCAB_FILE = "vocab.pkl"
self.TFIDF_FILE = "tfidf.pkl"
self.FEATURES_FILE = "features.json"
self.NUM_FEATURE_MIN = 10
self.DBSCAN_EPS = 0.1
self.SAMPLE_METHOD_MIN_LINES = 12
self.SAMPLE_METHOD_MAX_LINES = 7
self.METHOD_MAX_LINES = 150
self.SEED = 119
self.N_SAMPLES = 100
self.IGNORE_VAR_NAMES = True
self.IGNORE_SIBLING_FEATURES = False
self.IGNORE_VAR_SIBLING_FEATURES = False
self.CLUSTER = True
self.PRINT_SIMILAR = True
self.USE_DBSCAN = True
self.THRESHOLD1 = 0.9
self.THRESHOLD2 = 1.5
self.TOP_N = 5
class Vocab:
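    """Word-to-index vocabulary; indices start above NUM_FEATURE_MIN, which is reserved for unknown words."""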
def __init__(self):
self.vocab = OrderedDict()
self.words = []
def get_word(self, i):
if i <= config.NUM_FEATURE_MIN:
return "#UNK"
return self.words[i - 1 - config.NUM_FEATURE_MIN]
def add_and_get_index(self, word):
if not (word in self.vocab):
self.words.append(word)
self.vocab[word] = [0, len(self.vocab) + 1 + config.NUM_FEATURE_MIN]
value = self.vocab[word]
value[0] += 1
return value[1]
def get_index(self, word):
if word in self.vocab:
return self.vocab[word][1]
else:
return config.NUM_FEATURE_MIN
def dump(self):
with open(os.path.join(options.working_dir, config.VOCAB_FILE), "wb") as out:
pickle.dump([self.vocab, self.words], out)
logging.info(f"Dumped vocab with size {len(self.vocab)}")
@staticmethod
def load(init=False):
tmp = Vocab()
if not init:
try:
with open(
os.path.join(options.working_dir, config.VOCAB_FILE), "rb"
) as out:
[tmp.vocab, tmp.words] = pickle.load(out)
logging.info(f"Loaded vocab with size {len(tmp.vocab)}")
            except Exception:
logging.info("Initialized vocab.")
pass
return tmp
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def ast_to_code_aux(ast, token_list):
if isinstance(ast, list):
for elem in ast:
ast_to_code_aux(elem, token_list)
elif isinstance(ast, dict):
token_list.append(ast["leading"])
token_list.append(ast["token"])
def ast_to_code_collect_lines(ast, line_list):
if isinstance(ast, list):
for elem in ast:
ast_to_code_collect_lines(elem, line_list)
elif isinstance(ast, dict):
if "line" in ast:
line_list.append(ast["line"])
def ast_to_code_print_lines(ast, line_list, token_list):
if isinstance(ast, list):
for elem in ast:
ast_to_code_print_lines(elem, line_list, token_list)
elif isinstance(ast, dict):
if "line" in ast and ast["line"] in line_list:
if len(token_list) > 0 and token_list[-1] == "//":
token_list.append(" your code ...\n")
token_list.append(ast["leading"])
token_list.append(ast["token"])
else:
if len(token_list) > 0 and token_list[-1] != "//":
token_list.append("\n")
token_list.append("//")
def featurize_records_file(rpath, wpath):
with open(rpath, "r") as inp:
with open(wpath, "w") as outp:
i = 0
for line in inp:
obj = json.loads(line)
obj["features"] = collect_features_as_list(obj["ast"], True, False)[0]
obj["index"] = i
i += 1
outp.write(json.dumps(obj))
outp.write("\n")
def append_feature_index(is_init, is_counter, key, feature_list, c):
if is_init:
n = vocab.add_and_get_index(key)
else:
n = vocab.get_index(key)
if is_counter:
        if n != config.NUM_FEATURE_MIN:  # skip the unknown-feature index
c[n] += 1
else:
feature_list.append(n)
def append_feature_pair(
is_init, is_counter, key, feature_list, leaf_features, sibling_idx, leaf_idx
):
if is_init:
n = vocab.add_and_get_index(key)
else:
n = vocab.get_index(key)
if is_counter:
        if n != config.NUM_FEATURE_MIN:  # skip the unknown-feature index
leaf_features[leaf_idx][n] += 1
leaf_features[sibling_idx][n] += 1
else:
feature_list.append(n)
feature_list.append(n)
def get_leftmost_leaf(ast):
if isinstance(ast, list):
for elem in ast:
(success, token) = get_leftmost_leaf(elem)
if success:
return (success, token)
elif isinstance(ast, dict):
if "leaf" in ast and ast["leaf"]:
return (True, ast["token"])
return (False, None)
def get_var_context(p_idx, p_label, p_ast):
if p_label == "#.#":
return get_leftmost_leaf(p_ast[p_idx + 2])[1]
else:
return p_label + str(p_idx)
def collect_features_aux(
ast,
feature_list,
parents,
siblings,
var_siblings,
leaf_features,
leaf_pair_features,
is_init,
is_counter,
):
global leaf_idx
if isinstance(ast, list):
i = 0
for elem in ast:
parents.append((i, ast[0], ast))
collect_features_aux(
elem,
feature_list,
parents,
siblings,
var_siblings,
leaf_features,
leaf_pair_features,
is_init,
is_counter,
)
parents.pop()
i += 1
elif isinstance(ast, dict):
if "leaf" in ast and ast["leaf"]:
leaf_idx += 1
is_var = False
var_name = key = ast["token"]
if config.IGNORE_VAR_NAMES and "var" in ast and not key[0].isupper():
key = "#VAR"
is_var = True
c = None
if is_counter:
c = Counter()
leaf_features.append(c)
append_feature_index(is_init, is_counter, key, feature_list, c)
count = 0
for (i, p, p_ast) in reversed(parents):
if p != "(#)" and re.match("^\{#*\}$", p) is None:
count += 1
key2 = p + str(i) + ">" + key
append_feature_index(is_init, is_counter, key2, feature_list, c)
if count >= config.N_PARENTS:
break
count = 0
if not config.IGNORE_VAR_SIBLING_FEATURES and is_var:
(p_idx, p_label, p_ast) = parents[-1]
var_context = get_var_context(p_idx, p_label, p_ast)
if var_context is not None:
if var_name not in var_siblings:
var_siblings[var_name] = []
for (var_sibling_idx, var_context_sibling) in reversed(
var_siblings[var_name]
):
count += 1
key2 = var_context_sibling + ">>>" + var_context
# logging.info(f"var sibling feature {key2}")
append_feature_pair(
is_init,
is_counter,
key2,
feature_list,
leaf_features,
var_sibling_idx,
leaf_idx - 1,
)
if count >= config.N_VAR_SIBLINGS:
break
var_siblings[var_name].append((leaf_idx - 1, var_context))
count = 0
if not config.IGNORE_SIBLING_FEATURES: # and not is_var:
for (sibling_idx, sibling) in reversed(siblings):
count += 1
key2 = sibling + ">>" + key
append_feature_pair(
is_init,
is_counter,
key2,
feature_list,
leaf_features,
sibling_idx,
leaf_idx - 1,
)
if count >= config.N_SIBLINGS:
break
siblings.append((leaf_idx - 1, key))
def feature_list_to_doc(record):
return " ".join([str(y) for y in record["features"]])
def counter_vectorize(rpath, wpath):
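    """Fit a binary CountVectorizer on all featurized records and pickle (vectorizer, count matrix)."""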
with open(rpath, "r") as f:
records = f.readlines()
documents = [feature_list_to_doc(json.loads(x)) for x in records]
vectorizer = CountVectorizer(min_df=1, binary=True)
counter_matrix = vectorizer.fit_transform(documents)
with open(wpath, "wb") as outf:
pickle.dump((vectorizer, counter_matrix), outf)
def read_all_records(rpath):
with open(rpath, "r") as f:
records = f.readlines()
ret = [json.loads(x) for x in records]
return ret
def get_record_part(record):
n_lines = record["endline"] - record["beginline"]
if n_lines < config.SAMPLE_METHOD_MIN_LINES or "tests" in record["path"]:
return None
else:
(_, ast) = get_sub_ast_aux(record["ast"], record["beginline"])
if ast == None:
return None
else:
ret = copy_record_with_ast(record, ast)
ret["features"] = collect_features_as_list(ast, False, False)[0]
return ret
def get_sub_ast_aux(ast, beginline, stop=False):
if isinstance(ast, list):
if stop:
return (stop, None)
else:
ret = []
for elem in ast:
(stop, tmp) = get_sub_ast_aux(elem, beginline, stop)
if tmp != None:
ret.append(tmp)
if len(ret) >= 2:
return (stop, ret)
else:
return (True, None)
elif isinstance(ast, dict):
if (
"leaf" not in ast
or not ast["leaf"]
or (not stop and ast["line"] - beginline < config.SAMPLE_METHOD_MAX_LINES)
):
return (stop, ast)
else:
return (True, None)
else:
return (stop, ast)
def print_features(fstr):
print(" ".join([vocab.get_word(int(k)) for k in fstr]))
def sample_n_records(records, n):
ret_indices = []
ret_records = []
for j in range(10000):
if len(ret_indices) < n:
i = random.randint(0, len(records) - 1)
if not (i in ret_indices):
record = get_record_part(records[i])
if record != None:
ret_indices.append(i)
ret_records.append(record)
else:
logging.info("Sampled records")
return (ret_indices, ret_records)
logging.info("Sampled records")
return (ret_indices, ret_records)
def my_similarity_score(M1, M2):
return linear_kernel(M1, M2)
def find_similarity_score_features_un(record1, record2):
features_as_counter1 = Counter(record1["features"])
features_as_counter2 = Counter(record2["features"])
return sum((features_as_counter1 & features_as_counter2).values())
def find_similarity_score_features_set(records):
features_as_counters = []
for record in records:
features_as_counters.append(Counter(record["features"]))
return distance_set(features_as_counters)
def find_similarity_score_features_set_un(records):
features_as_counters = []
for record in records:
features_as_counters.append(Counter(record["features"]))
intersection = None
for counter in features_as_counters:
if intersection is None:
intersection = counter
else:
intersection = intersection & counter
return sum(intersection.values())
def copy_record_with_ast(record, ast):
ret = dict(record)
ret["ast"] = ast
return ret
def copy_record_with_features(record, features):
ret = dict(record)
ret["features"] = features
return ret
def copy_leaf_dummy(ast):
return {"token": " ... ", "leading": ast["leading"], "trailing": ast["trailing"]}
leaf_idx = 0
def prune_ast(ast, leaf_features):
global leaf_idx
if isinstance(ast, list):
no_leaf = True
ret = []
for elem in ast:
(flag, tmp) = prune_ast(elem, leaf_features)
ret.append(tmp)
no_leaf = no_leaf and flag
if no_leaf:
return (True, None)
else:
return (False, ret)
elif isinstance(ast, dict):
if "leaf" in ast and ast["leaf"]:
leaf_idx += 1
if leaf_features[leaf_idx - 1] is None:
return (True, copy_leaf_dummy(ast))
else:
return (False, ast)
else:
return (True, ast)
else:
return (True, ast)
def prune_second_jd(record1, record2):
return prune_last_jd([record1], record2)
def add_pair_features(new_features, leaf_pair_features, leaf_idx, current_leaf_indices):
if not config.IGNORE_SIBLING_FEATURES:
for sibling_idx in current_leaf_indices:
if (sibling_idx, leaf_idx) in leaf_pair_features:
new_features[leaf_pair_features[(sibling_idx, leaf_idx)]] += 1
def distance_set(counters):
intersection = None
union = None
for counter in counters:
if intersection is None:
intersection = counter
else:
intersection = intersection & counter
for counter in counters:
if union is None:
union = counter
else:
union = union | counter
return sum(intersection.values()) / sum(union.values())
def distance(counter1, counter2):
return sum((counter1 & counter2).values()) / sum((counter1 | counter2).values())
def copy_record(record2, ast, features):
ret = dict(record2)
ret["ast"] = ast
ret["features"] = features
ret["index"] = -1
return ret
def get_completions_via_clustering(query_record, similar_records):
features = [feature_list_to_doc(record) for record in similar_records]
if len(features) > 1:
vectorizer = CountVectorizer(min_df=1)
X = vectorizer.fit_transform(features)
if config.USE_DBSCAN:
db = DBSCAN(eps=config.DBSCAN_EPS, min_samples=2, metric="cosine")
labels = db.fit_predict(X)
else:
db = AffinityPropagation()
labels = db.fit_predict(X)
else:
labels = [0]
print(f"Clustering labels: {labels}")
logging.info(f"Clustering labels: {labels}")
index_pairs = OrderedDict()
ret = []
n_clusters = 0
n_uniques = 0
for i in range(min(config.MIN_MERGED_CODE, len(similar_records))):
if labels[i] < 0:
ret.append((similar_records[i]["ast"], i, i))
for i in range(len(labels)):
if labels[i] >= 0:
if labels[i] in index_pairs:
if len(index_pairs[labels[i]]) == 1:
index_pairs[labels[i]].append(i)
else:
index_pairs[labels[i]] = [i]
n_clusters += 1
else:
n_uniques += 1
for p in index_pairs.values():
if len(p) == 2:
(i, j) = p
pruned_record = prune_last_jd(
[query_record, similar_records[j]], similar_records[i]
)
ret.append((pruned_record, i, j))
else:
ret.append((similar_records[p[0]]["ast"], p[0], p[0]))
ret.sort(key=lambda t: t[1])
logging.info(
f"(# similars, #clusters, #singles, #completions) = ({len(similar_records)}, {n_clusters}, {n_uniques}, {len(ret)})"
)
print(
f"(# similars, #clusters, #singles, #completions) = ({len(similar_records)}, {n_clusters}, {n_uniques}, {len(ret)})"
)
return ret
def get_completions2(query_record, candidate_records):
l = len(candidate_records)
ret = []
n_clusters = 0
n_uniques = 0
print("2-way")
for i in range(l):
jmax = None
maxscore = 0
for j in range(i + 1, l):
pscore = find_similarity_score_features(
candidate_records[i][2], candidate_records[j][2]
)
if pscore > config.THRESHOLD1:
query_score_un = find_similarity_score_features_un(
candidate_records[i][2], candidate_records[j][2]
)
tmp_score = find_similarity_score_features_un(
candidate_records[i][0], candidate_records[j][0]
)
if (
tmp_score > config.THRESHOLD2 * query_score_un
and tmp_score > maxscore
):
jmax = j
maxscore = tmp_score
if jmax is not None:
pruned_record = prune_last_jd(
[query_record, candidate_records[jmax][0]], candidate_records[i][0]
)
ret.append((pruned_record, i, jmax))
print(ast_to_code(pruned_record["ast"]))
n_clusters += 1
# else:
# ret.append((candidate_records[i][0]['ast'], i, i))
# n_uniques += 1
ret2 = []
print("3-way")
for (record, i, j) in ret:
if i != j:
kmax = None
maxscore = 0
for k in range(l):
if k != i and k != j:
pscore = find_similarity_score_features_set(
[
candidate_records[i][2],
candidate_records[j][2],
candidate_records[k][2],
]
)
if pscore > config.THRESHOLD1:
query_score_un = find_similarity_score_features_set_un(
[
candidate_records[i][2],
candidate_records[j][2],
candidate_records[k][2],
]
)
tmp_score = find_similarity_score_features_set_un(
[
candidate_records[i][0],
candidate_records[j][0],
candidate_records[k][0],
]
)
if (
tmp_score > config.THRESHOLD2 * query_score_un
and tmp_score > maxscore
):
kmax = k
maxscore = tmp_score
if kmax is not None:
pruned_record = prune_last_jd(
[query_record, candidate_records[kmax][0]], record
)
n_clusters += 1
print(ast_to_code(pruned_record["ast"]))
ret2.append((pruned_record, i, j, kmax))
logging.info(
f"(# similars, #clusters, #singles, #completions) = ({len(candidate_records)}, {n_clusters}, {n_uniques}, {len(ret)})"
)
print(
f"(# similars, #clusters, #singles, #completions) = ({len(candidate_records)}, {n_clusters}, {n_uniques}, {len(ret)})"
)
return ret2 + ret
def get_completions3(query_record, candidate_records, top_n, threshold1, threshold2):
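    """Grow tuples of candidate snippets that stay similar to the query (per threshold1/threshold2),
    then prune each tuple down to a code recommendation, returning at most top_n of them."""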
l = len(candidate_records)
ret = []
acc = []
for i in range(l):
ret.append([i])
changed = True
while changed:
ret2 = []
changed = False
for tuple in ret:
kmax = None
maxscore = 0
for k in range(tuple[-1] + 1, l):
record_list1 = []
record_list2 = []
for i in tuple:
record_list1.append(candidate_records[i][2])
record_list2.append(candidate_records[i][0])
record_list1.append(candidate_records[k][2])
record_list2.append(candidate_records[k][0])
qlen = sum(Counter(record_list1[0]["features"]).values())
iscore = find_similarity_score_features_set_un(record_list1)
pscore = iscore / qlen
# pscore = find_similarity_score_features_set(record_list1)
if pscore > threshold1:
query_score_un = find_similarity_score_features_set_un(record_list1)
tmp_score = find_similarity_score_features_set_un(record_list2)
if tmp_score > threshold2 * query_score_un and tmp_score > maxscore:
kmax = k
maxscore = tmp_score
if kmax is not None:
changed = True
ret2.append(tuple + [kmax])
acc = ret2 + acc
ret = ret2
ret = []
acc = sorted(acc, key=lambda t: t[0] * 1000 - len(t))
for i in range(len(acc)):
tuple = acc[i]
logging.info(f"Pruning {len(tuple)} {tuple}")
is_subset = False
s = set(tuple)
for j in reversed(range(i)):
if distance(Counter(tuple), Counter(acc[j])) > 0.5:
is_subset = True
if not is_subset:
print(f"Pruning {len(tuple)} {tuple}")
logging.info("recommending")
pruned_record = candidate_records[tuple[0]][0]
for j in range(1, len(tuple)):
pruned_record = prune_last_jd(
[query_record, candidate_records[tuple[j]][0]], pruned_record
)
ret.append([pruned_record, candidate_records[tuple[0]][0]] + tuple)
if len(ret) >= top_n:
return ret
return ret
def print_match_index(query_record, candidate_records):
ret = -1
i = 0
for (candidate_record, score, pruned_record, pruned_score) in candidate_records:
if query_record["index"] == candidate_record["index"]:
ret = i
i += 1
if ret < 0:
print("Failed to match original method.")
elif ret > 0:
print(f"Matched original method. Rank = {ret}")
else:
print(f"Matched original method perfectly.")
#### Interface methods ####
def find_indices_similar_to_features(
vectorizer, counter_matrix, feature_lists, num_similars, min_similarity_score
):
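    """Return (index, cosine score) pairs of corpus entries scoring above
    min_similarity_score times the query's self-similarity, best first."""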
doc_counter_vector = vectorizer.transform(feature_lists)
    self_sim = my_similarity_score(doc_counter_vector, doc_counter_vector).flatten()[0]
cosine_similarities = my_similarity_score(
doc_counter_vector, counter_matrix
).flatten()
related_docs_indices = [
i
for i in cosine_similarities.argsort()[::-1]
        if cosine_similarities[i] > min_similarity_score * self_sim
][0:num_similars]
return [(j, cosine_similarities[j]) for j in related_docs_indices]
def find_similarity_score_features(record1, record2):
features_as_counter1 = Counter(record1["features"])
features_as_counter2 = Counter(record2["features"])
return distance(features_as_counter1, features_as_counter2)
def prune_last_jd(records, record2):
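    """Greedily keep the leaves of `record2` whose features most increase similarity
    to the feature sets in `records`; return `record2` pruned and re-featurized."""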
other_features = [Counter(record["features"]) for record in records]
ast = record2["ast"]
leaf_features, leaf_pair_features = collect_features_as_list(ast, False, True)
out_features = [None] * len(leaf_features)
current_features = Counter()
current_leaf_indices = []
for features1 in other_features:
score = distance(features1, current_features)
done = False
while not done:
max = score
max_idx = None
i = 0
for leaf_feature in leaf_features:
if leaf_feature is not None:
new_features = current_features + leaf_feature
# add_pair_features(new_features, leaf_pair_features, i, current_leaf_indices)
tmp = distance(features1, new_features)
if tmp > max:
max = tmp
max_idx = i
i += 1
if max_idx is not None:
score = max
out_features[max_idx] = leaf_features[max_idx]
current_features = current_features + leaf_features[max_idx]
# add_pair_features(current_features, leaf_pair_features, max_idx, current_leaf_indices)
current_leaf_indices.append(max_idx)
leaf_features[max_idx] = None
else:
done = True
global leaf_idx
leaf_idx = 0
pruned_ast = prune_ast(ast, out_features)[1]
pruned_features = collect_features_as_list(pruned_ast, False, False)[0]
return copy_record(record2, pruned_ast, pruned_features)
def ast_to_code(tree):
token_list = []
ast_to_code_aux(tree, token_list)
token_list.append("\n")
return "".join(token_list)
def ast_to_code_with_full_lines(tree, fulltree):
line_list = []
ast_to_code_collect_lines(tree, line_list)
token_list = []
ast_to_code_print_lines(fulltree, line_list, token_list)
token_list.append("\n")
return "".join(token_list)
def find_similar(
query_record,
records,
vectorizer,
counter_matrix,
num_similars,
min_similarity_score,
min_pruned_score,
):
print("Query features: ")
print_features(query_record["features"])
similars = find_indices_similar_to_features(
vectorizer,
counter_matrix,
[feature_list_to_doc(query_record)],
num_similars,
min_similarity_score,
)
candidate_records = []
for (idx, score) in similars:
pruned_record = prune_second_jd(query_record, records[idx])
pruned_score = find_similarity_score_features(query_record, pruned_record)
if pruned_score > min_pruned_score:
candidate_records.append((records[idx], score, pruned_record, pruned_score))
candidate_records = sorted(candidate_records, key=lambda v: v[3], reverse=True)
logging.info(f"# of similar snippets = {len(candidate_records)}")
return candidate_records
def cluster_and_intersect(
query_record, candidate_records, top_n, threshold1, threshold2
):
clustered_records = []
if len(candidate_records) > 0:
if config.CLUSTER:
clustered_records = get_completions3(
query_record, candidate_records, top_n, threshold1, threshold2
)
return clustered_records
def print_similar_and_completions(query_record, records, vectorizer, counter_matrix):
candidate_records = find_similar(
query_record,
records,
vectorizer,
counter_matrix,
config.NUM_SIMILARS,
config.MIN_SIMILARITY_SCORE,
config.MIN_PRUNED_SCORE,
)
print_match_index(query_record, candidate_records)
clustered_records = cluster_and_intersect(
query_record,
candidate_records,
config.TOP_N,
config.THRESHOLD1,
config.THRESHOLD2,
)
print(
f"################ query code ################ index = {query_record['index']}"
)
print(ast_to_code(query_record["ast"]))
if query_record["index"] >= 0:
print("---------------- extracted from ---------------")
print(ast_to_code(records[query_record["index"]]["ast"]))
for clustered_record in clustered_records:
print(
f"------------------- suggested code completion ------------------"
) # idxs = ({clustered_record[1:]}), score = {candidate_records[clustered_record[1]][3]}")
print(
ast_to_code_with_full_lines(
clustered_record[0]["ast"], clustered_record[1]["ast"]
)
)
if config.PRINT_SIMILAR:
j = 0
for (candidate_record, score, pruned_record, pruned_score) in candidate_records:
print(
f"idx = {j}:------------------- similar code ------------------ index = {candidate_record['index']}, score = {score}"
)
print(ast_to_code(candidate_record["ast"]))
print(
f"------------------- similar code (pruned) ------------------ score = {pruned_score}"
)
print(ast_to_code(pruned_record["ast"]))
j += 1
print("", flush=True)
def collect_features_as_list(ast, is_init, is_counter):
feature_list = []
leaf_features = []
leaf_pair_features = dict()
global leaf_idx
leaf_idx = 0
collect_features_aux(
ast,
feature_list,
[],
[],
dict(),
leaf_features,
leaf_pair_features,
is_init,
is_counter,
)
if is_counter:
return (leaf_features, leaf_pair_features)
else:
return (feature_list, None)
def read_and_featurize_record_file(rpath):
with open(rpath, "r") as inp:
for line in inp:
obj = json.loads(line)
obj["features"] = collect_features_as_list(obj["ast"], False, False)[0]
obj["index"] = -1
return obj
def test_record_at_index(idx):
record = get_record_part(records[idx])
    if record is not None:
print_similar_and_completions(record, records, vectorizer, counter_matrix)
def featurize_and_test_record(record_files, keywords):
set_tmp = None
record_final = None
for record_file in record_files:
record = read_and_featurize_record_file(record_file)
if record is not None:
record_final = record
if set_tmp is not None:
set_tmp = set_tmp & Counter(record["features"])
else:
set_tmp = Counter(record["features"])
# need to figure out how to merge asts as well
if set_tmp is None:
set_tmp = Counter()
for keyword in keywords:
set_tmp[vocab.get_index(keyword)] += 1
if record_final is None:
record_final = {"ast": None, "index": -1, "features": list(set_tmp.elements())}
else:
record_final["features"] = list(set_tmp.elements())
if len(record_final["features"]) > 0:
print_similar_and_completions(record_final, records, vectorizer, counter_matrix)
def test_all():
N = config.N_SAMPLES
(sampled_indices, sampled_records) = sample_n_records(records, N)
for k, record in enumerate(sampled_records):
print(f"{k}: ", end="")
print_similar_and_completions(record, records, vectorizer, counter_matrix)
def load_all(counter_path, asts_path):
with open(counter_path, "rb") as outf:
(vectorizer, counter_matrix) = pickle.load(outf)
records = read_all_records(asts_path)
logging.info("Read all records.")
return (vectorizer, counter_matrix, records)
def setup(records_file):
global config
global vocab
config = Config()
logging.basicConfig(level=logging.DEBUG)
random.seed(config.SEED)
os.makedirs(options.working_dir, exist_ok=True)
if records_file is None:
vocab = Vocab.load()
else:
vocab = Vocab.load(True)
featurize_records_file(
records_file, os.path.join(options.working_dir, config.FEATURES_FILE)
)
vocab.dump()
logging.info("Done featurizing.")
counter_vectorize(
os.path.join(options.working_dir, config.FEATURES_FILE),
os.path.join(options.working_dir, config.TFIDF_FILE),
)
logging.info("Done computing counter matrix.")
logging.basicConfig(level=logging.DEBUG)
options = parse_args()
setup(options.corpus)
(vectorizer, counter_matrix, records) = load_all(
os.path.join(options.working_dir, config.TFIDF_FILE),
os.path.join(options.working_dir, config.FEATURES_FILE),
)
if options.index_query is not None:
test_record_at_index(options.index_query)
elif len(options.file_query) > 0 or len(options.keywords) > 0:
featurize_and_test_record(options.file_query, options.keywords)
elif options.testall:
test_all()
| aroma-paper-artifacts-main | reference/src/main/python/similar.py |
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
archs = torch.cuda.get_arch_list()
archs = [arch[3:] for arch in archs if arch.startswith('sm_')]
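# For example, an arch list of ['sm_70', 'sm_80'] is printed as "70;80",
# the semicolon-separated list form that CMake consumes.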
print(";".join(archs), end='')
| baspacho-main | cmake/get_torch_cuda_archs.py |
#!/usr/bin/env python
"""TODO."""
from __future__ import print_function
import numbskull
from numbskull.numbskulltypes import *
import numpy as np
def factor(f, args):
"""THIS IS A DOCSTRING."""
if f == FUNC_IMPLY_NATURAL:
# TODO
pass
elif f == FUNC_OR:
return 1 if any(args) else -1
elif f == FUNC_EQUAL:
# TODO
pass
    elif f == FUNC_AND or f == FUNC_ISTRUE:
return 1 if all(args) else -1
elif f == FUNC_LINEAR:
# TODO
pass
elif f == FUNC_RATIO:
# TODO
pass
elif f == FUNC_LOGICAL:
# TODO
pass
elif f == FUNC_IMPLY_MLN:
# TODO
pass
else:
        raise NotImplementedError("FACTOR " + str(f) + " not implemented.")
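# For example, factor(FUNC_OR, [0, 1]) returns 1 and factor(FUNC_OR, [0, 0])
# returns -1; the factor functions still marked TODO above fall through and
# return None.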
for (key, value) in numbskull.inference.FACTORS.items():
print(key)
variables = 2
if key == "DP_GEN_DEP_FIXING" or key == "DP_GEN_DEP_REINFORCING":
# These factor functions requires three vars to work
variables = 3
edges = variables
weight = np.zeros(1, Weight)
variable = np.zeros(variables, Variable)
factor = np.zeros(1, Factor)
fmap = np.zeros(edges, FactorToVar)
domain_mask = np.zeros(variables, np.bool)
weight[0]["isFixed"] = True
weight[0]["initialValue"] = 1
for i in range(variables):
variable[i]["isEvidence"] = 0
variable[i]["initialValue"] = 0
variable[i]["dataType"] = 0
variable[i]["cardinality"] = 2
factor[0]["factorFunction"] = value
factor[0]["weightId"] = 0
factor[0]["featureValue"] = 1
factor[0]["arity"] = variables
factor[0]["ftv_offset"] = 0
for i in range(variables):
fmap[i]["vid"] = i
ns = numbskull.NumbSkull(n_inference_epoch=100,
n_learning_epoch=100,
quiet=True)
ns.loadFactorGraph(weight, variable, factor, fmap, domain_mask, edges)
ns.learning()
ns.inference()
print(ns.factorGraphs[0].count)
| numbskull-master | loadfg.py |
#!/usr/bin/env python
"""TODO."""
from numbskull import numbskull
args = ['test',
'-l', '100',
'-i', '100',
'-t', '10',
'-s', '0.01',
'--regularization', '2',
'-r', '0.1',
'--quiet']
ns = numbskull.load(args)
ns.learning()
ns.inference()
print(ns.factorGraphs[0].count)
| numbskull-master | test.py |
#!/usr/bin/env python
"""This tests learning for labelling functions."""
from __future__ import print_function, absolute_import
import numpy as np
import numbskull
from numbskull.numbskulltypes import *
import math
def index_to_values(index, num_lf):
value = [0] * (1 + num_lf)
value[0] = index % 2
index = index // 2
for i in range(num_lf):
value[i + 1] = index % 3
index = index // 3
return value
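# index_to_values enumerates joint states in a mixed-radix encoding: value[0]
# is the binary label y and value[1:] are the ternary labelling-function
# outputs. With num_lf=2 there are 2 * 3**2 == 18 states, and
# index_to_values(5, 2) == [1, 2, 0].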
def create_fg(prior, accuracy, abstain, copies):
"""
This creates copies of the following factor graph.
istrue (weight = prior)
|
y_i
/|\
/ | \
/ | \
/ | \
/ | \
LF_{i1} ... LF_{in}
( weight = ) ( weight = )
accuracy[1] accuracy[n]
Arguments:
prior: one floating-point value
        accuracy: list of floating-point values (one per labelling function)
        abstain: list of floating-point values (same length as accuracy)
copies: integer
Returns:
list of arguments that can be passed to numbskull.loadFactorGraph
"""
n = len(accuracy) # number of labelling functions
weights = 1 + n
variables = copies * (1 + n)
factors = copies * (1 + n)
edges = copies * (1 + 2 * n)
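    # Per copy: one arity-1 class-prior factor plus n arity-2 accuracy factors,
    # hence 1 + n factors and 1 + 2*n factor-to-variable edges.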
weight = np.zeros(weights, Weight)
variable = np.zeros(variables, Variable)
factor = np.zeros(factors, Factor)
fmap = np.zeros(edges, FactorToVar)
domain_mask = np.zeros(variables, np.bool)
states = 2 * 3 ** n
Z = np.zeros(states, np.float64)
for i in range(states):
value = index_to_values(i, n)
y = value[0]
lfs = value[1:]
Z[i] = prior * (2 * y - 1)
for (j, lf) in enumerate(lfs):
lf = lf - 1 # remap to standard -1, 0, 1
if lf != 0:
Z[i] += accuracy[j] * lf * (2 * y - 1)
# TODO: abstain not handled yet
Z[i] = math.exp(Z[i])
Z = np.cumsum(Z)
Z = Z / Z[-1]
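    # Z now holds the cumulative distribution over joint states; each copy is
    # sampled below by inverse-CDF lookup, i.e. np.argmax(Z >= r) for uniform r.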
print(Z)
for w in weight:
w["isFixed"] = False
w["initialValue"] = 1.0
weight[0]["initialValue"] = 0
for copy in range(copies):
r = np.random.rand()
index = np.argmax(Z >= r)
value = index_to_values(index, n)
y = value[0]
lf = value[1:]
# y variable
variable[copy * (1 + n)]["isEvidence"] = 0 # query
variable[copy * (1 + n)]["initialValue"] = 0 # Do not actually show y
variable[copy * (1 + n)]["dataType"] = 0 # not sparse
variable[copy * (1 + n)]["cardinality"] = 2
# labelling function variable
for i in range(n):
variable[copy * (1 + n) + 1 + i]["isEvidence"] = 1 # evidence
variable[copy * (1 + n) + 1 + i]["initialValue"] = lf[i]
variable[copy * (1 + n) + 1 + i]["dataType"] = 0 # not sparse
variable[copy * (1 + n) + 1 + i]["cardinality"] = 3
# Class prior
factor[copy * (1 + n)]["factorFunction"] = 18 # DP_GEN_CLASS_PRIOR
factor[copy * (1 + n)]["weightId"] = 0
factor[copy * (1 + n)]["featureValue"] = 1
factor[copy * (1 + n)]["arity"] = 1
factor[copy * (1 + n)]["ftv_offset"] = copy * (1 + 2 * n)
fmap[copy * (1 + 2 * n)]["vid"] = copy * (1 + n)
# Labelling function accuracy
for i in range(n):
factor_index = copy * (1 + n) + 1 + i
factor[factor_index]["factorFunction"] = 21 # DP_GEN_LF_ACCURACY
factor[factor_index]["weightId"] = i + 1
factor[factor_index]["featureValue"] = 1
factor[factor_index]["arity"] = 2
factor[factor_index]["ftv_offset"] = copy * (1 + 2 * n) + 1 + 2 * i
fmap_index = copy * (1 + 2 * n) + 1 + 2 * i
fmap[fmap_index]["vid"] = copy * (1 + n) # y
fmap[fmap_index + 1]["vid"] = copy * (1 + n) + i + 1 # LF i
return weight, variable, factor, fmap, domain_mask, edges
learn = 100
ns = numbskull.NumbSkull(n_inference_epoch=100,
n_learning_epoch=learn,
quiet=True,
learn_non_evidence=True,
stepsize=0.0001,
burn_in=100,
decay=0.001 ** (1.0 / learn),
regularization=1,
reg_param=0.01)
prior = 0
accuracy = [1, 0.5]
abstain = [0, 0, 0]
copies = 10
fg = create_fg(prior, accuracy, abstain, copies)
print("weight")
print(fg[0])
print()
print("variable")
print(fg[1])
print()
print("factor")
print(fg[2])
print()
print("fmap")
print(fg[3])
print()
print("domain_mask")
print(fg[4])
print()
print("edges")
print(fg[5])
print()
ns.loadFactorGraph(*fg)
print(ns.factorGraphs[0].weight_value)
ns.learning()
print(ns.factorGraphs[0].weight_value)
| numbskull-master | test_lf_learning.py |
"""For pip."""
from setuptools import setup, find_packages
exec(open('numbskull/version.py').read())
setup(
name='numbskull',
version=__version__,
description='sample away',
packages=find_packages(),
install_requires=[
'future',
'futures; python_version == "2.7"',
],
entry_points={
'console_scripts': [
'numbskull = numbskull.numbskull:main',
],
},
)
| numbskull-master | setup.py |
#!/usr/bin/env python
from __future__ import print_function
from distutils.dir_util import mkpath
import sys
import os
import shutil
import subprocess
def generate(directory, degree, copies):
print("Generating " + directory + "...")
sys.stdout.flush()
try:
shutil.rmtree(directory)
except:
# exception can be thrown if dir does not exist
pass
mkpath(directory)
# app.ddlog
f = open(directory + "/app.ddlog", "w")
f.write("p? (\n"
" prop_id bigint\n"
").\n"
"\n"
"voter_voted_for (\n"
" voter_id bigint,\n"
" prop_id bigint\n"
").\n"
"\n")
for i in range(degree):
f.write("v" + str(i) + "? (\n"
" prop_id bigint\n"
").\n"
"\n")
f.write("@name(\"and_factor\")\n"
"@weight(1.0)\n"
"p(p)")
for i in range(degree):
f.write(" ^ v" + str(i) + "(v)")
f.write(" :-\n"
" voter_voted_for(v, p).\n"
"\n"
"@weight(0.0)\n"
"v0(v) :- FALSE.\n")
f.close()
# db.url
f = open(directory + "/db.url", "w")
f.write("postgresql://[email protected]:1432/intro_" +
str(degree) + "_" + str(copies))
f.close()
# deepdive.conf
f = open(directory + "/deepdive.conf", "w")
f.write("deepdive.calibration.holdout_fraction:0.25\n"
"deepdive.sampler.sampler_args: \"-l 0 -i 0 --alpha 0.01\"\n")
f.close()
# simple.costmodel.txt
f = open(directory + "/simple.costmodel.txt", "w")
f.write("v_A 5.0\n"
"v_B 8.0\n"
"v_C 0.0\n"
"v_D 1.0\n"
"v_Au 6.0\n"
"v_Bu 9.0\n"
"v_Cu 1.0\n"
"v_Du 2.0\n"
"w_C 5.0\n"
"w_D 5.0\n"
"w_Cu 5.0\n"
"w_Du 5.0\n"
"f_A 0.0\n"
"f_C 0.0\n"
"f_D 1.0\n"
"f_E 1.0\n"
"f_G 2.0\n"
"f_Cu 0.0\n"
"f_Du 0.0\n"
"f_Eu 0.0\n"
"f_Gum 1.0\n"
"f_Guw 1.0\n"
"f_Gumw 0.0\n"
"f_H 1000.0\n")
f.close()
mkpath(directory + "/input")
f = open(directory + "/input/p.tsv", "w")
f.write("0\t\\N\n")
f.close()
f = open(directory + "/input/voter_voted_for.tsv", "w")
index = 0
for i in range(copies):
f.write(str(i) + "\t0\n")
f.close()
f = open(directory + "/input/v.tsv", "w")
for i in range(copies):
f.write(str(i) + "\t\\N\n")
f.close()
for i in range(degree):
os.symlink(directory + "/input/v.tsv",
directory + "/input/v" + str(i) + ".tsv")
cmd = ["deepdive", "do", "all"]
subprocess.call(cmd, cwd=directory)
if __name__ == "__main__":
n_var = 12600
for degree in [1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:
copies = n_var // degree
generate("/dfs/scratch0/bryanhe/intro_" +
str(copies) + "_" +
str(degree) + "/", degree, copies)
| numbskull-master | experiments/intro/generate.py |
#!/usr/bin/env python
from __future__ import print_function
copies = 10
f = open("input/p.tsv", "w")
f.write("0\t\\N\n")
f.close()
f = open("input/voter_voted_for.tsv", "w")
index = 0
for i in range(copies):
f.write(str(i) + "\t0\n")
f.close()
f = open("input/v.tsv", "w")
for i in range(copies):
f.write(str(i) + "\t\\N\n")
f.close()
| numbskull-master | experiments/partitionable_and_vote/generate.py |
#!/usr/bin/env python
from __future__ import print_function
from distutils.dir_util import mkpath
import sys
import os
import shutil
import subprocess
import time
child_processes = {}
def generate(directory, propositions, voters_per_proposition, copies,
FNULL):
print("Generating " + directory + "...")
sys.stdout.flush()
mkpath(directory)
# app.ddlog
f = open(directory + "/app.ddlog", "w")
f.write("p? (\n"
" prop_id bigint\n"
").\n"
"\n"
"voter_voted_for (\n"
" voter_id bigint,\n"
" prop_id bigint\n"
").\n"
"\n")
for i in range(voters_per_proposition):
f.write("v" + str(i) + "? (\n"
" prop_id bigint\n"
").\n"
"\n")
f.write("@name(\"and_factor\")\n"
"@weight(1.0)\n"
"p(p)")
for i in range(voters_per_proposition):
f.write(" ^ v" + str(i) + "(v)")
f.write(" :-\n"
" voter_voted_for(v, p).\n"
"\n"
"@weight(0.0)\n"
"v0(v) :- FALSE.\n")
f.close()
# db.url
f = open(directory + "/db.url", "w")
f.write("postgresql://[email protected]:1432/tradeoff_" +
str(propositions) + "_" +
str(voters_per_proposition) + "_" +
str(copies))
f.close()
# deepdive.conf
f = open(directory + "/deepdive.conf", "w")
f.write("deepdive.calibration.holdout_fraction:0.25\n"
"deepdive.sampler.sampler_args: \"-l 0 -i 0 --alpha 0.01\"\n")
f.close()
# simple.costmodel.txt
f = open(directory + "/simple.costmodel.txt", "w")
f.write("v_A 5.0\n"
"v_B 8.0\n"
"v_C 0.0\n"
"v_D 1.0\n"
"v_Au 6.0\n"
"v_Bu 9.0\n"
"v_Cu 1.0\n"
"v_Du 2.0\n"
"w_C 5.0\n"
"w_D 5.0\n"
"w_Cu 5.0\n"
"w_Du 5.0\n"
"f_A 0.0\n"
"f_C 0.0\n"
"f_D 1.0\n"
"f_E 1.0\n"
"f_G 2.0\n"
"f_Cu 0.0\n"
"f_Du 0.0\n"
"f_Eu 0.0\n"
"f_Gum 1.0\n"
"f_Guw 1.0\n"
"f_Gumw 0.0\n"
"f_H 1000.0\n")
f.close()
mkpath(directory + "/input")
f = open(directory + "/input/p.tsv", "w")
for i in range(propositions):
f.write(str(i) + "\t\\N\n")
f.close()
f = open(directory + "/input/voter_voted_for.tsv", "w")
index = 0
for i in range(copies):
for j in range(propositions):
f.write(str(i) + "\t" + str(j) + "\n")
f.close()
f = open(directory + "/input/v.tsv", "w")
for i in range(copies):
f.write(str(i) + "\t\\N\n")
f.close()
for i in range(voters_per_proposition):
try:
os.symlink(directory + "/input/v.tsv",
directory + "/input/v" + str(i) + ".tsv")
except:
pass
cmd = ["deepdive", "do", "all"]
child_processes[directory] = subprocess.Popen(cmd, cwd=directory,
stdout=FNULL)
if __name__ == "__main__":
FNULL = open(os.devnull, 'w')
n_var = 1260000
propositions = 10
# for voters_per_proposition in [2, 5, 10, 20, 50]:
for voters_per_proposition in [1, 2, 3, 4, 5, 10,
15, 20, 25, 30, 35, 40, 45, 50]:
copies = n_var // voters_per_proposition
generate("/dfs/scratch0/bryanhe/tradeoff_" +
str(propositions) + "_" +
str(copies) + "_" +
str(voters_per_proposition) + "/",
propositions, voters_per_proposition, copies, FNULL)
print(80 * "*")
done = False
while not done:
done = True
for i in child_processes:
status = child_processes[i].poll()
print(str(i) + ":\t" + str(status))
if status is None:
done = False
print()
time.sleep(1)
FNULL.close()
| numbskull-master | experiments/tradeoff/generate.py |
# -*- coding: utf-8 -*-
#
# Numbskull documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 26 17:55:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
"""TODO."""
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Numbskull'
copyright = u'2016, Bryan He, Theodoros Rekatsinas'
author = u'Bryan He, Theodoros Rekatsinas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0'
# The full version, including alpha/beta/rc tags.
release = u'0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Numbskull v0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = '../../fig/numbskull.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
html_favicon = "../../fig/mario.gif"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Numbskulldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Numbskull.tex', u'Numbskull Documentation',
u'Bryan He, Theodoros Rekatsinas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'numbskull', u'Numbskull Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Numbskull', u'Numbskull Documentation',
author, 'Numbskull', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# Allow __init__ to be autodoc'ed
def skip(app, what, name, obj, skip, options):
"""TODO."""
if name == "__init__":
return False
return skip
def setup(app):
"""TODO."""
app.connect("autodoc-skip-member", skip)
numpydoc_show_class_members = False
| numbskull-master | docs/source/conf.py |
#!/usr/bin/env python
"""Script to run distributed experiments."""
import numbskull_master
import sys
if __name__ == "__main__":
n_var = 1260000
machines = 4
threads_per_machine = 1
learning_epochs = 10
inference_epochs = 10
f = open("intro_degree.dat", "w")
f.write("degree\tcopies\tmaster_l\tmaster_i\t" +
"a_l\ta_i\tb_l\tb_i\tbu_l\tbu_i\tc_l\tc_i\tcu_l\tcu_i\n")
f.flush()
for degree in [1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:
copies = n_var // degree
application_dir = "/dfs/scratch0/bryanhe/intro_" + \
str(copies) + "_" + str(degree) + "/"
print(application_dir)
sys.stdout.flush()
f.write(str(degree) + "\t" +
str(copies) + "\t")
f.flush()
# Master
(ns, res) = numbskull_master.main(application_dir, 0,
threads_per_machine,
learning_epochs, inference_epochs,
"sp", "--ppa", False,
"")
f.write(str(res["learning_time"]) + "\t" +
str(res["inference_time"]) + "\t")
f.flush()
# A
(ns, res) = numbskull_master.main(application_dir, machines,
threads_per_machine,
learning_epochs, inference_epochs,
"sp", "--ppa", False,
"(1)")
f.write(str(res["learning_time"]) + "\t" +
str(res["inference_time"]) + "\t")
f.flush()
# B
(ns, res) = numbskull_master.main(application_dir, machines,
threads_per_machine,
learning_epochs, inference_epochs,
"sp", "--ppb", False,
"(1)")
f.write(str(res["learning_time"]) + "\t" +
str(res["inference_time"]) + "\t")
f.flush()
# Bu
(ns, res) = numbskull_master.main(application_dir, machines,
threads_per_machine,
learning_epochs, inference_epochs,
"sp", "--ppb", True,
"(1)")
f.write(str(res["learning_time"]) + "\t" +
str(res["inference_time"]) + "\t")
f.flush()
# C
(ns, res) = numbskull_master.main(application_dir, machines,
threads_per_machine,
learning_epochs, inference_epochs,
"sp", "--ppc", False,
"(1)")
f.write(str(res["learning_time"]) + "\t" +
str(res["inference_time"]) + "\t")
f.flush()
# Cu
(ns, res) = numbskull_master.main(application_dir, machines,
threads_per_machine,
learning_epochs, inference_epochs,
"sp", "--ppc", True,
"(1)")
f.write(str(res["learning_time"]) + "\t" +
str(res["inference_time"]) + "\n")
f.flush()
f.close()
| numbskull-master | salt/src/experiment_intro_degree.py |
#!/usr/bin/env python
"""Script to run distributed experiments."""
import numbskull_master
if __name__ == "__main__":
application_dir = "/dfs/scratch0/bryanhe/congress6/"
machines = 1
threads_per_machine = 1
learning_epochs = 100
inference_epochs = 100
f = open("congress.dat", "w")
for m in range(0, machines + 1):
partition_type = "" if m == 0 else "(1)"
(ns, res) = numbskull_master.main(application_dir, m,
threads_per_machine,
learning_epochs, inference_epochs,
"sp", "a", False,
partition_type)
f.write(str(m) + "\t" +
str(res["learning_time"]) + "\t" +
str(res["inference_time"]) + "\n")
f.flush()
| numbskull-master | salt/src/experiment.py |
"""TODO."""
from __future__ import print_function
import numbskull
from numbskull.numbskulltypes import *
import numbskull.inference
import numpy as np
import codecs
import numba
import time
import networkx as nx
import nxmetis
# Commands from master to minions (Tags)
ASSIGN_ID = 'ASSIGN_ID'
INIT_NS = 'INIT_NS'
LOAD_FG = 'LOAD_FG'
SYNC_MAPPING = 'SYNC_MAPPING'
LEARN = 'LEARN'
INFER = 'INFER'
# Responses from minions to master (Tags)
ASSIGN_ID_RES = 'ASSIGN_ID_RES'
INIT_NS_RES = 'INIT_NS_RES'
LOAD_FG_RES = 'LOAD_FG_RES'
SYNC_MAPPING_RES = 'SYNC_MAPPING_RES'
LEARN_RES = 'LEARN_RES'
INFER_RES = 'INFER_RES'
# TODO These should be in some sort of util package
def get_views(cur):
"""TODO."""
cur.execute("SELECT table_name "
"FROM INFORMATION_SCHEMA.views "
"WHERE table_name LIKE '%_sharding' "
" AND table_schema = ANY (current_schemas(false))")
view = []
while True:
temp = cur.fetchmany()
if temp == []:
break
for i in temp:
assert(len(i) == 1)
view += i
factor_view = []
variable_view = []
weight_view = []
for v in view:
is_f = ("_factors_" in v)
is_v = ("_variables_" in v)
is_w = ("_weights_" in v)
assert((is_f + is_v + is_w) == 1)
if is_f:
factor_view.append(v)
if is_v:
variable_view.append(v)
if is_w:
weight_view.append(v)
return (factor_view, variable_view, weight_view)
@numba.jit(nopython=True, cache=True, nogil=True)
def get_factors_helper(row, fid, ff, factor, factor_pt, factor_ufo, fmap,
factor_index, fmap_index):
"""TODO."""
for i in row:
fid[factor_index] = i[-1]
factor[factor_index]["factorFunction"] = ff
factor[factor_index]["weightId"] = i[-5]
factor[factor_index]["featureValue"] = i[-4]
factor_pt[factor_index] = i[-3]
factor_ufo[factor_index] = (i[-2] == 117) # 117 == 'u'
factor[factor_index]["arity"] = len(i) - 5
if factor_index == 0:
factor[factor_index]["ftv_offset"] = 0
else:
factor[factor_index]["ftv_offset"] = \
factor[factor_index - 1]["ftv_offset"] + \
factor[factor_index - 1]["arity"]
factor_index += 1
for j in i[:-5]:
fmap[fmap_index]["vid"] = j
# TODO: how to actually get categorical info?
fmap[fmap_index]["dense_equal_to"] = 0
fmap_index += 1
return factor_index, fmap_index
def get_factors(cur, views, sql_filter="True"):
"""TODO."""
factors = 0
edges = 0
# This operation is for counting rows, and getting rows
op_template = "SELECT {cmd} FROM {table_name} WHERE {filter}"
# This operation is for counting columns in a table
count_template = "SELECT COUNT(*) " \
"FROM INFORMATION_SCHEMA.COLUMNS " \
"WHERE table_schema = 'public' " \
"AND table_name = '{table_name}'"
# This operation is for getting columns in a table
names_template = "SELECT column_name " \
"FROM INFORMATION_SCHEMA.COLUMNS " \
"WHERE table_schema = 'public' " \
"AND table_name = '{table_name}' " \
"ORDER BY ordinal_position"
# Pre-count number of factors and edges
# TODO: can this step be avoided?
min_fid = np.zeros(len(views), np.int64)
max_fid = np.zeros(len(views), np.int64)
for (i, table) in enumerate(views):
op = op_template.format(cmd="COUNT(*), MIN(fid), MAX(fid)",
table_name=table,
filter=sql_filter)
cur.execute(op)
info = cur.fetchone()
f = info[0] # number of factors in this table
min_fid[i] = info[1] if info[1] is not None else 0
max_fid[i] = info[2] if info[2] is not None else 0
count = count_template.format(table_name=table)
cur.execute(count)
v = cur.fetchone()[0] - 4 # number of vars used by these factors
factors += f
edges += f * v
perm = min_fid.argsort()
min_fid = min_fid[perm]
max_fid = max_fid[perm]
assert(all(max_fid[i] <= max_fid[i + 1] for i in xrange(len(max_fid) - 1)))
# TODO: cannot directly apply perm to views
# (standard array, not numpy array)
views_temp = [None for i in range(len(views))]
for i in range(len(views)):
views_temp[i] = views[perm[i]]
views = views_temp
fid = np.zeros(factors, np.int64)
factor = np.zeros(factors, Factor)
factor_pt = np.zeros(factors, np.int8) # partition type
factor_ufo = np.zeros(factors, np.bool) # unary factor optimization
fmap = np.zeros(edges, FactorToVar)
factor_index = 0
fmap_index = 0
for v in views:
# Find factor function
ff = -1
for (key, value) in numbskull.inference.FACTORS.items():
if ("_" + key + "_").lower() in v:
assert(ff == -1)
ff = value
# TODO: assume istrue if not found?
if ff == -1:
ff = numbskull.inference.FUNC_ISTRUE
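        # The factor function is inferred from the view name: a name containing
        # "_<factors key>_" (lower-cased), e.g. "_or_", selects that function,
        # and anything unrecognized falls back to FUNC_ISTRUE.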
names_op = names_template.format(table_name=v)
cur.execute(names_op)
name = cur.fetchall()
for i in range(len(name)):
assert(len(name[i]) == 1)
name[i] = name[i][0]
assert(name[-4] == "weight_id")
assert(name[-3] == "feature_value")
assert(name[-2] == "partition_key")
assert(name[-1] == "fid")
cmd = (", ".join(['"' + i + '"' for i in name[:-2]]) +
", ASCII(LEFT(partition_key, 1))" + # partition key
", ASCII(SUBSTR(partition_key, 2, 1))" + # unary factor opt
", fid")
# TODO: should actually put the ORDER BY fid in its own var
op = op_template.format(cmd=cmd, table_name=v,
filter=sql_filter + "ORDER BY fid")
cur.execute(op)
while True:
row = cur.fetchmany(10000)
if row == []:
break
(factor_index, fmap_index) = \
get_factors_helper(row, fid, ff, factor, factor_pt, factor_ufo,
fmap, factor_index, fmap_index)
return fid, factor, factor_pt.view('c'), factor_ufo, fmap, edges
@numba.jit(nopython=True, cache=True, nogil=True)
def get_variables_helper(row, vid, variable, var_pt, var_ufo, index):
"""TODO."""
for v in row:
vid[index] = v[0]
variable[index]["isEvidence"] = v[1]
variable[index]["initialValue"] = v[2]
variable[index]["dataType"] = v[3]
variable[index]["cardinality"] = v[4]
var_pt[index] = v[5]
var_ufo[index] = (v[6] == 117) # 117 == 'u'
index += 1
return index
def get_variables(cur, views, sql_filter="True"):
"""TODO."""
op_template = "SELECT {cmd} FROM {table_name} " \
"WHERE {filter}"
# Obtain count of variables
# TODO: is there a way to do this together with next part?
# (one query per table)
n = 0
for v in views:
op = op_template.format(cmd="COUNT(*)", table_name=v,
filter=sql_filter)
cur.execute(op)
n += cur.fetchone()[0] # number of factors in this table
vid = np.zeros(n, np.int64)
variable = np.zeros(n, Variable)
var_pt = np.zeros(n, np.int8) # partition type
var_ufo = np.zeros(n, np.bool) # unary factor opt
index = 0
for v in views:
cmd = ("vid, variable_role, init_value, variable_type, cardinality, " +
"ASCII(LEFT(partition_key, 1)), " + # partition key
"ASCII(SUBSTR(partition_key, 2, 1))") # unary factor opt
op = op_template.format(cmd=cmd, table_name=v, filter=sql_filter)
cur.execute(op)
while True:
row = cur.fetchmany(10000)
if row == []:
break
index = get_variables_helper(row, vid, variable,
var_pt, var_ufo, index)
perm = vid.argsort()
vid = vid[perm]
variable = variable[perm]
var_pt = var_pt[perm]
var_ufo = var_ufo[perm]
return vid, variable, var_pt.view('c'), var_ufo
@numba.jit(nopython=True, cache=True, nogil=True)
def get_weights_helper(row, weight):
"""TODO."""
for w in row:
wid = w[0]
weight[wid]["isFixed"] = w[1]
weight[wid]["initialValue"] = w[2]
def get_weights(cur, views, sql_filter="True"):
"""TODO."""
op_template = "SELECT {cmd} FROM {table_name} " \
"WHERE {filter}"
# Obtain count of variables
# TODO: is there a way to do this together with next part?
# (one query per table)
n = 0
for v in views:
op = op_template.format(cmd="COUNT(*)", table_name=v,
filter=sql_filter)
cur.execute(op)
n += cur.fetchone()[0] # number of factors in this table
weight = np.zeros(n, Weight)
index = 0
for v in views:
op = op_template.format(cmd="*", table_name=v, filter=sql_filter)
cur.execute(op)
while True:
row = cur.fetchmany(10000)
if row == []:
break
index = get_weights_helper(row, weight)
return weight
def read_factor_views(cur, views, sql_filter="True"):
"""TODO."""
data = []
op_template = "SELECT * FROM {table_name} " \
"WHERE {filter}"
for v in views:
# Find factor function
ff = -1
for (key, value) in numbskull.inference.FACTORS.items():
if ("_" + key + "_").lower() in v:
assert(ff == -1)
ff = value
# TODO: assume istrue if not found?
# assert(ff != -1)
if ff == -1:
ff = numbskull.inference.FUNC_ISTRUE
op = op_template.format(table_name=v, filter=sql_filter)
cur.execute(op)
while True:
temp = cur.fetchmany()
if temp == []:
break
for i in temp:
data.append((i[:-3], i[-3], i[-2], i[-1], ff))
return data
# views for variables and factors
def read_views(cur, views, sql_filter="True"):
"""TODO."""
data = []
op_template = "SELECT * FROM {table_name} " \
"WHERE {filter}"
for v in views:
op = op_template.format(table_name=v, filter=sql_filter)
cur.execute(op)
while True:
temp = cur.fetchmany()
if temp == []:
break
data += temp
return data
@numba.jit(nopython=True, cache=True, nogil=True)
def inverse_map(forward, index):
"""TODO."""
# TODO: should probably also check that nothing is duplicated?
ans = np.searchsorted(forward, index)
assert(forward[ans] == index)
return ans
@numba.jit(nopython=True, cache=True, nogil=True)
def variable_exists(forward, index):
"""TODO."""
ans = np.searchsorted(forward, index)
return ans < len(forward) and forward[ans] == index
@numba.jit(nopython=True, cache=True, nogil=True)
def remap_fmap(fmap, vid):
"""TODO."""
for i in range(len(fmap)):
fmap[i]["vid"] = inverse_map(vid, fmap[i]["vid"])
@numba.jit(nopython=True, cache=True, nogil=True)
def remap_ufo(ufo, vid):
"""TODO."""
for i in range(len(ufo)):
ufo[i]["vid"] = inverse_map(vid, ufo[i]["vid"])
def get_fg_data(cur, filt, ismaster):
"""TODO."""
print("***GET_FG_DATA***")
# Get names of views
time1 = time.time()
(factor_view, variable_view, weight_view) = get_views(cur)
time2 = time.time()
print("get_views: " + str(time2 - time1))
# Load factors
(fid, factor, factor_pt, factor_ufo, fmap, edges) = \
get_factors(cur, factor_view, filt)
time1 = time2
time2 = time.time()
print("get_factors: " + str(time2 - time1))
# Load variables
(vid, variable, var_pt, var_ufo) = get_variables(cur, variable_view, filt)
time1 = time2
time2 = time.time()
print("get_variables: " + str(time2 - time1))
print("factor: ", factor)
print("factor_pt: ", factor_pt)
print("factor_ufo: ", factor_ufo)
print("fmap: ", fmap)
print("edges: ", edges)
print("vid: ", vid)
print("variable: ", variable)
print("var_pt: ", var_pt)
print("var_ufo: ", var_ufo)
print()
(fmap, vid, variable, var_pt, var_ufo,
pf_list, pf_var_begin, pf_ufo_var_list) = \
process_pf(factor, factor_pt, factor_ufo, fmap, fid,
vid, variable, var_pt, var_ufo, ismaster)
time1 = time2
time2 = time.time()
print("process_pf: " + str(time2 - time1))
print("factor: ", factor)
print("factor_pt: ", factor_pt)
print("factor_ufo: ", factor_ufo)
print("fmap: ", fmap)
print("edges: ", edges)
print("vid: ", vid)
print("variable: ", variable)
print("var_pt: ", var_pt)
print("var_ufo: ", var_ufo)
print()
print("pf_list: ", pf_list)
print("pf_var_begin: ", pf_var_begin)
print("pf_ufo_var_list: ", pf_ufo_var_list)
(factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt,
var_ufo, ufo_send, ufo_recv, ufo_start, ufo_map, ufo_var_begin) = \
process_ufo(factor, factor_pt, factor_ufo, fmap, vid, variable,
var_pt, var_ufo, pf_ufo_var_list, pf_var_begin)
time1 = time2
time2 = time.time()
print("process_ufo: " + str(time2 - time1))
print("factor: ", factor)
print("factor_pt: ", factor_pt)
print("factor_ufo: ", factor_ufo)
print("fmap: ", fmap)
print("edges: ", edges)
print("vid: ", vid)
print("variable: ", variable)
print("var_pt: ", var_pt)
print("var_ufo: ", var_ufo)
print()
# remap factor to variable
remap_fmap(fmap, vid)
time1 = time2
time2 = time.time()
print("remap fmap: " + str(time2 - time1))
# Load weight info
# No filter since weights do not have a partition id
weight = get_weights(cur, weight_view)
time1 = time2
time2 = time.time()
print("get_weight: " + str(time2 - time1))
domain_mask = np.full(len(variable), True, np.bool)
time1 = time2
time2 = time.time()
print("allocate domain_mask: " + str(time2 - time1))
(factors_to_skip, pf_to_send) = \
compute_skipped_factors(factor, factor_pt.view(np.int8), factor_ufo,
fmap, fid, vid, variable,
var_pt.view(np.int8),
var_ufo, pf_list, ismaster)
return (weight, variable, factor, fmap, domain_mask, edges, var_pt,
factor_pt, var_ufo, factor_ufo, fid, vid, ufo_send, ufo_recv,
ufo_start, ufo_map, ufo_var_begin, pf_list, factors_to_skip,
pf_to_send)
@numba.jit(nopython=True, cache=True, nogil=True)
def compute_skipped_factors(factor, factor_pt, factor_ufo, fmap, fid, vid,
variable, var_pt, var_ufo, pf_list, ismaster):
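    # Partition-type codes used throughout this module are raw ASCII values:
    # 65 == 'A', 68 == 'D', 71 == 'G', 80 == 'P' (fake partial-factor variable),
    # 85 == 'U', and 117 == 'u' (unary factor optimization).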
n_pf_send = 0
n_pf_skip = 0
# n_ufo_skip = 0
# UFO's never have to be skipped?
# minion will not generate extra factors for UFO
# and if master has an extra factor, it will always actually be
# using the ufo (and may have partial factors for minions)
for i in range(len(factor)):
if factor_pt[i] == 71: # "G"
if not factor_ufo[i] or ismaster:
n_pf_send += 1
if factor_ufo[i] and ismaster:
n_pf_skip += 1
elif factor_pt[i] == 68:
if not ismaster and not factor_ufo[i]:
n_pf_send += 1
# factors_to_skip = np.empty(n_pf_send + n_ufo_skip, np.int64)
factors_to_skip = np.empty(n_pf_skip, np.int64)
pf_to_send = np.empty(n_pf_send, np.int64)
n_pf_send = 0
n_pf_skip = 0
for i in range(len(factor)):
if factor_pt[i] == 71: # "G"
if not factor_ufo[i] or ismaster:
pf_to_send[n_pf_send] = i
n_pf_send += 1
if factor_ufo[i] and ismaster:
factors_to_skip[n_pf_skip] = i
n_pf_skip += 1
elif factor_pt[i] == 68:
if not ismaster and not factor_ufo[i]:
pf_to_send[n_pf_send] = i
n_pf_send += 1
return factors_to_skip, pf_to_send
def serialize(array):
"""TODO."""
return array.tolist()
# try:
# return array.tobytes().decode('utf16').encode('utf8')
# except:
# return array.tobytes()
def deserialize(array, dtype):
"""TODO."""
try:
return np.array(array, dtype=dtype)
except:
# For UnaryFactorOpt and other complicated dtypes
# Salt converts list of tuples into list of lists,
# which breaks the original version
return np.array([tuple(i) for i in array], dtype=dtype)
# try:
# ar = array.decode('utf8').encode('utf16').lstrip(codecs.BOM_UTF16)
# return np.fromstring(ar, dtype=dtype)
# except:
# return np.fromstring(array, dtype=dtype)
def find_connected_components(conn, cur):
"""TODO."""
# Open a cursor to perform database operations
(factor_view, variable_view, weight_view) = get_views(cur)
(factor, factor_pt, factor_ufo, fmap, edges) = \
get_factors(cur, factor_view)
hyperedges = []
for f in factor:
newedge = []
for i in range(f['ftv_offset'], f['ftv_offset'] + f['arity']):
newedge.append(fmap[i]['vid'])
hyperedges.append(newedge)
G = nx.Graph()
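    # Clique-expand each hyperedge: every pair of variables that co-occur in a
    # factor becomes an ordinary graph edge.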
for e in hyperedges:
for i in range(len(e)):
for j in range(i + 1, len(e)):
newedge = (e[i], e[j])
                G.add_edge(*newedge)
cc = nx.connected_components(G)
try:
cur.execute("CREATE TABLE variable_to_cc "
"(dd_id bigint, cc_id bigint);")
except:
conn.rollback()
cur.execute("TRUNCATE variable_to_cc;")
rows = []
cc_id = 0
for c in cc:
for node in c:
rows.append([node, cc_id])
cc_id += 1
dataText = ','.join(cur.mogrify('(%s,%s)', row) for row in rows)
try:
cur.execute("INSERT INTO variable_to_cc VALUES " + dataText)
if cc_id > 1:
cur.execute("CREATE INDEX dd_cc ON variable_to_cc (dd_id);")
conn.commit()
G.clear()
return True
except:
conn.rollback()
G.clear()
return False
def find_metis_parts(conn, cur, parts):
"""TODO"""
# Open a cursor to perform database operations
(factor_view, variable_view, weight_view) = get_views(cur)
# Obtain graph
(factor, factor_pt, factor_ufo, fmap, edges) = \
get_factors(cur, factor_view)
hyperedges = []
for f in factor:
newedge = []
for i in range(f['ftv_offset'], f['ftv_offset'] + f['arity']):
newedge.append(fmap[i]['vid'])
hyperedges.append(newedge)
G = nx.Graph()
for e in hyperedges:
for i in range(len(e)):
for j in range(i + 1, len(e)):
newedge = (e[i], e[j])
                G.add_edge(*newedge)
# Run metis to obtain partitioning
metis_options = \
nxmetis.MetisOptions(objtype=nxmetis.enums.MetisObjType.vol)
(cost, partitions) = \
nxmetis.partition(G, parts, options=metis_options)
print(80 * "*")
print(cost)
print(partitions)
print(80 * "*")
# Find nodes to master
master_variables = set([])
# Get all edges
cut_edges = set(G.edges())
for p in partitions:
H = G.subgraph(p)
cut_edges -= set(H.edges())
print(H.edges())
H.clear()
for edge in cut_edges:
n1, n2 = edge
master_variables.add(n1)
master_variables.add(n2)
# Store parition in DB
try:
cur.execute("CREATE TABLE variable_to_cc(dd_id bigint, cc_id bigint);")
except:
conn.rollback()
cur.execute("TRUNCATE variable_to_cc;")
rows = []
# Output master variables
for node in master_variables:
rows.append([node, -1])
print(master_variables)
# Output minion variables
pid = 0
for p in partitions:
only_master = True
for node in p:
if node not in master_variables:
only_master = False
rows.append([node, pid])
if not only_master:
pid += 1
print(rows)
dataText = ','.join(cur.mogrify('(%s,%s)', row) for row in rows)
print(dataText)
try:
cur.execute("INSERT INTO variable_to_cc VALUES " + dataText)
if pid > 1:
cur.execute("CREATE INDEX dd_cc ON variable_to_cc (dd_id);")
conn.commit()
G.clear()
return True
except:
conn.rollback()
G.clear()
return False
@numba.jit(cache=True, nogil=True)
def remove_noop(factor, factor_pt, factor_ufo, fmap):
factor_des, fmap_des = \
remove_noop_helper(factor, factor_pt, factor_ufo, fmap)
factor = np.resize(factor, factor_des)
factor_pt = np.resize(factor_pt, factor_des)
factor_ufo = np.resize(factor_ufo, factor_des)
fmap = np.resize(fmap, fmap_des)
return factor, factor_pt, factor_ufo, fmap, fmap_des
@numba.jit(nopython=True, cache=True, nogil=True)
def remove_noop_helper(factor, factor_pt, factor_ufo, fmap):
factor_des = 0
fmap_des = 0
ftv_offset = 0
for factor_src in range(len(factor)):
if factor[factor_src]["factorFunction"] == \
numbskull.inference.FUNC_NOOP:
continue
factor[factor_des] = factor[factor_src]
factor_pt[factor_des] = factor_pt[factor_src]
factor_ufo[factor_des] = factor_ufo[factor_src]
factor[factor_des]["ftv_offset"] = ftv_offset
ftv_offset += factor[factor_des]["arity"]
for i in range(factor[factor_src]["arity"]):
fmap[fmap_des + i] = fmap[factor[factor_src]["ftv_offset"] + i]
fmap_des += factor[factor_des]["arity"]
factor_des += 1
return factor_des, fmap_des
@numba.jit(nopython=True, cache=True, nogil=True)
def find_ufo(factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt,
var_ufo, pf_ufo_var_list, pf_var_begin):
# Count number of factors with UFO
n_ufo_recv = 0 # Number of ufo to receive
n_ufo_send = 0 # Number of ufo to send
for i in range(len(factor)):
if factor_ufo[i]:
exist = 0 # number of vars manifested on this machine
for j in range(factor[i]["arity"]):
vid1 = fmap[factor[i]["ftv_offset"] + j]["vid"]
local_vid = loose_inverse_map(vid, vid1)
exist += ((local_vid != -1) and
(var_pt[local_vid] != 80 or var_ufo[local_vid]))
# (local_vid != -1) specifies that this var must be on this
# machine to exist
# (var_pt[local_vid] != 80 or var_ufo[local_vid])
# part 1 (check against 80) mean that this is not a
# partial factor var
# part 2 is a check that it replaced an ufo var
# Must have exactly one or all vars on this machine
assert(exist == 1 or exist == factor[i]["arity"])
if exist == 1:
# One var is missing
# This machine gets the UFO
n_ufo_recv += 1
else:
# All vars are present
# This machine computes the UFO
n_ufo_send += 1
ufo_recv = np.empty(n_ufo_recv, dtype=UnaryFactorOpt)
ufo_send = np.empty(n_ufo_send, dtype=UnaryFactorOpt)
n_ufo_recv = 0
n_ufo_send = 0
for i in range(len(factor)):
if factor_ufo[i]:
exist = 0 # number of vars manifested on this machine
var = -1
is_pf = False
for j in range(factor[i]["arity"]):
vid1 = fmap[factor[i]["ftv_offset"] + j]["vid"]
local_vid = loose_inverse_map(vid, vid1)
ex = (local_vid != -1) and (var_pt[local_vid] != 80 or var_ufo[local_vid])
exist += ex
if ex:
var = vid1
if (local_vid != -1) and var_pt[local_vid] == 80:
is_pf = True
if exist == 1:
# Only one var on this machine
# This machine receives the ufo
ufo_recv[n_ufo_recv]['vid'] = var
ufo_recv[n_ufo_recv]['weightId'] = factor[i]['weightId']
n_ufo_recv += 1
if not is_pf:
factor[i]["factorFunction"] = numbskull.inference.FUNC_NOOP
else:
# Both on this machine
# Check which is actually the UFO
var = -1
for j in range(factor[i]["arity"]):
vid1 = fmap[factor[i]["ftv_offset"] + j]["vid"]
local_vid = inverse_map(vid, vid1)
is_ufo = var_ufo[local_vid]
if is_ufo:
assert(var == -1) # This must be the first seen
is_pf = (var_pt[local_vid] == 80) # check if this is a partial factor
if is_pf:
var = pf_ufo_var_list[local_vid - pf_var_begin]
else:
var = vid1
# if var == -1:
# # no ufo var found yet
# # this factor must have been partial factored
# # last var has to be the partial factor var
# vid1 = fmap[factor[i]["ftv_offset"] + factor[i]["arity"] - 1]["vid"]
# local_vid = inverse_map(vid, vid1)
# is_pf = (var_pt[local_vid] == 80) # check that this is a partial factor
# assert(is_pf)
# var = pf_ufo_var_list[local_vid - pf_var_begin]
ufo_send[n_ufo_send]['vid'] = var
ufo_send[n_ufo_send]['weightId'] = factor[i]['weightId']
n_ufo_send += 1
return ufo_send, ufo_recv
@numba.jit(nopython=True, cache=True, nogil=True)
def extra_space(vid, variable, ufo_recv):
m_factors = len(ufo_recv)
m_fmap = 0
m_var = 0
for ufo in ufo_recv:
card = variable[ufo["vid"]]["cardinality"]
m_fmap += card
m_var += card - 1
return m_factors, m_fmap, m_var
@numba.jit(nopython=True, cache=True, nogil=True)
def set_ufo(factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt, var_ufo, ufo_recv, n_factors, n_fmap, n_var, vid_max):
# vid_max should just be np.iinfo(vid.dtype).max, but numba doesn't support iinfo
ftv_offset = 0
if len(factor) > 0:
ftv_offset = factor[-1]["ftv_offset"] + factor[-1]["arity"]
n_vid = vid_max - len(vid) + n_var + 1
for (i, ufo) in enumerate(ufo_recv):
card = variable[ufo["vid"]]["cardinality"]
factor[n_factors + i]["factorFunction"] = numbskull.inference.FUNC_UFO
factor[n_factors + i]["weightId"] = ufo["weightId"]
factor[n_factors + i]["featureValue"] = 1 # TODO: feature value may not match
factor[n_factors + i]["arity"] = card
factor[n_factors + i]["ftv_offset"] = ftv_offset
factor_pt[n_factors + i] = 85 # TODO: Does this actually matter at all?
factor_ufo[n_factors + i] = True
fmap[n_fmap]["vid"] = vid[ufo["vid"]]
n_fmap += 1
for j in range(card - 1):
fmap[n_fmap]["vid"] = n_vid
vid[n_var] = n_vid
variable[n_var]["isEvidence"] = 4
variable[n_var]["initialValue"]
variable[n_var]["dataType"]
variable[n_var]["cardinality"]
variable[n_var]["vtf_offset"]
var_pt[n_var] = 85 # TODO: Does this actually matter at all?
var_ufo[n_var] = True
n_vid += 1
n_fmap += 1
n_var += 1
ftv_offset += card
@numba.jit(cache=True, nogil=True)
def add_ufo(factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt, var_ufo, ufo_recv, pf_var_begin):
n_factors = len(factor)
n_fmap = len(fmap)
n_var = len(variable)
m_factors, m_fmap, m_var = extra_space(vid, variable, ufo_recv)
factor = np.resize(factor, n_factors + m_factors)
factor_pt = np.resize(factor_pt, n_factors + m_factors)
factor_ufo = np.resize(factor_ufo, n_factors + m_factors)
fmap = np.resize(fmap, n_fmap + m_fmap)
vid = np.resize(vid, n_var + m_var)
variable = np.resize(variable, n_var + m_var)
var_pt = np.resize(var_pt, n_var + m_var)
var_ufo = np.resize(var_ufo, n_var + m_var)
# need to decrease vids of pf's to not overlap with ufo fake vid
decrease_vid(fmap, vid, m_var, pf_var_begin, n_var)
set_ufo(factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt, var_ufo, ufo_recv, n_factors, n_fmap, n_var, np.iinfo(vid.dtype).max)
return factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt, var_ufo, n_var
@numba.jit(nopython=True, cache=True, nogil=True)
def decrease_vid(fmap, vid, amount, begin, end):
if begin < end:
for i in range(len(fmap)):
if vid[begin] <= fmap[i]["vid"] <= vid[end - 1]:
fmap[i]["vid"] -= amount
vid[begin:end] -= amount
@numba.jit(nopython=True, cache=True, nogil=True)
def ufo_equal(u, v):
"""Numba-compatible equality check."""
# TODO: is there a way to do this in a safer way?
# (in the sense that this changes if def of UFO changes)
return u["vid"] == v["vid"] and \
u["weightId"] == v["weightId"]
@numba.jit(nopython=True, cache=True, nogil=True)
def ufo_less(u, v):
"""Numba-compatible equality check."""
# TODO: is there a way to do this in a safer way?
if u["vid"] != v["vid"]:
return u["vid"] < v["vid"]
return u["weightId"] < v["weightId"]
@numba.jit(nopython=True, cache=True, nogil=True)
def ufo_check_sorted(a):
"""Checks if a numpy-array of ufo's is sorted."""
for i in range(1, len(a)):
assert(ufo_less(a[i - 1], a[i]))
@numba.jit(nopython=True, cache=True, nogil=True)
def ufo_searchsorted(a, b):
begin = -1
end = len(a)
while begin + 1 < end:
        mid = (begin + end) // 2
if ufo_less(a[mid], b):
begin = mid
else:
end = mid
return end
@numba.jit(nopython=True, cache=True, nogil=True)
def compute_ufo_map(factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt, var_ufo, ufo_send, pf_ufo_var_list, pf_var_begin):
ufo_length = np.zeros(ufo_send.size, np.int64)
ufo_start = np.zeros(ufo_send.size + 1, np.int64)
ufo = np.zeros(1, dtype=UnaryFactorOpt)
if len(ufo_send) == 0:
return ufo_length, np.zeros(0, np.int64)
for i in range(len(factor)):
if factor_ufo[i]:
exist = 0 # number of vars manifested on this machine
var = -1
for j in range(factor[i]["arity"]):
vid1 = fmap[factor[i]["ftv_offset"] + j]["vid"]
local_vid = inverse_map(vid, vid1)
ex = (local_vid != -1) and (var_pt[local_vid] != 80 or var_ufo[local_vid])
exist += ex
if ex and var_ufo[local_vid]:
# This variable is on this machine and is ufo
assert(var == -1) # This must be the first seen
is_pf = (var_pt[local_vid] == 80) # check if this is a partial factor
if is_pf:
var = pf_ufo_var_list[local_vid - pf_var_begin]
else:
var = local_vid
# if var == -1:
# # no ufo var found yet
# # this factor must have been partial factored
# # last var has to be the partial factor var
# vid1 = fmap[factor[i]["ftv_offset"] + factor[i]["arity"] - 1]["vid"]
# local_vid = inverse_map(vid, vid1)
# is_pf = (var_pt[local_vid] == 80) # check that this is a partial factor
# assert(is_pf)
# var = pf_ufo_var_list[local_vid - pf_var_begin]
# Must have exactly one or all vars on this machine
assert(exist == 1 or exist == factor[i]["arity"])
if exist == 1:
# Only one var on this machine
# This machine receives the ufo
# No computation will be done
pass
else:
# All vars on this machine
# Will be computing
weightId = factor[i]['weightId']
# TODO: is there a way to not create a list of length 1
ufo[0]["vid"] = var
ufo[0]["weightId"] = weightId
j = ufo_searchsorted(ufo_send, ufo[0])
assert(ufo_equal(ufo_send[j], ufo[0]))
ufo_length[j] += 1
for i in range(ufo_send.size):
ufo_start[i + 1] = ufo_start[i] + ufo_length[i]
ufo_length[i] = 0
ufo_map = np.zeros(ufo_start[-1], np.int64)
for i in range(len(factor)):
if factor_ufo[i]:
exist = 0
var = -1
for j in range(factor[i]["arity"]):
vid1 = fmap[factor[i]["ftv_offset"] + j]["vid"]
local_vid = inverse_map(vid, vid1)
ex = (local_vid != -1) and (var_pt[local_vid] != 80 or var_ufo[local_vid])
exist += ex
if ex and var_ufo[local_vid]:
# This variable is on this machine and is ufo
assert(var == -1) # This must be the first seen
is_pf = (var_pt[local_vid] == 80) # check if this is a partial factor
if is_pf:
var = pf_ufo_var_list[local_vid - pf_var_begin]
else:
var = local_vid
# if var == -1:
# # no ufo var found yet
# # this factor must have been partial factored
# # last var has to be the partial factor var
# vid1 = fmap[factor[i]["ftv_offset"] + factor[i]["arity"] - 1]["vid"]
# local_vid = inverse_map(vid, vid1)
# is_pf = (var_pt[local_vid] == 80) # check that this is a partial factor
# assert(is_pf)
# var = pf_ufo_var_list[local_vid - pf_var_begin]
# Must have exactly one or all vars on this machine
assert(exist == 1 or exist == factor[i]["arity"])
if exist == factor[i]["arity"]:
weightId = factor[i]['weightId']
ufo[0]["vid"] = var
ufo[0]["weightId"] = weightId
j = ufo_searchsorted(ufo_send, ufo[0])
assert(ufo_equal(ufo_send[j], ufo[0]))
ufo_map[ufo_start[j] + ufo_length[j]] = i
ufo_length[j] += 1
return ufo_start, ufo_map
@numba.jit(nopython=True, cache=True, nogil=True)
def compute_ufo_values(factor, fmap, var_value, variable, var_ufo, ufo_send, ufo_start, ufo_map, ufo):
var_copy = 0
ufo_index = 0
for i in range(len(ufo)):
ufo[i] = 0
for i in range(len(ufo_send)):
var_samp = ufo_send[i]["vid"]
for j in range(ufo_start[i], ufo_start[i + 1]):
factor_id = ufo_map[j]
value = 0
f0 = numbskull.inference.eval_factor(factor_id, var_samp, value, var_copy, variable, factor, fmap, var_value)
for value in range(1, variable[var_samp]["cardinality"]):
ufo[ufo_index + value - 1] += numbskull.inference.eval_factor(factor_id, var_samp, value, var_copy, variable, factor, fmap, var_value) - f0
ufo_index += variable[var_samp]["cardinality"] - 1
@numba.jit(nopython=True, cache=True, nogil=True)
def clear_ufo_values(var_value, ufo_var_begin):
for i in range(ufo_var_begin, len(var_value)):
var_value[i] = 0
@numba.jit(nopython=True, cache=True, nogil=True)
def apply_ufo_values(factor, fmap, var_value, ufo_map, ufo_values):
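    # Add the i-th received UFO value onto the second variable of the i-th
    # factor; the factors touched here are expected to be the binary fake UFO
    # factors (hence the arity assertion).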
for i in range(len(ufo_map)):
assert(factor[i]["arity"] == 2)
var_value[fmap[factor[i]["ftv_offset"] + 1]["vid"]] += ufo_values[i]
@numba.jit(cache=True, nogil=True)
def process_pf(factor, factor_pt, factor_ufo, fmap, fid, vid, variable, var_pt, var_ufo, ismaster):
"""Process partial factor."""
pf_var_begin = len(vid)
pf_list = find_pf(factor, factor_pt.view(np.int8), factor_ufo, fmap, fid, vid, variable, var_pt.view(np.int8), var_ufo)
vid = np.resize(vid, len(vid) + len(pf_list))
variable = np.resize(variable, len(variable) + len(pf_list))
var_pt = np.resize(var_pt, len(var_pt) + len(pf_list))
var_ufo = np.resize(var_ufo, len(var_ufo) + len(pf_list))
    # For each partial factor, record one of the vars that was deleted from it;
    # in the case that the factor is also UFO'd, this var is the UFO var.
    pf_ufo_var_list = np.zeros(pf_list.size, np.int64)
ftv_offset = set_pf(factor, factor_pt.view(np.int8), factor_ufo, fmap, fid, vid, variable, var_pt.view(np.int8), var_ufo, pf_var_begin, np.iinfo(vid.dtype).max, ismaster, pf_ufo_var_list)
fmap = np.resize(fmap, ftv_offset)
return fmap, vid, variable, var_pt.view('c'), var_ufo, pf_list, pf_var_begin, pf_ufo_var_list
@numba.jit(nopython=True, cache=True, nogil=True)
def find_pf(factor, factor_pt, factor_ufo, fmap, fid, vid, variable, var_pt, var_ufo):
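    # Two passes: count the factors whose partition key is "D" (and are not
    # UFO'd) or "G", then collect their indices into pf_list.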
count = 0
for i in range(len(factor)):
if ((factor_pt[i] == 68 and not factor_ufo[i]) # "D"
or factor_pt[i] == 71): # "G"
count += 1
pf_list = np.zeros(count, np.int64)
count = 0
for i in range(len(factor)):
if ((factor_pt[i] == 68 and not factor_ufo[i]) # "D"
or factor_pt[i] == 71): # "G"
pf_list[count] = i
count += 1
return pf_list
@numba.jit(nopython=True, cache=True, nogil=True)
def set_pf(factor, factor_pt, factor_ufo, fmap, fid, vid, variable, var_pt, var_ufo, pf_var_begin, vid_max, ismaster, pf_ufo_var_list):
# vid_max should just be np.iinfo(vid.dtype).max, but numba doesn't support iinfo
# Setting fake variables
for i in range(pf_var_begin, len(vid)):
vid[i] = vid_max - len(vid) + i # I think this can have a +1, but it doesn't really matter
variable[i]["isEvidence"] = 4
variable[i]["initialValue"]
variable[i]["dataType"]
variable[i]["cardinality"]
variable[i]["vtf_offset"]
var_pt[i] = 80 # TODO: Does this actually matter at all?
var_ufo[i] = False
ftv_offset = 0
ftv_offset_src = 0
count = 0
for i in range(len(factor)):
factor[i]["ftv_offset"] = ftv_offset
if ((factor_pt[i] == 68 and not factor_ufo[i]) # "D"
or factor_pt[i] == 71): # "G"
# Is a partial factor
arity = 0
var_was_ufo = False
for j in range(factor[i]["arity"]):
                # Minions are loading Au vars right now for PPB with UFO.
                # Because PPB partitions on factors, the var never appears on
                # the minion, but the minion still needs information about it
                # to figure out how to handle the UFO.
local_vid = loose_inverse_map(vid, fmap[ftv_offset_src + j]["vid"])
if local_vid != -1 and (ismaster or var_pt[local_vid] != 65): # "A"
# This variable is owned by this machine
fmap[ftv_offset + arity] = fmap[ftv_offset_src + j]
arity += 1
else:
# This variable is being deleted
# Could be a ufo var
# save for later use
pf_ufo_var_list[count] = fmap[ftv_offset_src + j]["vid"]
var_was_ufo = var_ufo[local_vid]
# In Cu, it is possible for this factor (which is a partial factor)
# to receive some (or all) of its non-owned vars from another machine
# this part swaps everything to the back
back = ftv_offset + factor[i]["arity"] - 1
front = ftv_offset
while front < back:
local_vid = loose_inverse_map(vid, fmap[front]["vid"])
if local_vid != -1 and \
((ismaster and var_pt[local_vid] == 68) # "D"
or (not ismaster and var_pt[local_vid] == 66)): # "B"
# This variable exists on this machine
# but is not owned by the machine
# need to ship the var from another machine
fmap[front], fmap[back] = fmap[back], fmap[front]
back -= 1
front += 1
if arity < factor[i]["arity"]:
# there is enough space allocated for extra vid
# this means that something was deleted
# so a partial factor will be received
fmap[ftv_offset + arity]["vid"] = vid[pf_var_begin + count]
var_ufo[pf_var_begin + count] = var_was_ufo
count += 1
arity += 1
ftv_offset += arity
ftv_offset_src += factor[i]["arity"]
factor[i]["arity"] = arity
else:
for j in range(factor[i]["arity"]):
local_vid = loose_inverse_map(vid, fmap[ftv_offset_src + j]["vid"])
if local_vid != -1 and (ismaster or var_pt[local_vid] != 65): # "A"
fmap[ftv_offset + j] = fmap[ftv_offset_src + j]
ftv_offset += factor[i]["arity"]
ftv_offset_src += factor[i]["arity"]
return ftv_offset
@numba.jit(cache=True, nogil=True)
def process_ufo(factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt, var_ufo, pf_ufo_var_list, pf_var_begin):
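    # Pipeline: find the UFO entries to send/receive, drop factors that became
    # no-ops, dedupe and sort both lists, remap their vids through the local
    # vid table, add fake UFO factors/variables for the entries to be received,
    # and build the (ufo_start, ufo_map) index used by compute_ufo_values.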
time1 = time.time()
ufo_send, ufo_recv = find_ufo(factor, factor_pt.view(np.int8), factor_ufo, fmap, vid, variable, var_pt.view(np.int8), var_ufo, pf_ufo_var_list, pf_var_begin)
time2 = time.time()
print("find_ufo took ", time2 - time1)
factor, factor_pt, factor_ufo, fmap, edges = remove_noop(factor, factor_pt.view(np.int8), factor_ufo, fmap)
time1 = time2
time2 = time.time()
print("remove_noop took ", time2 - time1)
# compute unique
ufo_send = np.unique(ufo_send)
ufo_recv = np.unique(ufo_recv)
ufo_send.sort()
ufo_recv.sort()
# Checking that numpy sort uses the same comparison
ufo_check_sorted(ufo_send)
ufo_check_sorted(ufo_recv)
remap_ufo(ufo_send, vid)
remap_ufo(ufo_recv, vid)
apply_loose_inverse_map(vid, pf_ufo_var_list)
time1 = time2
time2 = time.time()
print("unique + sort took ", time2 - time1)
# add fake factors vars for UFO
factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt, var_ufo, ufo_var_begin = add_ufo(factor, factor_pt.view(np.int8), factor_ufo, fmap, vid, variable, var_pt.view(np.int8), var_ufo, ufo_recv, pf_var_begin)
time1 = time2
time2 = time.time()
print("add_ufo took ", time2 - time1)
# Provide a fast method of finding factors that need to be evaluated for UFO
ufo_start, ufo_map = compute_ufo_map(factor, factor_pt, factor_ufo, fmap, vid, variable, var_pt, var_ufo, ufo_send, pf_ufo_var_list, pf_var_begin)
time1 = time2
time2 = time.time()
print("compute_ufo_map took ", time2 - time1)
return factor, factor_pt.view('c'), factor_ufo, fmap, vid, variable, var_pt.view('c'), var_ufo, ufo_send, ufo_recv, ufo_start, ufo_map, ufo_var_begin
@numba.jit(nopython=True, cache=True, nogil=True)
def compute_map_master(vid, var_pt):
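    # Collect the global vids of variables with partition key "B"; these
    # identify the variable values the master ships to its minions.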
l = 0
for i in range(len(var_pt)):
if var_pt[i] == 66: # 66 = "B"
l += 1
map_to_minions = np.zeros(l, np.int64)
l = 0
for i in range(len(var_pt)):
if var_pt[i] == 66: # 66 = "B"
map_to_minions[l] = vid[i]
l += 1
return map_to_minions
@numba.jit(nopython=True, cache=True, nogil=True)
def compute_map_minion(vid, var_pt):
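    # Collect the global vids of variables with partition key "D"; these
    # identify the variable values a minion ships back to the master.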
l = 0
for i in range(len(var_pt)):
if var_pt[i] == 68: # 68 = "D"
l += 1
map_to_master = np.zeros(l, np.int64)
l = 0
for i in range(len(var_pt)):
if var_pt[i] == 68: # 68 = "D"
map_to_master[l] = vid[i]
l += 1
return map_to_master
@numba.jit(nopython=True, cache=True, nogil=True)
def apply_inverse_map(vid, array):
for i in range(len(array)):
array[i] = inverse_map(vid, array[i])
@numba.jit(nopython=True, cache=True, nogil=True)
def loose_inverse_map(forward, index):
"""TODO."""
ans = np.searchsorted(forward, index)
if ans >= len(forward) or forward[ans] != index:
return -1
return ans
@numba.jit(nopython=True, cache=True, nogil=True)
def apply_loose_inverse_map(vid, array):
for i in range(len(array)):
array[i] = loose_inverse_map(vid, array[i])
@numba.jit(nopython=True, cache=True, nogil=True)
def compute_vars_to_send(map, var_to_send, var_value):
# TODO: handle multiple copies
for (i, m) in enumerate(map):
var_to_send[i] = var_value[m]
@numba.jit(nopython=True, cache=True, nogil=True)
def process_received_vars(map, var_recv, var_value):
for (i, v) in enumerate(var_recv):
m = map[i]
var_value[m] = v
@numba.jit(nopython=True, cache=True, nogil=True)
def ufo_to_factor(ufo, ufo_map, n_factors):
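    # Map each UFO entry to the index of its fake factor; the offset arithmetic
    # below assumes the fake UFO factors sit at the end of the factor array.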
index = np.empty(ufo.size, np.int64)
for i in range(len(ufo)):
j = ufo_searchsorted(ufo_map, ufo[i])
assert(ufo_equal(ufo_map[j], ufo[i]))
index[i] = n_factors - len(ufo_map) + j
return index
@numba.jit(nopython=True, cache=True, nogil=True)
def compute_pf_values(factor, fmap, var_value, variable, pf_list, pf):
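    # Evaluate each partial factor with its trailing placeholder variable
    # excluded (arity is temporarily decremented), and store the partial value
    # so the machine owning the remaining vars can finish the evaluation.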
for i in range(len(pf_list)):
assert(factor[pf_list[i]]["factorFunction"] in [numbskull.inference.FUNC_OR,
numbskull.inference.FUNC_AND,
numbskull.inference.FUNC_ISTRUE])
# TODO: this only works if no vars are shipped from other machine
# There might be more than one var at the end that has to be skipped
factor[pf_list[i]]["arity"] -= 1
factor_id = pf_list[i]
var_samp = -1
value = -1
var_copy = 0
pf[i] = numbskull.inference.eval_factor(factor_id, var_samp, value, var_copy, variable, factor, fmap, var_value)
factor[pf_list[i]]["arity"] += 1
@numba.jit(nopython=True, cache=True, nogil=True)
def apply_pf_values(factor, fmap, var_value, variable, pf_list, pf_values):
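    # Write each received partial-factor value into the factor's placeholder
    # variable (the last var in its fmap slice); -1 entries are skipped.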
for i in range(len(pf_list)):
if pf_list[i] != -1:
fac = factor[pf_list[i]]
var_value[fmap[fac["ftv_offset"] + fac["arity"] - 1]["vid"]] = pf_values[i]
| numbskull-master | salt/src/messages.py |
"""TODO."""
# Import python libs
from __future__ import absolute_import
import logging
import sys
import os
import time
import stat  # for os.chmod mode flags used when clearing the .dfn file
import pydoc
import urlparse
import traceback
# libs for server
import msgpack
import socket
import errno
import signal
# Import salt libs
import salt.utils.event
import salt.utils
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.ext.six as six
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.utils.process import default_signals, \
SignalHandlingMultiprocessingProcess
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import errno
import tornado.util
from salt.utils.process import SignalHandlingMultiprocessingProcess
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
"""Ensure that TCP keepalives are set for the socket."""
if hasattr(socket, 'SO_KEEPALIVE'):
if opts.get('tcp_keepalive', False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'SOL_TCP'):
if hasattr(socket, 'TCP_KEEPIDLE'):
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE,
int(tcp_keepalive_idle))
if hasattr(socket, 'TCP_KEEPCNT'):
tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT,
int(tcp_keepalive_cnt))
if hasattr(socket, 'TCP_KEEPINTVL'):
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl))
if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
1, int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000)))
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingMultiprocessingProcess):
"""
This is a TCP server.
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
"""
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, log_queue=None):
"""TODO."""
super(LoadBalancerServer, self).__init__(log_queue=log_queue)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on
# Windows.
def __setstate__(self, state):
"""TODO."""
self._is_child = True
self.__init__(
state['opts'],
state['socket_queue'],
log_queue=state['log_queue']
)
def __getstate__(self):
"""TODO."""
return {'opts': self.opts,
'socket_queue': self.socket_queue,
'log_queue': self.log_queue}
def close(self):
"""TODO."""
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
def __del__(self):
"""TODO."""
self.close()
def run(self):
"""Start the load balancer."""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts['inf_learn_interface'],
int(self.opts['inf_learn_port'])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except socket.error as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if tornado.util.errno_from_exception(e) == \
errno.ECONNABORTED:
continue
raise
class InfLearnMessageServer(tornado.tcpserver.TCPServer, object):
"""
This is a raw TCP server.
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
"""
def __init__(self, message_handler, logger, *args, **kwargs):
"""TODO."""
super(InfLearnMessageServer, self).__init__(*args, **kwargs)
self.clients = []
self.message_handler = message_handler
self.log = logger
self.log.debug('Inside InfLearnMessageServer')
@tornado.gen.coroutine
def handle_stream(self, stream, address):
"""Handle incoming streams and add messages to the incoming queue."""
self.log.debug('InfLearn client {0} connected'.format(address))
self.clients.append((stream, address))
unpacker = msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
self.io_loop.spawn_callback(self.message_handler, stream,
header, framed_msg['body'])
except tornado.iostream.StreamClosedError:
self.log.debug('InfLearn client disconnected {0}'.format(address))
self.clients.remove((stream, address))
except Exception as e:
self.log.debug('Other minion-side InfLearn '
'exception: {0}'.format(e))
self.clients.remove((stream, address))
stream.close()
def shutdown(self):
"""Shutdown the whole server."""
for item in self.clients:
client, address = item
client.close()
self.clients.remove(item)
if USE_LOAD_BALANCER:
class LoadBalancerWorker(InfLearnMessageServer):
"""
This receives TCP connections.
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
"""
def __init__(self, socket_queue, message_handler, logger, *args,
**kwargs):
"""TODO."""
super(LoadBalancerWorker, self).__init__(
message_handler, logger, *args, **kwargs)
self.socket_queue = socket_queue
t = threading.Thread(target=self.socket_queue_thread)
t.start()
def socket_queue_thread(self):
"""TODO."""
try:
while True:
client_socket, address = self.socket_queue.get(True, None)
# 'self.io_loop' initialized in super class
# 'tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address)
except (KeyboardInterrupt, SystemExit):
pass
class TCPReqServerMinionChannel(object):
"""TODO."""
backlog = 5
def __init__(self, logger, opts, salt):
"""TODO."""
self.log = logger
self._opts = opts
self._socket = None
self._salt = salt
@property
def socket(self):
"""TODO."""
return self._socket
def close(self):
"""TODO."""
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already
# disconnected.
# Ignore this condition and continue.
pass
else:
raise exc
self._socket.close()
self._socket = None
def __del__(self):
"""TODO."""
self.close()
def pre_fork(self, process_manager):
"""Pre-fork we need to initialize socket."""
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self._opts, self.socket_queue)
)
else:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self._opts)
self._socket.setblocking(0)
self._socket.bind((self._opts['inf_learn_interface'],
int(self._opts['inf_learn_port'])))
def post_fork(self, payload_handler, io_loop):
"""
TODO.
After forking we need to create all of the local sockets to listen to
the router
payload_handler: function to call with your payloads
"""
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self._opts)
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(
self.socket_queue, self.handle_message,
self.log, io_loop=self.io_loop
)
else:
self.req_server = InfLearnMessageServer(self.handle_message,
self.log,
io_loop=self.io_loop)
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
def fire_local_event(self, payload):
"""TODO."""
try:
tag = payload['load']['tag']
data = payload['load']['data']
self._salt['event.fire'](data, tag)
return True
        except Exception:
return False
def handle_message(self, stream, header, payload):
"""Handle incoming messages from underylying tcp streams."""
if self.fire_local_event(payload):
try:
stream.write(salt.transport.frame.frame_msg('OK',
header=header))
            except Exception:
raise tornado.gen.Return()
else:
try:
stream.write(salt.transport.frame.frame_msg('ERROR',
header=header))
            except Exception:
raise tornado.gen.Return()
class InfLearnMinionServer(object):
"""TODO."""
def __init__(self, opts, logger, salt, log_queue=None):
"""TODO."""
self.opts = opts
self.log_queue = log_queue
self.log = logger
self.salt = salt
def __bind(self):
"""TODO."""
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
self.process_manager = salt.utils.process.ProcessManager(
name='ReqMinionInfLearnServer_PM'
)
req_channels = []
tcp_only = True
chan = TCPReqServerMinionChannel(self.log, self.opts, self.salt)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
kwargs = {}
with default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['inf_learn_threads'])):
name = 'InfLearnWorker-{0}'.format(ind)
self.process_manager.add_process(InfLearnWorker,
args=(self.opts,
req_channels,
name,
self.log),
kwargs=kwargs,
name=name)
self.process_manager.run()
def run(self):
"""Start up the InfLearnServer."""
self.__bind()
def destroy(self, signum=signal.SIGTERM):
"""TODO."""
if hasattr(self, 'process_manager'):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
def __del__(self):
"""TODO."""
self.destroy()
class InfLearnWorker(SignalHandlingMultiprocessingProcess):
"""
Manages backend operations.
The worker multiprocess instance to manage the backend operations for the
minion during inference and learning.
"""
def __init__(self,
opts,
req_channels,
name,
logger,
**kwargs):
"""
Create a salt minion inference learning worker process.
:param dict opts: The salt options
        :rtype: InfLearnWorker
:return: Inference Learning worker
"""
kwargs['name'] = name
SignalHandlingMultiprocessingProcess.__init__(self, **kwargs)
self.opts = opts
self.log = logger
self.req_channels = req_channels
self.k_mtime = 0
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
"""TODO."""
self._is_child = True
SignalHandlingMultiprocessingProcess.__init__(
self,
log_queue=state['log_queue']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.k_mtime = state['k_mtime']
def __getstate__(self):
"""TODO."""
return {'opts': self.opts,
'req_channels': self.req_channels,
'k_mtime': self.k_mtime,
'log_queue': self.log_queue}
def _handle_signals(self, signum, sigframe):
"""TODO."""
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(InfLearnWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
"""Bind to the local port."""
self.io_loop = LOOP_CLASS()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop)
self.log.debug('Inside worker ' + self.name)
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
"""
TODO.
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
"""
raise tornado.gen.Return(payload)
def run(self):
"""Start a Minion Inference Learning Worker."""
salt.utils.appendproctitle(self.name)
self.__bind()
####################
# ENGINE MAIN LOOP #
####################
def start():
"""TODO."""
log.debug('Starting Numbskull Minion InfLearn Server')
ilServer = InfLearnMinionServer(__opts__, log, __salt__)
ilServer.run()
| numbskull-master | salt/src/numbskull_minion_server.py |
"""TODO."""
from __future__ import absolute_import
from salt.utils.async import SyncWrapper
from salt.transport.client import AsyncChannel
from salt.transport.tcp import SaltMessageClient
import msgpack
import socket
import weakref
import logging
# Import Salt Libs
import salt.payload
import salt.exceptions
import salt.ext.six as six
from salt.exceptions import SaltReqTimeoutError, SaltClientError
# Import Tornado Libs
import tornado
import tornado.ioloop
import tornado.gen
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
log = logging.getLogger(__name__)
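# Channel layering mirrors salt.transport: InfLearn_Channel/InfLearn_ReqChannel
# expose a synchronous factory (via SyncWrapper) around
# InfLearn_AsyncTCPChannel, which holds a SaltMessageClient connected to the
# minion's InfLearn TCP server.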
class InfLearn_Channel(object):
"""TODO."""
@staticmethod
def factory(opts, **kwargs):
"""TODO."""
return InfLearn_ReqChannel.factory(opts, **kwargs)
class InfLearn_ReqChannel(object):
"""Factory to create Sync communication channels to the ReqServer."""
@staticmethod
def factory(opts, **kwargs):
"""TODO."""
# All Sync interfaces are just wrappers around the Async ones
sync = SyncWrapper(InfLearn_AsyncChannel.factory, (opts,), kwargs)
return sync
def send(self, load, tries=3, timeout=60, raw=False):
"""Send "load" to the master."""
raise NotImplementedError()
class InfLearn_AsyncChannel(AsyncChannel):
"""Factory to create Async comm. channels to the ReqServer."""
@classmethod
def factory(cls, opts, **kwargs):
"""TODO."""
if not cls._resolver_configured:
AsyncChannel._config_resolver()
return InfLearn_AsyncTCPChannel(opts, **kwargs)
def send(self, load, tries=3, timeout=60, raw=False):
"""Send 'load' to the minion."""
raise NotImplementedError()
class InfLearn_AsyncTCPChannel(InfLearn_ReqChannel):
"""
Encapsulate sending routines to tcp.
Note: this class returns a singleton
"""
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
"""Only create one instance of channel per __key()."""
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
if key not in loop_instance_map:
log.debug('Initializing new InfLearn_AsyncTCPChannel '
'for {0}'.format(key))
# we need to make a local variable for this, as we are going to
# store it in a WeakValueDictionary-- which will remove the item
# if no one references it-- this forces a reference while we
# return to the caller
new_obj = object.__new__(cls)
new_obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = new_obj
else:
log.debug('Re-using AsyncTCPReqChannel for {0}'.format(key))
return loop_instance_map[key]
@classmethod
def __key(cls, opts, **kwargs):
if 'minion_uri' in kwargs:
opts['minion_uri'] = kwargs['minion_uri']
return (opts['master_uri'])
# must be empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
"""TODO."""
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
"""TODO."""
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
resolver = kwargs.get('resolver')
parse = urlparse.urlparse(self.opts['minion_uri'])
host, port = parse.netloc.rsplit(':', 1)
self.minion_addr = (host, int(port))
self._closing = False
self.message_client = SaltMessageClient(
self.opts, host, int(port), io_loop=self.io_loop,
resolver=resolver)
def close(self):
"""TODO."""
if self._closing:
return
self._closing = True
self.message_client.close()
def __del__(self):
"""TODO."""
self.close()
def _package_load(self, load):
"""TODO."""
return {'load': load}
@tornado.gen.coroutine
def _transfer(self, load, tries=3, timeout=60):
"""TODO."""
ret = yield self.message_client.send(self._package_load(load),
timeout=timeout)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
"""
Send a request.
Returns a future which will complete when we send the message
"""
try:
ret = yield self._transfer(load, tries=tries, timeout=timeout)
except tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError('Connection to minion lost')
raise tornado.gen.Return(ret)
| numbskull-master | salt/src/numbskull_master_client.py |
"""TODO."""
# Import python libs
from __future__ import print_function, absolute_import
import json
import logging
import sys
import os
import time
import argparse
import numpy as np
import codecs
import pydoc
import psycopg2
import urlparse
import traceback
# Import salt libs
import salt.utils.event
# Import numbskull
m_opts = salt.config.minion_config(os.environ['SALT_CONFIG_DIR'] + '/minion')
sys.path.append(m_opts['extension_modules'] + '/modules')
import numbskull
from numbskull import numbskull
from numbskull.numbskulltypes import *
import messages
log = logging.getLogger(__name__)
class NumbskullMinion:
"""TODO."""
def __init__(self):
"""TODO."""
self.partitionId = None
self.args = None
self.ns = None
def parse_args(self, argv):
"""TODO."""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description="Runs a Gibbs sampler",
epilog="")
# Add version to parser
parser.add_argument("--version",
action='version',
version="%(prog)s 0.0",
help="print version number")
# Add execution arguments to parser
for arg, opts in numbskull.arguments:
parser.add_argument(*arg, **opts)
# Add flags to parser
for arg, opts in numbskull.flags:
parser.add_argument(*arg, **opts)
# Initialize NumbSkull #
args = parser.parse_args(argv)
return args
def init_numbskull(self, argv):
"""TODO."""
self.args = self.parse_args(argv)
self.ns = numbskull.NumbSkull(**vars(self.args))
def loadFG(self, data):
"""TODO."""
try:
weight = np.fromstring(data['weight'], dtype=Weight)
variable = np.fromstring(data['variable'], dtype=Variable)
factor = messages.deserialize(data['factor'], Factor)
fmap = np.fromstring(data['fmap'], dtype=FactorToVar)
vmap = np.fromstring(data['vmap'], dtype=VarToFactor)
factor_index = np.fromstring(data['factor_index'], dtype=np.int64)
        except Exception:
return 'FAILED LOADING', None
try:
self.ns.loadFactorGraphRaw(weight, variable, factor,
fmap, vmap, factor_index)
fg = self.ns.factorGraphs[-1]
meta = {}
meta['weights'] = fg.weight.shape[0]
meta['variables'] = fg.variable.shape[0]
meta['factors'] = fg.factor.shape[0]
return 'SUCCESS', meta
        except Exception:
return 'FAILED', None
def learning(self, fgID):
"""TODO."""
try:
self.ns.learning(fgID, False)
weights = self.ns.factorGraphs[fgID].weight_value
return 'SUCCESS', messages.serialize(weights)
        except Exception:
return 'FAILED', None
def inference(self, fgID=0):
"""TODO."""
try:
self.ns.inference(fgID, False)
marginals = self.ns.factorGraphs[fgID].marginals
return 'SUCCESS', messages.serialize(marginals)
        except Exception:
return 'FAILED', None
def start():
"""TODO."""
log.debug('Initializing Numbskull Minion Engine')
ns_minion = NumbskullMinion()
event_bus = salt.utils.event.get_event(
'minion',
transport=__opts__['transport'],
opts=__opts__,
sock_dir=__opts__['sock_dir'],
listen=True)
log.debug('Starting Numbskull Minion Engine')
partition_id = -1
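    # Event protocol (tags defined in messages): ASSIGN_ID sets this minion's
    # partition id, INIT_NS builds the local NumbSkull instance, LOAD_FG pulls
    # this partition's factor graph shard from the database, SYNC_MAPPING
    # exchanges variable / partial-factor / UFO maps with the master, and
    # INFER / LEARN run one epoch and send the results back.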
for evdata in event_bus.iter_events(full=True):
loop_begin = time.time()
tag, data = evdata['tag'], evdata['data']
if tag == messages.ASSIGN_ID:
partition_id = data['id']
print("Assigned partition id #", partition_id)
# TODO: respond to master
elif tag == messages.INIT_NS:
try:
ns_minion.init_numbskull(data['argv'])
# Respond OK to master
data = {'status': 'OK'}
__salt__['event.send'](messages.INIT_NS_RES, data)
            except Exception:
# Respond FAIL to master
data = {'status': 'FAIL'}
__salt__['event.send'](messages.INIT_NS_RES, data)
elif tag == messages.LOAD_FG:
# Connect to an existing database
# http://stackoverflow.com/questions/15634092/connect-to-an-uri-in-postgres
db_url = data["db_url"]
url = urlparse.urlparse(db_url)
username = url.username
password = url.password
database = url.path[1:]
hostname = url.hostname
port = url.port
conn = psycopg2.connect(
database=database,
user=username,
password=password,
host=hostname,
port=port
)
# Open a cursor to perform database operations
cur = conn.cursor()
# TODO: the Au filter should only be for variables
# For partition scheme PPB with UFO,
# variables are missing from the minion
# and the minion needs to know the cardinality
minion_filter = " partition_key similar to 'Au' " \
"or partition_key similar to 'B(|u)' " \
"or partition_key similar to 'C(|u){partition_id}' " \
"or partition_key similar to 'D(|u){partition_id}' " \
"or partition_key similar to 'E(|u){partition_id}' " \
"or partition_key similar to 'F(|u){partition_id}' " \
"or partition_key similar to 'G(|um|uw|umw){partition_id}' " \
"or partition_key similar to 'H(|u)' "
minion_filter = minion_filter.format(partition_id=partition_id)
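            # The partition_key patterns above select the rows belonging to
            # this minion's shard; keys C..H are suffixed with the minion's
            # partition_id.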
(weight, variable, factor, fmap, domain_mask, edges, var_pt,
factor_pt, var_ufo, factor_ufo, fid, vid, ufo_send, ufo_recv, ufo_start, ufo_map, ufo_var_begin, pf_list, factors_to_skip, pf_to_send) = \
messages.get_fg_data(cur, minion_filter, False)
# Close communication with the database
cur.close()
conn.close()
variable[var_pt == "B"]["isEvidence"] = 4 # not owned var type
ns_minion.ns.loadFactorGraph(weight, variable, factor, fmap,
domain_mask, edges, 1, 1, factors_to_skip)
# Respond to master
data = {}
__salt__['event.send'](messages.LOAD_FG_RES, data)
log.debug("DONE LOADFG")
elif tag == messages.SYNC_MAPPING:
# receive map from master
map_from_master = messages.deserialize(data["map"], np.int64)
pf_from_master = messages.deserialize(data["pf"], np.int64)
messages.apply_loose_inverse_map(fid, pf_from_master)
# compute map
map_to_master = messages.compute_map_minion(vid, var_pt.view(np.int8))
ufo_map_to_master = ufo_send.copy()
ufo_map_to_master["vid"] = vid[ufo_map_to_master["vid"]]
data = {"pid": partition_id,
"map": messages.serialize(map_to_master),
"pf": messages.serialize(fid[pf_to_send]),
"ufo": messages.serialize(ufo_map_to_master)}
__salt__['event.send'](messages.SYNC_MAPPING_RES, data)
messages.apply_inverse_map(vid, map_from_master)
messages.apply_inverse_map(vid, map_to_master)
variables_to_master = np.zeros(map_to_master.size, np.int64)
var_evid_to_master = np.zeros(map_to_master.size, np.int64)
pf_to_master = np.zeros(pf_to_send.size, np.int64)
pf_evid_to_master = np.zeros(pf_to_send.size, np.int64)
m_factors, m_fmap, m_var = messages.extra_space(vid, variable, ufo_send)
ufo_to_master = np.empty(m_var, np.int64)
ufo_evid_to_master = np.empty(m_var, np.int64)
log.debug("DONE SYNC_MAPPING")
elif tag == messages.INFER or tag == messages.LEARN:
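            # One epoch: install the variable and partial-factor values
            # received from the master, run sampling (and weight updates when
            # learning), then send back owned variable values, partial-factor
            # values, UFO aggregates, and (for learning) the weight delta.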
variables_from_master = \
messages.deserialize(data["values"], np.int64)
messages.process_received_vars(map_from_master, variables_from_master, ns_minion.ns.factorGraphs[-1].var_value[0])
messages.apply_pf_values(factor, fmap, ns_minion.ns.factorGraphs[-1].var_value[0], variable, pf_from_master, messages.deserialize(data["pf"], np.int64))
if tag == messages.LEARN:
var_evid_from_master = \
messages.deserialize(data["v_evid"], np.int64)
messages.process_received_vars(map_from_master, var_evid_from_master, ns_minion.ns.factorGraphs[-1].var_value_evid[0])
messages.apply_pf_values(factor, fmap, ns_minion.ns.factorGraphs[-1].var_value_evid[0], variable, pf_from_master, messages.deserialize(data["pf_evid"], np.int64))
ns_minion.ns.factorGraphs[-1].weight_value[0] = \
messages.deserialize(data["weight"], np.float64)
w0 = ns_minion.ns.factorGraphs[-1].weight_value[0]
begin = time.time()
fgID = 0
if tag == messages.LEARN:
ns_minion.ns.learning(fgID, False)
else:
ns_minion.ns.inference(fgID, False)
end = time.time()
log.debug("INFERENCE LOOP TOOK " + str(end - begin))
# Respond to master
messages.compute_vars_to_send(map_to_master, variables_to_master, ns_minion.ns.factorGraphs[-1].var_value[0])
messages.compute_pf_values(factor, fmap, ns_minion.ns.factorGraphs[-1].var_value, variable, pf_to_send, pf_to_master)
messages.compute_ufo_values(factor, fmap, ns_minion.ns.factorGraphs[-1].var_value, variable, var_ufo, ufo_send, ufo_start, ufo_map, ufo_to_master)
print(80 * "*")
print(ns_minion.ns.factorGraphs[-1].var_value)
print(ufo_to_master)
if tag == messages.INFER:
data = {"pid": partition_id,
"values": messages.serialize(variables_to_master),
"pf": messages.serialize(pf_to_master),
"ufo": messages.serialize(ufo_to_master)}
__salt__['event.send'](messages.INFER_RES, data)
else:
messages.compute_vars_to_send(map_to_master, var_evid_to_master, ns_minion.ns.factorGraphs[-1].var_value_evid[0])
messages.compute_pf_values(factor, fmap, ns_minion.ns.factorGraphs[-1].var_value_evid, variable, pf_to_send, pf_evid_to_master)
messages.compute_ufo_values(factor, fmap, ns_minion.ns.factorGraphs[-1].var_value_evid, variable, var_ufo, ufo_send, ufo_start, ufo_map, ufo_evid_to_master)
dweight = ns_minion.ns.factorGraphs[-1].weight_value[0] - w0
data = {"pid": partition_id,
"values": messages.serialize(variables_to_master),
"v_evid": messages.serialize(var_evid_to_master),
"pf": messages.serialize(pf_to_master),
"pf_evid": messages.serialize(pf_evid_to_master),
"ufo": messages.serialize(ufo_to_master),
"ufo_evid": messages.serialize(ufo_to_master),
"dw": messages.serialize(dweight)}
__salt__['event.send'](messages.LEARN_RES, data)
loop_end = time.time()
print("*****" + tag + " took " + str(loop_end - loop_begin) + "*****")
| numbskull-master | salt/src/numbskull_minion.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""TODO."""
# Import python libs
from __future__ import print_function, absolute_import
import json
import logging
import os.path
import numbskull
from numbskull import numbskull
from numbskull.numbskulltypes import *
import argparse
import sys
import subprocess
import numpy as np
import codecs
from multiprocessing import Pool
from functools import partial
# Import salt libs
import salt.utils.event
import salt.client
import salt.runner
import salt.config
import messages
import time
import psycopg2
import urlparse
import numbskull_master_client
from numbskull_master_client import InfLearn_Channel
master_conf_dir = \
os.path.join(os.environ['SALT_CONFIG_DIR'], 'master')
salt_opts = salt.config.client_config(master_conf_dir)
def send_to_minion(data, tag, tgt):
"""TODO."""
salt_opts['minion_uri'] = 'tcp://{ip}:{port}'.format(
ip=salt.utils.ip_bracket(tgt),
port=7341 # TODO, no fallback
)
load = {'id': 'master_inflearn',
'tag': tag,
'data': data}
channel = InfLearn_Channel.factory(salt_opts)
channel.send(load)
return True
class NumbskullMaster:
"""TODO."""
def __init__(self, application_dir, machines,
partition_method, partition_scheme, use_ufo, partition_type, argv):
"""TODO."""
# Salt conf init
self.master_conf_dir = master_conf_dir
self.salt_opts = salt_opts
# Salt clients init
self.local_client = salt.client.LocalClient(self.master_conf_dir)
self.runner = salt.runner.RunnerClient(self.salt_opts)
# Salt event bus - used to communicate with minions
self.event_bus = salt.utils.event.get_event(
'master',
sock_dir=self.salt_opts['sock_dir'],
transport=self.salt_opts['transport'],
opts=self.salt_opts)
# Numbskull-related variables
self.argv = argv
self.args = self.parse_args(argv)
self.ns = None
self.application_dir = application_dir
self.num_minions = machines
self.partition_scheme = partition_scheme
self.use_ufo = use_ufo
self.partition_type = partition_type
# Partitioning variables
self.partition_method = partition_method
# DB variables
self.db_url = None
self.conn = None
def initialize(self):
"""TODO."""
time1 = time.time()
self.assign_partition_id()
time2 = time.time()
print("assign_partition_id took " + str(time2 - time1))
self.prep_numbskull()
time3 = time.time()
print("prep_numbskull took " + str(time3 - time2))
# Open DB connection
self.open_db_connection()
# Open a cursor to perform database operations
cur = self.conn.cursor()
out = self.prepare_db(cur)
time4 = time.time()
print("prepare_db took " + str(time4 - time3))
self.load_all_fg(self.db_url, cur)
time5 = time.time()
print("load_all_fg took " + str(time5 - time4))
# Close communication with the database
cur.close()
self.conn.close()
self.sync_mapping()
time6 = time.time()
print("sync_mapping took " + str(time6 - time5))
    # Learning and inference share most of their code (computing the variable
    # map, handling partial factors), so both live in inference();
    # learning() is just a trivial wrapper.
def learning(self, epochs=1):
"""TODO."""
return self.inference(epochs, True)
def inference(self, epochs=1, learn=False):
"""TODO."""
mode = "Learning" if learn else "Inference"
print("BEGINNING " + mode.upper())
begin = time.time()
variables_to_minions = np.zeros(self.map_to_minions.size, np.int64)
var_evid_to_minions = np.zeros(self.map_to_minions.size, np.int64)
pf_to_minions = np.zeros(self.pf_to_send.size, np.int64)
pf_evid_to_minions = np.zeros(self.pf_to_send.size, np.int64)
print("***LENGTHS***")
print("Variables to minions: ", len(variables_to_minions))
print("Partial Factors to minions: ", len(pf_to_minions))
print("Variables from Minions: ", sum([len(i) for i in self.map_from_minion]))
print("Partial Factors from Minions: ", sum([len(i) for i in self.pf_from_minion]))
print("UFO from Minions: ", sum([len(i) for i in self.ufo_from_minion]))
print("*************")
for i in range(epochs):
print(mode + " loop " + str(i))
# sample own variables
begin1 = time.time()
fgID = 0
if learn:
self.ns.learning(fgID, False)
else:
self.ns.inference(fgID, False)
end1 = time.time()
print(mode + " LOOP TOOK " + str(end1 - begin1))
# gather values to ship to minions
# TODO: handle multiple copies
messages.compute_vars_to_send(self.map_to_minions, variables_to_minions, self.ns.factorGraphs[-1].var_value[0])
messages.compute_pf_values(self.factor, self.fmap, self.ns.factorGraphs[-1].var_value, self.variable, self.pf_to_send, pf_to_minions)
if learn:
messages.compute_vars_to_send(self.map_to_minions, var_evid_to_minions, self.ns.factorGraphs[-1].var_value_evid[0])
messages.compute_pf_values(self.factor, self.fmap, self.ns.factorGraphs[-1].var_value_evid, self.variable, self.pf_to_send, pf_evid_to_minions)
# Tell minions to sample
beginTest = time.time()
if learn:
tag = messages.LEARN
# TODO: which copy of weight to use when multiple
weight_value = self.ns.factorGraphs[-1].weight_value[0]
data = {"values": messages.serialize(variables_to_minions),
"v_evid": messages.serialize(var_evid_to_minions),
"pf": messages.serialize(pf_to_minions),
"pf_evid": messages.serialize(pf_evid_to_minions),
"weight": messages.serialize(weight_value)}
else:
tag = messages.INFER
data = {"values": messages.serialize(variables_to_minions),
"pf": messages.serialize(pf_to_minions)}
if self.num_minions != 0:
pub_func = partial(send_to_minion, data, tag)
self.clientPool.imap(pub_func, self.minion2host.values())
endTest = time.time()
print("EVENT FIRE LOOP TOOK " + str(endTest - beginTest))
messages.clear_ufo_values(self.ns.factorGraphs[-1].var_value[0], self.ufo_var_begin)
if learn:
messages.clear_ufo_values(self.ns.factorGraphs[-1].var_value_evid[0], self.ufo_var_begin)
resp = 0
while resp < len(self.minions):
tag = messages.LEARN_RES if learn else messages.INFER_RES
evdata = self.event_bus.get_event(wait=5,
tag=tag,
full=True)
if evdata:
resp += 1
data = evdata['data']['data']
pid = data["pid"]
# Process variables from minions
vfmin = messages.deserialize(data["values"], np.int64)
messages.process_received_vars(self.map_from_minion[pid], vfmin, self.ns.factorGraphs[-1].var_value[0])
messages.apply_pf_values(self.factor, self.fmap, self.ns.factorGraphs[-1].var_value[0], self.variable, self.pf_from_minion[pid], messages.deserialize(data["pf"], np.int64))
messages.apply_ufo_values(self.factor, self.fmap, self.ns.factorGraphs[-1].var_value[0], self.ufo_from_minion[pid], messages.deserialize(data["ufo"], np.int64))
if learn:
vfmin = messages.deserialize(data["v_evid"], np.int64)
messages.process_received_vars(self.map_from_minion[pid], vfmin, self.ns.factorGraphs[-1].var_value_evid[0])
messages.apply_pf_values(self.factor, self.fmap, self.ns.factorGraphs[-1].var_value[0], self.variable, self.pf_from_minion[pid], messages.deserialize(data["pf_evid"], np.int64))
messages.apply_ufo_values(self.factor, self.fmap, self.ns.factorGraphs[-1].var_value_evid[0], self.ufo_from_minion[pid], messages.deserialize(data["ufo_evid"], np.int64))
self.ns.factorGraphs[-1].weight_value[0] += \
messages.deserialize(data["dw"], np.float64)
end1 = time.time()
print("FULL " + mode + " LOOP TOOK " + str(end1 - begin1) + "\n")
# TODO: get and return marginals
# TODO: switch to proper probs
end = time.time()
print(mode + " TOOK", end - begin)
return end - begin
##############
# Init Phase #
##############
def assign_partition_id(self):
"""TODO."""
while True:
self.minions = self.get_minions_status()['up']
if len(self.minions) >= self.num_minions:
break
print("Waiting for minions (" + str(len(self.minions)) +
" / " + str(self.num_minions) + ")")
time.sleep(1)
print("Minions obtained")
self.minions = self.minions[:self.num_minions]
for (i, name) in enumerate(self.minions):
data = {'id': i}
newEvent = self.local_client.cmd([name],
'event.fire',
[data, messages.ASSIGN_ID],
expr_form='list')
# TODO: listen for responses
# Obtain minions ip addresses
self.minion2host = \
self.local_client.cmd(self.minions, 'grains.get', ['localhost'],
expr_form='list', timeout=None)
# Initialize multiprocessing pool for publishing
if self.num_minions != 0:
self.clientPool = Pool(self.num_minions)
def prep_numbskull(self):
"""TODO."""
# Setup local instance
self.prep_local_numbskull()
# Setup minion instances
success = self.prep_minions_numbskull()
if not success:
print('ERROR: Numbksull not loaded')
def open_db_connection(self):
# obtain database url from file
with open(self.application_dir + "/db.url", "r") as f:
self.db_url = f.read().strip()
# Connect to an existing database
# http://stackoverflow.com/questions/15634092/connect-to-an-uri-in-postgres
url = urlparse.urlparse(self.db_url)
username = url.username
password = url.password
database = url.path[1:]
hostname = url.hostname
port = url.port
self.conn = psycopg2.connect(
database=database,
user=username,
password=password,
host=hostname,
port=port
)
def run_deepdive(self):
# Call deepdive to perform everything up to grounding
# TODO: check that deepdive ran successfully
cmd = ["deepdive", "do", "all"]
subprocess.call(cmd, cwd=self.application_dir)
def run_ddlog(self):
# semantic partitioning
if self.partition_method == 'sp':
cmd = ["ddlog", "semantic-partition", "app.ddlog",
self.partition_scheme,
"--workers", str(self.num_minions),
"--cost-model", "simple.costmodel.txt"]
if self.use_ufo:
cmd.append("-u")
partition_json = subprocess.check_output(cmd,
cwd=self.application_dir)
partition = json.loads(partition_json)
return partition
# Metis or connected components based partitioning
elif self.partition_method == 'metis' or self.partition_method == 'cc':
cmd = ["ddlog", "cc-partition", "app.ddlog",
"--workers", str(self.num_minions)]
partition_json = subprocess.check_output(cmd,
cwd=self.application_dir)
partition = json.loads(partition_json)
return partition
# Default
else:
print('ERROR: Invalid partition method!')
return False
def get_fg(self, cur):
"""TODO"""
master_filter = " partition_key similar to 'A(|u)' " \
"or partition_key similar to 'B(|u)' " \
"or partition_key similar to 'D(|u)%' " \
"or partition_key similar to 'F(|u)%' " \
"or partition_key similar to 'G(|u)%' " \
"or partition_key similar to 'H(|u)%' "
get_fg_data_begin = time.time()
(self.weight, self.variable, self.factor, self.fmap, domain_mask, edges, self.var_pt,
self.factor_pt, self.var_ufo, self.factor_ufo, self.fid, self.vid, self.ufo_send, self.ufo_recv, self.ufo_start, self.ufo_map, self.ufo_var_begin, self.pf_list, factors_to_skip, self.pf_to_send) = \
messages.get_fg_data(cur, master_filter, True)
get_fg_data_end = time.time()
print("Done running get_fg_data: " +
str(get_fg_data_end - get_fg_data_begin))
self.variable[self.var_pt == "D"]["isEvidence"] = 4 # not owned var type
self.ns.loadFactorGraph(self.weight, self.variable, self.factor, self.fmap,
domain_mask, edges, 1, 1, factors_to_skip)
def prepare_db(self, cur):
"""TODO."""
# Run deepdive to perform candidate extraction
self.run_deepdive()
# Obtain partition information
partition = self.run_ddlog()
if not partition:
return False
# Define functions that sql needs
for op in partition[0]["sql_prefix"]:
cur.execute(op)
# Make the changes to the database persistent
self.conn.commit()
        # Check if partitioning is metis or cc
if self.partition_method == 'metis':
messages.find_metis_parts(self.conn, cur, self.num_minions)
p0 = partition[0]
elif self.partition_method == 'cc':
messages.find_connected_components(self.conn, cur)
p0 = partition[0]
elif self.partition_method == 'sp':
begin = time.time()
# Select which partitioning scheme to use
if self.partition_type is not None:
# Type was prespecified
for p in partition:
if p["partition_types"] == self.partition_type:
p0 = p
else:
# Evaluating costs
print(80 * "*")
optimal_cost = None
for p in partition:
cur.execute(p["sql_to_cost"])
cost = cur.fetchone()[0]
print('Partition scheme "' + p["partition_types"] +
'" has cost ' + str(cost))
if optimal_cost is None or cost < optimal_cost:
optimal_cost = cost
p0 = p
print(80 * "*")
# p0 is partition to use
for k in p0.keys():
print(k)
print(p0[k])
print()
# This adds partition information to the database
print("Running sql_to_apply")
sql_to_apply_begin = time.time()
for op in p0["sql_to_apply"]:
cur.execute(op)
# Make the changes to the database persistent
self.conn.commit()
sql_to_apply_end = time.time()
print("Done running sql_to_apply: " +
str(sql_to_apply_end - sql_to_apply_begin))
def load_all_fg(self, db_url, cur):
"""TODO."""
tag = messages.LOAD_FG
data = {"db_url": db_url}
newEvent = self.local_client.cmd(self.minions,
'event.fire',
[data, tag],
expr_form='list')
begin = time.time()
# Grab factor graph data
self.get_fg(cur)
end = time.time()
print("get_fg", end - begin)
print("WAITING FOR MINION LOAD_FG_RES")
begin = time.time()
resp = 0
while resp < len(self.minions):
evdata = self.event_bus.get_event(wait=5,
tag=messages.LOAD_FG_RES,
full=True)
if evdata:
resp += 1
end = time.time()
print("DONE WAITING FOR MINION LOAD_FG_RES", end - begin)
def sync_mapping(self):
"""TODO."""
self.map_to_minions = messages.compute_map_master(self.vid, self.var_pt.view(np.int8))
print(self.fid[self.pf_list])
# send mapping to minions
tag = messages.SYNC_MAPPING
data = {"map": messages.serialize(self.map_to_minions),
"pf": messages.serialize(self.fid[self.pf_to_send])}
if self.num_minions != 0:
pub_func = partial(send_to_minion, data, tag)
self.clientPool.imap(pub_func, self.minion2host.values())
#newEvent = self.local_client.cmd(self.minions,
# 'event.fire',
# [data, tag],
# expr_form='list')
self.map_from_minion = [None for i in range(len(self.minions))]
self.pf_from_minion = [None for i in range(len(self.minions))]
self.ufo_from_minion = [None for i in range(len(self.minions))]
self.ufo_map_from_minion = self.ufo_recv.copy()
self.ufo_map_from_minion["vid"] = self.vid[self.ufo_map_from_minion["vid"]]
resp = 0
while resp < len(self.minions):
# receive map and save
evdata = self.event_bus.get_event(wait=5,
tag=messages.SYNC_MAPPING_RES,
full=True)
if evdata:
tag, data = evdata['tag'], evdata['data']['data']
pid = data["pid"]
self.map_from_minion[pid] = \
messages.deserialize(data["map"], np.int64)
messages.apply_inverse_map(self.vid, self.map_from_minion[pid])
self.pf_from_minion[pid] = messages.deserialize(data["pf"], np.int64)
messages.apply_inverse_map(self.fid, self.pf_from_minion[pid])
self.ufo_from_minion[pid] = messages.ufo_to_factor(messages.deserialize(data["ufo"], UnaryFactorOpt), self.ufo_map_from_minion, len(self.factor_pt))
resp += 1
print("DONE WITH SENDING MAPPING")
messages.apply_inverse_map(self.vid, self.map_to_minions)
# Helper
def parse_args(self, argv):
"""TODO."""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description="Runs a Gibbs sampler",
epilog="")
# Add version to parser
parser.add_argument("--version",
action='version',
version="%(prog)s 0.0",
help="print version number")
# Add execution arguments to parser
for arg, opts in numbskull.arguments:
parser.add_argument(*arg, **opts)
# Add flags to parser
for arg, opts in numbskull.flags:
parser.add_argument(*arg, **opts)
# Initialize NumbSkull #
args = parser.parse_args(argv)
return args
def get_minions_status(self):
"""TODO."""
minion_status = self.runner.cmd('manage.status')
print("***** MINION STATUS REPORT *****")
print(minion_status)
print("UP: ", len(minion_status['up']))
print("DOWN: ", len(minion_status['down']))
print()
return minion_status
def prep_local_numbskull(self):
"""TODO."""
self.ns = numbskull.NumbSkull(**vars(self.args))
def prep_minions_numbskull(self):
"""TODO."""
# send args and initialize numbskull at minion
data = {'argv': self.argv}
newEvent = self.local_client.cmd(self.minions,
'event.fire',
[data, messages.INIT_NS],
expr_form='list')
# wait for ACK from minions
SUCCESS = True
resp = []
while len(resp) < len(self.minions):
evdata = self.event_bus.get_event(wait=5,
tag=messages.INIT_NS_RES,
full=True)
if evdata:
tag, data = evdata['tag'], evdata['data']
if data['data']['status'] == 'FAIL':
print('ERROR: Minion %s failed to load numbskull.'
% data['id'])
SUCCESS = False
resp.append((data['id'], SUCCESS))
if SUCCESS:
print('SUCCESS: All minions loaded numbskull.')
return SUCCESS
def main(application_dir, machines, threads_per_machine,
learning_epochs, inference_epochs,
partition_method, partition_scheme, use_ufo, partition_type=None):
"""TODO."""
# Inputs for experiments:
# - dataset
# - number of machines
# - number of threads per machine
# - learning/inference epochs
# - sweeps per epoch
# Return values:
# - Time for database
# - Time for loading
# - Time for learning
# - Time for inference
# - Memory usage (master, all minions)
# TODO: how to automate partition selection
args = ['-l', '1',
'-i', '1',
'-t', str(threads_per_machine),
'-s', '0.01',
'--regularization', '2',
'-r', '0.1',
'--quiet']
ns_master = NumbskullMaster(application_dir,
machines,
partition_method,
partition_scheme,
use_ufo,
partition_type,
args)
ns_master.initialize()
learn_time = ns_master.learning(learning_epochs)
infer_time = ns_master.inference(inference_epochs)
return ns_master, {"learning_time": learn_time,
"inference_time": infer_time}
if __name__ == "__main__":
if len(sys.argv) == 7 or \
len(sys.argv) == 8 or \
len(sys.argv) == 9:
application_dir = sys.argv[1]
machines = int(sys.argv[2])
threads_per_machine = int(sys.argv[3])
learning_epochs = int(sys.argv[4])
inference_epochs = int(sys.argv[5])
partition_method = sys.argv[6]
assert(partition_method == "cc" or
partition_method == "metis" or
partition_method == "sp")
partition_scheme = None
use_ufo = False
if partition_method == "sp":
assert(len(sys.argv) >= 8)
partition_scheme = "--pp" + sys.argv[7][0]
if len(sys.argv[7]) > 1 and sys.argv[7][1] == "u":
use_ufo = True
partition_type = None
if len(sys.argv) == 9:
partition_type = sys.argv[8]
main(application_dir, machines, threads_per_machine,
learning_epochs, inference_epochs,
partition_method, partition_scheme, use_ufo, partition_type)
else:
print("Usage: " + sys.argv[0] +
" application_dir" +
" machines" +
" threads_per_machine" +
" learning_epochs" +
" inference_epochs" +
" partition_method (cc, metis, sp)" +
" partition_scheme (for sp) {a,b,c,au,bu,cu}" +
" partition_type (type for sp)")
| numbskull-master | salt/src/numbskull_master.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numba
from numba import jit
import numpy as np
# HELPER METHODS #
def dataType(i):
"""TODO."""
return {0: "Boolean",
1: "Categorical"}.get(i, "Unknown")
@jit(nopython=True, cache=True)
def compute_var_map(variables, factors, fmap, vmap, factor_index, domain_mask,
factors_to_skip=np.empty(0, np.int64)):
"""TODO."""
# Fill in domain values (for mapping when dumping marginals)
for i, v in enumerate(variables):
# skip boolean (value is 0)
if v["dataType"] == 0:
continue # default to 0
# categorical with explicit domain
if domain_mask[i]:
continue # filled already
else: # categorical with implicit domain [0...cardinality)
for index in range(v["cardinality"]):
vmap[v["vtf_offset"] + index]["value"] = index
# Fill in factor_index and indexes into factor_index
# Step 1: populate VTF.length
for ftv in fmap:
vid = ftv["vid"]
val = ftv["dense_equal_to"] if variables[vid]["dataType"] == 1 else 0
vtf = vmap[variables[vid]["vtf_offset"] + val]
vtf["factor_index_length"] += 1
# Step 2: populate VTF.offset
last_len = 0
last_off = 0
for i, vtf in enumerate(vmap):
vtf["factor_index_offset"] = last_off + last_len
last_len = vtf["factor_index_length"]
last_off = vtf["factor_index_offset"]
# Step 3: populate factor_index
offsets = vmap["factor_index_offset"].copy()
fts_index = 0 # factors_to_skip index
for i, fac in enumerate(factors):
if fts_index < len(factors_to_skip) and \
factors_to_skip[fts_index] == i:
fts_index += 1
continue
for j in range(fac["ftv_offset"], fac["ftv_offset"] + fac["arity"]):
ftv = fmap[j]
vid = ftv["vid"]
val = ftv["dense_equal_to"] if variables[
vid]["dataType"] == 1 else 0
vtf_idx = variables[vid]["vtf_offset"] + val
fidx = offsets[vtf_idx]
factor_index[fidx] = i
offsets[vtf_idx] += 1
# Step 4: remove dupes from factor_index
for vtf in vmap:
offset = vtf["factor_index_offset"]
length = vtf["factor_index_length"]
new_list = factor_index[offset: offset + length]
new_list.sort()
i = 0
last_fid = -1
for fid in new_list:
if last_fid == fid:
continue
last_fid = fid
factor_index[offset + i] = fid
i += 1
vtf["factor_index_length"] = i
@jit(nopython=True, cache=True)
def reverse(data, start, end):
"""TODO."""
end -= 1
while (start < end):
data[start], data[end] = data[end], data[start]
start += 1
end -= 1
@jit(nopython=True, cache=True)
def reverse_array(data):
"""TODO."""
# TODO: why does this fail?
# data = np.flipud(data)
reverse(data, 0, data.size)
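# The loaders below use reverse_array to byteswap 8-byte big-endian fields before
# np.frombuffer, e.g. (illustrative) the big-endian int64 bytes
# [0, 0, 0, 0, 0, 0, 0, 5] become [5, 0, 0, 0, 0, 0, 0, 0], which a little-endian
# machine then reads back as 5.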
# DEFINE NUMBA-BASED DATA LOADING METHODS #
@jit(nopython=True, cache=True)
def load_weights(data, nweights, weights):
"""TODO."""
for i in range(nweights):
# TODO: read types from struct?
# TODO: byteswap only if system is little-endian
buf = data[(17 * i):(17 * i + 8)]
reverse_array(buf)
weightId = np.frombuffer(buf, dtype=np.int64)[0]
isFixed = data[17 * i + 8]
buf = data[(17 * i + 9):(17 * i + 17)]
reverse_array(buf)
initialValue = np.frombuffer(buf, dtype=np.float64)[0]
weights[weightId]["isFixed"] = isFixed
weights[weightId]["initialValue"] = initialValue
print("LOADED WEIGHTS")
@jit(nopython=True, cache=True)
def load_variables(data, nvariables, variables):
"""TODO."""
for i in range(nvariables):
# TODO: read types from struct?
# TODO: byteswap only if system is little-endian
buf = data[(27 * i):(27 * i + 8)]
reverse_array(buf)
variableId = np.frombuffer(buf, dtype=np.int64)[0]
isEvidence = data[27 * i + 8]
buf = data[(27 * i + 9):(27 * i + 17)]
reverse_array(buf)
initialValue = np.frombuffer(buf, dtype=np.int64)[0]
buf = data[(27 * i + 17):(27 * i + 19)]
reverse_array(buf)
dataType = np.frombuffer(buf, dtype=np.int16)[0]
buf = data[(27 * i + 19):(27 * i + 27)]
reverse_array(buf)
cardinality = np.frombuffer(buf, dtype=np.int64)[0]
variables[variableId]["isEvidence"] = isEvidence
variables[variableId]["initialValue"] = initialValue
variables[variableId]["dataType"] = dataType
variables[variableId]["cardinality"] = cardinality
print("LOADED VARS")
@jit(nopython=True, cache=True)
def load_domains(data, domain_mask, vmap, variables):
"""TODO."""
index = 0
while index < data.size:
buf = data[index: index + 8]
reverse_array(buf)
variableId = np.frombuffer(buf, dtype=np.int64)[0]
index += 8
buf = data[index: index + 8]
reverse_array(buf)
cardinality = np.frombuffer(buf, dtype=np.int64)[0]
index += 8
domain_mask[variableId] = True
# NOTE: values are sorted already by DD
for j in range(cardinality):
buf = data[index: index + 8]
reverse_array(buf)
val = np.frombuffer(buf, dtype=np.int64)[0]
index += 8
vmap[variables[variableId]["vtf_offset"] + j]["value"] = val
# translate initial value into dense index
if val == variables[variableId]["initialValue"]:
variables[variableId]["initialValue"] = j
print("LOADED DOMAINS")
@jit(nopython=True, cache=True)
def load_factors(data, nfactors, factors, fmap, domain_mask, variable, vmap):
"""TODO."""
index = 0
fmap_idx = 0
k = 0 # somehow numba 0.28 would raise LowerError without this line
for i in range(nfactors):
buf = data[index:(index + 2)]
reverse_array(buf)
factors[i]["factorFunction"] = np.frombuffer(buf, dtype=np.int16)[0]
buf = data[(index + 2):(index + 10)]
reverse_array(buf)
arity = np.frombuffer(buf, dtype=np.int64)[0]
factors[i]["arity"] = arity
factors[i]["ftv_offset"] = fmap_idx
index += 10 # TODO: update index once per loop?
for k in range(arity):
buf = data[index:(index + 8)]
reverse_array(buf)
vid = np.frombuffer(buf, dtype=np.int64)[0]
fmap[fmap_idx + k]["vid"] = vid
buf = data[(index + 8):(index + 16)]
reverse_array(buf)
val = np.frombuffer(buf, dtype=np.int64)[0]
# translate initial value into dense index using bisect
if domain_mask[vid]:
start = variable[vid]["vtf_offset"]
end = start + variable[vid]["cardinality"]
val = np.searchsorted(vmap["value"][start:end], val)
fmap[fmap_idx + k]["dense_equal_to"] = val
index += 16
fmap_idx += arity
buf = data[index:(index + 8)]
reverse_array(buf)
factors[i]["weightId"] = np.frombuffer(buf, dtype=np.int64)[0]
buf = data[(index + 8):(index + 16)]
reverse_array(buf)
factors[i]["featureValue"] = np.frombuffer(buf, dtype=np.float64)[0]
index += 16
print("LOADED FACTORS")
| numbskull-master | numbskull/dataloading.py |
__version__ = "0.1.1"
| numbskull-master | numbskull/version.py |
"""TODO."""
from __future__ import print_function, absolute_import
import sys
import numpy as np
from numbskull.inference import *
from numbskull.learning import *
from numbskull.timer import Timer
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
def run_pool(threadpool, threads, func, args):
"""TODO."""
if threads == 1:
func(0, *args)
else:
future_to_samples = \
[threadpool.submit(func, threadID, *args)
for threadID in range(threads)]
concurrent.futures.wait(future_to_samples)
for fts in future_to_samples:
if fts.exception() is not None:
raise fts.exception()
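# Example (as used by FactorGraph below): run_pool(threadpool, threads, gibbsthread, args)
# submits gibbsthread once per worker; each call receives its threadID as the first
# argument so it can restrict itself to its own shard of variables.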
class FactorGraph(object):
"""TODO."""
def __init__(self, weight, variable, factor, fmap, vmap,
factor_index, var_copies, weight_copies, fid, workers):
"""TODO."""
self.weight = weight
self.variable = variable
self.factor = factor
self.fmap = fmap
self.vmap = vmap
self.factor_index = factor_index
# This is just cumsum shifted by 1
self.cstart = np.empty(self.variable.shape[0] + 1, np.int64)
self.cstart[0] = 0
self.cstart[1:] = self.variable["cardinality"]
self.cstart[self.cstart == 2] = 1 # Save space for binary vars
np.cumsum(self.cstart, out=self.cstart)
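# Illustrative: cardinalities [3, 2, 4] give cstart == [0, 3, 4, 8], so variable 0
# tallies into count slots 0-2, the binary variable 1 into the single slot 3, and
# variable 2 into slots 4-7.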
self.count = np.zeros(self.cstart[self.variable.shape[0]], np.int64)
self.var_value_evid = \
np.tile(self.variable[:]['initialValue'], (var_copies, 1))
self.var_value = \
np.tile(self.variable[:]['initialValue'], (var_copies, 1))
self.weight_value = \
np.tile(self.weight[:]['initialValue'], (weight_copies, 1))
if self.variable.size == 0:
self.Z = np.zeros((workers, 0))
else:
self.Z = np.zeros((workers, max(self.variable[:]['cardinality'])))
if self.vmap.size == 0:
size = (workers, 0)
else:
size = (workers, 2 * max(self.vmap['factor_index_length']))
self.fids = np.zeros(size, factor_index.dtype)
self.fid = fid
assert(workers > 0)
self.threads = workers
self.threadpool = ThreadPoolExecutor(self.threads)
self.marginals = np.zeros(self.cstart[self.variable.shape[0]])
self.inference_epoch_time = 0.0
self.inference_total_time = 0.0
self.learning_epoch_time = 0.0
self.learning_total_time = 0.0
def clear(self):
"""TODO."""
self.count[:] = 0
self.threadpool.shutdown()
#################
# GETTERS #
#################
def getWeights(self, weight_copy=0):
"""TODO."""
return self.weight_value[weight_copy][:]
def getMarginals(self, varIds=None):
"""TODO."""
if not varIds:
return self.marginals
else:
return self.marginals[varIds]
#####################
# DIAGNOSTICS #
#####################
def diagnostics(self, epochs):
"""TODO."""
print('Inference took %.03f sec.' % self.inference_total_time)
epochs = epochs or 1
bins = 10
hist = np.zeros(bins, dtype=np.int64)
for i in range(len(self.count)):
assert(self.count[i] >= 0)
assert(self.count[i] <= epochs)
hist[min(self.count[i] * bins // epochs, bins - 1)] += 1
for i in range(bins):
start = i / 10.0
end = (i + 1) / 10.0
print("Prob. " + str(start) + ".." + str(end) + ": \
" + str(hist[i]) + " variables")
def diagnosticsLearning(self, weight_copy=0):
"""TODO."""
print('Learning epoch took %.03f sec.' % self.learning_epoch_time)
print("Weights:")
for (i, w) in enumerate(self.weight):
print(" weightId:", i)
print(" isFixed:", w["isFixed"])
print(" weight: ", self.weight_value[weight_copy][i])
print()
################################
# INFERENCE AND LEARNING #
################################
def burnIn(self, epochs, sample_evidence, diagnostics=False,
var_copy=0, weight_copy=0):
"""TODO."""
if diagnostics:
print("FACTOR " + str(self.fid) + ": STARTED BURN-IN...")
# NUMBA-based method. Implemented in inference.py
for ep in range(epochs):
args = (self.threads, var_copy, weight_copy,
self.weight, self.variable, self.factor,
self.fmap, self.vmap,
self.factor_index, self.Z, self.cstart, self.count,
self.var_value, self.weight_value, sample_evidence, True)
run_pool(self.threadpool, self.threads, gibbsthread, args)
if diagnostics:
print("FACTOR " + str(self.fid) + ": DONE WITH BURN-IN")
def inference(self, burnin_epochs, epochs, sample_evidence=False,
diagnostics=False, var_copy=0, weight_copy=0):
"""TODO."""
# Burn-in
if burnin_epochs > 0:
self.burnIn(burnin_epochs, sample_evidence,
diagnostics=diagnostics)
# Run inference
if diagnostics:
print("FACTOR " + str(self.fid) + ": STARTED INFERENCE")
for ep in range(epochs):
with Timer() as timer:
args = (self.threads, var_copy, weight_copy, self.weight,
self.variable, self.factor, self.fmap,
self.vmap, self.factor_index, self.Z,
self.cstart, self.count, self.var_value,
self.weight_value, sample_evidence, False)
run_pool(self.threadpool, self.threads, gibbsthread, args)
self.inference_epoch_time = timer.interval
self.inference_total_time += timer.interval
if diagnostics:
print('Inference epoch #%d took %.03f sec.' %
(ep, self.inference_epoch_time))
if diagnostics:
print("FACTOR " + str(self.fid) + ": DONE WITH INFERENCE")
# compute marginals
if epochs != 0:
self.marginals = self.count / float(epochs)
if diagnostics:
self.diagnostics(epochs)
def learn(self, burnin_epochs, epochs, stepsize, decay, regularization,
reg_param, truncation, diagnostics=False, verbose=False,
learn_non_evidence=False, var_copy=0, weight_copy=0):
"""TODO."""
# Burn-in
if burnin_epochs > 0:
self.burnIn(burnin_epochs, True, diagnostics=diagnostics)
# Run learning
if diagnostics:
print("FACTOR " + str(self.fid) + ": STARTED LEARNING")
for ep in range(epochs):
if diagnostics:
print("FACTOR " + str(self.fid) + ": EPOCH #" + str(ep))
print("Current stepsize = " + str(stepsize))
if verbose:
self.diagnosticsLearning(weight_copy)
sys.stdout.flush() # otherwise output refuses to show in DD
with Timer() as timer:
args = (self.threads, stepsize, regularization, reg_param,
truncation, var_copy, weight_copy, self.weight,
self.variable, self.factor, self.fmap,
self.vmap, self.factor_index, self.Z, self.fids,
self.var_value, self.var_value_evid,
self.weight_value, learn_non_evidence)
run_pool(self.threadpool, self.threads, learnthread, args)
self.learning_epoch_time = timer.interval
self.learning_total_time += timer.interval
# Decay stepsize
stepsize *= decay
if diagnostics:
print("FACTOR " + str(self.fid) + ": DONE WITH LEARNING")
def dump_weights(self, fout, weight_copy=0):
"""Dump <wid, weight> text file in DW format."""
with open(fout, 'w') as out:
for i, w in enumerate(self.weight):
out.write('%d %f\n' % (i, self.weight_value[weight_copy][i]))
def dump_probabilities(self, fout, epochs):
"""Dump <vid, value, prob> text file in DW format."""
epochs = epochs or 1
with open(fout, 'w') as out:
for i, v in enumerate(self.variable):
if v["cardinality"] == 2:
prob = float(self.count[self.cstart[i]]) / epochs
out.write('%d %d %.3f\n' % (i, 1, prob))
else:
for k in range(v["cardinality"]):
prob = float(self.count[self.cstart[i] + k]) / epochs
original_value = self.vmap[
v["vtf_offset"] + k]["value"]
out.write('%d %d %.3f\n' % (i, original_value, prob))
| numbskull-master | numbskull/factorgraph.py |
"""TODO."""
from __future__ import print_function, absolute_import
import time
class Timer:
"""TODO."""
def __enter__(self):
"""TODO."""
self.start = time.time()
return self
def __exit__(self, *args):
"""TODO."""
self.end = time.time()
self.interval = self.end - self.start
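# Usage (as in factorgraph.py): `with Timer() as timer: ...`; afterwards
# timer.interval holds the elapsed wall-clock seconds.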
| numbskull-master | numbskull/timer.py |
"""inference and learning for factor graphs"""
from .numbskull import NumbSkull
from .numbskull import main
from .version import __version__
__all__ = ('numbskull', 'factorgraph', 'timer')
| numbskull-master | numbskull/__init__.py |
#!/usr/bin/env python
"""TODO: This is a docstring."""
from __future__ import print_function, absolute_import
from past.builtins import long
import os
import sys
import argparse
import numbskull.factorgraph
from numbskull.factorgraph import FactorGraph
from numbskull.dataloading import *
from numbskull.numbskulltypes import *
import numpy as np
# Define arguments for both parser in main and NumbSkull
arguments = [
(tuple(['directory']),
{'metavar': 'DIRECTORY',
'nargs': '?',
'default': '.',
'type': str,
'help': 'specify the directory of factor graph files'}),
# TODO: print default for meta, weight, variable, factor in help
(('-o', '--output_dir'),
{'metavar': 'OUTPUT_DIR',
'dest': 'output_dir',
'default': '.',
'type': str,
'help': 'Output dir to contain inference_result.out.text ' +
'and inference_result.out.weights.text'}),
(('-m', '--meta', '--fg_meta'),
{'metavar': 'META_FILE',
'dest': 'metafile',
'default': 'graph.meta',
'type': str,
'help': 'factor graph metadata file'}),
(('-w', '--weight', '--weights'),
{'metavar': 'WEIGHTS_FILE',
'dest': 'weightfile',
'default': 'graph.weights',
'type': str,
'help': 'factor weight file'}),
(('-v', '--variable', '--variables'),
{'metavar': 'VARIABLES_FILE',
'dest': 'variablefile',
'default': 'graph.variables',
'type': str,
'help': 'factor graph variables file'}),
(('-f', '--factor', '--factors'),
{'metavar': 'FACTORS_FILE',
'dest': 'factorfile',
'default': 'graph.factors',
'type': str,
'help': 'factor file'}),
(('--domain', '--domains'),
{'metavar': 'DOMAINS_FILE',
'dest': 'domainfile',
'default': 'graph.domains',
'type': str,
'help': 'domain file'}),
(('-l', '--n_learning_epoch'),
{'metavar': 'NUM_LEARNING_EPOCHS',
'dest': 'n_learning_epoch',
'default': 0,
'type': int,
'help': 'number of learning epochs'}),
(('-i', '--n_inference_epoch'),
{'metavar': 'NUM_INFERENCE_EPOCHS',
'dest': 'n_inference_epoch',
'default': 0,
'type': int,
'help': 'number of inference epochs'}),
(('-s', '--stepsize', '-a', '--alpha'),
{'metavar': 'LEARNING_STEPSIZE',
'dest': 'stepsize',
'default': 0.01,
'type': float,
'help': 'stepsize for learning'}),
(('-d', '--decay', '--diminish'),
{'metavar': 'LEARNING_DECAY',
'dest': 'decay',
'default': 0.95,
'type': float,
'help': 'decay for updating stepsize during learning'}),
(('-r', '--reg_param'),
{'metavar': 'LEARNING_REGULARIZATION_PARAM',
'dest': 'reg_param',
'default': 0.01,
'type': float,
'help': 'regularization penalty'}),
(tuple(['--regularization']),
{'metavar': 'REGULARIZATION',
'dest': 'regularization',
'default': 2,
'type': int,
'help': 'regularization (l1 or l2) [Enter as "1" or "2"]'}),
(('-k', '--truncation'),
{'metavar': 'TRUNCATION',
'dest': 'truncation',
'default': 1,
'type': int,
'help': 'If using l1 regularization, truncation is applied with '
'probability 1/k and with magnitude '
'step_size * reg_param * k. If not using l1 regularization, '
'this parameter has no effect.'}),
(('-b', '--burn_in'),
{'metavar': 'BURN_IN',
'dest': 'burn_in',
'default': 0,
'type': int,
'help': 'number of burn-in epochs'}),
(('-t', '--threads', '--n_threads'),
{'metavar': 'NUM_THREADS',
'dest': 'nthreads',
'default': 1,
'type': int,
'help': 'number of threads to be used'}),
(('-u', '--dburl'),
{'metavar': 'DATABASE_URL',
'dest': 'dburl',
'default': '',
'type': str,
'help': 'url to database holding factor graph information'})
]
flags = [
(tuple(['--sample_evidence']),
{'default': True,
'dest': 'sample_evidence',
'action': 'store_true',
'help': 'sample evidence variables'}),
(tuple(['--learn_non_evidence']),
{'default': False,
'dest': 'learn_non_evidence',
'action': 'store_true',
'help': 'learn from non-evidence variables'}),
(('-q', '--quiet'),
{'default': False,
'dest': 'quiet',
'action': 'store_true',
'help': 'quiet'}),
(tuple(['--verbose']),
{'default': False,
'dest': 'verbose',
'action': 'store_true',
'help': 'verbose'})
]
class NumbSkull(object):
"""TODO: Main class for numbskull."""
def __init__(self, **kwargs):
"""TODO.
Parameters
----------
paramater : type
This is a parameter
Returns
-------
describe : type
Expanation
"""
# Initialize default execution arguments
arg_defaults = {}
for arg, opts in arguments:
if 'directory' in arg[0]:
arg_defaults['directory'] = opts['default']
else:
arg_defaults[opts['dest']] = opts['default']
# Initialize default execution flags
for arg, opts in flags:
arg_defaults[opts['dest']] = opts['default']
for (arg, default) in arg_defaults.items():
setattr(self, arg, kwargs.get(arg, default))
self.factorGraphs = []
def loadFactorGraphRaw(self, weight, variable, factor, fmap,
vmap, factor_index,
var_copies=1, weight_copies=1):
"""TODO."""
fg = FactorGraph(weight, variable, factor, fmap, vmap, factor_index,
var_copies, weight_copies,
len(self.factorGraphs), self.nthreads)
self.factorGraphs.append(fg)
def loadFactorGraph(self, weight, variable, factor, fmap, domain_mask,
edges, var_copies=1, weight_copies=1,
factors_to_skip=np.empty(0, np.int64)):
"""TODO."""
# Note: factors_to_skip must be sorted
# Assert input arguments correspond to NUMPY arrays
assert(type(weight) == np.ndarray and weight.dtype == Weight)
assert(type(variable) == np.ndarray and variable.dtype == Variable)
assert(type(factor) == np.ndarray and factor.dtype == Factor)
assert(type(fmap) == np.ndarray and fmap.dtype == FactorToVar)
assert(type(domain_mask) == np.ndarray and
domain_mask.dtype == np.bool)
assert(type(edges) == int or
type(edges) == long or
type(edges) == np.int64)
assert(type(factors_to_skip) == np.ndarray and
factors_to_skip.dtype == np.int64)
# Initialize metadata
meta = {}
meta['weights'] = weight.shape[0]
meta['variables'] = variable.shape[0]
meta['factors'] = factor.shape[0]
# TODO: should probably just delete edges as an argument
# Not really needed (can just be computed from factors)
edges = sum(factor["arity"]) - sum(factor[factors_to_skip]["arity"])
meta['edges'] = edges
# count total number of VTF records needed
num_vtfs = 0
for var in variable:
var["vtf_offset"] = num_vtfs
if var["dataType"] == 0: # boolean
num_vtfs += 1
else:
num_vtfs += var["cardinality"]
vmap = np.zeros(num_vtfs, VarToFactor)
# factors_to_skip is a list of indices of factors
# these factors need to exist for the distributed sampler
# but cannot be sampled
# TODO: edges is really poorly defined with factors_to_skip
factor_index = np.zeros(meta["edges"], np.int64)
# Numba-based method. Defined in dataloading.py
compute_var_map(variable, factor, fmap, vmap,
factor_index, domain_mask, factors_to_skip)
fg = FactorGraph(weight, variable, factor, fmap, vmap, factor_index,
var_copies, weight_copies,
len(self.factorGraphs), self.nthreads)
self.factorGraphs.append(fg)
def loadFGFromFile(self, directory=None, metafile=None, weightfile=None,
variablefile=None, factorfile=None, domainfile=None,
var_copies=1, weight_copies=1):
"""TODO."""
# init necessary input arguments
if not self.directory:
print("No factor graph specified")
return
else:
directory = self.directory
metafile = self.metafile if not metafile else metafile
weightfile = self.weightfile if not weightfile else weightfile
variablefile = self.variablefile if not variablefile else variablefile
factorfile = self.factorfile if not factorfile else factorfile
domainfile = self.domainfile if not domainfile else domainfile
print_info = not self.quiet
print_only_meta = not self.verbose
# load metadata
meta = np.loadtxt(directory + "/" + metafile,
delimiter=',',
dtype=Meta)
meta = meta[()]
if print_info:
print("Meta:")
print(" weights: ", meta["weights"])
print(" variables:", meta["variables"])
print(" factors: ", meta["factors"])
print(" edges: ", meta["edges"])
print()
# load weights
weight_data = np.memmap(directory + "/" + weightfile, mode="c")
weight = np.zeros(meta["weights"], Weight)
# NUMBA-based function. Defined in dataloading.py
load_weights(weight_data, meta["weights"], weight)
if print_info and not print_only_meta:
print("Weights:")
for (i, w) in enumerate(weight):
print(" weightId:", i)
print(" isFixed:", w["isFixed"])
print(" weight: ", w["initialValue"])
print()
# load variables
variable_data = np.memmap(directory + "/" + variablefile, mode="c")
variable = np.zeros(meta["variables"], Variable)
# NUMBA-based method. Defined in dataloading.py
load_variables(variable_data, meta["variables"], variable)
sys.stdout.flush()
if print_info and not print_only_meta:
print("Variables:")
for (i, v) in enumerate(variable):
print(" variableId:", i)
print(" isEvidence: ", v["isEvidence"])
print(" initialValue:", v["initialValue"])
print(" dataType: ", v["dataType"],
"(", dataType(v["dataType"]), ")")
print(" cardinality: ", v["cardinality"])
print()
# count total number of VTF records needed
num_vtfs = 0
for var in variable:
var["vtf_offset"] = num_vtfs
if var["dataType"] == 0: # boolean
num_vtfs += 1
else:
num_vtfs += var["cardinality"]
print("#VTF = %s" % num_vtfs)
sys.stdout.flush()
# generate variable-to-factor map
vmap = np.zeros(num_vtfs, VarToFactor)
factor_index = np.zeros(meta["edges"], np.int64)
# load domains
# whether a var has domain spec
domain_mask = np.zeros(meta["variables"], np.bool)
domain_file = directory + "/" + domainfile
if os.path.isfile(domain_file) and os.stat(domain_file).st_size > 0:
domain_data = np.memmap(directory + "/" + domainfile, mode="c")
load_domains(domain_data, domain_mask, vmap, variable)
sys.stdout.flush()
# load factors
factor_data = np.memmap(directory + "/" + factorfile, mode="c")
factor = np.zeros(meta["factors"], Factor)
fmap = np.zeros(meta["edges"], FactorToVar)
# Numba-based method. Defined in dataloading.py
load_factors(factor_data, meta["factors"],
factor, fmap, domain_mask, variable, vmap)
sys.stdout.flush()
# Numba-based method. Defined in dataloading.py
compute_var_map(variable, factor, fmap, vmap,
factor_index, domain_mask)
print("COMPLETED VMAP INDEXING")
sys.stdout.flush()
fg = FactorGraph(weight, variable, factor, fmap, vmap, factor_index,
var_copies, weight_copies,
len(self.factorGraphs), self.nthreads)
self.factorGraphs.append(fg)
def getFactorGraph(self, fgID=0):
"""TODO."""
return self.factorGraphs[fgID]
def inference(self, fgID=0, out=True):
"""TODO."""
burn_in = self.burn_in
n_inference_epoch = self.n_inference_epoch
self.factorGraphs[fgID].inference(burn_in, n_inference_epoch,
sample_evidence=self.sample_evidence,
diagnostics=not self.quiet)
if out:
output_file = os.path.join(
self.output_dir, "inference_result.out.text")
self.factorGraphs[fgID].dump_probabilities(output_file,
n_inference_epoch)
def learning(self, fgID=0, out=True):
"""TODO."""
burn_in = self.burn_in
n_learning_epoch = self.n_learning_epoch
stepsize = self.stepsize
decay = self.decay
regularization = self.regularization
reg_param = self.reg_param
truncation = self.truncation
fg = self.factorGraphs[fgID]
fg.learn(burn_in, n_learning_epoch,
stepsize, decay, regularization, reg_param, truncation,
diagnostics=not self.quiet,
verbose=self.verbose,
learn_non_evidence=self.learn_non_evidence)
if out:
output_file = os.path.join(
self.output_dir, "inference_result.out.weights.text")
self.factorGraphs[fgID].dump_weights(output_file)
def load(argv=None):
"""TODO."""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description="Runs a Gibbs sampler",
epilog="")
# Add version to parser
parser.add_argument("--version",
action='version',
version="%(prog)s 0.0",
help="print version number")
# Add execution arguments to parser
for arg, opts in arguments:
parser.add_argument(*arg, **opts)
# Add flags to parser
for arg, opts in flags:
parser.add_argument(*arg, **opts)
# Initialize NumbSkull #
args = parser.parse_args(argv)
ns = NumbSkull(**vars(args))
ns.loadFGFromFile()
return ns
def main(argv=None):
"""Duh."""
ns = load(argv)
ns.learning()
ns.inference()
| numbskull-master | numbskull/numbskull.py |
#!/usr/bin/env python
"""TODO."""
from __future__ import print_function
import zmq
import sys
import time
import argparse
import gibbs
import numpy as np
def send_array(socket, A, flags=0, copy=True, track=False):
"""TODO: send a numpy array with metadata."""
md = dict(
dtype=str(A.dtype),
shape=A.shape,
)
socket.send_json(md, flags | zmq.SNDMORE)
return socket.send(A, flags, copy=copy, track=track)
def recv_array(socket, flags=0, copy=True, track=False):
"""TODO: recv a numpy array."""
md = socket.recv_json(flags=flags)
msg = socket.recv(flags=flags, copy=copy, track=track)
buf = buffer(msg)
try:
A = np.frombuffer(buf, dtype=md['dtype'])
except:
A = np.frombuffer(buf, dtype=eval(md['dtype']))
return A.reshape(md['shape'])
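# send_array/recv_array form a two-part protocol: a JSON header {'dtype', 'shape'}
# followed by the raw buffer, with SNDMORE keeping both frames in one message;
# recv_array reads both parts and rebuilds the ndarray.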
def server(argv=None):
"""TODO."""
parser = argparse.ArgumentParser(
description="Run Gibbs worker",
epilog="")
parser.add_argument("directory",
metavar="DIRECTORY",
nargs="?",
help="specify directory of factor graph files",
default="",
type=str)
parser.add_argument("-p", "--port",
metavar="PORT",
help="port",
default=5556,
type=int)
parser.add_argument("-m", "--meta",
metavar="META_FILE",
dest="meta",
default="graph.meta",
type=str,
help="meta file")
# TODO: print default for meta, weight, variable, factor in help
parser.add_argument("-w", "--weight",
metavar="WEIGHTS_FILE",
dest="weight",
default="graph.weights",
type=str,
help="weight file")
parser.add_argument("-v", "--variable",
metavar="VARIABLES_FILE",
dest="variable",
default="graph.variables",
type=str,
help="variable file")
parser.add_argument("-f", "--factor",
metavar="FACTORS_FILE",
dest="factor",
default="graph.factors",
type=str,
help="factor file")
parser.add_argument("-b", "--burn",
metavar="NUM_BURN_STEPS",
dest="burn",
default=0,
type=int,
help="number of learning sweeps")
parser.add_argument("-l", "--learn",
metavar="NUM_LEARN_STEPS",
dest="learn",
default=0,
type=int,
help="number of learning sweeps")
parser.add_argument("-e", "--epoch",
metavar="NUM_LEARNING_EPOCHS",
dest="epoch",
default=0,
type=int,
help="number of learning epochs")
parser.add_argument("-i", "--inference",
metavar="NUM_INFERENCE_STEPS",
dest="inference",
default=0,
type=int,
help="number of inference sweeps")
# TODO: sample observed variable option
parser.add_argument("-q", "--quiet",
# metavar="QUIET",
dest="quiet",
default=False,
action="store_true",
# type=bool,
help="quiet")
# TODO: verbose option (print all info)
parser.add_argument("--verbose",
# metavar="VERBOSE",
dest="verbose",
default=False,
action="store_true",
# type=bool,
help="verbose")
print("Running server...")
arg = parser.parse_args(argv[1:])
if arg.directory == "":
fg = None
else:
var_copies = 1
weight_copies = 1
(meta, weight, variable, factor,
fstart, fmap, vstart, vmap, equalPredicate) = \
gibbs.load(arg.directory, arg.meta, arg.weight, arg.variable,
arg.factor, not arg.quiet, not arg.verbose)
fg_args = (weight, variable, factor, fstart, fmap, vstart,
vmap, equalPredicate, var_copies, weight_copies)
fg = gibbs.FactorGraph(*fg_args)
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:%s" % arg.port)
num_clients = 0
# TODO: barrier between burn, learn, and inference
# Probably need to send client id back
while True:
# Wait for next request from client
message = socket.recv()
if message == "HELLO": # Initial message from client
print("Received HELLO.")
socket.send("CLIENT_ID", zmq.SNDMORE)
socket.send_json("%d" % num_clients)
num_clients += 1
elif message == 'R_FACTOR_GRAPH': # Request for factor graph
client_id = socket.recv_json()
print("Received factor graph request from client #%d." % client_id)
# TODO: check that fg != None
# TODO
socket.send("FACTOR_GRAPH", zmq.SNDMORE)
socket.send_json(len(fg_args), zmq.SNDMORE)
for a in fg_args:
is_array = (type(a) == np.ndarray)
socket.send_json(is_array, zmq.SNDMORE)
if is_array:
send_array(socket, a, zmq.SNDMORE)
else:
socket.send_json(a, zmq.SNDMORE)
# TODO: could just not send SNDMORE for last arg
socket.send("DONE")
elif message == "READY": # Client ready
print("Received ready.")
# could skip this if arg.burn == 0
socket.send("BURN", zmq.SNDMORE)
socket.send_json(arg.burn)
elif message == 'DONE_BURN' or message == 'DONE_LEARN':
# Client done with burn/learning
if message == 'DONE_BURN': # Done burning
epochs = 0
else: # Done learning
epochs = socket.recv_json()
fg.wv += recv_array(socket)
pass
if epochs < arg.epoch:
socket.send("LEARN", zmq.SNDMORE)
socket.send_json(arg.learn, zmq.SNDMORE)
socket.send_json(0.001, zmq.SNDMORE) # TODO
send_array(socket, fg.wv)
else:
socket.send("INFERENCE", zmq.SNDMORE)
socket.send_json(arg.inference, zmq.SNDMORE)
send_array(socket, fg.wv)
elif message == 'DONE_INFERENCE': # Client done with inference
data = recv_array(socket)
# TODO: handle count
socket.send("EXIT")
else:
print("Message (%s) cannot be interpreted." % message,
file=sys.stderr)
socket.send("EXIT")
return
def client(argv=None):
"""TODO."""
parser = argparse.ArgumentParser(
description="Run Gibbs worker",
epilog="")
parser.add_argument("directory",
metavar="DIRECTORY",
nargs="?",
help="specify directory of factor graph files",
default="",
type=str)
parser.add_argument("-p", "--port",
metavar="PORT",
help="port",
default=5556,
type=int)
parser.add_argument("-m", "--meta",
metavar="META_FILE",
dest="meta",
default="graph.meta",
type=str,
help="meta file")
# TODO: print default for meta, weight, variable, factor in help
parser.add_argument("-w", "--weight",
metavar="WEIGHTS_FILE",
dest="weight",
default="graph.weights",
type=str,
help="weight file")
parser.add_argument("-v", "--variable",
metavar="VARIABLES_FILE",
dest="variable",
default="graph.variables",
type=str,
help="variable file")
parser.add_argument("-f", "--factor",
metavar="FACTORS_FILE",
dest="factor",
default="graph.factors",
type=str,
help="factor file")
parser.add_argument("-q", "--quiet",
# metavar="QUIET",
dest="quiet",
default=False,
action="store_true",
# type=bool,
help="quiet")
parser.add_argument("--verbose",
# metavar="VERBOSE",
dest="verbose",
default=False,
action="store_true",
# type=bool,
help="verbose")
print(argv)
arg = parser.parse_args(argv[1:])
print("Running client...")
print(arg.directory)
if arg.directory == "":
fg = None
else:
var_copies = 1
weight_copies = 1
(meta, weight, variable, factor,
fstart, fmap, vstart, vmap, equalPredicate) = \
gibbs.load(arg.directory, arg.meta, arg.weight, arg.variable,
arg.factor, not arg.quiet, not arg.verbose)
fg = gibbs.FactorGraph(weight, variable, factor, fstart, fmap, vstart,
vmap, equalPredicate, var_copies, weight_copies)
context = zmq.Context()
print("Connecting to server...")
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:%s" % arg.port)
# hello message
print("Sent HELLO.")
socket.send("HELLO")
message = socket.recv()
assert(message == "CLIENT_ID")
message = socket.recv_json()
client_id = int(message)
print("Received id #%d.\n" % client_id)
# request factor graph if not loaded
if fg is None:
socket.send("R_FACTOR_GRAPH", zmq.SNDMORE)
socket.send_json(client_id)
message = socket.recv()
assert(message == "FACTOR_GRAPH")
length = socket.recv_json()
fg_args = [None, ] * length
for i in range(length):
is_array = socket.recv_json()
if is_array:
fg_args[i] = recv_array(socket)
else:
fg_args[i] = socket.recv_json()
assert(socket.recv() == "DONE")
fg = gibbs.FactorGraph(*fg_args)
# Send "ready"
socket.send("READY")
learning_epochs = 0
while True:
message = socket.recv()
if message == 'BURN': # request for burn-in
print("Received request for burn-in.")
burn = socket.recv_json()
print("Burning", burn, "sweeps.")
fg.gibbs(burn, 0, 0)
socket.send("DONE_BURN")
elif message == 'LEARN': # Request for learning
print("Received request for learning.")
sweeps = socket.recv_json()
step = socket.recv_json()
fg.wv = recv_array(socket)
wv = fg.wv
fg.learn(sweeps, step, 0, 0)
dw = fg.wv - wv
socket.send("DONE_LEARNING", zmq.SNDMORE)
learning_epochs += 1
socket.send_json(learning_epochs, zmq.SNDMORE)
send_array(socket, dw)
elif message == 'INFERENCE': # Request for inference
print("Received request for inference.")
inference = socket.recv_json()
fg.wv = recv_array(socket)
print("Inference:", inference, "sweeps.")
fg.clear()
fg.gibbs(inference, 0, 0)
socket.send("DONE_INFERENCE", zmq.SNDMORE)
send_array(socket, fg.count)
elif message == 'EXIT': # Exit
print("Exit")
break
else:
print("Message cannot be interpreted.", file=sys.stderr)
break
def main(argv=None):
"""TODO."""
if argv is None:
argv = sys.argv[1:]
if len(argv) < 1:
print("Usage: ./distributed.py [server/client]", file=sys.stderr)
elif argv[0].lower() == "server" or argv[0].lower() == "s":
server(argv)
elif argv[0].lower() == "client" or argv[0].lower() == "c":
client(argv)
else:
print("Error:", argv[0], "is not a valid choice.", file=sys.stderr)
if __name__ == "__main__":
main()
| numbskull-master | numbskull/distributed.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numpy as np
# TODO (shared with DW): space optimization:
# 1. use smaller ints for some fields
# 2. replace a[x].length with a[x+1].offset - a[x].offset
Meta = np.dtype([('weights', np.int64),
('variables', np.int64),
('factors', np.int64),
('edges', np.int64)])
Weight = np.dtype([("isFixed", np.bool),
("initialValue", np.float64)])
Variable = np.dtype([("isEvidence", np.int8),
("initialValue", np.int64),
("dataType", np.int16),
("cardinality", np.int64),
("vtf_offset", np.int64)])
Factor = np.dtype([("factorFunction", np.int16),
("weightId", np.int64),
("featureValue", np.float64),
("arity", np.int64),
("ftv_offset", np.int64)])
FactorToVar = np.dtype([("vid", np.int64),
("dense_equal_to", np.int64)])
VarToFactor = np.dtype([("value", np.int64),
("factor_index_offset", np.int64),
("factor_index_length", np.int64)])
UnaryFactorOpt = np.dtype([('vid', np.int64),
('weightId', np.int64)])
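# Usage sketch: structured arrays of these dtypes are what the loaders in
# dataloading.py fill in, e.g. weight = np.zeros(meta["weights"], Weight), after
# which fields are addressed as weight[i]["isFixed"] and weight[i]["initialValue"].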
| numbskull-master | numbskull/numbskulltypes.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numba
from numba import jit
import numpy as np
import math
@jit(nopython=True, cache=True, nogil=True)
def gibbsthread(shardID, nshards, var_copy, weight_copy, weight, variable,
factor, fmap, vmap, factor_index, Z, cstart,
count, var_value, weight_value, sample_evidence, burnin):
"""TODO."""
# Indentify start and end variable
nvar = variable.shape[0]
start = (shardID * nvar) // nshards
end = ((shardID + 1) * nvar) // nshards
# TODO: give option do not store result, or just store tally
for var_samp in range(start, end):
if variable[var_samp]["isEvidence"] == 4:
# This variable is not owned by this machine
continue
if variable[var_samp]["isEvidence"] == 0 or sample_evidence:
v = draw_sample(var_samp, var_copy, weight_copy, weight, variable,
factor, fmap, vmap, factor_index, Z[shardID],
var_value, weight_value)
var_value[var_copy][var_samp] = v
if not burnin:
if variable[var_samp]["cardinality"] == 2:
count[cstart[var_samp]] += v
else:
count[cstart[var_samp] + v] += 1
@jit(nopython=True, cache=True, nogil=True)
def draw_sample(var_samp, var_copy, weight_copy, weight, variable, factor,
fmap, vmap, factor_index, Z, var_value, weight_value):
"""TODO."""
cardinality = variable[var_samp]["cardinality"]
for value in range(cardinality):
Z[value] = np.exp(potential(var_samp, value, var_copy, weight_copy,
weight, variable, factor, fmap,
vmap, factor_index, var_value,
weight_value))
for j in range(1, cardinality):
Z[j] += Z[j - 1]
z = np.random.rand() * Z[cardinality - 1]
return np.argmax(Z[:cardinality] >= z)
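# Illustrative: with unnormalized potentials exp(p) == [0.2, 0.5, 0.3] the running
# sums are [0.2, 0.7, 1.0]; a uniform draw z in [0, 1.0) picks the first value whose
# running sum reaches z, so value 1 is returned with probability 0.5.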
@jit(nopython=True, cache=True, nogil=True)
def potential(var_samp, value, var_copy, weight_copy, weight, variable, factor,
fmap, vmap, factor_index, var_value, weight_value):
"""TODO."""
p = 0.0
varval_off = value
if variable[var_samp]["dataType"] == 0:
varval_off = 0
vtf = vmap[variable[var_samp]["vtf_offset"] + varval_off]
start = vtf["factor_index_offset"]
end = start + vtf["factor_index_length"]
for k in range(start, end):
factor_id = factor_index[k]
p += weight_value[weight_copy][factor[factor_id]["weightId"]] * \
eval_factor(factor_id, var_samp, value, var_copy, variable,
factor, fmap, var_value)
return p
FACTORS = {
# Factor functions for boolean variables
"NOOP": -1,
"IMPLY_NATURAL": 0,
"OR": 1,
"EQUAL": 3,
"AND": 2,
"ISTRUE": 4,
"LINEAR": 7,
"RATIO": 8,
"LOGICAL": 9,
"IMPLY_MLN": 13,
# Factor functions for categorical variables
"AND_CAT": 12,
"OR_CAT": 14,
"EQUAL_CAT_CONST": 15,
"IMPLY_NATURAL_CAT": 16,
"IMPLY_MLN_CAT": 17,
# Factor functions for generative models for data programming.
#
# These functions accept two types of categorical variables:
#
# y \in {1, -1} corresponding to latent labels, and
# l \in {1, 0, -1} corresponding to labeling function outputs.
#
# The values of y are mapped to Numbskull variables y_index
# via {-1: 0, 1: 1}, and
# the values of l are mapped to Numbskull variables l_index
# via {-1: 0, 0: 1, 1: 2}.
# h(y) := y
"DP_GEN_CLASS_PRIOR": 18,
# h(l) := l
"DP_GEN_LF_PRIOR": 19,
# h(l) := l * l
"DP_GEN_LF_PROPENSITY": 20,
# h(y, l) := y * l
"DP_GEN_LF_ACCURACY": 21,
# h(l) := y * l * l
"DP_GEN_LF_CLASS_PROPENSITY": 22,
# l_2 fixes errors made by l_1
#
# h(y, l_1, l_2) := if l_1 == 0 and l_2 != 0: -1,
# elif l_1 == -1 * y and l_2 == y: 1,
# else: 0
"DP_GEN_DEP_FIXING": 23,
# l_2 reinforces the output of l_1
#
# h(y, l_1, l_2) := if l_1 == 0 and l_2 != 0: -1,
# elif l_1 == y and l_2 == y: 1,
# else: 0
"DP_GEN_DEP_REINFORCING": 24,
# h(l_1, l_2) := if l_1 != 0 and l_2 != 0: -1, else: 0
"DP_GEN_DEP_EXCLUSIVE": 25,
# h(l_1, l_2) := if l_1 == l_2: 1, else: 0
"DP_GEN_DEP_SIMILAR": 26,
# Factor functions for distribution
"UFO": 30
}
for (key, value) in FACTORS.items():
exec("FUNC_" + key + " = " + str(value))
@jit(nopython=True, cache=True, nogil=True)
def eval_factor(factor_id, var_samp, value, var_copy, variable, factor, fmap,
var_value):
"""TODO."""
####################
# BINARY VARIABLES #
####################
fac = factor[factor_id]
ftv_start = fac["ftv_offset"]
ftv_end = ftv_start + fac["arity"]
if fac["factorFunction"] == FUNC_NOOP:
return 0
elif fac["factorFunction"] == FUNC_IMPLY_NATURAL:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if v == 0:
# Early return if body is not satisfied
return 0
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if head:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_OR:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if v == 1:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_EQUAL:
v = value if (fmap[ftv_start]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start]["vid"]]
for l in range(ftv_start + 1, ftv_end):
w = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != w:
return -1
return 1
elif factor[factor_id]["factorFunction"] == FUNC_AND \
or factor[factor_id]["factorFunction"] == FUNC_ISTRUE:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == 0:
return -1
return 1
elif factor[factor_id]["factorFunction"] == FUNC_LINEAR:
res = 0
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
res += 1
# This does not match Dimmwitted, but matches the eq in the paper
return res
elif factor[factor_id]["factorFunction"] == FUNC_RATIO:
res = 1
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
res += 1
# This does not match Dimmwitted, but matches the eq in the paper
return math.log(res) # TODO: use log2?
elif factor[factor_id]["factorFunction"] == FUNC_LOGICAL:
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
return 1
return 0
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_MLN:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == 0:
# Early return if body is not satisfied
return 1
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head:
return 1
return 0
#########################
# CATEGORICAL VARIABLES #
#########################
elif factor[factor_id]["factorFunction"] == FUNC_AND_CAT \
or factor[factor_id]["factorFunction"] == FUNC_EQUAL_CAT_CONST:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
return 0
return 1
elif factor[factor_id]["factorFunction"] == FUNC_OR_CAT:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == fmap[l]["dense_equal_to"]:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_NATURAL_CAT:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
# Early return if body is not satisfied
return 0
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head == fmap[l]["dense_equal_to"]:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_MLN_CAT:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
# Early return if body is not satisfied
return 1
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head == fmap[l]["dense_equal_to"]:
return 1
return 0
#####################
# DATA PROGRAMMING #
# GENERATIVE MODELS #
#####################
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_CLASS_PRIOR:
# NB: this doesn't make sense for categoricals
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
return 1 if y_index == 1 else -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_PRIOR:
# NB: this doesn't make sense for categoricals
l_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
if l_index == 2:
return -1
elif l_index == 0:
return 0
else:
return 1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_PROPENSITY:
l_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
abstain = variable[fmap[ftv_start]["vid"]]["cardinality"] - 1
return 0 if l_index == abstain else 1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_ACCURACY:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
abstain = variable[fmap[ftv_start + 1]["vid"]]["cardinality"] - 1
if l_index == abstain:
return 0
elif y_index == l_index:
return 1
else:
return -1
elif factor[factor_id]["factorFunction"] == \
FUNC_DP_GEN_LF_CLASS_PROPENSITY:
# NB: this doesn't make sense for categoricals
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
abstain = variable[fmap[ftv_start + 1]["vid"]]["cardinality"] - 1
if l_index == abstain:
return 0
elif y_index == 1:
return 1
else:
return -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_FIXING:
# NB: this doesn't make sense for categoricals
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l1_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
l2_index = value if fmap[ftv_start + 2]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 2]["vid"]]
abstain = variable[fmap[ftv_start + 1]["vid"]]["cardinality"] - 1
if l1_index == abstain:
return -1 if l2_index != 1 else 0
elif l1_index == 0 and l2_index == 1 and y_index == 1:
return 1
elif l1_index == 1 and l2_index == 0 and y_index == 0:
return 1
else:
return 0
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_REINFORCING:
# NB: this doesn't make sense for categoricals
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l1_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
l2_index = value if fmap[ftv_start + 2]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 2]["vid"]]
abstain = variable[fmap[ftv_start + 1]["vid"]]["cardinality"] - 1
if l1_index == abstain:
return -1 if l2_index != 1 else 0
elif l1_index == 0 and l2_index == 0 and y_index == 0:
return 1
elif l1_index == 1 and l2_index == 1 and y_index == 1:
return 1
else:
return 0
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_EXCLUSIVE:
l1_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l2_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
abstain = variable[fmap[ftv_start]["vid"]]["cardinality"] - 1
return 0 if l1_index == abstain or l2_index == abstain else -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_SIMILAR:
l1_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l2_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
return 1 if l1_index == l2_index else 0
###########################################
# FACTORS FOR OPTIMIZING DISTRIBUTED CODE #
###########################################
elif factor[factor_id]["factorFunction"] == FUNC_UFO:
v = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
if v == 0:
return 0
return value if fmap[ftv_start + v - 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + v - 1]["vid"]]
######################
# FACTOR NOT DEFINED #
######################
else: # FUNC_UNDEFINED
print("Error: Factor Function", factor[factor_id]["factorFunction"],
"( used in factor", factor_id, ") is not implemented.")
raise NotImplementedError("Factor function is not implemented.")
| numbskull-master | numbskull/inference.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numba
from numba import jit
import numpy as np
import math
import random
from numbskull.inference import draw_sample, eval_factor
@jit(nopython=True, cache=True, nogil=True)
def learnthread(shardID, nshards, step, regularization, reg_param, truncation,
var_copy, weight_copy, weight,
variable, factor, fmap,
vmap, factor_index, Z, fids, var_value, var_value_evid,
weight_value, learn_non_evidence):
"""TODO."""
# Identify start and end variable
nvar = variable.shape[0]
start = (shardID * nvar) // nshards
end = ((shardID + 1) * nvar) // nshards
for var_samp in range(start, end):
if variable[var_samp]["isEvidence"] == 4:
# This variable is not owned by this machine
continue
sample_and_sgd(var_samp, step, regularization, reg_param, truncation,
var_copy, weight_copy, weight, variable,
factor, fmap, vmap,
factor_index, Z[shardID], fids[shardID], var_value,
var_value_evid, weight_value, learn_non_evidence)
@jit(nopython=True, cache=True, nogil=True)
def get_factor_id_range(variable, vmap, var_samp, val):
"""TODO."""
varval_off = val
if variable[var_samp]["dataType"] == 0:
varval_off = 0
vtf = vmap[variable[var_samp]["vtf_offset"] + varval_off]
start = vtf["factor_index_offset"]
end = start + vtf["factor_index_length"]
return (start, end)
@jit(nopython=True, cache=True, nogil=True)
def sample_and_sgd(var_samp, step, regularization, reg_param, truncation,
var_copy, weight_copy, weight, variable, factor, fmap,
vmap, factor_index, Z, fids, var_value, var_value_evid,
weight_value, learn_non_evidence):
"""TODO."""
# If learn_non_evidence sample twice.
# The method corresponds to expectation-conjugate descent.
if variable[var_samp]["isEvidence"] != 1:
evidence = draw_sample(var_samp, var_copy, weight_copy,
weight, variable, factor,
fmap, vmap, factor_index, Z,
var_value_evid, weight_value)
# If evidence then store the initial value in a tmp variable
# then sample and compute the gradient.
else:
evidence = variable[var_samp]["initialValue"]
var_value_evid[var_copy][var_samp] = evidence
# Sample the variable
proposal = draw_sample(var_samp, var_copy, weight_copy, weight,
variable, factor, fmap, vmap,
factor_index, Z, var_value, weight_value)
var_value[var_copy][var_samp] = proposal
if not learn_non_evidence and variable[var_samp]["isEvidence"] != 1:
return
# Compute the gradient and update the weights
# Iterate over corresponding factors
range_fids = get_factor_id_range(variable, vmap, var_samp, evidence)
# TODO: is it possible to avoid copying around fids
if evidence != proposal:
range_prop = get_factor_id_range(variable, vmap, var_samp, proposal)
s1 = range_fids[1] - range_fids[0]
s2 = range_prop[1] - range_prop[0]
s = s1 + s2
fids[:s1] = factor_index[range_fids[0]:range_fids[1]]
fids[s1:s] = factor_index[range_prop[0]:range_prop[1]]
fids[:s].sort()
else:
s = range_fids[1] - range_fids[0]
fids[:s] = factor_index[range_fids[0]:range_fids[1]]
truncate = random.random() < 1.0 / truncation if regularization == 1 else False
# go over all factor ids, ignoring dupes
last_fid = -1 # numba 0.28 would complain if this were None
for factor_id in fids[:s]:
if factor_id == last_fid:
continue
last_fid = factor_id
weight_id = factor[factor_id]["weightId"]
if weight[weight_id]["isFixed"]:
continue
# Compute Gradient
p0 = eval_factor(factor_id, var_samp,
evidence, var_copy,
variable, factor, fmap,
var_value_evid)
p1 = eval_factor(factor_id, var_samp,
proposal, var_copy,
variable, factor, fmap,
var_value)
gradient = (p1 - p0) * factor[factor_id]["featureValue"]
# Update weight
w = weight_value[weight_copy][weight_id]
if regularization == 2:
w *= (1.0 / (1.0 + reg_param * step))
w -= step * gradient
elif regularization == 1:
# Truncated Gradient
# "Sparse Online Learning via Truncated Gradient"
# Langford et al. 2009
w -= step * gradient
if truncate:
l1delta = reg_param * step * truncation
w = max(0, w - l1delta) if w > 0 else min(0, w + l1delta)
else:
w -= step * gradient
weight_value[weight_copy][weight_id] = w
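# Worked example of the L2 branch above (illustrative numbers): with w = 1.0,
# step = 0.1, reg_param = 0.01 and gradient = 2.0, the update gives
# w = 1.0 / (1 + 0.001) - 0.1 * 2.0 = 0.799 (to three decimals).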
| numbskull-master | numbskull/learning.py |
"""TODO."""
from .numbskull import main
main()
| numbskull-master | numbskull/__main__.py |
from setuptools import setup
setup(
name='pytorch_radon',
version='0.1.0',
author='Philipp Ernst',
author_email='[email protected]',
packages=['pytorch_radon'],
url='https://github.com/phernst/pytorch_radon.git',
license='MIT',
description='Pytorch implementation of scikit-image\'s radon function and more',
install_requires=[
"torch >= 0.4.0",
],
) | pytorch_radon-master | setup.py |
import unittest
from pytorch_radon.utils import PI, SQRT2, deg2rad
import numpy as np
class TestStackgram(unittest.TestCase):
def test_pi(self):
self.assertAlmostEqual(PI.item(), np.pi, places=6)
def test_sqrt2(self):
self.assertAlmostEqual(SQRT2.item(), np.sqrt(2), places=6)
def test_deg2rad(self):
self.assertAlmostEqual(deg2rad(45).item(), np.deg2rad(45), places=6)
if __name__ == '__main__':
unittest.main() | pytorch_radon-master | tests/test_utils.py |
import unittest
from pytorch_radon import Radon, IRadon
import torch
class TestRadon(unittest.TestCase):
def test_radon_iradon_circle(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = True
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle)
sino = r(img)
reco = ir(sino)
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=4)
def test_radon_iradon_not_circle(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = False
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle)
sino = r(img)
reco = ir(sino)
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=4)
if __name__ == '__main__':
unittest.main()
| pytorch_radon-master | tests/test_radon.py |
pytorch_radon-master | tests/__init__.py |
import unittest
from pytorch_radon import Radon, IRadon, Stackgram, IStackgram
import torch
class TestStackgram(unittest.TestCase):
def test_stackgram_istackgram_circle(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = True
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle)
sg = Stackgram(img.shape[2], theta, circle)
isg = IStackgram(img.shape[2], theta, circle)
reco = ir(isg(sg(r(img))))
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=3)
def test_stackgram_istackgram_not_circle(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = False
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle)
sg = Stackgram(img.shape[2], theta, circle)
isg = IStackgram(img.shape[2], theta, circle)
reco = ir(isg(sg(r(img))))
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=3)
if __name__ == '__main__':
unittest.main() | pytorch_radon-master | tests/test_stackgram.py |
import unittest
from pytorch_radon import Radon, IRadon
from pytorch_radon.filters import RampFilter, HannFilter, LearnableFilter
from pytorch_radon.filters import RampButterflyFilter, HannButterflyFilter
import torch
class TestStackgram(unittest.TestCase):
def test_ramp_filter(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = True
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle, use_filter=RampFilter())
reco = ir(r(img))
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=4)
def test_hann_filter(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = True
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle, use_filter=HannFilter())
reco = ir(r(img))
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=3)
def test_learnable_filter(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = True
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle, use_filter=LearnableFilter(img.shape[2]))
reco = ir(r(img))
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=4)
def test_ramp_butterfly_filter(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = True
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle, use_filter=RampButterflyFilter(img.shape[2]))
reco = ir(r(img))
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=4)
# Check that it's close to using RampFilter
ir_og = IRadon(img.shape[2], theta, circle, use_filter=RampFilter())
reco_og = ir_og(r(img))
self.assertAlmostEqual(torch.nn.MSELoss()(reco, reco_og).item(), 0, places=4)
def test_hann_butterfly_filter(self):
img = torch.zeros(1,1,256,256)
img[:, :, 120:130, 120:130] = 1
circle = True
theta = torch.arange(180)
r = Radon(img.shape[2], theta, circle)
ir = IRadon(img.shape[2], theta, circle, use_filter=HannButterflyFilter(img.shape[2]))
reco = ir(r(img))
self.assertAlmostEqual(torch.nn.MSELoss()(img, reco).item(), 0, places=3)
# Check that it's close to using HannFilter
ir_og = IRadon(img.shape[2], theta, circle, use_filter=HannFilter())
reco_og = ir_og(r(img))
self.assertAlmostEqual(torch.nn.MSELoss()(reco, reco_og).item(), 0, places=4)
if __name__ == '__main__':
unittest.main()
| pytorch_radon-master | tests/test_filters.py |
from .radon import Radon, IRadon
from .stackgram import Stackgram, IStackgram | pytorch_radon-master | pytorch_radon/__init__.py |
import torch
# constants
PI = 4*torch.ones(1).atan()
SQRT2 = (2*torch.ones(1)).sqrt()
def fftfreq(n):
val = 1.0/n
results = torch.zeros(n)
N = (n-1)//2 + 1
p1 = torch.arange(0, N)
results[:N] = p1
p2 = torch.arange(-(n//2), 0)
results[N:] = p2
return results*val
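# Worked example (added for clarity): fftfreq(4) returns
# tensor([ 0.0000,  0.2500, -0.5000, -0.2500]), matching numpy.fft.fftfreq(4):
# the non-negative frequencies come first, followed by the negative frequencies.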
def deg2rad(x):
return x*PI/180
| pytorch_radon-master | pytorch_radon/utils.py |
import torch
from torch import nn
import torch.nn.functional as F
from .utils import PI, SQRT2, deg2rad
from .filters import RampFilter
class Radon(nn.Module):
def __init__(self, in_size, theta=None, circle=True):
super(Radon, self).__init__()
self.circle = circle
self.theta = theta
if theta is None:
self.theta = torch.arange(180)
self.all_grids = self._create_grids(self.theta, in_size if circle else int((SQRT2*in_size).ceil()))
def forward(self, x):
N, C, W, H = x.shape
assert(W==H)
if not self.circle:
diagonal = SQRT2 * W
pad = int((diagonal - W).ceil())
new_center = (W + pad) // 2
old_center = W // 2
pad_before = new_center - old_center
pad_width = (pad_before, pad - pad_before)
x = F.pad(x, (pad_width[0], pad_width[1], pad_width[0], pad_width[1]))
N, C, W, _ = x.shape
L, _, __, _ = self.all_grids.shape
grid = self.all_grids.to(x.device).view(L * W, W, 2).expand(N, -1, -1, -1)
x_sampled = F.grid_sample(x, grid, align_corners=True)
out = x_sampled.view(N, C, L, W, W).sum(dim=3).transpose(-1, -2)
# out = torch.zeros(N, C, W, len(self.theta), device=x.device)
# for i in range(len(self.theta)):
# rotated = F.grid_sample(x, self.all_grids[i].repeat(N, 1, 1, 1).to(x.device), align_corners=False)
# out[...,i] = rotated.sum(2)
return out
def _create_grids(self, angles, grid_size):
rad = deg2rad(angles)
c, s = rad.cos(), rad.sin()
R = torch.stack((torch.stack((c, s, torch.zeros_like(c)), dim=-1),
torch.stack((-s, c, torch.zeros_like(c)), dim=-1)), dim=-2)
return F.affine_grid(R, (R.shape[0], 1, grid_size, grid_size), align_corners=True)
# all_grids = []
# for theta in angles:
# theta = deg2rad(theta)
# R = torch.tensor([[
# [ theta.cos(), theta.sin(), 0],
# [-theta.sin(), theta.cos(), 0],
# ]])
# all_grids.append(F.affine_grid(R, (1, 1, grid_size, grid_size), align_corners=False))
# return all_grids
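# Usage sketch for Radon (added; illustrative only, mirrors the unit tests above):
# for a 256x256 image and 180 projection angles, the forward pass returns a
# sinogram of shape (N, C, detector_size, n_angles):
#
#   img = torch.zeros(1, 1, 256, 256)
#   sino = Radon(256, torch.arange(180), circle=True)(img)  # -> (1, 1, 256, 180)
#
# With circle=False the input is zero-padded to ceil(sqrt(2) * 256) = 363 pixels,
# so the detector axis has length 363 instead of 256.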
class IRadon(nn.Module):
def __init__(self, out_size, theta=None, circle=True, use_filter=RampFilter()):
super(IRadon, self).__init__()
self.circle = circle
self.theta = theta if theta is not None else torch.arange(180)
self.out_size = out_size
self.in_size = in_size = out_size if circle else int((SQRT2*out_size).ceil())
# self.xgrid = torch.arange(in_size).float().view(1,-1).repeat(in_size, 1)*2/(in_size-1)-1
self.xgrid = torch.linspace(-1.0, 1.0, in_size).expand(in_size, -1)
# self.ygrid = torch.arange(in_size).float().view(-1,1).repeat(1, in_size)*2/(in_size-1)-1
self.ygrid = torch.linspace(-1.0, 1.0, in_size).unsqueeze(-1).expand(-1, in_size)
self.all_grids = self._create_grids(self.theta, in_size)
self.filter = use_filter if use_filter is not None else lambda x: x
def forward(self, x):
it_size = x.shape[2]
ch_size = x.shape[1]
x = self.filter(x)
N, C, W, _ = x.shape
L, _, __, _ = self.all_grids.shape
grid = self.all_grids.to(x.device).view(L * W, W, 2).expand(N, -1, -1, -1)
x_sampled = F.grid_sample(x, grid, align_corners=True)
reco = x_sampled.view(N, C, L, W, W).sum(dim=2)
# reco = torch.zeros(x.shape[0], ch_size, it_size, it_size).to(x.device)
# for i_theta in range(len(self.theta)):
# reco += F.grid_sample(x, self.all_grids[i_theta].repeat(reco.shape[0], 1, 1, 1).to(x.device), align_corners=True)
if not self.circle:
W = self.out_size
diagonal = self.in_size
pad = int(torch.tensor(diagonal - W).float().ceil())
new_center = (W + pad) // 2
old_center = W // 2
pad_before = new_center - old_center
pad_width = (pad_before, pad - pad_before)
reco = F.pad(reco, (-pad_width[0], -pad_width[1], -pad_width[0], -pad_width[1]))
if self.circle:
reconstruction_circle = (self.xgrid ** 2 + self.ygrid ** 2) <= 1
# reconstruction_circle = reconstruction_circle.repeat(x.shape[0], ch_size, 1, 1)
# reco[~reconstruction_circle] = 0.
reco[:, :, ~reconstruction_circle] = 0.
return reco*PI.item()/(2*len(self.theta))
# def _XYtoT(self, theta):
# T = self.xgrid*(deg2rad(theta)).cos() - self.ygrid*(deg2rad(theta)).sin()
# return T
def _create_grids(self, angles, grid_size):
X = torch.linspace(-1.0, 1.0, len(angles)).unsqueeze(-1).unsqueeze(-1).expand(-1, grid_size, grid_size)
rad = deg2rad(angles).unsqueeze(-1).unsqueeze(-1)
c, s = rad.cos(), rad.sin()
Y = self.xgrid.unsqueeze(0) * c - self.ygrid * s
return torch.stack((X, Y), dim=-1)
# all_grids = []
# for i_theta in range(len(angles)):
# X = torch.ones(grid_size).float().view(-1,1).repeat(1, grid_size)*i_theta*2./(len(angles)-1)-1.
# Y = self._XYtoT(angles[i_theta])
# all_grids.append(torch.cat((X.unsqueeze(-1), Y.unsqueeze(-1)), dim=-1))
# return torch.stack(all_grids, dim=0)
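# Note (added for clarity): the final PI / (2 * n_angles) scaling follows the usual
# filtered-backprojection convention (as in skimage.transform.iradon). Since the
# default RampFilter is defined as 2*|f|, the net per-angle weight is
#   (pi / (2 * n_angles)) * 2*|f| = (pi / n_angles) * |f|,
# i.e. a Riemann sum over theta in [0, pi) of the ramp-filtered projections.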
| pytorch_radon-master | pytorch_radon/radon.py |
import torch
from torch import nn
import torch.nn.functional as F
from .utils import SQRT2, deg2rad
class Stackgram(nn.Module):
def __init__(self, out_size, theta=None, circle=True, mode='nearest'):
super(Stackgram, self).__init__()
self.circle = circle
self.theta = theta
if theta is None:
self.theta = torch.arange(180)
self.out_size = out_size
self.in_size = in_size = out_size if circle else int((SQRT2*out_size).ceil())
self.all_grids = self._create_grids(self.theta, in_size)
self.mode = mode
def forward(self, x):
stackgram = torch.zeros(x.shape[0], len(self.theta), self.in_size, self.in_size).to(x.device)
for i_theta in range(len(self.theta)):
repline = x[...,i_theta]
repline = repline.unsqueeze(-1).repeat(1,1,1,repline.shape[2])
linogram = F.grid_sample(repline, self.all_grids[i_theta].repeat(x.shape[0],1,1,1).to(x.device), mode=self.mode)
stackgram[:,i_theta] = linogram
return stackgram
def _create_grids(self, angles, grid_size):
all_grids = []
for i_theta in range(len(angles)):
t = deg2rad(angles[i_theta])
R = torch.tensor([[t.sin(), t.cos(), 0.],[t.cos(), -t.sin(), 0.]]).unsqueeze(0)
all_grids.append(F.affine_grid(R, torch.Size([1,1,grid_size,grid_size])))
return all_grids
class IStackgram(nn.Module):
def __init__(self, out_size, theta=None, circle=True, mode='bilinear'):
super(IStackgram, self).__init__()
self.circle = circle
self.theta = theta
if theta is None:
self.theta = torch.arange(180)
self.out_size = out_size
self.in_size = in_size = out_size if circle else int((SQRT2*out_size).ceil())
self.all_grids = self._create_grids(self.theta, in_size)
self.mode = mode
def forward(self, x):
sinogram = torch.zeros(x.shape[0], 1, self.in_size, len(self.theta)).to(x.device)
for i_theta in range(len(self.theta)):
linogram = x[:,i_theta].unsqueeze(1)
repline = F.grid_sample(linogram, self.all_grids[i_theta].repeat(x.shape[0],1,1,1).to(x.device), mode=self.mode)
repline = repline[...,repline.shape[-1]//2]
sinogram[...,i_theta] = repline
return sinogram
def _create_grids(self, angles, grid_size):
all_grids = []
for i_theta in range(len(angles)):
t = deg2rad(angles[i_theta])
R = torch.tensor([[t.sin(), t.cos(), 0.],[t.cos(), -t.sin(), 0.]]).unsqueeze(0)
all_grids.append(F.affine_grid(R, torch.Size([1,1,grid_size,grid_size])))
return all_grids | pytorch_radon-master | pytorch_radon/stackgram.py |
import torch
from torch import nn
import torch.nn.functional as F
from .utils import PI, fftfreq
class AbstractFilter(nn.Module):
def __init__(self):
super(AbstractFilter, self).__init__()
def forward(self, x):
input_size = x.shape[2]
projection_size_padded = \
max(64, int(2 ** (2 * torch.tensor(input_size)).float().log2().ceil()))
pad_width = projection_size_padded - input_size
padded_tensor = F.pad(x, (0,0,0,pad_width))
f = fftfreq(padded_tensor.shape[2]).view(-1, 1).to(x.device)
fourier_filter = self.create_filter(f)
# fourier_filter = fourier_filter.unsqueeze(-1).repeat(1,1,2)
fourier_filter = fourier_filter.unsqueeze(-1)
projection = torch.rfft(padded_tensor.transpose(2,3), 1, onesided=False).transpose(2,3) * fourier_filter
return torch.irfft(projection.transpose(2,3), 1, onesided=False).transpose(2,3)[:,:,:input_size,:]
def create_filter(self, f):
raise NotImplementedError
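# Worked example (added for clarity): for projections of length input_size = 256,
# projection_size_padded = max(64, 2 ** ceil(log2(2 * 256))) = 512, so each
# projection is zero-padded from 256 to 512 samples before filtering in the
# Fourier domain (limiting circular-convolution artifacts) and cropped back to
# the original 256 samples after the inverse transform.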
class RampFilter(AbstractFilter):
def __init__(self):
super(RampFilter, self).__init__()
def create_filter(self, f):
return 2 * f.abs()
class HannFilter(AbstractFilter):
def __init__(self):
super(HannFilter, self).__init__()
def create_filter(self, f):
fourier_filter = 2 * f.abs()
omega = 2*PI*f
fourier_filter *= (1 + (omega / 2).cos()) / 2
return fourier_filter
class LearnableFilter(AbstractFilter):
def __init__(self, filter_size):
super(LearnableFilter, self).__init__()
self.filter = nn.Parameter(2*fftfreq(filter_size).abs().view(-1, 1))
def forward(self, x):
fourier_filter = self.filter.unsqueeze(-1).repeat(1,1,2).to(x.device)
projection = torch.rfft(x.transpose(2,3), 1, onesided=False).transpose(2,3) * fourier_filter
return torch.irfft(projection.transpose(2,3), 1, onesided=False).transpose(2,3)
import sys
# butterfly_root = '../../learning-circuits'
from pathlib import Path
butterfly_root = Path(__file__).absolute().parent.parent.parent / 'learning-circuits'
sys.path.insert(0, str(butterfly_root))
import math
from butterfly import Butterfly
from butterfly.utils import bitreversal_permutation
def fft_twiddle(n, forward=True, normalized=False):
m = int(math.ceil(math.log2(n)))
assert n == 1 << m, 'n must be a power of 2'
factors = []
for log_size in range(1, m + 1):
size = 1 << log_size
angles = -torch.arange(size // 2, dtype=torch.float) / size * 2 * math.pi
if not forward:
angles = -angles
c, s = torch.cos(angles), torch.sin(angles)
real = torch.stack((torch.stack((torch.ones_like(c), c), dim=-1),
torch.stack((torch.ones_like(c), -c), dim=-1)), dim=-2)
imag = torch.stack((torch.stack((torch.zeros_like(s), s), dim=-1),
torch.stack((torch.zeros_like(s), -s), dim=-1)), dim=-2)
twiddle_factor = torch.stack((real, imag), dim=-1)
factors.append(twiddle_factor.repeat(n // size, 1, 1, 1))
twiddle = torch.stack(factors, dim=0).unsqueeze(0)
if normalized: # Divide the whole transform by sqrt(n) by dividing each factor by n^(1/2 log_n) = sqrt(2)
twiddle /= math.sqrt(2)
elif not forward:
twiddle /= 2
return twiddle
def butterfly_transpose_conjugate(twiddle):
twiddle_conj = twiddle.clone()
if twiddle.dim() == 6:
twiddle_conj[..., 1] *= -1 # conjugate
return twiddle_conj.transpose(3, 4)
class ButterflyFilter(AbstractFilter):
def __init__(self, input_size):
super().__init__()
filter_size = \
max(64, int(2 ** (2 * torch.tensor(input_size)).float().log2().ceil()))
self.butterfly_ifft = Butterfly(filter_size, filter_size, complex=True, tied_weight=False, bias=False) # iFFT
self.butterfly_ifft.twiddle = torch.nn.Parameter(fft_twiddle(filter_size, forward=False, normalized=True))
self.butterfly_fft = Butterfly(filter_size, filter_size, complex=True, tied_weight=False, bias=False, increasing_stride=False) # FFT
self.butterfly_fft.twiddle = torch.nn.Parameter(butterfly_transpose_conjugate(self.butterfly_ifft.twiddle))
f = fftfreq(filter_size)
fourier_filter = self.create_filter(f)
br = bitreversal_permutation(filter_size)
self.fourier_filter_br = torch.nn.Parameter(fourier_filter[br])
def forward(self, x):
b, c, input_size, nangles = x.shape
x_reshaped = x.transpose(2, 3).reshape(b * c * nangles, input_size, 1) # Last dimension for complex
projection_size_padded = \
max(64, int(2 ** (2 * torch.tensor(input_size)).float().log2().ceil()))
pad_width = projection_size_padded - input_size
padded_tensor = F.pad(x_reshaped, (0, 1, 0, pad_width))
projection = self.butterfly_ifft(self.butterfly_fft(padded_tensor) * self.fourier_filter_br.unsqueeze(-1))[:, :input_size, 0]
        # undo the (b, c, nangles, input_size) flattening applied above before restoring the (b, c, input_size, nangles) sinogram layout
        return projection.reshape(b, c, nangles, input_size).transpose(2, 3)
class RampButterflyFilter(ButterflyFilter, RampFilter):
pass
class HannButterflyFilter(ButterflyFilter, HannFilter):
pass
# n = 1024
# b = Butterfly(n, n, complex=True, tied_weight=False, bias=False) # iFFT
# b.twiddle = torch.nn.Parameter(fft_twiddle(n, forward=False, normalized=True))
# b_t = Butterfly(n, n, complex=True, tied_weight=False, bias=False, increasing_stride=False) # FFT
# b_t.twiddle = torch.nn.Parameter(butterfly_transpose_conjugate(b.twiddle))
# br = bitreversal_permutation(n)
# v = torch.randn(1, n, 2)
# v_if = torch.ifft(v, 1, normalized=True)
# v_b = b(v[:, br])
# print((v_if - v_b).abs().max())
# v_f = torch.fft(v, 1, normalized=True)
# v_bt = b_t(v)[:, br]
# print((v_f - v_bt).abs().max())
# h_f = torch.randn(n)
# v_conv_h = torch.ifft(torch.fft(v, 1) * h_f.unsqueeze(-1), 1)
# v_conv_h_butterfly = b(b_t(v) * h_f[br].unsqueeze(-1))
# print((v_conv_h - v_conv_h_butterfly).abs().max())
# twiddle = b.twiddle.clone()
# factor = twiddle[:, 0].transpose(2, 3).reshape(-1, n, 2, 2) * h_f[br].unsqueeze(-1).unsqueeze(-1)
# factor = factor.view(twiddle[:, 0].shape).transpose(2, 3)
# twiddle[:, 0] = factor
# b_h = Butterfly(n, n, complex=True, tied_weight=False, bias=False)
# b_h.twiddle = torch.nn.Parameter(twiddle)
# v_conv_h_bbt = b_h(b_t(v))
# print((v_conv_h - v_conv_h_bbt).abs().max())
| pytorch_radon-master | pytorch_radon/filters.py |
from setuptools import find_packages, setup
setup(
name="tabi",
version="0.0.1",
author="Megan Leszczynski",
author_email="[email protected]",
packages=find_packages(),
)
| tabi-main | setup.py |
import argparse
from tabi.utils.utils import str2bool
parser = argparse.ArgumentParser(add_help=False)
general_args = parser.add_argument_group("general_args")
general_args.add_argument(
"--verbose", type=str2bool, default="False", help="Print debug information"
)
general_args.add_argument(
"--distributed",
type=str2bool,
default="False",
help="Use distributed data parallel",
)
general_args.add_argument(
"--local_rank",
type=int,
default=-1,
help="Local rank. Provided by pytorch torch.distributed.launch script.",
)
general_args.add_argument(
"--log_dir", type=str, required=True, help="Directory to save log and outputs"
)
general_args.add_argument(
"--num_workers", type=int, default=4, help="Number of dataloader workers"
)
general_args.add_argument(
"--gpu", type=int, default=0, help="Device to use (-1 if CPU)"
)
general_args.add_argument("--batch_size", type=int, default=32, help="Batch size")
general_args.add_argument("--seed", type=int, default=1234, help="Seed for training")
general_args.add_argument("--type_file", type=str, help="List of types")
model_args = parser.add_argument_group("model_args")
model_args.add_argument(
"--tied",
type=str2bool,
default="True",
help="Tie mention and entity encoder weights",
)
model_args.add_argument("--temperature", type=float, default=0.1, help="Temperature")
model_args.add_argument(
"--model_name",
type=str,
default="bert-base-uncased",
help="Transformer model for initialization",
)
model_args.add_argument(
"--tokenizer_name",
type=str,
default="bert-base-uncased",
help="Transformer tokenizer",
)
model_args.add_argument(
"--normalize",
type=str2bool,
default=True,
help="Use L2 normalization for entity and mention embs. If using normalization, a lower temperature value (i.e. 0.1) is recommended.",
)
model_args.add_argument(
"--add_entity_type_in_description",
type=str2bool,
default="False",
help="Add the entity type in the entity encoding",
)
model_args.add_argument(
"--max_entity_length",
type=int,
default=128,
help="Max numbers of tokens for entity",
)
model_args.add_argument(
"--max_context_length",
type=int,
default=32,
help="Max number of tokens for mention context",
)
| tabi-main | tabi/config.py |
ENT_START = "[unused1]"
MENTION_START = "[unused2]"
MENTION_END = "[unused3]"
| tabi-main | tabi/constants.py |
tabi-main | tabi/__init__.py |
|
"""Extract entity embeddings from a trained biencoder model."""
import argparse
import logging
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from tabi.config import parser
from tabi.data import EntityDataset
from tabi.models.biencoder import Biencoder
from tabi.utils.train_utils import set_random_seed
from tabi.utils.utils import (
load_model,
log_setup,
move_dict,
save_entity_map,
set_device,
)
logger = logging.getLogger()
# we only use DataParallel with entity extraction
# to avoid merging back together embeddings across processes with DistributedDataParallel
def main(args):
# setup log directory and logger
log_setup(args)
# set seed and device
set_random_seed(args.seed)
set_device(args)
# datasets and dataloaders
logger.info("Loading entity dataset...")
dataset = EntityDataset(
entity_path=args.entity_file,
add_entity_type_in_description=args.add_entity_type_in_description,
max_entity_length=args.max_entity_length,
tokenized_entity_data=args.tokenized_entity_data,
tokenizer_name=args.tokenizer_name,
type_path=args.type_file,
)
dataloader = DataLoader(
dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=False
)
# init model
model = Biencoder(
tied=args.tied,
model_name=args.model_name,
normalize=args.normalize,
temperature=args.temperature,
)
embed_dim = model.dim
# load saved model weights
if args.model_checkpoint is not None:
load_model(
model_checkpoint=args.model_checkpoint, device=args.device, model=model
)
if args.distributed:
logger.info(f"Using DataParallel with {torch.cuda.device_count()} GPUs")
model = nn.DataParallel(model)
model = model.to(args.device)
# save entity embeddings to memory mapped file
emb_path = os.path.join(args.log_dir, args.entity_emb_path)
logger.info(f"Saving entity embeddings to {emb_path}.")
mmap_file = np.memmap(
emb_path, dtype="float32", mode="w+", shape=(len(dataset), embed_dim)
)
model.eval()
entity_ids = []
with torch.no_grad():
with tqdm(total=len(dataloader), unit="ex") as bar:
for step, batch in enumerate(dataloader):
entity_emb_batch = model(
entity_data=move_dict(batch["sample"], args.device)
)["entity_embs"]
start_idx = step * args.batch_size
end_idx = start_idx + args.batch_size
mmap_file[start_idx:end_idx] = entity_emb_batch.cpu().numpy()
entity_ids.extend(batch["entity_id"].tolist())
bar.update(1)
mmap_file.flush()
# keep track of embedding idx to entity_id mapping
id_map_path = os.path.join(args.log_dir, args.entity_map_file)
save_entity_map(id_map_path, entity_ids)
if __name__ == "__main__":
# add arguments specific to entity extraction to parser
parser = argparse.ArgumentParser(parents=[parser])
entity_args = parser.add_argument_group("entity_args")
entity_args.add_argument("--entity_file", type=str, required=True)
entity_args.add_argument("--model_checkpoint", type=str, required=True)
entity_args.add_argument("--entity_emb_path", type=str, default="embs.npy")
entity_args.add_argument("--entity_map_file", type=str, default="entity_map.pkl")
entity_args.add_argument(
"--tokenized_entity_data",
type=str,
help="File path for memory mapped entity data",
)
args = parser.parse_args()
main(args)
| tabi-main | tabi/extract_entity.py |
"""Preprocesses entity data into memory mapped file to reduce memory usage by dataloaders."""
import argparse
import logging
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from tabi.config import parser
from tabi.data import EntityDataset
from tabi.utils.train_utils import set_random_seed
from tabi.utils.utils import get_mmap_type, log_setup, save_entity_map, set_device
torch.multiprocessing.set_sharing_strategy("file_system")
os.environ["TOKENIZERS_PARALLELISM"] = "true"
logger = logging.getLogger()
def main(args):
# setup log directory and logger
log_setup(args)
# set seed and device
set_random_seed(args.seed)
set_device(args)
# datasets and dataloaders
logger.info("Loading entity dataset...")
dataset = EntityDataset(
entity_path=args.entity_file,
add_entity_type_in_description=args.add_entity_type_in_description,
max_entity_length=args.max_entity_length,
tokenizer_name=args.tokenizer_name,
type_path=args.type_file,
)
dataloader = DataLoader(
dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=False
)
# store tokenized results in memory mapped file
entity_type = get_mmap_type(args.max_entity_length)
mmap_file_path = os.path.join(args.log_dir, args.entity_memmap_file)
logger.info(f"Saving tokenized entity data to {mmap_file_path}.")
mmap_file = np.memmap(
mmap_file_path, dtype=entity_type, mode="w+", shape=len(dataset)
)
entity_ids = []
with tqdm(total=len(dataloader), unit="ex", desc="Extracting") as bar:
for step, batch in enumerate(dataloader):
entity_batch_data = batch["sample"]
start_idx = step * args.batch_size
end_idx = start_idx + args.batch_size
mmap_file[start_idx:end_idx]["input_ids"] = entity_batch_data["input_ids"]
mmap_file[start_idx:end_idx]["attention_mask"] = entity_batch_data[
"attention_mask"
]
mmap_file[start_idx:end_idx]["token_type_ids"] = entity_batch_data[
"token_type_ids"
]
entity_ids.extend(batch["entity_id"].tolist())
bar.update(1)
mmap_file.flush()
# keep track of embedding idx to entity_id mapping
id_map_path = os.path.join(args.log_dir, args.entity_map_file)
save_entity_map(id_map_path, entity_ids)
if __name__ == "__main__":
parser = argparse.ArgumentParser(parents=[parser])
entity_args = parser.add_argument_group("entity_args")
entity_args.add_argument("--entity_file", type=str, required=True)
entity_args.add_argument("--entity_map_file", type=str, default="entity_map.pkl")
entity_args.add_argument(
"--entity_memmap_file", type=str, default="entity_data.npy"
)
args = parser.parse_args()
main(args)
| tabi-main | tabi/preprocess_entity.py |
"""Train biencoder model for entity retrieval."""
import argparse
import logging
import math
import os
import time
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from tabi.config import parser
from tabi.data import EntityLinkingDataset
from tabi.models.biencoder import Biencoder
from tabi.utils.train_utils import get_bert_optimizer, set_random_seed
from tabi.utils.utils import load_model, log_setup, move_dict, set_device, str2bool
logger = logging.getLogger()
def train(args, model, dataloader, optimizer, tb_writer, epoch, global_step):
"""Train for one epoch"""
model.train()
total_loss = 0.0
loss_fn = model.module.loss if args.distributed else model.loss
log_every_k_steps = max(1, int(args.log_freq * len(dataloader)))
logger.info(f"Logging every {log_every_k_steps} steps")
with tqdm(total=len(dataloader), unit="ex", desc="Training") as bar:
for step, batch in enumerate(dataloader):
start = time.time()
embs = model(
entity_data=move_dict(batch["entity"], args.device),
context_data=move_dict(batch["context"], args.device),
)
loss = loss_fn(
query_embs=embs["query_embs"],
entity_embs=embs["entity_embs"],
query_type_labels=batch["query_type_labels"].to(args.device),
query_entity_labels=batch["query_entity_labels"].to(args.device),
entity_labels=batch["entity_labels"].to(args.device),
)
optimizer.zero_grad()
loss["total_loss"].backward()
optimizer.step()
bar.update(1)
bar.set_postfix(loss=f'{loss["total_loss"].item():.6f}')
total_loss += loss["total_loss"].item()
if (step + 1) % log_every_k_steps == 0:
logger.info(
f"Epoch: {epoch} [{step}/{len(dataloader)}] | loss: {round(loss['total_loss'].item(), 4)} | lr: {optimizer.param_groups[0]['lr']} | {round(time.time()-start, 4)}s/batch"
)
if args.local_rank in [-1, 0]:
tb_writer.add_scalar(
"total_loss/train/step",
loss["total_loss"].item(),
global_step + step,
)
tb_writer.add_scalar(
"ent_loss/train/step", loss["ent_loss"].item(), global_step + step
)
tb_writer.add_scalar(
"type_loss/train/step", loss["type_loss"].item(), global_step + step
)
avg_loss = total_loss / (step + 1.0)
return avg_loss
def eval(args, model, dataloader):
"""Eval over entities in batch"""
model.eval()
total_loss = 0.0
loss_fn = model.module.loss if args.distributed else model.loss
with torch.no_grad():
with tqdm(total=len(dataloader), unit="ex", desc="Evaluating") as bar:
for step, batch in enumerate(dataloader):
embs = model(
entity_data=move_dict(batch["entity"], args.device),
context_data=move_dict(batch["context"], args.device),
)
loss = loss_fn(
query_embs=embs["query_embs"],
entity_embs=embs["entity_embs"],
query_type_labels=batch["query_type_labels"].to(args.device),
query_entity_labels=batch["query_entity_labels"].to(args.device),
entity_labels=batch["entity_labels"].to(args.device),
)
bar.update(1)
bar.set_postfix(loss=f'{loss["total_loss"].item():.6f}')
total_loss += loss["total_loss"]
avg_loss = total_loss.item() / (step + 1.0)
return avg_loss
def main(args):
# setup log directory and logger
log_setup(args)
# setup tensorboard: only first process in distributed logs to tensorboard
tb_writer = None
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(log_dir=args.log_dir)
# set seed and device
set_random_seed(args.seed)
set_device(args)
# prepare datasets and dataloaders
logger.info("Preparing train dataset...")
train_dataset = EntityLinkingDataset(
data_path=args.train_data_file,
entity_path=args.train_entity_file,
neg_sample_file=args.train_neg_sample_file,
num_negatives=args.num_negatives,
add_entity_type_in_description=args.add_entity_type_in_description,
max_context_length=args.max_context_length,
max_entity_length=args.max_entity_length,
tokenized_entity_data=args.tokenized_entity_data,
tokenizer_name=args.tokenizer_name,
type_path=args.type_file,
)
logger.info("Preparing dev dataset...")
dev_dataset = EntityLinkingDataset(
data_path=args.dev_data_file,
entity_path=args.dev_entity_file,
neg_sample_file=args.dev_neg_sample_file,
num_negatives=args.num_negatives,
add_entity_type_in_description=args.add_entity_type_in_description,
max_context_length=args.max_context_length,
max_entity_length=args.max_entity_length,
tokenized_entity_data=args.tokenized_entity_data,
tokenizer_name=args.tokenizer_name,
type_path=args.type_file,
)
# make sure each process only gets its portion of the dataset
if args.distributed:
# necessary to set seed here or else seed will be default distributed seed (zero)
# and data will not change ordering wrt seed argument
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, seed=args.seed, drop_last=True
)
dev_sampler = torch.utils.data.distributed.DistributedSampler(
dev_dataset, seed=args.seed, drop_last=True
)
else:
train_sampler = None
dev_sampler = None
train_dataloader = DataLoader(
train_dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last=True,
)
dev_dataloader = DataLoader(
dev_dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=(dev_sampler is None),
sampler=dev_sampler,
drop_last=True,
)
# init model
model = Biencoder(
tied=args.tied,
model_name=args.model_name,
normalize=args.normalize,
temperature=args.temperature,
is_distributed=args.local_rank > -1,
alpha=args.alpha,
)
model = model.to(args.device)
# optimizer
optimizer = get_bert_optimizer(model, learning_rate=args.lr)
# lr scheduler
if args.lr_scheduler_type == "step":
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=1, gamma=0.5 # each epoch
)
else:
lr_scheduler = None
# load saved model weights and load optimizer/scheduler state dicts
global_step = 0
starting_epoch = 0
if args.model_checkpoint is not None:
model_ckpt_stats = load_model(
model_checkpoint=args.model_checkpoint,
device=args.device,
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
)
global_step = model_ckpt_stats["global_step"]
starting_epoch = model_ckpt_stats["epoch"]
if lr_scheduler is None:
for g in optimizer.param_groups:
g["lr"] = args.lr
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.device], find_unused_parameters=True
)
# train loop
best_dev_loss = float("inf")
for epoch in range(starting_epoch, starting_epoch + args.n_epochs):
# required for determinism across runs with distributed
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
avg_train_loss = train(
args,
model=model,
dataloader=train_dataloader,
optimizer=optimizer,
tb_writer=tb_writer,
epoch=epoch,
global_step=global_step,
)
global_step += len(train_dataloader)
if args.lr_scheduler_type == "step":
lr_scheduler.step()
if lr_scheduler is not None:
logger.info(
f"Epoch {epoch} | average train loss: {round(avg_train_loss, 6)} | lr: {lr_scheduler.get_last_lr()[0]}"
)
else:
logger.info(
f"Epoch {epoch} | average train loss: {round(avg_train_loss, 6)} | lr: {optimizer.param_groups[0]['lr']}"
)
logger.info(f"Epoch {epoch} | average train loss: {round(avg_train_loss, 6)}")
# evaluate on dev set
avg_dev_loss = eval(args, model=model, dataloader=dev_dataloader)
logger.info(f"Epoch {epoch} | average dev loss: {round(avg_dev_loss, 6)}")
# log to tensorboard
if args.local_rank in [-1, 0]:
tb_writer.add_scalar("Loss/train/epoch", avg_train_loss, epoch)
tb_writer.add_scalar("Loss/dev/epoch", avg_dev_loss, epoch)
# save model at the end of each epoch
if args.local_rank in [-1, 0]:
ckpt_path = os.path.join(args.log_dir, f"model_epoch{epoch+1}.pth")
torch.save(
{
"epoch": epoch,
"global_step": global_step,
"optimizer": optimizer.state_dict(),
"train_loss": avg_train_loss,
"dev_loss": avg_dev_loss,
"state_dict": model.state_dict(),
"lr_scheduler": lr_scheduler.state_dict()
if lr_scheduler is not None
else None,
"rng_cpu": torch.get_rng_state(),
"rng_gpu": torch.cuda.get_rng_state()
if args.device != "cpu"
else None,
},
ckpt_path,
)
# keep track of best dev score
if avg_dev_loss < best_dev_loss and args.local_rank in [-1, 0]:
ckpt_path = os.path.join(args.log_dir, "best_model.pth")
logger.info(f"Dev loss improved. Saving checkpoint to {ckpt_path}.")
torch.save(
{
"epoch": epoch,
"global_step": global_step,
"optimizer": optimizer.state_dict(),
"train_loss": avg_train_loss,
"dev_loss": avg_dev_loss,
"state_dict": model.state_dict(),
"lr_scheduler": lr_scheduler.state_dict()
if lr_scheduler is not None
else None,
"rng_cpu": torch.get_rng_state(),
"rng_gpu": torch.cuda.get_rng_state()
if args.device != "cpu"
else None,
},
ckpt_path,
)
best_dev_loss = avg_dev_loss
logger.info("Finished training!")
if args.local_rank in [-1, 0]:
# save last ckpt
last_ckpt_path = os.path.join(args.log_dir, "last_model.pth")
logger.info(f"Saving last model checkpoint to {last_ckpt_path}.")
torch.save(
{
"epoch": epoch,
"global_step": global_step,
"optimizer": optimizer.state_dict(),
"train_loss": avg_train_loss,
"dev_loss": avg_dev_loss,
"state_dict": model.state_dict(),
"lr_scheduler": lr_scheduler.state_dict()
if lr_scheduler is not None
else None,
"rng_cpu": torch.get_rng_state(),
"rng_gpu": torch.cuda.get_rng_state() if args.device != "cpu" else None,
},
last_ckpt_path,
)
if args.distributed:
# tear down the process group
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser(parents=[parser])
training_args = parser.add_argument_group("training_args")
training_args.add_argument("--lr", type=float, default=1e-5, help="Learning rate")
training_args.add_argument(
"--n_epochs",
type=int,
default=1,
help="Maximum number of (new) epochs to train (on top of starting epoch).",
)
training_args.add_argument(
"--num_negatives",
type=int,
default=5,
help="Number of hard negative samples to use for training",
)
training_args.add_argument(
"--model_checkpoint", type=str, help="Model checkpoint to continue training"
)
training_args.add_argument(
"--log_freq", type=float, default=0.1, help="Fraction of an epoch to log"
)
training_args.add_argument(
"--alpha", type=float, default=0.1, help="Alpha for weighting type loss"
)
training_args.add_argument(
"--lr_scheduler_type",
type=str,
help="LR scheduler: step or leave empty for no LR scheduler",
)
data_args = parser.add_argument_group("training_data_args")
data_args.add_argument("--train_data_file", type=str)
data_args.add_argument("--dev_data_file", type=str)
data_args.add_argument("--train_entity_file", type=str)
data_args.add_argument("--dev_entity_file", type=str)
data_args.add_argument(
"--train_neg_sample_file",
type=str,
help="File path for negative samples for train dataset",
)
data_args.add_argument(
"--dev_neg_sample_file",
type=str,
help="File path for negative samples for dev dataset",
)
data_args.add_argument(
"--tokenized_entity_data",
type=str,
help="File path for memory mapped entity data",
)
args = parser.parse_args()
# if using hard negatives, adjust the batch size
args.orig_batch_size = args.batch_size
if args.train_neg_sample_file is not None:
args.batch_size = 2 ** int(
math.log2(args.batch_size / (args.num_negatives + 1))
)
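    # Worked example (added for clarity): with --batch_size 128 and --num_negatives 5,
    # 128 / (5 + 1) ~= 21.3 queries fit per step, which is rounded down to the nearest
    # power of two: 2 ** int(log2(21.3)) = 16. Each query contributes one gold entity
    # plus 5 hard negatives, i.e. 96 entity encodings per batch.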
# setup distributed
args.ngpus_per_node = 1
if args.distributed:
dist.init_process_group(backend="nccl")
        logger.info(
            f"[{os.getpid()}]: world_size = {dist.get_world_size()}, "
            + f"rank = {dist.get_rank()}, backend={dist.get_backend()}"
        )
# update batch size and number of workers for DistributedDataParallel
# assumes we are using a single GPU per process
args.ngpus_per_node = torch.cuda.device_count()
args.batch_size = args.batch_size // args.ngpus_per_node
main(args)
| tabi-main | tabi/train.py |
"""Retrieve candidates for evaluation or hard negative sampling."""
import argparse
import logging
import os
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
from tqdm import tqdm
from tabi.config import parser
from tabi.data import EntityLinkingDataset
from tabi.models.biencoder import Biencoder
from tabi.utils.train_utils import set_random_seed
from tabi.utils.utils import (
combine_negs,
combine_preds,
load_model,
log_setup,
move_dict,
set_device,
str2bool,
write_neg_samples,
write_preds,
)
logger = logging.getLogger()
def main(args):
# setup log directory and logger
log_setup(args)
# set seed and device
set_random_seed(args.seed)
set_device(args)
# datasets and dataloaders
logger.info("Preparing dataset...")
top_k = args.top_k if args.mode == "eval" else args.orig_num_negatives
dataset = EntityLinkingDataset(
data_path=args.test_data_file,
entity_path=args.entity_file,
top_k=top_k,
tokenize_entities=False,
max_context_length=args.max_context_length,
max_entity_length=args.max_entity_length,
tokenizer_name=args.tokenizer_name,
type_path=args.type_file,
is_eval=args.mode == "eval",
)
# make sure each process only gets its portion of the dataset
if args.distributed:
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, shuffle=False
)
else:
sampler = None
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=False,
sampler=sampler,
)
# init model
model = Biencoder(
tied=args.tied,
entity_emb_path=args.entity_emb_path,
top_k=top_k,
model_name=args.model_name,
normalize=args.normalize,
temperature=args.temperature,
)
# load saved model weights
if args.model_checkpoint is not None:
load_model(
model_checkpoint=args.model_checkpoint, device=args.device, model=model
)
model = model.to(args.device)
# entity embs aren't parameters of the model so need to be moved separately
model.entity_embs = model.entity_embs.to(args.device)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.device], find_unused_parameters=True
)
# run evaluation
model.eval()
predictions = []
predict_fn = model.module.predict if args.distributed else model.predict
with torch.no_grad():
with tqdm(total=len(dataloader), unit="ex", desc="Running model") as bar:
for batch in dataloader:
prediction = predict_fn(
context_data=move_dict(batch["context"], args.device),
data_id=batch["data_id"],
)
predictions.append(prediction)
bar.update(1)
if args.mode == "eval":
# save predictions as jsonlines
pred_file = os.path.join(
args.log_dir, args.pred_file.split(".jsonl")[0] + f"_{args.device if args.device != 'cpu' else 0}.jsonl"
)
write_preds(
preds=predictions,
dataset=dataset,
pred_file=pred_file,
entity_map_file=args.entity_map_file,
)
# make sure all processes write their predictions
if args.distributed:
torch.distributed.barrier()
# let first process combine predictions across processes into a single file
if args.local_rank in [-1, 0]:
combine_preds(
args.log_dir,
os.path.join(args.log_dir, args.pred_file),
num_gpus=dist.get_world_size() if args.distributed else 1,
)
elif args.mode == "neg_sample":
# save negative samples to file
neg_sample_file = os.path.join(
args.log_dir,
args.neg_sample_file.split(".json")[0] + f"_{args.device if args.device != 'cpu' else 0}.json",
)
write_neg_samples(
preds=predictions,
dataset=dataset,
entity_map_file=args.entity_map_file,
neg_sample_file=neg_sample_file,
num_negative_samples=args.orig_num_negatives,
)
if args.distributed:
# wait for all devices to generate their negative samples
torch.distributed.barrier()
# let first process combine negative samples across processes into a single file
if args.local_rank in [-1, 0]:
if args.filter_negatives:
combine_negs(
log_dir=args.log_dir,
neg_sample_file=os.path.join(args.log_dir, args.neg_sample_file),
num_gpus=dist.get_world_size() if args.distributed else 1,
use_filter=args.filter_negatives,
entity_cache=dataset.entity_cache,
ent_counter=dataset.ent_counter,
# top_k here is the number of negatives we want after filter
total_num_samples=args.top_k,
mult_factor=args.mult_factor,
)
else:
combine_negs(
log_dir=args.log_dir,
neg_sample_file=os.path.join(args.log_dir, args.neg_sample_file),
num_gpus=dist.get_world_size() if args.distributed else 1,
use_filter=args.filter_negatives,
)
if args.distributed:
# TODO: automatically remove all extra negative sample files that are generated
torch.distributed.barrier()
# tear down the process group
dist.destroy_process_group()
if __name__ == "__main__":
# add arguments specific to eval to parser
parser = argparse.ArgumentParser(parents=[parser])
eval_args = parser.add_argument_group("eval_args")
eval_args.add_argument("--test_data_file", type=str, required=True)
eval_args.add_argument("--entity_file", type=str, required=True)
eval_args.add_argument("--model_checkpoint", type=str, required=True)
eval_args.add_argument("--entity_emb_path", type=str, required=True)
eval_args.add_argument("--entity_map_file", type=str)
eval_args.add_argument(
"--mode", type=str, default="eval", choices=["eval", "neg_sample"]
)
eval_args.add_argument("--pred_file", type=str, default="preds.jsonl")
eval_args.add_argument("--neg_sample_file", type=str, default="neg_samples.json")
eval_args.add_argument(
"--top_k",
type=int,
default=10,
help="Number of candidates to retrieve for each mention",
)
eval_args.add_argument(
"--filter_negatives",
type=str2bool,
default=False,
help="Whether to filter negatives by entity count",
)
# eval_args.add_argument("--popularity_file", type=str, help="File with entity counts for filtering negatives")
eval_args.add_argument(
"--mult_factor",
type=int,
default=10,
help="Multiplicative factor for ratio of neg:pos in negative sample filter, e.g. up to 10 negatives for every 1 positive.",
)
eval_args.add_argument(
"--orig_num_negatives",
type=int,
default=20,
help="Number of negatives to fetch for filtering",
)
args = parser.parse_args()
# setup distributed
# recommend using distributed for neg_sample mode but not for eval mode
# for distributed eval, the accuracy metrics are computed separately over each portion of the dataset
if args.distributed:
dist.init_process_group(backend="nccl")
        logger.info(
            f"[{os.getpid()}]: world_size = {dist.get_world_size()}, "
            + f"rank = {dist.get_rank()}, backend={dist.get_backend()}"
        )
# update batch size and number of workers for DistributedDataParallel
# assumes we are using a single GPU per process
ngpus_per_node = torch.cuda.device_count()
args.batch_size = args.batch_size // ngpus_per_node
main(args)
| tabi-main | tabi/eval.py |
import json
import logging
import os
from collections import defaultdict
from typing import Optional
import numpy as np
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer
from transformers import logging as tf_logging
import tabi.utils.data_utils as data_utils
import tabi.utils.utils as utils
from tabi.constants import ENT_START, MENTION_END, MENTION_START
logger = logging.getLogger(__name__)
import warnings
warnings.filterwarnings("ignore", message=".*The given NumPy array is not writeable.*")
# suppress warnings from huggingface
tf_logging.set_verbosity_error()
class EntityDataset(torch.utils.data.Dataset):
def __init__(
self,
entity_path: Optional[str] = None,
max_entity_length: int = 128,
add_entity_type_in_description: bool = False,
tokenized_entity_data: Optional[str] = None,
tokenizer_name: str = "bert-base-uncased",
type_path: Optional[str] = None,
) -> None:
super().__init__()
# hyperparameters
self.max_entity_length = max_entity_length
self.add_entity_type_in_description = add_entity_type_in_description
# load entity file
logger.debug("Loading entity data...")
self.entity_cache = data_utils.load_entity_data(entity_path)
self.entity_ids = np.array(list(self.entity_cache.keys()))
self.entity_ids.flags.writeable = False
# get typename to id map to pass types by id in batch
# type ids are used for masking in loss function
if type_path is not None:
self.all_types = np.array(data_utils.load_types(type_path))
self.all_types.flags.writeable = False
self.type_vocab = {name: i for i, name in enumerate(self.all_types)}
self.eid2row = {eid: row_id for row_id, eid in enumerate(self.entity_ids)}
logger.debug("Finished loading entity data!")
# load tokenized entity data if available
self.tokenized_entity_data = None
if tokenized_entity_data is not None:
logger.info(
f"Loading preprocessed entity data from {tokenized_entity_data}."
)
# load read only memory mapped file of tokenized entity data
self.tokenized_entity_data = np.memmap(
tokenized_entity_data,
mode="r",
shape=len(self.entity_ids),
dtype=utils.get_mmap_type(max_entity_length),
)
# map all types to a read only numpy array
if not os.path.exists(data_utils.get_prepped_type_file(entity_path)):
self.all_one_hot_types = np.zeros(
(len(self.entity_ids), len(self.all_types))
)
for e in self.entity_cache:
ent_typeids = [
self.type_vocab[t] for t in self.entity_cache[e]["types"]
]
if len(ent_typeids) > 0:
row_id = self.eid2row[e]
self.all_one_hot_types[row_id] = torch.sum(
F.one_hot(
torch.tensor(ent_typeids),
num_classes=len(self.all_types),
),
dim=0,
)
type_mmap = np.memmap(
data_utils.get_prepped_type_file(entity_path),
mode="w+",
shape=self.all_one_hot_types.shape,
)
type_mmap[:] = self.all_one_hot_types
else:
self.all_one_hot_types = np.memmap(
data_utils.get_prepped_type_file(entity_path), mode="r"
).reshape(len(self.entity_ids), len(self.all_types))
self.all_one_hot_types.flags.writeable = False
# no longer need entity cache
self.entity_cache = None
# set up tokenizer
logger.info(f"Using tokenizer: {tokenizer_name}")
# use local files unless not present
try:
self.tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name, local_files_only=True
)
        except Exception:
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
self.tokenizer.add_special_tokens(
{"additional_special_tokens": [ENT_START, MENTION_START, MENTION_END]}
)
def __len__(self) -> int:
return len(self.entity_ids)
def __getitem__(self, index):
entity_id = self.entity_ids[index]
# get entity tokens from preprocessed entity data
if self.tokenized_entity_data is not None:
entity_tokens = self.tokenized_entity_data[index]
# convert memory mapped format to standard dict format
entity_tokens = {
key: entity_tokens[key] for key in entity_tokens.dtype.names
}
# tokenize entity data on the fly
else:
entity_data = self.entity_cache[entity_id]
entity_tokens = self.get_entity_tokens(entity_data)
entity_tokens = {k: v[0] for k, v in entity_tokens.items()}
return {"sample": entity_tokens, "entity_id": entity_id}
def get_entity_tokens(self, entity):
title = entity["title"]
entity_description = entity["description"]
if self.add_entity_type_in_description:
type_str = utils.get_type_str(entity["types"])
ent_str = (
title
+ " "
+ ENT_START
+ " "
+ type_str
+ " "
+ ENT_START
+ " "
+ entity_description
)
else:
ent_str = title + " " + ENT_START + " " + entity_description
inputs = self.tokenizer(
ent_str,
padding="max_length",
add_special_tokens=True,
return_tensors="pt",
truncation=True,
max_length=self.max_entity_length,
)
return inputs
def get_types(self, ent):
if self.tokenized_entity_data is None:
one_hot_types = data_utils.convert_types_to_onehot(
types=self.entity_cache[ent]["types"], type_vocab=self.type_vocab
)
else:
one_hot_types = self.all_one_hot_types[self.eid2row[ent]]
return np.array(one_hot_types)
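# Example of the entity string built by get_entity_tokens (added; the values are
# illustrative): with add_entity_type_in_description=True, an entity titled "Paris"
# with type "/location/city" and description "Capital of France" is encoded as
#   "Paris [unused1] location city [unused1] Capital of France"
# where [unused1] is ENT_START; without types it is simply
#   "Paris [unused1] Capital of France".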
class EntityLinkingDataset(EntityDataset):
def __init__(
self,
max_entity_length: int = 128,
max_context_length: int = 64,
data_path: str = "",
entity_path: str = "",
neg_sample_file: Optional[str] = None,
num_negatives: int = 0,
add_entity_type_in_description: bool = False,
top_k: int = 10,
tokenize_entities: bool = True,
tokenized_entity_data: Optional[str] = None,
tokenizer_name: str = "bert-base-uncased",
type_path: Optional[str] = None,
is_eval: bool = False,
) -> None:
super().__init__(
max_entity_length=max_entity_length,
entity_path=entity_path,
add_entity_type_in_description=add_entity_type_in_description,
tokenized_entity_data=tokenized_entity_data,
tokenizer_name=tokenizer_name,
type_path=type_path,
)
# hyperparameters
self.eval = is_eval
self.max_context_length = max_context_length
self.top_k = top_k
self.tokenize_entities = tokenize_entities
# load context files
logger.debug("Loading context data...")
self.data, self.ent_counter = data_utils.load_data(data_path)
logger.debug("Finished loading context data!")
# load hard negative samples
self.neg_sample_file = neg_sample_file
self.num_negatives = num_negatives
self.neg_samples = None
if self.num_negatives > 0 and self.neg_sample_file is not None:
logger.info(
f"Using {self.num_negatives} hard negatives from {self.neg_sample_file}"
)
self.neg_samples = data_utils.load_neg_samples(
self.neg_sample_file,
data_len=len(self.data),
num_negatives=self.num_negatives,
)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index):
sample = self.data[index]
# get mention context data
context_tokens = defaultdict(list)
tokens = self.get_context_tokens(
sample["text"], char_spans=sample["char_spans"]
)
for key in tokens.keys():
context_tokens[key].append(tokens[key][0])
context_tokens = {k: torch.stack(v) for k, v in context_tokens.items()}
# get entity data
gold = sample["gold"]
entities = [gold]
# allow up to 10 alternate gold
alt_gold = sample["alt_gold"][:10]
# pad alt gold with gold so all the same size for batching
while len(alt_gold) < 10:
alt_gold.append(gold)
if self.neg_samples is not None:
for ent in self.neg_samples[index]:
entities.append(ent)
# get entity tokens
entity_tokens = defaultdict(list)
if self.tokenize_entities:
# tokenize entity data on the fly
if self.tokenized_entity_data is None:
for entity in entities:
tokens = self.get_entity_tokens(self.entity_cache[entity])
for key in tokens.keys():
entity_tokens[key].append(tokens[key][0])
# use preprocessed entity data
else:
for entity in entities:
tokens = self.tokenized_entity_data[self.eid2row[entity]]
for key in tokens.dtype.names:
# this throws a warning about torch tensors not being read-only
# we do not copy as tokenized_entity_data is large
entity_tokens[key].append(torch.from_numpy(tokens[key]))
entity_tokens = {k: torch.stack(v) for k, v in entity_tokens.items()}
# use query type labels if training
query_type_labels = (
torch.from_numpy(self.get_types(sample["gold"])) if not self.eval else []
)
return {
"data_id": index,
"context": context_tokens,
"entity": entity_tokens,
"entity_labels": torch.tensor(entities),
"query_entity_labels": torch.tensor(gold),
"query_type_labels": query_type_labels,
"alt_gold": torch.tensor(alt_gold),
}
def get_context_tokens(self, context, char_spans=[]):
# no mention boundaries
if len(char_spans) == 0:
return self.tokenizer(
context,
padding="max_length",
add_special_tokens=True,
return_tensors="pt", # return as pytorch tensors
truncation=True,
max_length=self.max_context_length,
)
char_spans = data_utils.clean_spaces(context=context, char_spans=char_spans)
context_tokens = data_utils.get_context_window(
char_spans=char_spans,
tokenizer=self.tokenizer,
context=context,
max_context_length=self.max_context_length,
)
# convert back to string to use tokenizer to pad and generate attention mask
context = self.tokenizer.decode(
self.tokenizer.convert_tokens_to_ids(context_tokens)
)
return self.tokenizer(
context,
padding="max_length",
add_special_tokens=True,
return_tensors="pt", # return as pytorch tensors
truncation=True,
max_length=self.max_context_length,
)
| tabi-main | tabi/data.py |
import logging
import math
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from transformers.optimization import AdamW
logger = logging.getLogger(__name__)
def get_type_label_mask(labels, type_equivalence="strict"):
"""Generate the mask indicating which queries have the same type.
Args:
labels: (num_queries, num_types)
type_equivalence (str): 'strict' or 'weak'
Returns:
mask with 1 where two queries share the same type and 0 otherwise
"""
# weak equivalence
# two sets of types are considered equivalent if more than
# 50% of the types are shared between them (based on cardinality of larger set)
if type_equivalence == "weak":
shared_labels = torch.matmul(labels.float(), labels.float().T)
num_types_per_el = labels.sum(1)
max_types = (
torch.cartesian_prod(num_types_per_el, num_types_per_el)
.max(1)[0]
.reshape(num_types_per_el.shape[0], -1)
)
same_label_mask = (shared_labels > max_types * 0.5).float()
# strict equivalence
# two sets of types are considered equivalent if all types match
else:
shared_labels = torch.matmul(labels.float(), labels.float().T)
labels_cols = labels.sum(1).repeat(len(labels), 1)
labels_rows = labels.sum(1).unsqueeze(1).repeat(1, len(labels))
same_label_mask = (
torch.eq(shared_labels, labels_rows) & torch.eq(shared_labels, labels_cols)
).float()
return same_label_mask
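# Worked example (added for clarity): for one-hot type rows
#   A = [1, 1, 1], B = [1, 1, 1], C = [1, 1, 0]
# strict equivalence marks only (A, B) as matching (identical type sets), while weak
# equivalence also marks (A, C), since they share 2 types and the larger set has 3
# (2 > 0.5 * 3). Each of A, B, C matches itself under both settings.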
# https://discuss.pytorch.org/t/first-nonzero-index/24769
def first_nonzero(x, axis=0):
nonz = x > 0
return ((nonz.cumsum(axis) == 1) & nonz).max(axis)
# modified from https://github.com/facebookresearch/BLINK/blob/main/blink/common/optimizer.py
def get_bert_optimizer(model, learning_rate):
"""Optimizes the network with AdamWithDecay"""
parameters_with_decay = []
parameters_without_decay = []
for param_name, param in model.named_parameters():
# do not use decay on bias terms
if "bias" in param_name:
parameters_without_decay.append(param)
else:
parameters_with_decay.append(param)
optimizer_grouped_parameters = [
{"params": parameters_with_decay, "weight_decay": 0.01},
{"params": parameters_without_decay, "weight_decay": 0.0},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=learning_rate,
correct_bias=False,
no_deprecation_warning=True,
)
return optimizer
def gather_embs(embs):
"""
Gathers embeddings across machines in distributed training
and combines into a single embedding.
"""
return torch.cat(GatherLayer.apply(embs.contiguous()), dim=0)
# https://github.com/open-mmlab/OpenSelfSup/blob/master/openselfsup/models/utils/gather_layer.py
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all process, supporting backward propagation.
"""
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
output = [torch.zeros_like(input) for _ in range(dist.get_world_size())]
dist.all_gather(output, input)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
(input,) = ctx.saved_tensors
grad_out = torch.zeros_like(input)
grad_out[:] = grads[dist.get_rank()]
return grad_out
def set_random_seed(random_seed=0):
logger.info(f"Random seed: {random_seed}")
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
| tabi-main | tabi/utils/train_utils.py |
tabi-main | tabi/utils/__init__.py |
|
import json
import logging
import pickle
import unicodedata
from collections import Counter
import jsonlines
import numpy as np
import torch
import torch.nn.functional as F
from tabi.constants import MENTION_END, MENTION_START
logger = logging.getLogger(__name__)
def load_neg_samples(neg_sample_file, num_negatives, data_len):
with open(neg_sample_file) as f:
neg_samples = json.load(f)
# store only negative samples we need in array
assert len(neg_samples) == data_len, f"{data_len} {len(neg_samples)}"
ns_array = np.zeros((len(neg_samples), num_negatives), dtype="int64")
for ns in neg_samples:
assert len(neg_samples[ns]["samples"]) >= num_negatives
ns_array[int(ns)] = neg_samples[ns]["samples"][:num_negatives]
return ns_array
def save_entity_map(id_map_path, entity_ids):
"""Save mapping of embedding index to entity id (eid)"""
logger.info(f"Saving ids to {id_map_path}.")
entity_map = {int(idx): int(eid) for idx, eid in enumerate(entity_ids)}
with open(id_map_path, "wb") as f:
pickle.dump(entity_map, f)
def convert_types_to_onehot(types, type_vocab):
types = [type_vocab[t] for t in types]
if len(types) == 0:
one_hot_types = torch.zeros(len(type_vocab))
else:
one_hot_types = torch.sum(
F.one_hot(torch.tensor(types), num_classes=len(type_vocab)), dim=0
).float()
return one_hot_types
def load_entity_data(datapath: str):
"""Load entity data as dictionary with entity ids as keys"""
data = {}
if datapath.endswith(".pkl"):
with open(datapath, "rb") as f:
return pickle.load(f)
with jsonlines.open(datapath) as f:
for line_idx, line in enumerate(f):
lid = line["label_id"] if "label_id" in line else line_idx
data[lid] = {
"id": lid,
"description": line.get("text", ""),
"title": line.get("title", ""),
"types": line.get("types", []),
"wikipedia_page_id": line.get("wikipedia_page_id", ""),
}
# save as pickle for faster loading
entityfile = datapath.split(".jsonl")[0] + ".pkl"
with open(entityfile, "wb") as f:
pickle.dump(data, f)
logger.debug(f"Wrote entities to {entityfile}")
return data
def get_prepped_type_file(datapath: str):
if datapath.endswith(".pkl"):
tag = datapath.split(".pkl")[0]
if datapath.endswith(".jsonl"):
tag = datapath.split(".jsonl")[0]
return f"{tag}_onehot_types.npy"
def load_data(datapath):
samples = []
ent_counter = Counter()
with jsonlines.open(datapath) as f:
for line in f:
# each mention gets its own example
if len(line["mentions"]) > 0:
                for i in range(len(line["mentions"])):
sample = {
"id": line["id"],
"gold": line["label_id"][i],
"alt_gold": line["alt_label_id"][i]
if "alt_label_id" in line
else [],
"text": line["text"],
# keep track of all mentions that were present
"char_spans": line["mentions"][i],
}
ent_counter[sample["gold"]] += 1
samples.append(sample)
else:
sample = {
"id": line["id"],
"gold": line["label_id"][0],
"alt_gold": line["alt_label_id"][0]
if "alt_label_id" in line
else [],
"text": line["text"],
"char_spans": [],
}
ent_counter[sample["gold"]] += 1
samples.append(sample)
return samples, ent_counter
# modified from https://github.com/facebookresearch/BLINK/blob/main/blink/biencoder/data_process.py
def get_context_window(char_spans, tokenizer, context, max_context_length):
start_idx = char_spans[0]
end_idx = char_spans[1]
context_left = context[:start_idx]
mention = context[start_idx:end_idx]
context_right = context[end_idx:]
mention_tokens = [MENTION_START] + tokenizer.tokenize(mention) + [MENTION_END]
context_left_tokens = tokenizer.tokenize(context_left)
context_right_tokens = tokenizer.tokenize(context_right)
left_quota = (max_context_length - len(mention_tokens)) // 2 - 1
right_quota = max_context_length - len(mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
context_tokens = (
context_left_tokens[-left_quota:]
+ mention_tokens
+ context_right_tokens[:right_quota]
)
return context_tokens
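# Worked example (added for clarity): with max_context_length = 32 and a mention that
# tokenizes to 3 subwords (5 tokens including [unused2]/[unused3]), the budget is
# left_quota = (32 - 5) // 2 - 1 = 12 and right_quota = 32 - 5 - 12 - 2 = 13 context
# tokens; unused quota on one side is shifted to the other, and the reserved slots
# leave room for the special tokens added by the tokenizer afterwards.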
def clean_spaces(context, char_spans):
"""Update char span to require mention to not start with a space"""
while unicodedata.normalize("NFKD", context[char_spans[0]]) == " ":
char_spans[0] += 1
assert char_spans[1] > char_spans[0]
return char_spans
def load_types(type_path):
types = []
with open(type_path) as f:
for line in f:
types.append(line.strip())
return types
| tabi-main | tabi/utils/data_utils.py |
import logging
import os
import pickle
import subprocess
import sys
import time
from collections import Counter, defaultdict
import jsonlines
import numpy as np
import torch
import ujson
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
from tqdm import tqdm
logger = logging.getLogger(__name__)
def save_entity_map(id_map_path, entity_ids):
logger.info(f"Saving ids to {id_map_path}.")
entity_map = {int(idx): int(eid) for idx, eid in enumerate(entity_ids)}
with open(id_map_path, "wb") as f:
pickle.dump(entity_map, f)
def get_type_str(types):
all_types = []
for t in types:
for subt in t.split("/")[1:]:
if subt not in all_types:
all_types.append(subt)
return " ".join(all_types)
def filter_negatives(
negative_samples, entity_cache, ent_counter, mult_factor=1, total_num_samples=1
):
neg_counter = Counter()
missing_negs = 0
# sample random ids in advance -- sample max possible number to avoid running out of random ids
start = time.time()
rand_samples = list(
np.random.choice(
list(entity_cache.keys()), len(negative_samples) * total_num_samples
)
)
logger.info(f"Time to sample random ids: {round(time.time()-start,3)}s")
for row, val in tqdm(negative_samples.items(), desc="Filtering"):
new_samples = []
new_scores = []
samples = val["samples"]
scores = val["scores"]
for sample, score in zip(samples, scores):
pos_count = ent_counter[sample]
if neg_counter[sample] < pos_count * mult_factor:
new_samples.append(sample)
new_scores.append(score)
neg_counter[sample] += 1
# exit if we have all the samples we need so the remaining
# hard samples can be used for another example
if len(new_samples) == total_num_samples:
break
while len(new_samples) < total_num_samples:
missing_negs += 1
new_samples.append(int(rand_samples.pop()))
negative_samples[row]["samples"] = new_samples[:total_num_samples]
negative_samples[row]["scores"] = new_scores[:total_num_samples]
logger.info(
f"{round(missing_negs/(len(negative_samples)*total_num_samples)*100,3)}% random samples"
)
return negative_samples
def get_mmap_type(max_length: int):
"""Get datatype for storing tokenized data in memory mapped file.
Modified from https://github.com/senwu/Emmental-Candidate_retrieval/blob/master/data_processing/prep_entity_mmap.py
"""
return [
("input_ids", "i8", max_length),
("attention_mask", "i8", max_length),
("token_type_ids", "i8", max_length),
]
def load_model(model_checkpoint, model, device, optimizer=None, lr_scheduler=None):
"""Load model checkpoint and update optimizer and lr_scheduler if in checkpoint.
Returns:
dict with global_step and epoch to start training from
"""
logger.info(f"Loading model from checkpoint {model_checkpoint}")
if device != "cpu":
# map model to be loaded to specified single gpu.
loc = "cuda:{}".format(device)
checkpoint = torch.load(model_checkpoint, map_location=loc)
else:
checkpoint = torch.load(model_checkpoint, map_location=torch.device("cpu"))
# remove DDP "module." prefix if present
state_dict = checkpoint["state_dict"]
consume_prefix_in_state_dict_if_present(state_dict, "module.")
model.load_state_dict(state_dict, strict=True)
if optimizer is not None:
optimizer.load_state_dict(checkpoint["optimizer"])
logger.info("Loaded optimizer.")
if lr_scheduler is not None:
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
logger.info("Loaded lr scheduler.")
# reload random states to resume data loading order across runs
# TODO: support reloading numpy state if any new randomness depends on numpy random
if "rng_cpu" in checkpoint:
torch.set_rng_state(checkpoint["rng_cpu"].cpu())
if device != "cpu":
torch.cuda.set_rng_state(checkpoint["rng_gpu"].cpu())
logger.debug("Loaded random states.")
return {
"global_step": checkpoint.get("global_step", 0),
"epoch": checkpoint.get("epoch", -1) + 1,
}
def set_device(args):
if args.distributed:
# set to device 0 if using data parallel
args.device = args.local_rank if args.local_rank != -1 else 0
else:
args.device = args.gpu if args.gpu >= 0 else "cpu"
if args.device != "cpu":
torch.cuda.set_device(args.device)
logger.info(f"Device id: {args.device}")
def move_dict(dict_to_move, device):
return {k: val.to(device) for k, val in dict_to_move.items()}
def combine_preds(log_dir, predfile, num_gpus):
output = jsonlines.open(predfile, "w")
logger.info(f"Writing final preds to {predfile}")
seen_id = set()
for gpu_idx in range(num_gpus):
with jsonlines.open(
f'{log_dir}/{os.path.basename(predfile).split(".jsonl")[0]}_{gpu_idx}.jsonl'
) as f:
for line in f:
line_id = line["id"]
# already seen in another process
if line_id in seen_id:
continue
seen_id.add(line_id)
output.write(line)
output.close()
def combine_negs(
log_dir,
neg_sample_file,
num_gpus,
use_filter=False,
entity_cache=None,
ent_counter=None,
total_num_samples=-1,
mult_factor=1,
):
neg_samples = {}
for gpu_idx in range(num_gpus):
# includes sample ids and distances
with open(f"{log_dir}/neg_samples_{gpu_idx}.json", "r") as f:
res = ujson.load(f)
neg_samples.update(res)
# filter negatives before saving combined negatives
if use_filter:
neg_samples = filter_negatives(
neg_samples,
entity_cache=entity_cache,
ent_counter=ent_counter,
# top_k here is the number of negatives we want after filter
total_num_samples=total_num_samples,
mult_factor=mult_factor,
)
with open(neg_sample_file, "w") as f:
ujson.dump(neg_samples, f)
logger.info(f"Wrote final negative samples to {neg_sample_file}")
# https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def write_neg_samples(
preds, dataset, entity_map_file, neg_sample_file, num_negative_samples
):
# keep track of emb id to eid
with open(entity_map_file, "rb") as f:
entity_map = pickle.load(f)
# flatten preds across batches
flattened_preds = flatten_dicts(preds)
# store as dictionary because indices will be accessed out of order
# during training
neg_samples = {}
# need to use data_id rather than idx in list to support ddp
for (data_id, indices, scores) in zip(
flattened_preds["data_id"],
flattened_preds["indices"],
flattened_preds["scores"],
):
neg_samples_ex = []
scores_ex = []
gold_eids = [dataset.data[data_id]["gold"]]
gold_eids.extend(dataset.data[data_id]["alt_gold"])
gold_eids = set(gold_eids)
for s, idx in zip(scores, indices):
eid = entity_map[idx]
# remove gold entities from hard negatives
if eid in gold_eids:
continue
scores_ex.append(float(s))
# save entity ids (not emb/row ids) for negative samples
neg_samples_ex.append(eid)
neg_samples[int(data_id)] = {
"samples": neg_samples_ex[:num_negative_samples],
"scores": scores_ex[:num_negative_samples],
}
with open(neg_sample_file, "w") as f:
ujson.dump(neg_samples, f)
logger.info(f"Wrote negative samples to {neg_sample_file}")
def correct_at_k(pred_ids, gold_ids, k):
"""Return 1 if *any* gold id occurs in the top-k predicted ids, else 0."""
return int(len(set(gold_ids).intersection(pred_ids[:k])) > 0)
def write_preds(preds, dataset, pred_file, entity_map_file):
# embedding id to eid
entity_map = None
if entity_map_file is not None:
with open(entity_map_file, "rb") as f:
entity_map = pickle.load(f)
entity_cache = dataset.entity_cache
# flatten preds across batches
flattened_preds = flatten_dicts(preds)
# KILT FORMAT
# {
# "id": x,
# "output": [
# {
# "answer": y,
# "provenance": [
# {"wikipedia_id": z},
# {"wikipedia_id": w},
# ...
# ]
# }
# ]
# }
total_at_1 = 0
total_at_10 = 0
iter_ = tqdm(range(len(flattened_preds["data_id"])), desc="Evaluating")
with jsonlines.open(pred_file, "w") as f_out:
for i in iter_:
if entity_map is not None:
pred_eids = [
entity_map[emb_id] for emb_id in flattened_preds["indices"][i]
]
else:
pred_eids = flattened_preds["indices"][i]
data_id = flattened_preds["data_id"][i]
orig_data_id = dataset.data[data_id]["id"]
try:
new_ids = [
{
"wikipedia_id": entity_cache[eid]["wikipedia_page_id"],
"wikipedia_title": entity_cache[eid]["title"],
}
for eid in pred_eids
]
except:
# not using wikipedia ids
new_ids = [
{
"kb_id": eid,
"title": entity_cache[eid]["title"],
}
for eid in pred_eids
]
gold_ids = [dataset.data[data_id]["gold"]]
gold_ids.extend(dataset.data[data_id]["alt_gold"])
total_at_1 += correct_at_k(pred_ids=pred_eids, gold_ids=gold_ids, k=1)
total_at_10 += correct_at_k(pred_ids=pred_eids, gold_ids=gold_ids, k=10)
iter_.set_postfix(acc_1=total_at_1 / (i + 1), acc_10=total_at_10 / (i + 1))
output = {
"id": orig_data_id,
"input": dataset.data[data_id]["text"],
"output": [{"answer": "", "provenance": new_ids}],
}
f_out.write(output)
logger.info(f"Accuracy@1: {round(total_at_1/(i+1), 5)}")
logger.info(f"Accuracy@10: {round(total_at_10/(i+1), 5)}")
def flatten_dicts(batched_dict):
flattened_dict = defaultdict(list)
for batch in batched_dict:
for key, val in batch.items():
flattened_dict[key].extend(val)
return flattened_dict
def log_setup(args):
"""Create log directory and logger. Log basic set up information."""
# wait for first process to create log directory and dump config
if args.local_rank not in [-1, 0]:
torch.distributed.barrier(device_ids=[args.local_rank])
if args.local_rank in [-1, 0]:
os.makedirs(args.log_dir, exist_ok=True)
# dump args as config
# https://stackoverflow.com/questions/38884513/python-argparse-how-can-i-get-namespace-objects-for-argument-groups-separately
with open(os.path.join(args.log_dir, "config.json"), "w") as f:
ujson.dump(dict(sorted(args.__dict__.items())), f)
if args.local_rank != -1:
torch.distributed.barrier(device_ids=[args.local_rank])
# write separate log for each process with local rank tags
log_path = (
os.path.join(args.log_dir, f"log_{args.local_rank}.txt")
if args.local_rank != -1
else os.path.join(args.log_dir, "log.txt")
)
if args.local_rank in [-1, 0]:
# only use streamhandler for first process
handlers = [logging.FileHandler(log_path), logging.StreamHandler()]
else:
handlers = [
logging.FileHandler(log_path),
]
logging.basicConfig(
level=logging.INFO if not args.verbose else logging.DEBUG,
format="%(asctime)s [%(module)s] [%(levelname)s] %(message)s",
handlers=handlers,
)
# dump git hash
label = (
subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
.decode("ascii")
.strip()
)
logger.info(f"Git hash: {label}")
# dump basic machine info
machine_info = os.uname()
logger.info(f"Machine: {machine_info.nodename} ({machine_info.version})")
# dump run cmd
cmd_msg = " ".join(sys.argv)
logger.info(f"CMD: {cmd_msg}")
| tabi-main | tabi/utils/utils.py |
tabi-main | tabi/models/__init__.py |
|
import torch
import torch.nn as nn
import tabi.utils.train_utils as train_utils
class TABiLoss(nn.Module):
"""
Type-Aware Bi-encoders (TABi) loss.
"""
def __init__(self, temperature=0.01, alpha=0.1, type_equivalence="strict"):
super(TABiLoss, self).__init__()
self.temperature = temperature
self.alpha = alpha
self.type_equivalence = type_equivalence
def forward(
self,
query_embs,
entity_embs,
query_type_labels,
query_entity_labels,
entity_labels,
):
"""Computes TABi loss.
Args:
query_embs: (num_queries, hidden_dim)
entity_embs: (num_uniq_entities, hidden_dim)
query_type_labels: (num_queries, num_types)
query_entity_labels: (num_queries)
entity_labels: (num_uniq_entities)
Returns:
dict of {type_loss, ent_loss, total_loss}
"""
type_loss = torch.tensor(0.0)
ent_loss = torch.tensor(0.0)
total_loss = torch.tensor(0.0)
# entity loss
if self.alpha < 1.0:
all_embs = torch.cat([query_embs, entity_embs], dim=0)
all_ent_labels = torch.cat(
[query_entity_labels, entity_labels], dim=0
).view(-1, 1)
# -1 means no label
no_label_mask = all_ent_labels == -1
same_label_mask = torch.eq(all_ent_labels, all_ent_labels.T).bool()
ent_loss = SupConLoss(temperature=self.temperature)(
embs=all_embs,
same_label_mask=same_label_mask,
no_label_mask=no_label_mask,
)
# type loss
if self.alpha > 0.0:
no_label_mask = query_type_labels.sum(1) == 0
same_label_mask = train_utils.get_type_label_mask(
query_type_labels, type_equivalence=self.type_equivalence
).bool()
type_loss = SupConLoss(temperature=self.temperature)(
embs=query_embs,
no_label_mask=no_label_mask,
same_label_mask=same_label_mask,
)
total_loss = type_loss * self.alpha + ent_loss * (1 - self.alpha)
return {"total_loss": total_loss, "ent_loss": ent_loss, "type_loss": type_loss}
class SupConLoss(nn.Module):
"""
Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
Modified from https://github.com/HobbitLong/SupContrast.
"""
def __init__(self, temperature=0.01):
super(SupConLoss, self).__init__()
self.temperature = temperature
def forward(self, embs, same_label_mask, no_label_mask):
"""Compute supervised contrastive loss (variant).
Args:
embs: (num_examples, hidden_dim)
same_label_mask: (num_examples, num_examples)
no_label_mask: (num_examples)
Returns:
A loss scalar
"""
# compute similarity scores for embs
sim = embs @ embs.T / self.temperature
# for numerical stability
sim_max, _ = torch.max(sim, dim=1, keepdim=True)
sim = sim - sim_max.detach()
# compute log-likelihood for each pair
# ***unlike original supcon, do not include examples with the same label in the denominator***
negs = torch.exp(sim) * ~(same_label_mask)
denominator = negs.sum(axis=1, keepdim=True) + torch.exp(sim)
# log(exp(x)) = x and log(x/y) = log(x) - log(y)
log_prob = sim - torch.log(denominator)
# compute mean of log-likelihood over all positive pairs for a query/entity
# exclude self from positive pairs
pos_pairs_mask = same_label_mask.fill_diagonal_(0)
# only include examples in loss that have positive pairs and the class label is known
include_in_loss = (pos_pairs_mask.sum(1) != 0) & (~no_label_mask).flatten()
# we add ones to the denominator to avoid nans when there are no positive pairs
mean_log_prob_pos = (pos_pairs_mask * log_prob).sum(1) / (
pos_pairs_mask.sum(1) + (~include_in_loss).float()
)
mean_log_prob_pos = mean_log_prob_pos[include_in_loss]
# return zero loss if there are no values to take average over
if mean_log_prob_pos.shape[0] == 0:
return torch.tensor(0)
# scale loss by temperature (as done in supcon paper)
loss = -1 * self.temperature * mean_log_prob_pos
# average loss over all queries/entities that have at least one positive pair
return loss.mean()
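# --- Hedged usage sketch (editor's addition, not part of the original repo) ---
# Runs the losses above on a tiny random batch. Shapes and labels are made up
# for illustration. alpha=0.0 keeps only the entity term of the TABi loss, so
# this sketch does not depend on train_utils.get_type_label_mask.
if __name__ == "__main__":
    torch.manual_seed(0)

    # SupConLoss on four embeddings with two classes and all labels known.
    embs = torch.nn.functional.normalize(torch.randn(4, 8), dim=-1)
    labels = torch.tensor([0, 0, 1, 1]).view(-1, 1)
    same_label_mask = torch.eq(labels, labels.T)
    no_label_mask = torch.zeros(4, dtype=torch.bool)
    supcon = SupConLoss(temperature=0.1)(
        embs=embs, same_label_mask=same_label_mask, no_label_mask=no_label_mask
    )
    print(f"SupConLoss: {supcon.item():.4f}")

    # TABiLoss with the type term disabled (alpha=0.0): two queries, two entities.
    query_embs = torch.nn.functional.normalize(torch.randn(2, 8), dim=-1)
    entity_embs = torch.nn.functional.normalize(torch.randn(2, 8), dim=-1)
    losses = TABiLoss(temperature=0.1, alpha=0.0)(
        query_embs=query_embs,
        entity_embs=entity_embs,
        query_type_labels=torch.zeros(2, 3),  # unused when alpha=0.0
        query_entity_labels=torch.tensor([0, 1]),
        entity_labels=torch.tensor([0, 1]),
    )
    print({k: round(v.item(), 4) for k, v in losses.items()})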
| tabi-main | tabi/models/losses.py |
import logging
import torch.nn.functional as F
from torch import nn
from transformers import AutoModel
logger = logging.getLogger()
class Encoder(nn.Module):
def __init__(self, model_name: str = "bert-base-uncased") -> None:
super().__init__()
logger.info(f"Using encoder model: {model_name}")
# use local model files unless not present
try:
self.transformer = AutoModel.from_pretrained(
model_name, local_files_only=True
)
except:
self.transformer = AutoModel.from_pretrained(model_name)
self.output_dim = self.transformer.embeddings.word_embeddings.weight.size(1)
def forward(self, x):
input_ids = x["input_ids"]
token_type_ids = x["token_type_ids"]
attention_mask = x["attention_mask"]
seq_len = input_ids.shape[-1]
last_hidden_state, _ = self.transformer(
input_ids=input_ids.reshape(-1, seq_len),
token_type_ids=token_type_ids.reshape(-1, seq_len),
attention_mask=attention_mask.reshape(-1, seq_len),
return_dict=False,
)
return last_hidden_state
class Aggregator(nn.Module):
def __init__(self, normalize: bool = True) -> None:
super().__init__()
self.normalize = normalize
logger.debug(f"L2 normalization: {normalize}")
def forward(self, last_hidden_state):
# take CLS token as embedding
emb = last_hidden_state[:, 0]
if self.normalize:
return F.normalize(emb, p=2, dim=-1)
return emb
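# --- Hedged usage sketch (editor's addition, not part of the original repo) ---
# Illustrates the Aggregator on a random tensor standing in for BERT's
# last_hidden_state, so no pretrained weights need to be downloaded.
if __name__ == "__main__":
    import torch

    fake_hidden_state = torch.randn(2, 5, 8)  # (batch, seq_len, hidden_dim)
    aggregator = Aggregator(normalize=True)
    cls_embs = aggregator(fake_hidden_state)
    print(cls_embs.shape)         # torch.Size([2, 8])
    print(cls_embs.norm(dim=-1))  # ~1.0 per row after L2 normalization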
| tabi-main | tabi/models/layers.py |
import logging
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from tabi.models.layers import Aggregator, Encoder
from tabi.models.losses import TABiLoss
from tabi.utils.train_utils import first_nonzero, gather_embs
logger = logging.getLogger(__name__)
class Biencoder(nn.Module):
def __init__(
self,
tied,
temperature=0.1,
top_k=10,
entity_emb_path=None,
model_name="bert-base-uncased",
normalize=True,
alpha=0.1,
is_distributed=False,
):
super(Biencoder, self).__init__()
# initialize encoders
if tied:
# use same weights for entity encoder and query encoder
self.entity_encoder = Encoder(model_name=model_name)
self.query_encoder = self.entity_encoder
else:
self.entity_encoder = Encoder(model_name=model_name)
self.query_encoder = Encoder(model_name=model_name)
self.dim = self.entity_encoder.output_dim
self.entity_aggregator = Aggregator(normalize=normalize)
self.query_aggregator = Aggregator(normalize=normalize)
self.entity_embs = None
if entity_emb_path is not None:
entity_embs = np.memmap(entity_emb_path, dtype="float32", mode="r").reshape(
-1, self.dim
)
self.entity_embs = torch.from_numpy(np.copy(entity_embs))
# hyperparameters
self.temperature = temperature
self.top_k = top_k
self.alpha = alpha
self.is_distributed = is_distributed
logger.debug(f"Using distributed for model: {is_distributed}")
self.tabi_loss = TABiLoss(temperature=temperature, alpha=self.alpha)
def _embed_entity(self, entity_data):
"""Get entity embeddings"""
return self.entity_aggregator(self.entity_encoder(entity_data))
def _embed_query(self, context_data):
"""Get query embeddings"""
return self.query_aggregator(self.query_encoder(context_data))
def forward(self, entity_data=None, context_data=None):
entity_embs = None
query_embs = None
if entity_data is not None:
entity_embs = self._embed_entity(entity_data)
if context_data is not None:
query_embs = self._embed_query(context_data)
return {"entity_embs": entity_embs, "query_embs": query_embs}
def loss(
self,
query_embs,
entity_embs,
query_entity_labels,
entity_labels,
query_type_labels=None,
):
"""
Args:
query_embs: (num_queries, hidden_dim)
entity_embs: (num_entities, hidden_dim)
query_entity_labels: (num_queries)
entity_labels: (num_queries, num_negatives+1)
query_type_labels: (num_queries, num_types)
num_entities will include duplicate entities if the same entity
occurs with more than one example in the batch.
Returns:
dict of {type_loss, ent_loss, total_loss}
"""
# get embs across gpus before computing loss if distributed
if self.is_distributed:
entity_embs = gather_embs(entity_embs)
query_embs = gather_embs(query_embs)
query_type_labels = gather_embs(query_type_labels)
query_entity_labels = gather_embs(query_entity_labels)
entity_labels = gather_embs(entity_labels)
# flatten entity labels to have same dimension 0 as entity embs
entity_labels = entity_labels.flatten()
# remove duplicate entities from a batch
uniq_ent_indices = torch.unique(
first_nonzero(
torch.eq(
entity_labels.unsqueeze(1), entity_labels.unsqueeze(1).T
).float(),
axis=1,
)[1]
)
entity_embs = entity_embs[uniq_ent_indices]
entity_labels = entity_labels[uniq_ent_indices]
return self.tabi_loss(
query_embs=query_embs,
entity_embs=entity_embs,
query_type_labels=query_type_labels,
query_entity_labels=query_entity_labels,
entity_labels=entity_labels,
)
def predict(self, context_data, data_id=None):
query_embs = self(context_data=context_data)["query_embs"]
# get top entity candidates using nearest neighbor search
scores = query_embs @ self.entity_embs.t()
query_entity_scores, index = scores.topk(self.top_k)
probs = F.softmax(query_entity_scores / self.temperature, -1)
# "indices" are row in the embedding matrix and may not correspond to entity_ids
# we convert back to entity_ids in saving predictions/negative samples (see utils.py)
prediction = {
"scores": query_entity_scores,
"probs": probs,
"indices": index,
"data_id": data_id,
}
prediction = {
key: value.cpu().numpy()
for key, value in prediction.items()
if len(value) > 0
}
return prediction
| tabi-main | tabi/models/biencoder.py |
import subprocess
# set hyperparameters
# number of epochs for each round of sampling
n_epochs = 1
# first epoch is in-batch negatives
num_neg_rounds = 3
# tabi-specific
type_weight = 0.1
# model params
max_context_length = 32
lr = 3e-4
temperature = 0.05
add_types_in_desc = False
seed = 1234
batch_size = 4096
eval_batch_size = 32
neg_sample_batch_size = batch_size * 2
entity_batch_size = batch_size * 4
num_negatives = 3 # number of hard negatives to use for training
num_negatives_orig = 100 # total number of hard negatives to fetch (fetch extra since we filter gold ids and optionally based on counts)
filter_negatives = True # whether to filter hard negatives for training
# machine params
gpus = "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15"
ngpus = 16
distributed = True
# set paths
home_dir = "tabi"
data_dir = f"data"
train_file = "kilt_train.jsonl"
dev_file = "kilt_dev.jsonl"
test_file = "kilt_dev.jsonl"
entity_file = "entity.pkl"
type_file = "figer_types.txt"
base_log_dir = "logs"
run_name = f"kilt_train"
log_dir = f"{base_log_dir}/{run_name}"
preprocess = True
tokenized_entity_data = f"{log_dir}/1_preprocess/entity_data.npy"
python_str = "python"
if distributed:
python_str = f"python -m torch.distributed.launch --nproc_per_node={ngpus}"
# preprocess entity data (generate and save tokens for BERT entity input)
if preprocess:
subprocess.run(
f"python {home_dir}/preprocess_entity.py \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--log_dir {log_dir}/1_preprocess \
--type_file {data_dir}/{type_file} \
--num_workers 12 \
--entity_file {data_dir}/{entity_file}",
shell=True,
check=True,
)
# train model
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/train.py \
--train_data_file {data_dir}/{train_file} \
--train_entity_file {data_dir}/{entity_file} \
--dev_data_file {data_dir}/{dev_file} \
--dev_entity_file {data_dir}/{entity_file} \
--type_file {data_dir}/{type_file} \
--n_epochs {n_epochs} \
--log_dir {log_dir}/1_train \
--temperature {temperature} \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--max_context_length {max_context_length} \
--alpha {type_weight} \
--seed {seed} \
--lr {lr}",
shell=True,
check=True,
)
# generate entities
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/extract_entity.py \
--entity_file {data_dir}/{entity_file} \
--model_checkpoint {log_dir}/1_train/best_model.pth \
--batch_size {entity_batch_size} \
--log_dir {log_dir}/1_entity \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--type_file {data_dir}/{type_file}",
shell=True,
check=True,
)
# eval
# don't use distributed for eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/eval.py \
--test_data_file {data_dir}/{test_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/1_eval \
--temperature {temperature} \
--batch_size 32 \
--model_checkpoint {log_dir}/1_train/best_model.pth \
--entity_emb_path {log_dir}/1_entity/embs.npy \
--entity_map_file {log_dir}/1_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--max_context_length {max_context_length} \
--mode eval",
shell=True,
check=True,
)
# hard negative sampling rounds
for round in range(1, num_neg_rounds + 1):
# decay the lr by 1/2 each round
neg_lr = lr * (0.5) ** (round)
# generate train negative samples
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/eval.py \
--test_data_file {data_dir}/{train_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round}_neg_sample_train \
--temperature {temperature} \
--batch_size {neg_sample_batch_size} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--entity_emb_path {log_dir}/{round}_entity/embs.npy \
--entity_map_file {log_dir}/{round}_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--max_context_length {max_context_length} \
--distributed {distributed} \
--top_k {num_negatives} \
--type_file {data_dir}/{type_file} \
--orig_num_negatives {num_negatives_orig} \
--filter_negatives {filter_negatives} \
--mode neg_sample",
shell=True,
check=True,
)
# generate dev negative samples
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/eval.py \
--test_data_file {data_dir}/{dev_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round}_neg_sample_dev \
--temperature {temperature} \
--batch_size {neg_sample_batch_size} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--entity_emb_path {log_dir}/{round}_entity/embs.npy \
--entity_map_file {log_dir}/{round}_entity/entity_map.pkl \
--max_context_length {max_context_length} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--top_k {num_negatives} \
--type_file {data_dir}/{type_file} \
--mode neg_sample",
shell=True,
check=True,
)
# train model
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/train.py \
--seed {seed} \
--train_data_file {data_dir}/{train_file} \
--train_entity_file {data_dir}/{entity_file} \
--dev_data_file {data_dir}/{dev_file} \
--dev_entity_file {data_dir}/{entity_file} \
--type_file {data_dir}/{type_file} \
--n_epochs {n_epochs} \
--log_dir {log_dir}/{round+1}_train \
--temperature {temperature} \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--alpha {type_weight} \
--max_context_length {max_context_length} \
--num_negatives {num_negatives} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--train_neg_sample_file {log_dir}/{round}_neg_sample_train/neg_samples.json \
--dev_neg_sample_file {log_dir}/{round}_neg_sample_dev/neg_samples.json \
--lr {neg_lr}",
shell=True,
check=True,
)
# generate entities for eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/extract_entity.py \
--entity_file {data_dir}/{entity_file} \
--model_checkpoint {log_dir}/{round+1}_train/best_model.pth \
--batch_size {entity_batch_size} \
--log_dir {log_dir}/{round+1}_entity \
--add_entity_type_in_description {add_types_in_desc} \
--tokenized_entity_data {tokenized_entity_data} \
--type_file {data_dir}/{type_file} \
--distributed {distributed}",
shell=True,
check=True,
)
# eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/eval.py \
--test_data_file {data_dir}/{test_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round+1}_eval \
--temperature {temperature} \
--batch_size {eval_batch_size} \
--model_checkpoint {log_dir}/{round+1}_train/best_model.pth \
--entity_emb_path {log_dir}/{round+1}_entity/embs.npy \
--max_context_length {max_context_length} \
--entity_map_file {log_dir}/{round+1}_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--mode eval",
shell=True,
check=True,
)
| tabi-main | scripts/run_kilt_data.py |
import subprocess
# set hyperparameters
# number of epochs for each round of sampling
n_epochs = 1
# first epoch is in-batch negatives
num_neg_rounds = 3
# tabi-specific
type_weight = 0.1
# model params
max_context_length = 32
lr = 1e-5
temperature = 0.05
add_types_in_desc = True
seed = 1234
batch_size = 32
eval_batch_size = 32
neg_sample_batch_size = 32
entity_batch_size = 32
num_negatives = 1 # number of hard negatives to use for training
num_negatives_orig = 20 # total number of hard negatives to fetch (fetch extra since we filter gold ids and optionally based on counts)
filter_negatives = False # whether to filter hard negatives
# machine params
gpu = -1
distributed = False
# set paths
home_dir = "tabi"
data_dir = f"sample_data"
train_file = "train.jsonl"
dev_file = "dev.jsonl"
test_file = "dev.jsonl"
entity_file = "entity.jsonl"
type_file = "figer_types.txt"
base_log_dir = "logs"
run_name = f"sample_run"
log_dir = f"{base_log_dir}/{run_name}"
preprocess = True
tokenized_entity_data = f"{log_dir}/1_preprocess/entity_data.npy"
# preprocess entity data (generate and save tokens for BERT entity input)
if preprocess:
subprocess.run(
f"python {home_dir}/preprocess_entity.py \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--log_dir {log_dir}/1_preprocess \
--type_file {data_dir}/{type_file} \
--num_workers 12 \
--entity_file {data_dir}/{entity_file} \
--gpu {gpu}",
shell=True,
check=True,
)
# train model
subprocess.run(
f"python {home_dir}/train.py \
--train_data_file {data_dir}/{train_file} \
--train_entity_file {data_dir}/{entity_file} \
--dev_data_file {data_dir}/{dev_file} \
--dev_entity_file {data_dir}/{entity_file} \
--type_file {data_dir}/{type_file} \
--n_epochs {n_epochs} \
--log_dir {log_dir}/1_train \
--temperature {temperature} \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--max_context_length {max_context_length} \
--alpha {type_weight} \
--seed {seed} \
--lr {lr} \
--gpu {gpu}",
shell=True,
check=True,
)
# generate entities
subprocess.run(
f"python {home_dir}/extract_entity.py \
--entity_file {data_dir}/{entity_file} \
--model_checkpoint {log_dir}/1_train/best_model.pth \
--batch_size {entity_batch_size} \
--log_dir {log_dir}/1_entity \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--type_file {data_dir}/{type_file} \
--gpu {gpu}",
shell=True,
check=True,
)
# eval
subprocess.run(
f"python {home_dir}/eval.py \
--test_data_file {data_dir}/{test_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/1_eval \
--temperature {temperature} \
--batch_size {eval_batch_size} \
--model_checkpoint {log_dir}/1_train/best_model.pth \
--entity_emb_path {log_dir}/1_entity/embs.npy \
--entity_map_file {log_dir}/1_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--max_context_length {max_context_length} \
--mode eval \
--gpu {gpu}",
shell=True,
check=True,
)
# hard negative sampling rounds
for round in range(1, num_neg_rounds + 1):
# decay the lr by 1/2 each round
neg_lr = lr * (0.5) ** (round)
# generate train negative samples
subprocess.run(
f"python {home_dir}/eval.py \
--test_data_file {data_dir}/{train_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round}_neg_sample_train \
--temperature {temperature} \
--batch_size {neg_sample_batch_size} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--entity_emb_path {log_dir}/{round}_entity/embs.npy \
--entity_map_file {log_dir}/{round}_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--max_context_length {max_context_length} \
--distributed {distributed} \
--top_k {num_negatives} \
--type_file {data_dir}/{type_file} \
--orig_num_negatives {num_negatives_orig} \
--filter_negatives {filter_negatives} \
--mode neg_sample \
--gpu {gpu}",
shell=True,
check=True,
)
# generate dev negative samples
subprocess.run(
f"python {home_dir}/eval.py \
--test_data_file {data_dir}/{dev_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round}_neg_sample_dev \
--temperature {temperature} \
--batch_size {neg_sample_batch_size} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--entity_emb_path {log_dir}/{round}_entity/embs.npy \
--entity_map_file {log_dir}/{round}_entity/entity_map.pkl \
--max_context_length {max_context_length} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--top_k {num_negatives} \
--type_file {data_dir}/{type_file} \
--mode neg_sample \
--gpu {gpu}",
shell=True,
check=True,
)
# train model
subprocess.run(
f"python {home_dir}/train.py \
--seed {seed} \
--train_data_file {data_dir}/{train_file} \
--train_entity_file {data_dir}/{entity_file} \
--dev_data_file {data_dir}/{dev_file} \
--dev_entity_file {data_dir}/{entity_file} \
--type_file {data_dir}/{type_file} \
--n_epochs {n_epochs} \
--log_dir {log_dir}/{round+1}_train \
--temperature {temperature} \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--alpha {type_weight} \
--max_context_length {max_context_length} \
--num_negatives {num_negatives} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--train_neg_sample_file {log_dir}/{round}_neg_sample_train/neg_samples.json \
--dev_neg_sample_file {log_dir}/{round}_neg_sample_dev/neg_samples.json \
--lr {neg_lr} \
--gpu {gpu}",
shell=True,
check=True,
)
# generate entities for eval
subprocess.run(
f"python {home_dir}/extract_entity.py \
--entity_file {data_dir}/{entity_file} \
--model_checkpoint {log_dir}/{round+1}_train/best_model.pth \
--batch_size {entity_batch_size} \
--log_dir {log_dir}/{round+1}_entity \
--add_entity_type_in_description {add_types_in_desc} \
--tokenized_entity_data {tokenized_entity_data} \
--type_file {data_dir}/{type_file} \
--distributed {distributed} \
--gpu {gpu}",
shell=True,
check=True,
)
# eval
subprocess.run(
f"python {home_dir}/eval.py \
--test_data_file {data_dir}/{test_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round+1}_eval \
--temperature {temperature} \
--batch_size {eval_batch_size} \
--model_checkpoint {log_dir}/{round+1}_train/best_model.pth \
--entity_emb_path {log_dir}/{round+1}_entity/embs.npy \
--max_context_length {max_context_length} \
--entity_map_file {log_dir}/{round+1}_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--mode eval \
--gpu {gpu}",
shell=True,
check=True,
)
| tabi-main | scripts/run_sample_cpu.py |
import subprocess
# set hyperparameters
# number of epochs for each round of sampling
n_epochs = 1
# first epoch is in-batch negatives
num_neg_rounds = 3
# tabi-specific
type_weight = 0.1
# model params
max_context_length = 32
lr = 1e-5
temperature = 0.05
add_types_in_desc = True
seed = 1234
batch_size = 32
eval_batch_size = 32
neg_sample_batch_size = 32
entity_batch_size = 32
num_negatives = 1 # number of hard negatives to use for training
num_negatives_orig = 20 # total number of hard negatives to fetch (fetch extra since we filter gold ids and optionally based on counts)
filter_negatives = False # whether to filter hard negatives
# machine params
gpus = "0"
ngpus = 1
distributed = False
# set paths
home_dir = "tabi"
data_dir = f"sample_data"
train_file = "train.jsonl"
dev_file = "dev.jsonl"
test_file = "dev.jsonl"
entity_file = "entity.jsonl"
type_file = "figer_types.txt"
base_log_dir = "logs"
run_name = f"sample_run"
log_dir = f"{base_log_dir}/{run_name}"
preprocess = True
tokenized_entity_data = f"{log_dir}/1_preprocess/entity_data.npy"
python_str = "python"
if distributed:
python_str = f"python -m torch.distributed.launch --nproc_per_node={ngpus}"
# preprocess entity data (generate and save tokens for BERT entity input)
if preprocess:
subprocess.run(
f"python {home_dir}/preprocess_entity.py \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--log_dir {log_dir}/1_preprocess \
--type_file {data_dir}/{type_file} \
--num_workers 12 \
--entity_file {data_dir}/{entity_file}",
shell=True,
check=True,
)
# train model
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/train.py \
--train_data_file {data_dir}/{train_file} \
--train_entity_file {data_dir}/{entity_file} \
--dev_data_file {data_dir}/{dev_file} \
--dev_entity_file {data_dir}/{entity_file} \
--type_file {data_dir}/{type_file} \
--n_epochs {n_epochs} \
--log_dir {log_dir}/1_train \
--temperature {temperature} \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--max_context_length {max_context_length} \
--alpha {type_weight} \
--seed {seed} \
--lr {lr}",
shell=True,
check=True,
)
# generate entities
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/extract_entity.py \
--entity_file {data_dir}/{entity_file} \
--model_checkpoint {log_dir}/1_train/best_model.pth \
--batch_size {entity_batch_size} \
--log_dir {log_dir}/1_entity \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--type_file {data_dir}/{type_file}",
shell=True,
check=True,
)
# eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/eval.py \
--test_data_file {data_dir}/{test_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/1_eval \
--temperature {temperature} \
--batch_size {eval_batch_size} \
--model_checkpoint {log_dir}/1_train/best_model.pth \
--entity_emb_path {log_dir}/1_entity/embs.npy \
--entity_map_file {log_dir}/1_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--max_context_length {max_context_length} \
--mode eval",
shell=True,
check=True,
)
# hard negative sampling rounds
for round in range(1, num_neg_rounds + 1):
# decay the lr by 1/2 each round
neg_lr = lr * (0.5) ** (round)
# generate train negative samples
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/eval.py \
--test_data_file {data_dir}/{train_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round}_neg_sample_train \
--temperature {temperature} \
--batch_size {neg_sample_batch_size} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--entity_emb_path {log_dir}/{round}_entity/embs.npy \
--entity_map_file {log_dir}/{round}_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--max_context_length {max_context_length} \
--distributed {distributed} \
--top_k {num_negatives} \
--type_file {data_dir}/{type_file} \
--orig_num_negatives {num_negatives_orig} \
--filter_negatives {filter_negatives} \
--mode neg_sample",
shell=True,
check=True,
)
# generate dev negative samples
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/eval.py \
--test_data_file {data_dir}/{dev_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round}_neg_sample_dev \
--temperature {temperature} \
--batch_size {neg_sample_batch_size} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--entity_emb_path {log_dir}/{round}_entity/embs.npy \
--entity_map_file {log_dir}/{round}_entity/entity_map.pkl \
--max_context_length {max_context_length} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--top_k {num_negatives} \
--type_file {data_dir}/{type_file} \
--mode neg_sample",
shell=True,
check=True,
)
# train model
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/train.py \
--seed {seed} \
--train_data_file {data_dir}/{train_file} \
--train_entity_file {data_dir}/{entity_file} \
--dev_data_file {data_dir}/{dev_file} \
--dev_entity_file {data_dir}/{entity_file} \
--type_file {data_dir}/{type_file} \
--n_epochs {n_epochs} \
--log_dir {log_dir}/{round+1}_train \
--temperature {temperature} \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--alpha {type_weight} \
--max_context_length {max_context_length} \
--num_negatives {num_negatives} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--train_neg_sample_file {log_dir}/{round}_neg_sample_train/neg_samples.json \
--dev_neg_sample_file {log_dir}/{round}_neg_sample_dev/neg_samples.json \
--lr {neg_lr}",
shell=True,
check=True,
)
# generate entities for eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/extract_entity.py \
--entity_file {data_dir}/{entity_file} \
--model_checkpoint {log_dir}/{round+1}_train/best_model.pth \
--batch_size {entity_batch_size} \
--log_dir {log_dir}/{round+1}_entity \
--add_entity_type_in_description {add_types_in_desc} \
--tokenized_entity_data {tokenized_entity_data} \
--type_file {data_dir}/{type_file} \
--distributed {distributed}",
shell=True,
check=True,
)
# eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/eval.py \
--test_data_file {data_dir}/{test_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round+1}_eval \
--temperature {temperature} \
--batch_size {eval_batch_size} \
--model_checkpoint {log_dir}/{round+1}_train/best_model.pth \
--entity_emb_path {log_dir}/{round+1}_entity/embs.npy \
--max_context_length {max_context_length} \
--entity_map_file {log_dir}/{round+1}_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--mode eval",
shell=True,
check=True,
)
| tabi-main | scripts/run_sample.py |
"""Convert KILT-formatted jsonlines files to TABi-formatted jsonlines files."""
import argparse
import glob
import logging
import os
import jsonlines
from tqdm import tqdm
from tabi.utils.data_utils import load_entity_data
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file",
type=str,
help="Input KILT file to convert. Only needed if input_dir and ouput_dir are NOT provided",
)
parser.add_argument(
"--output_file",
type=str,
help="Output file. Only needed if input_dir and output_dir are NOT provided",
)
parser.add_argument(
"--input_dir",
type=str,
help="Input directory to read KILT files. This reads all jsonlines files in the directory!",
)
parser.add_argument(
"--output_dir", type=str, help="Output directory to write TABi files"
)
parser.add_argument(
"--entity_file", type=str, required=True, help="KILT-E knowledge base path"
)
return parser.parse_args()
def convert_kilt_to_tabi(kilt_file, tabi_file, wikiid2eid):
with jsonlines.open(kilt_file) as f, jsonlines.open(tabi_file, "w") as f_out:
for line in f:
# get mentions (if any)
has_mention_boundaries = ("[START_ENT]" in line["input"]) and (
"[END_ENT]" in line["input"]
)
if has_mention_boundaries:
meta = line["meta"]
start_idx = len(meta["left_context"]) + 1
end_idx = start_idx + len(meta["mention"])
text = (
line["input"].replace("[START_ENT] ", "").replace(" [END_ENT]", "")
)
mentions = [[start_idx, end_idx]]
else:
text = line["input"]
mentions = []
# no labels provided (e.g. test dataset)
if "output" not in line or not any(
["provenance" in o for o in line["output"]]
):
f_out.write(
{
"text": text,
"label_id": [-1],
"alt_label_id": [[]],
"id": line["id"],
"mentions": mentions,
}
)
continue
# convert labels from wikipedia page ids to KILT-E entity ids
all_eids = []
for o in line["output"]:
# take first wikipedia id to be the label
if "provenance" in o:
for pair in o["provenance"]:
wikiid = pair["wikipedia_id"]
# some wikipedia page ids won't have eids if they are in KILT but not in KILT-E (e.g. list pages)
eid = int(wikiid2eid.get(wikiid, -1))
all_eids.append(eid)
# get unique entity ids
all_eids = list(set(all_eids))
assert len(all_eids) > 0
f_out.write(
{
"text": text,
"label_id": [all_eids[0]],
"alt_label_id": [all_eids[1:]],
"id": line["id"],
"mentions": mentions,
}
)
def main(args):
assert (args.input_file and args.output_file) or (
args.input_dir and args.output_dir
), "Must provide either input_file and output_file OR input_dir and output_dir"
logger.info("Loading entity data...")
entity_cache = load_entity_data(args.entity_file)
# mapping from Wikipedia page ids to KILT-E knowledge base ids
wikiid2eid = {}
for eid in entity_cache:
wikiid2eid[entity_cache[eid]["wikipedia_page_id"]] = eid
if args.output_dir is not None:
# make output directory if it doesn't exist
os.makedirs(args.output_dir, exist_ok=True)
# iterate over each file in the input dir
assert (
args.input_dir is not None
), "Must provide input_dir if output_dir is provided"
# assumes all jsonlines files in the directory are in KILT format!
kilt_files = glob.glob(f"{args.input_dir}/*")
logger.info(f"Found {len(kilt_files)} KILT files.")
for kilt_file in tqdm(kilt_files, desc="Converting"):
tabi_file = os.path.join(args.output_dir, os.path.basename(kilt_file))
convert_kilt_to_tabi(
kilt_file=kilt_file, tabi_file=tabi_file, wikiid2eid=wikiid2eid
)
else:
logger.info(f"Converting {args.input_file}...")
convert_kilt_to_tabi(
kilt_file=args.input_file, tabi_file=args.output_file, wikiid2eid=wikiid2eid
)
logger.info(f"Wrote {args.output_file}!")
if __name__ == "__main__":
args = parse_args()
main(args)
| tabi-main | scripts/preprocess_kilt.py |
import argparse
import logging
from collections import defaultdict
from string import punctuation
import torch
from termcolor import colored
from transformers import AutoTokenizer
from transformers import logging as hf_logging
from tabi.constants import ENT_START
from tabi.models.biencoder import Biencoder
from tabi.utils.data_utils import load_entity_data
from tabi.utils.utils import load_model, move_dict
hf_logging.set_verbosity_error()
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
)
parser = argparse.ArgumentParser()
parser.add_argument("--model_checkpoint", type=str, required=True)
parser.add_argument("--entity_emb_path", type=str)
parser.add_argument("--entity_file", type=str)
parser.add_argument("--top_k", type=int, default=10)
parser.add_argument("--device", type=str, choices=["cuda", "cpu"], default="cpu")
args = parser.parse_args()
def preprocess_query(tokenizer, text):
# Take the input data and make it inference ready
tokens = tokenizer(
text,
padding="max_length",
add_special_tokens=True,
return_tensors="pt", # return as pytorch tensors
truncation=True,
max_length=32,
return_length=True,
return_offsets_mapping=True,
)
context_tokens = defaultdict(list)
for key in tokens.keys():
context_tokens[key].append(tokens[key][0])
context_tokens = {k: torch.stack(v) for k, v in context_tokens.items()}
return context_tokens
def preprocess_entity(tokenizer, title, description):
ent_str = title + " " + ENT_START + " " + description
entity_tokens = tokenizer(
ent_str,
padding="max_length",
add_special_tokens=True,
return_tensors="pt",
truncation=True,
max_length=128,
)
return entity_tokens
# load model
logger.info("Loading model...")
model = Biencoder(
tied=True,
entity_emb_path=args.entity_emb_path,
top_k=args.top_k,
model_name="bert-base-uncased",
normalize=True,
temperature=0.05,
)
load_model(model_checkpoint=args.model_checkpoint, device=args.device, model=model)
model.to(args.device)
model.eval()
logger.info("Finished loading model!")
# create tokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
tokenizer.add_special_tokens({"additional_special_tokens": [ENT_START]})
if args.entity_emb_path is not None:
# load entity cache
logger.info("Loading entity data...")
entity_cache = load_entity_data(args.entity_file)
logger.info("Finished loading entity data!")
def pretty_print(ent_data, prob, score):
print(colored(f"\ntitle: {ent_data['title']}", "grey", "on_cyan"))
print(f"prob: {round(prob, 5)}")
print(f"score: {round(score, 5)}")
print(f"text:{' '.join(ent_data['description'].split(' ')[:150])}")
if args.entity_emb_path is None:
logger.info(
"Using entity-input mode. No entity index was provided. To enter a new query, type 'Exit' for entity title. Returns raw score."
)
while True:
# ask for input
text = input(colored("\nInsert query: ", "grey", "on_green"))
if text.lower() == "exit" or text == "exit()":
break
# remove punctuation from end of text
text = text.rstrip(punctuation)
query_tokens = preprocess_query(tokenizer=tokenizer, text=text)
# use index if provided
if args.entity_emb_path is not None:
# retrieve candidates
with torch.no_grad():
res = model.predict(
context_data=move_dict(query_tokens, args.device),
data_id=torch.tensor([-1]),
)
assert len(res["probs"]) == 1
res["probs"] = res["probs"][0].tolist()
res["indices"] = res["indices"][0].tolist()
res["scores"] = res["scores"][0].tolist()
del res["data_id"]
# return response to user
for eid, prob, score in zip(res["indices"], res["probs"], res["scores"]):
pretty_print(entity_cache[eid], prob, score)
# otherwise, return query for entity info and return raw score
else:
exit_code = False
while not exit_code:
entity_title = input("\nInsert entity title: ")
if entity_title.lower() == "exit" or entity_title == "exit()":
exit_code = True
break
entity_description = input("Insert entity description: ")
entity_tokens = preprocess_entity(
tokenizer=tokenizer, title=entity_title, description=entity_description
)
# print(tokenizer.decode(entity_tokens['input_ids'][0]))
# compute embeddings and take dot product
ent_emb = model._embed_entity(move_dict(entity_tokens, args.device))
query_emb = model._embed_query(move_dict(query_tokens, args.device))
score = torch.dot(ent_emb.squeeze(), query_emb.squeeze())
print(f"Score: {round(score.item(), 5)}")
| tabi-main | scripts/demo.py |
import subprocess
# set hyperparameters
# number of epochs for each round of sampling
n_epochs = 1
# first epoch is in-batch negatives
num_neg_rounds = 3
# tabi-specific
type_weight = 0.1
# model params
max_context_length = 32
lr = 3e-4
temperature = 0.05
add_types_in_desc = False
seed = 1234
batch_size = 4096
eval_batch_size = 32
neg_sample_batch_size = batch_size * 2
entity_batch_size = batch_size * 4
num_negatives = 3 # number of hard negatives to use for training
num_negatives_orig = 100 # total number of hard negatives to fetch (fetch extra since we filter gold ids and optionally based on counts)
filter_negatives = True # whether to filter hard negatives for training
# machine params
gpus = "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15"
ngpus = 16
distributed = True
# set paths
home_dir = "tabi"
data_dir = f"data"
train_file = "blink_train.jsonl"
dev_file = "blink_dev.jsonl"
test_file = "blink_dev.jsonl"
entity_file = "entity.pkl"
type_file = "figer_types.txt"
base_log_dir = "logs"
run_name = f"blink_train"
log_dir = f"{base_log_dir}/{run_name}"
preprocess = True
tokenized_entity_data = f"{log_dir}/1_preprocess/entity_data.npy"
python_str = "python"
if distributed:
python_str = f"python -m torch.distributed.launch --nproc_per_node={ngpus}"
# preprocess entity data (generate and save tokens for BERT entity input)
if preprocess:
subprocess.run(
f"python {home_dir}/preprocess_entity.py \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--log_dir {log_dir}/1_preprocess \
--type_file {data_dir}/{type_file} \
--num_workers 12 \
--entity_file {data_dir}/{entity_file}",
shell=True,
check=True,
)
# train model
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/train.py \
--train_data_file {data_dir}/{train_file} \
--train_entity_file {data_dir}/{entity_file} \
--dev_data_file {data_dir}/{dev_file} \
--dev_entity_file {data_dir}/{entity_file} \
--type_file {data_dir}/{type_file} \
--n_epochs {n_epochs} \
--log_dir {log_dir}/1_train \
--temperature {temperature} \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--max_context_length {max_context_length} \
--alpha {type_weight} \
--seed {seed} \
--lr {lr}",
shell=True,
check=True,
)
# generate entities
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/extract_entity.py \
--entity_file {data_dir}/{entity_file} \
--model_checkpoint {log_dir}/1_train/best_model.pth \
--batch_size {entity_batch_size} \
--log_dir {log_dir}/1_entity \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--type_file {data_dir}/{type_file}",
shell=True,
check=True,
)
# eval
# don't use distributed for eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/eval.py \
--test_data_file {data_dir}/{test_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/1_eval \
--temperature {temperature} \
--batch_size 32 \
--model_checkpoint {log_dir}/1_train/best_model.pth \
--entity_emb_path {log_dir}/1_entity/embs.npy \
--entity_map_file {log_dir}/1_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--max_context_length {max_context_length} \
--mode eval",
shell=True,
check=True,
)
# hard negative sampling rounds
for round in range(1, num_neg_rounds + 1):
# decay the lr by 1/2 each round
neg_lr = lr * (0.5) ** (round)
# generate train negative samples
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/eval.py \
--test_data_file {data_dir}/{train_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round}_neg_sample_train \
--temperature {temperature} \
--batch_size {neg_sample_batch_size} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--entity_emb_path {log_dir}/{round}_entity/embs.npy \
--entity_map_file {log_dir}/{round}_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--max_context_length {max_context_length} \
--distributed {distributed} \
--top_k {num_negatives} \
--type_file {data_dir}/{type_file} \
--orig_num_negatives {num_negatives_orig} \
--filter_negatives {filter_negatives} \
--mode neg_sample",
shell=True,
check=True,
)
# generate dev negative samples
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/eval.py \
--test_data_file {data_dir}/{dev_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round}_neg_sample_dev \
--temperature {temperature} \
--batch_size {neg_sample_batch_size} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--entity_emb_path {log_dir}/{round}_entity/embs.npy \
--entity_map_file {log_dir}/{round}_entity/entity_map.pkl \
--max_context_length {max_context_length} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--top_k {num_negatives} \
--type_file {data_dir}/{type_file} \
--mode neg_sample",
shell=True,
check=True,
)
# train model
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} {python_str} {home_dir}/train.py \
--seed {seed} \
--train_data_file {data_dir}/{train_file} \
--train_entity_file {data_dir}/{entity_file} \
--dev_data_file {data_dir}/{dev_file} \
--dev_entity_file {data_dir}/{entity_file} \
--type_file {data_dir}/{type_file} \
--n_epochs {n_epochs} \
--log_dir {log_dir}/{round+1}_train \
--temperature {temperature} \
--batch_size {batch_size} \
--add_entity_type_in_description {add_types_in_desc} \
--distributed {distributed} \
--tokenized_entity_data {tokenized_entity_data} \
--alpha {type_weight} \
--max_context_length {max_context_length} \
--num_negatives {num_negatives} \
--model_checkpoint {log_dir}/{round}_train/best_model.pth \
--train_neg_sample_file {log_dir}/{round}_neg_sample_train/neg_samples.json \
--dev_neg_sample_file {log_dir}/{round}_neg_sample_dev/neg_samples.json \
--lr {neg_lr}",
shell=True,
check=True,
)
# generate entities for eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/extract_entity.py \
--entity_file {data_dir}/{entity_file} \
--model_checkpoint {log_dir}/{round+1}_train/best_model.pth \
--batch_size {entity_batch_size} \
--log_dir {log_dir}/{round+1}_entity \
--add_entity_type_in_description {add_types_in_desc} \
--tokenized_entity_data {tokenized_entity_data} \
--type_file {data_dir}/{type_file} \
--distributed {distributed}",
shell=True,
check=True,
)
# eval
subprocess.run(
f"CUDA_VISIBLE_DEVICES={gpus} python {home_dir}/eval.py \
--test_data_file {data_dir}/{test_file} \
--entity_file {data_dir}/{entity_file} \
--log_dir {log_dir}/{round+1}_eval \
--temperature {temperature} \
--batch_size {eval_batch_size} \
--model_checkpoint {log_dir}/{round+1}_train/best_model.pth \
--entity_emb_path {log_dir}/{round+1}_entity/embs.npy \
--max_context_length {max_context_length} \
--entity_map_file {log_dir}/{round+1}_entity/entity_map.pkl \
--add_entity_type_in_description {add_types_in_desc} \
--mode eval",
shell=True,
check=True,
)
| tabi-main | scripts/run_blink_data.py |
import glob
from os.path import basename
import setuptools
from setuptools import find_namespace_packages
setuptools.setup(
name="anchor", version="0.0.1", author="Megan Leszczynski", packages=find_namespace_packages()
)
| anchor-stability-master | setup.py |
anchor-stability-master | tests/__init__.py |
|
import unittest
from unittest.mock import Mock, patch
import numpy as np
import utils
from scipy.linalg import orthogonal_procrustes
from anchor.embedding import Embedding
class EmbeddingTest(unittest.TestCase):
def test_save_load(self):
# save embedding
vecs = np.array([[0, 1, 2], [3, 4, 5]])
vocab = ["cat", "dog"]
tmpfile = "tmp.txt"
original_emb = Embedding(vecs=vecs, vocab=vocab)
original_emb.save(tmpfile)
# load into a new embedding
new_emb = Embedding(tmpfile)
new_vecs = new_emb.m
utils.clean_files("tmp.txt")
np.testing.assert_array_equal(vecs, new_vecs)
def test_align(self):
# test basic align
vocab = ["cat", "dog"]
vecs1 = np.array([[0, 1, 2], [3, 4, 5]])
vecs2 = np.array([[4, 5, 6], [7, 8, 9]])
emb1 = Embedding(vecs=vecs1, vocab=vocab)
emb2 = Embedding(vecs=vecs2, vocab=vocab)
R, _ = orthogonal_procrustes(vecs2, vecs1)
expected_vec = np.dot(vecs2, R)
emb2.align(emb1)
np.testing.assert_array_equal(expected_vec, emb2.m)
np.testing.assert_array_equal(emb1, emb2.reference)
# test align with subembeds
vocab1 = ["cat", "dog"]
vocab2 = ["cat", "giraffe", "dog"]
vecs1 = np.array([[0, 1, 2], [3, 4, 5]])
vecs2 = np.array([[4, 5, 6], [7, 8, 9], [0, 3, 4]])
emb1 = Embedding(vecs=vecs1, vocab=vocab1)
emb2 = Embedding(vecs=vecs2, vocab=vocab2)
R, _ = orthogonal_procrustes(np.array([[4, 5, 6], [0, 3, 4]]), vecs1)
expected_vec = np.dot(vecs2, R)
emb2.align(emb1)
np.testing.assert_array_equal(expected_vec, emb2.m)
np.testing.assert_array_equal(emb1, emb2.reference)
def test_get_subembed(self):
vecs = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
vocab = ["cat", "dog", "giraffe"]
full_emb = Embedding(vecs=vecs, vocab=vocab)
new_vecs = full_emb.get_subembed(["giraffe", "dog"])
expected_vecs = np.array([[6, 7, 8], [3, 4, 5]])
np.testing.assert_array_equal(expected_vecs, new_vecs)
new_vecs = full_emb.get_subembed(["cat"])
expected_vecs = np.array([[0, 1, 2]])
np.testing.assert_array_equal(expected_vecs, new_vecs)
if __name__ == "__main__":
unittest.main()
| anchor-stability-master | tests/test_embedding.py |
import subprocess
import numpy as np
def clean_files(filename):
try:
subprocess.check_output("rm -f %s" % filename, shell=True)
    except (OSError, subprocess.CalledProcessError) as e:
        print(e)
| anchor-stability-master | tests/utils.py |
"""
Main analysis file to compute the embedding distance measures and downstream measures between pairs
of embeddings and save the results to pandas dataframes.
"""
import argparse
import glob
import os
import pickle
import multiprocessing as mp
import numpy as np
import pandas as pd
from anchor.embedding import Embedding
from utils import run_task, check_sent_complete, check_ner_complete
def parse_args():
parser = argparse.ArgumentParser(description="")
parser.add_argument("--dist", type=str, required=True,
choices=['eis', 'knn', 'eigen_overlap', 'sem_disp', 'weighted_eigen_overlap',
'fro_norm', 'pip', 'pred', 'quality'],
help='Distance metric between embeddings or models')
parser.add_argument("--resultdir1", type=str, help='Directory for first embedding or model')
parser.add_argument("--resultdir2", type=str,help='Directory for second embedding or model')
parser.add_argument("--out", type=str, default=".", help='Result directory for dataframe')
parser.add_argument('-t', '--task', type=str,
help='Task for intrinsic or extrinsic comparison')
parser.add_argument('--algo', type=str, help='Emb. algorithm', required=True)
parser.add_argument('--compressed', action='store_true')
parser.add_argument("--compress_type", type=str, help='Compression type', default='uniform',
choices=['kmeans', 'uniform'])
parser.add_argument("--dim", type=int)
parser.add_argument("--bitrate", type=int)
parser.add_argument("--model", type=str, help='Type of model, e.g. CNN')
parser.add_argument("--exp", type=float, help="Exponent for weighting eigenvalues")
parser.add_argument("--nprocs", type=int, default=20, help="Number of processes")
parser.add_argument("--wiki", type=bool, default=True, help='Use wiki embeddings')
parser.add_argument("--no_norm", action="store_true", help="Do not normalize the overlap metric")
parser.add_argument("--symmetric", action='store_true')
parser.add_argument("--lr", type=float, help="Learning rate for downstream model")
parser.add_argument("--no_align", action='store_true')
parser.add_argument("--truncate", type=int, help='Truncate embeddings to this number of words', default=-1)
parser.add_argument("--tag", type=str, help='Additional tag to add to distance metric')
parser.add_argument("--random", action='store_true', help='Randomly sampling vocab')
parser.add_argument("--nquery", type=int, help='Number of queries for the knn metric', default=1000)
parser.add_argument("--nneighbors", type=int, help='Number of neighbors to compare for the knn metric', default=10)
parser.add_argument("--validation", action='store_true', help='Use the validation predictions')
parser.add_argument("--seed_test", action='store_true',
help='Use different seed for 2018 embeddings to test seed effect')
parser.add_argument("--nseeds", type=int)
parser.add_argument("--same_norm", action='store_true', help='Use embeddings that have the same norm for algo comparison')
parser.add_argument("--scale", type=float, help='Magnitude to normalize embeddings to.')
return parser.parse_args()
def ner_stability(modelpath1, modelpath2, val=False):
"""Return the downstream prediction disagreement for the NER task."""
if val:
preds1 = f"{modelpath1}/dev.tsv"
preds2 = f"{modelpath2}/dev.tsv"
else:
preds1 = f"{modelpath1}/test.tsv"
preds2 = f"{modelpath2}/test.tsv"
check_ner_complete(modelpath1, modelpath2)
file1_lines = open(preds1, 'r').readlines()
file2_lines = open(preds2, 'r').readlines()
# compare predictions
mismatch = 0
total = 0
for line1, line2 in zip(file1_lines, file2_lines):
if len(line1.split(' ')) > 3:
pred1 = line1.split(' ')[2]
pred2 = line2.split(' ')[2]
# skip over cases where true value is "O"
if line1.split(' ')[1] != 'O':
if pred1 != pred2:
mismatch += 1
total += 1
dist = mismatch / float(total) * 100
return dist
def get_dist_tag(args):
"""Return the tag to use for the 'distance' measure in the dataframe."""
# downstream tasks
if args.model is not None:
# measure stability
if args.dist == 'pred':
dist_tag = f'{args.model}_{args.task}'
# measure quality
else:
dist_tag = f'{args.model}_{args.task}_quality'
else:
dist_tag = args.dist
# append additional tags
if args.exp:
dist_tag += f'_{args.exp}'
if args.no_norm:
dist_tag += f'_no_norm'
if args.symmetric:
dist_tag += f'_sym'
if args.truncate > -1:
dist_tag += f'_top_{args.truncate}'
if args.random:
dist_tag += '_rand'
if args.dist == 'knn':
dist_tag += f'_nquery_{args.nquery}_nn_{args.nneighbors}'
if args.tag is not None:
dist_tag += f'_{args.tag}'
if args.validation:
dist_tag += '_val'
if args.seed_test:
dist_tag += '_seed_test'
return dist_tag
def get_final_dist_tag(dist_tag, args):
"""Add additional file identifier tags for saving the results. NOT used in dataframe."""
if args.dim:
dist_tag += f'_dim_{args.dim}'
if args.compressed:
dist_tag += f'_{args.compress_type}'
if args.lr:
dist_tag += f'_lr_{args.lr}'
if args.wiki:
dist_tag += '_wiki'
if args.no_align:
dist_tag += 'no_align'
return dist_tag
def run_seed(algo, seed, dim, lr, bitrate=None, compress_type=None, args=None):
# set seeds for model
seed1 = seed
if args.seed_test:
seed2 = seed + 1000
else:
seed2 = seed
# get paths for embeddings or models
if algo != 'ft_sg':
if algo == "w2v_cbow":
end_tag = "50.w.txt"
elif algo == "mc":
end_tag = "50.txt"
else:
end_tag = "050.txt"
path1 = f"{args.resultdir1}/{algo}_wiki.en.txt_2017_seed_{seed}_dim_{dim}_lr_{lr}.{end_tag}"
path2 = f"{args.resultdir2}/{algo}_wiki.en.txt_2018_seed_{seed}_dim_{dim}_lr_{lr}.{end_tag}"
else:
path1 = f"{args.resultdir1}/ft_sg_wiki.en.txt_2017_seed_{seed}_dim_{dim}.vec"
path2 = f"{args.resultdir2}/ft_sg_wiki.en.txt_2018_seed_{seed}_dim_{dim}.vec"
# do embedding comparison
if args.model is None:
anchor_path = f'{args.resultdir1}/{args.algo}_anchor_seed_{seed}_top_{args.truncate}.pkl'
if args.compressed:
path1 += f'_br_{bitrate}'
path2 += f'_br_{bitrate}_same_range'
print(path2)
else:
path1 += f'_br_32'
if not args.no_align:
path2 += f'_br_32_same_range'
print(path2)
if args.same_norm:
path1 += f'_norm_{args.scale}'
path2 += f'_norm_{args.scale}'
# get downstream model quality of second model
elif args.dist == 'quality':
assert args.model is not None and args.task is not None and args.lr is not None, "Must provide model, task, and lr for quality eval"
if args.task != 'ner':
try:
modelpath1 = f"{path1}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed1}_lr_{args.lr}"
ff = open(f'{modelpath1}.log', 'r')
except:
modelpath1 = f"{path1}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed1}_data_{seed1}_lr_{args.lr}"
ff = open(f'{modelpath1}.log', 'r')
dat = [_.strip() for _ in ff]
            quality1 = (1-float(dat[-2].strip().split(': ')[1]))*100
try:
modelpath2 = f"{path2}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed2}_lr_{args.lr}"
ff = open(f'{modelpath2}.log', 'r')
except:
modelpath2 = f"{path2}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed2}_data_{seed2}_lr_{args.lr}"
ff = open(f'{modelpath2}.log', 'r')
dat = [_.strip() for _ in ff]
try:
dist = (1-float(dat[-2].strip().split(': ')[1]))*100
except:
print(modelpath2)
exit()
else:
modelpath2 = f"{path2}/ner_{args.model}_lr_{args.lr}/eval.log"
ff = open(modelpath2, 'r')
dat = [_.strip() for _ in ff]
lr = float(os.path.basename(os.path.dirname(modelpath2)).split('_')[-1])
assert 'f1-score' in dat[-7] and 'MICRO_AVG' in dat[-7], 'Parsing NER incorrect'
dist = float(dat[-7].strip().split(' ')[-1])*100
print(modelpath2, dist)
# compute downstream stability
elif args.model is not None:
assert args.model is not None and args.task is not None and (args.lr is not None), "Must provide model, task, and lr for prediction eval"
if args.task != 'ner':
# load validation predictions
if args.validation:
# load model trained on embedding
modelpath1 = f"{path1}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed1}_lr_{args.lr}"
preds1 = np.array(pickle.load(open(f'{modelpath1}_eval.val.pred', "rb")))
modelpath2 = f"{path2}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed2}_lr_{args.lr}"
preds2 = np.array(pickle.load(open(f'{modelpath2}_eval.val.pred', "rb")))
print(len(preds1), len(preds2))
dist = (1 - np.sum(preds1 == preds2) / float(len(preds1)))*100
# make sure logs are complete
assert check_sent_complete(modelpath1, modelpath2)
else:
# load model trained on embedding
# hacky soln to deal with new naming for seed experiments
try:
modelpath1 = f"{path1}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed1}_lr_{args.lr}"
preds1 = np.array(pickle.load(open(f'{modelpath1}.pred', "rb")))
except:
modelpath1 = f"{path1}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed1}_data_{seed1}_lr_{args.lr}"
preds1 = np.array(pickle.load(open(f'{modelpath1}.pred', "rb")))
try:
modelpath2 = f"{path2}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed2}_lr_{args.lr}"
preds2 = np.array(pickle.load(open(f'{modelpath2}.pred', "rb")))
except:
modelpath2 = f"{path2}/{args.task}/model_{args.model}_dropout_0.5_seed_{seed2}_data_{seed2}_lr_{args.lr}"
print(modelpath2)
preds2 = np.array(pickle.load(open(f'{modelpath2}.pred', "rb")))
dist = (1 - np.sum(preds1 == preds2) / float(len(preds1)))*100
assert check_sent_complete(modelpath1, modelpath2)
else:
modelpath1 = f"{path1}/ner_{args.model}_lr_{args.lr}"
modelpath2 = f"{path2}/ner_{args.model}_lr_{args.lr}"
dist = ner_stability(modelpath1, modelpath2, val=args.validation)
# Compute embedding distance measure
else:
# load embeddings from text files
emb1 = Embedding(path1)
emb2 = Embedding(path2)
if args.dist == "sem_disp":
dist = emb2.sem_disp(other=emb1, n=args.truncate)
elif args.dist == "eigen_overlap":
dist = emb2.eigen_overlap(other=emb1, n=args.truncate)
elif args.dist == 'weighted_eigen_overlap':
dist = emb2.eigen_overlap(other=emb1, weighted=True, exp=args.exp, normalize=not args.no_norm, symmetric=args.symmetric, n=args.truncate)
elif args.dist == 'eis':
assert args.truncate > 0, 'Need to use top n for anchor metric'
print(f'Loading {anchor_path}.')
curr_anchor, other_anchor, vocab_anchor = pickle.load(open(anchor_path, 'rb'))
dist = emb2.eis(emb1, curr_anchor=curr_anchor, other_anchor=other_anchor, vocab=vocab_anchor, exp=args.exp, n=args.truncate)
elif args.dist == 'fro_norm':
dist = emb2.fro_norm(other=emb1, n=args.truncate)
elif args.dist == 'pip':
dist = emb2.pip_loss(other=emb1, n=args.truncate, random=args.random)
elif 'knn' in args.dist:
dist = emb2.knn(other=emb1, n=args.truncate, nquery=args.nquery, nneighbors=args.nneighbors)
return dist
def main():
args = parse_args()
os.makedirs(args.out, exist_ok=True)
algos = [args.algo]
assert args.dist != 'weighted_eigen_overlap' or args.exp is not None, "Must provide exponent for weighted eigen overlap."
assert args.dim is not None or not args.compressed, "Must provide a dimension for compression evaluation"
# learning rates used to train each embedding
wiki_lrs = {'glove': 0.01, 'mc': 0.2, 'w2v_cbow': 0.05, 'pi': None, 'ft_sg': 0.05}
seeds = [1234, 1235, 1236]
# Set dimensions
if args.dim:
dims = [args.dim]
else:
dims = [25, 50, 100, 200, 400, 800]
# Set precisions
if args.bitrate:
bitrates = [args.bitrate]
else:
bitrates = [1,2,4,8,16,32]
dist_tag = get_dist_tag(args)
results = []
pool = mp.Pool(processes=args.nprocs)
for algo in algos:
# use same learning rate across dimensions
lr = wiki_lrs[algo]
dim = args.dim
for bitrate in bitrates:
seed_runs = [pool.apply_async(run_seed, args=(algo,seed,dim,lr,bitrate,args.compress_type,args)) for seed in seeds]
seed_results = [p.get() for p in seed_runs]
for i,seed in enumerate(seeds):
row = {}
row["algo"] = algo
row["seed"] = seed
row["dim"] = dim
row["lr"] = lr
row["bitrate"] = bitrate
row[dist_tag] = seed_results[i]
row["compress_type"] = args.compress_type
print(row)
results.append(row)
# Dump results
df_results = pd.DataFrame(results)
if args.compressed:
df_sum = df_results.groupby(['algo', 'bitrate']).aggregate(['mean', 'std']).reset_index()
else:
df_sum = df_results.groupby(['algo', 'dim']).aggregate(['mean', 'std']).reset_index()
print(df_results)
print(df_sum)
dist_tag = get_final_dist_tag(dist_tag, args)
df_results_path = f"{args.out}/{args.algo}_{dist_tag}_results.pkl"
df_results.to_pickle(df_results_path)
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/analysis/dim_stability_analysis.py |
"""
Generates the selection criterion results
-- requires data file with embedding distance and disagreement
between pairs of embeddings as input.
"""
import argparse
import csv
import numpy as np
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--emb_metrics', type=str, nargs='+', required=True,
help='List of embedding metrics')
parser.add_argument('--ds_metrics', type=str, nargs='+', required=True,
help='List of downstream metrics')
parser.add_argument('--csv-file', type=str, required=True,
help='CSV file with pair results for emb metrics and DS results')
parser.add_argument('--acc-file', type=str, required=True,
help='File to write accuracy results to')
parser.add_argument('--rob-file', type=str, required=True,
help='File to write robustness results to')
parser.add_argument('--same-space', action='store_true',
help='Restrict selection to the same space budget')
parser.add_argument('--verbose', action='store_true',
help='Print information about selection')
args = parser.parse_args()
return args
def get_selection_error(emb_metric, ds_metric, df, space_budget=None,
verbose=False, seed=None, same_space=False):
"""
Returns the selection error when using emb_metric to select the more
stable pair on ds_metric and the maximum difference to the oracle when
a mistake is made.
"""
# Only compare pairs of a specific seed
df = df.loc[df['seed'] == seed].reset_index()
n_rows = len(df.index)
count = 0.
total = 0.
max_diff = 0
idx = None
# Iterate over all pairs of pairs
for i in range(n_rows-1):
for j in range(i+1, n_rows):
row1 = df.loc[i][emb_metric]
row2 = df.loc[j][emb_metric]
ds_row1 = df.loc[i][ds_metric]
ds_row2 = df.loc[j][ds_metric]
# Skip pairs of pairs where either pair exceed space budget
if space_budget is not None:
if (df.loc[i]['space'] > space_budget
or df.loc[j]['space'] > space_budget):
continue
# If same_space, only compare pairs of pairs of the same space budget
if same_space and (df.loc[i]['space'] != df.loc[j]['space']):
continue
# most stable emb depends on metric
# for knn and eigen_overlap, higher value is more stable
# for other metrics, lower value is more stable
if 'knn' in emb_metric or emb_metric == 'eigen_overlap_top_10000':
emb_vote = np.argmax([row1, row2])
else:
emb_vote = np.argmin([row1, row2])
# most stable downstream is smallest %
ds_vote = np.argmin([ds_row1, ds_row2])
# incorrect vote
if emb_vote != ds_vote:
count += 1
# keep track to compute the max. difference to oracle
diff = np.abs(ds_row1 - ds_row2)
if diff > max_diff:
max_diff = diff
idx = (i, j)
total += 1
error = count / total
if verbose:
print(f'Maximum difference {max_diff} @ {idx}')
print(f'Compared {total} pairs')
return error, max_diff
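# Worked example (hypothetical numbers): for two pairs A and B under the same
# seed, suppose A has the smaller embedding distance (sem_disp 0.10 vs. 0.15)
# but the larger downstream disagreement (8% vs. 6%). The embedding metric
# "votes" for A while the oracle picks B, so this pair-of-pairs adds one
# selection error, and |8 - 6| = 2 is a candidate for max_diff.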
def compute_sel_results(df, acc_file, rob_file, emb_metrics, ds_metrics,
space_budgets=[None], seeds=[1234, 1235, 1236], verbose=False,
same_space=False):
"""
Write the selection error and max. error results to acc_file and rob_file,
respectively. Iterate over emb_metrics and ds_metrics, computing these
values for each combination and reporting the result as the average over
seeds.
"""
with open(acc_file, 'w') as f1, open(rob_file, 'w') as f2:
writer1 = csv.writer(f1)
writer1.writerow(['metric'] + ds_metrics)
writer2 = csv.writer(f2)
writer2.writerow(['metric'] + ds_metrics)
for budget in space_budgets:
for emb_metric in emb_metrics:
emb_results_acc = []
emb_results_robust = []
for ds_metric in ds_metrics:
seed_error = []
seed_diff = []
for seed in seeds:
error, max_diff = get_selection_error(emb_metric,
ds_metric, df, space_budget=budget, verbose=verbose,
seed=seed, same_space=same_space)
if verbose:
print(emb_metric, ds_metric, error, max_diff)
seed_error.append(error)
seed_diff.append(max_diff)
# take average and max over seed
emb_results_acc.append(np.mean(seed_error))
emb_results_robust.append(np.max(seed_diff))
writer1.writerow([emb_metric] + emb_results_acc)
writer2.writerow([emb_metric] + emb_results_robust)
def main():
args = parse_args()
# Read in pandas dataframe
df = pd.read_csv(args.csv_file)
compute_sel_results(df=df, acc_file=args.acc_file, rob_file=args.rob_file,
emb_metrics=args.emb_metrics, ds_metrics=args.ds_metrics,
same_space=args.same_space, verbose=args.verbose)
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/analysis/selection_criterion.py |
"""
Save the high-precision, full-dimensional embeddings needed for the eigenspace instability measure
in advance for more efficient analysis.
"""
from anchor.embedding import Embedding
import argparse
import pickle
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--algo', type=str, help='Embedding algorithm', required=True)
parser.add_argument('--n', type=int, help='Number to truncate', default=10000)
parser.add_argument('--embdir', type=str, help='Location to load embs and store anchors', default='runs/embs')
args = parser.parse_args()
seeds = [1234, 1235, 1236]
for seed in seeds:
if args.algo == 'mc':
path1 = f'{args.embdir}/wiki_2017/mc_wiki.en.txt_2017_seed_{seed}_dim_800_lr_0.2.50.txt_br_32'
path2 = f'{args.embdir}/wiki_2018/mc_wiki.en.txt_2018_seed_{seed}_dim_800_lr_0.2.50.txt_br_32_same_range'
elif args.algo == 'w2v_cbow':
path1 = f'{args.embdir}/wiki_2017/w2v_cbow_wiki.en.txt_2017_seed_{seed}_dim_800_lr_0.05.50.w.txt_br_32'
path2 = f'{args.embdir}/wiki_2018/w2v_cbow_wiki.en.txt_2018_seed_{seed}_dim_800_lr_0.05.50.w.txt_br_32_same_range'
else:
raise ValueError('Algorithm not supported')
emb1 = Embedding(path1)
emb2 = Embedding(path2)
print(f'Loaded {path1} and {path2}')
emb2_anchor, emb1_anchor, shared_vocab = emb2.get_subembeds_same_vocab(emb1, n=args.n, return_vocab=True)
filename = f'{args.embdir}/wiki_2017/{args.algo}_anchor_seed_{seed}_top_{args.n}.pkl'
with open(filename, 'wb') as f:
pickle.dump((emb2_anchor, emb1_anchor, shared_vocab), f)
print(f'Saved to {filename}')
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/analysis/create_anchors.py |
"""
Fits linear-log models to the instability v. memory, instability v. dimension,
and instability v. precision trends.
"""
import argparse
import pandas as pd
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--csv-files', type=str, nargs='+', required=True,
help='CSV file (list) with pair results for emb metrics and DS results')
parser.add_argument('--tasks', type=str, nargs='+',
default=[
'la_sst_no_emb_norm',
'la_mr_no_emb_norm',
'la_subj_no_emb_norm',
'la_mpqa_no_emb_norm',
'rnn_no_crf_ner'],
help='List of downstream metrics')
parser.add_argument('--thresh', type=int, default=1000,
help='Maximum memory budget')
parser.add_argument('--dim', action='store_true',
help='Fit the trend with respect to dimension')
parser.add_argument('--prec', action='store_true',
help='Fit the trend with respect to precision')
return parser.parse_args()
def solve_lstsq_combine(dfs, thresh, tasks):
"""
Fit a single trend to the downstream stability v. memory results across
embedding algorithms and downstream tasks.
"""
ncombos = len(tasks) * len(dfs)
space_vals = np.log2(dfs[0].loc[dfs[0]['space'] < thresh]['space'].values)
num_vals = len(space_vals)
X = np.zeros((num_vals*len(tasks) * len(dfs), ncombos+1))
y = np.zeros((num_vals*len(tasks) * len(dfs)))
for i, df in enumerate(dfs):
# Only consider those results less than thresh space budget
df_subset = df.loc[df['space'] < thresh]
for j, t in enumerate(tasks):
idx = i*len(tasks) + j
y[idx*num_vals:(idx+1)*num_vals] = df_subset[t].values
# First column is the log2(m) results
X[idx*num_vals:(idx+1)*num_vals][:,0] = space_vals
# Append a 1-hot vector to learn a separate y-intercept per task
X[idx*num_vals:(idx+1)*num_vals][:,idx+1] = np.ones(num_vals)
# print(f'Dimensions of data matrix: {X.shape}')
return np.linalg.inv(X.T @ X) @ X.T @ y
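# Note on the fit above: it is an ordinary least-squares solve of the linear-log
# model  instability ~ slope * log2(memory) + b_{algo, task},  with a single
# shared slope (first column of X) and one intercept per algorithm/task
# combination (the one-hot columns). The normal-equations form
# (X^T X)^{-1} X^T y is used; np.linalg.lstsq(X, y) would give the same solution
# with better numerical conditioning.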
def solve_lstsq_combine_prec(dfs, thresh, tasks, dims=[25, 50, 100, 200, 400, 800]):
"""
Fit a single trend to the downstream stability v. *precision* results across
embedding algorithms and downstream tasks and *dimensions*.
"""
ncombos = len(tasks) * len(dfs) * len(dims)
num_vals = len(dfs[0].loc[(dfs[0]['space'] < thresh)]['space'].values)
X = np.zeros((num_vals*len(tasks)*len(dfs), ncombos+1))
y = np.zeros((num_vals*len(tasks)*len(dfs)))
row_idx = 0
col_idx = 0
for i, df in enumerate(dfs):
for j, t in enumerate(tasks):
for k,dim in enumerate(dims):
df_subset = df.loc[(df['space'] < thresh) & (df['dim'] == dim)]
prec_vals = np.log2(df_subset['bitrate'].values)
# Number of values diffs by dimension
num_vals = len(prec_vals)
y[row_idx:row_idx+num_vals] = df_subset[t].values
X[row_idx:row_idx+num_vals][:,0] = prec_vals
X[row_idx:row_idx+num_vals][:,col_idx+1] = np.ones(num_vals)
row_idx += num_vals
# Learn a different y-intercept for each algo/task/dim combination
col_idx += 1
# print(f'Dimensions of data matrix: {X.shape}')
return np.linalg.inv(X.T @ X) @ X.T @ y
def solve_lstsq_combine_dim(dfs, thresh, tasks, bitrates=[1,2,4,8,16,32]):
"""
Fit a single trend to the downstream stability v. *dimension* results across
embedding algorithms and downstream tasks and *precisions*.
"""
ncombos = len(tasks) * len(dfs) * len(bitrates)
num_vals = len(dfs[0].loc[(dfs[0]['space'] < thresh)]['space'].values)
X = np.zeros((num_vals*len(tasks)*len(dfs), ncombos+1))
y = np.zeros((num_vals*len(tasks)*len(dfs)))
row_idx = 0
col_idx = 0
for i, df in enumerate(dfs):
for j, t in enumerate(tasks):
for k, bitrate in enumerate(bitrates):
df_subset = df.loc[(df['space'] < thresh) & (df['bitrate'] == bitrate)]
space_vals = np.log2(df_subset['dim'].values)
# Number of values diffs by precision
num_vals = len(space_vals)
y[row_idx:row_idx+num_vals] = df_subset[t].values
X[row_idx:row_idx+num_vals][:,0] = space_vals
X[row_idx:row_idx+num_vals][:,col_idx+1] = np.ones(num_vals)
row_idx += num_vals
# Learn a different y-intercept for each algo/task/prec combination
col_idx += 1
# print(f'Dimensions of data matrix: {X.shape}')
return np.linalg.inv(X.T @ X) @ X.T @ y
def main():
args = parse_args()
dfs = []
for file in args.csv_files:
dfs.append(pd.read_csv(file))
if args.dim:
print('Instability v. dimension trend')
m = solve_lstsq_combine_dim(dfs=dfs, thresh=args.thresh, tasks=args.tasks)
elif args.prec:
print('Instability v. precision trend')
m = solve_lstsq_combine_prec(dfs=dfs, thresh=args.thresh, tasks=args.tasks)
else:
print('Instability v. memory trend')
m = solve_lstsq_combine(dfs=dfs, thresh=args.thresh, tasks=args.tasks)
print(f'Slope: {m[0]}')
print(f'y-intercepts: {m[1:]}')
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/analysis/fit_trend.py |
"""
Utils file for analysis.
"""
import os
import subprocess
def check_ner_complete(modelpath1, modelpath2):
""" Checks that the NER task is complete by locating the final-model artifact. """
file1_final = f"{modelpath1}/final-model.pt"
file2_final = f"{modelpath2}/final-model.pt"
assert os.path.isfile(file1_final), file1_final
assert os.path.isfile(file2_final), file2_final
def check_sent_complete(modelpath1, modelpath2):
"""Checks that the logs for the sentence analysis task are complete."""
try:
ff = open(f'{modelpath1}.log', 'r')
dat = [_.strip() for _ in ff]
error1 = 1-float(dat[-2].strip().split(': ')[1])
except:
return False
try:
ff = open(f'{modelpath2}.log', 'r')
dat = [_.strip() for _ in ff]
error2 = 1-float(dat[-2].strip().split(': ')[1])
except:
return False
return True
def run_task(taskdir, taskfile, embpath):
results = subprocess.check_output(
["python", os.path.join(taskdir, "ws_eval.py"), "GLOVE", embpath, taskfile]
)
correlation = results.decode("utf8").strip().split(" ")[-1]
return float(correlation)
| anchor-stability-master | scripts/analysis/utils.py |
"""
Generates the csvs of all of the embedding and downstream results.
"""
import argparse
import pandas as pd
import numpy as np
# keep track of all the optimal parameters found from the grid search
optimal_wiki = {
('mc', 'la_sst_no_emb_norm'): 0.001,
('mc', 'la_subj_no_emb_norm'): 0.1,
('mc', 'la_mr_no_emb_norm'): 0.1,
('mc', 'la_mpqa_no_emb_norm'): 0.001,
('mc', 'rnn_no_crf_ner'): 1.0,
('w2v_cbow', 'la_sst_no_emb_norm'): 0.0001,
('w2v_cbow', 'la_subj_no_emb_norm'): 0.0001,
('w2v_cbow', 'la_mr_no_emb_norm'): 0.001,
('w2v_cbow', 'la_mpqa_no_emb_norm'): 0.001,
('w2v_cbow', 'rnn_no_crf_ner'): 0.1,
('mc', 'la_sst_no_emb_norm_val'): 0.001,
('mc', 'la_subj_no_emb_norm_val'): 0.1,
('mc', 'la_mr_no_emb_norm_val'): 0.1,
('mc', 'la_mpqa_no_emb_norm_val'): 0.001,
('mc', 'rnn_no_crf_ner_val'): 1.0,
('w2v_cbow', 'la_sst_no_emb_norm_val'): 0.0001,
('w2v_cbow', 'la_subj_no_emb_norm_val'): 0.0001,
('w2v_cbow', 'la_mr_no_emb_norm_val'): 0.001,
('w2v_cbow', 'la_mpqa_no_emb_norm_val'): 0.001,
('w2v_cbow', 'rnn_no_crf_ner_val'): 0.1,
('glove', 'la_sst_no_emb_norm'): 0.01,
('glove', 'la_subj_no_emb_norm'): 0.01,
('glove', 'la_mr_no_emb_norm'): 0.01,
('glove', 'la_mpqa_no_emb_norm'): 0.001,
('glove', 'rnn_no_crf_ner'): 1.0,
('ft_sg', 'la_sst_no_emb_norm'): 0.001,
('ft_sg', 'la_subj_no_emb_norm'): 100.0,
('ft_sg', 'la_mr_no_emb_norm'): 0.01,
('ft_sg', 'la_mpqa_no_emb_norm'): 0.01,
('ft_sg', 'rnn_no_crf_ner'): 1.0,
}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--algo', type=str, help='Embedding algorithm', required=True)
parser.add_argument('--default', action='store_true', help='Use default hyperparameters for DS models')
parser.add_argument('--datadir', type=str, help='Data directory to read extracted dataframes', required=True)
parser.add_argument('--resultdir', type=str, help='Result directory to write csv of results', required=True)
parser.add_argument('--emb_metrics', type=str, nargs='+', required=True,
help='List of embedding metrics')
parser.add_argument('--ds_metrics', type=str, nargs='+', required=True,
help='List of downstream metrics')
return parser.parse_args()
def read(algo, dist, datadir):
df = pd.read_pickle(f"{datadir}/{algo}_{dist}_results.pkl")
return df
def gather_all_tasks(algo, optimal, emb_metrics, ds_metrics, dims, datadir):
total_metrics = ds_metrics + emb_metrics
merge_tags = ['space', 'bitrate', 'dim', 'seed', 'lr', 'compress_type', 'algo']
# merge over metrics
total_df = None
for m in total_metrics:
# concat dimensions
dfs = []
for d in dims:
if m in ds_metrics:
if optimal:
# hacky soln to avoid copying and pasting vals in dictionary
metric = m
if 'quality' in m:
metric = ''.join(m.split('_quality'))
full_metric = f'{m}_dim_{d}_uniform_lr_{optimal_wiki[(algo,metric)]}_wiki'
else:
full_metric = f'{m}_dim_{d}_uniform_lr_0.001_wiki'
else:
full_metric = f'{m}_dim_{d}_uniform_wiki'
single_df = read(algo, full_metric, datadir=datadir)
single_df['space'] = single_df['bitrate'] * single_df['dim']
dfs.append(single_df)
df = pd.concat(dfs)
# merge with other metrics
if total_df is None:
total_df = df
else:
total_df = total_df.merge(df, on=merge_tags)
return total_df
def main():
args = parse_args()
dims = [25, 50, 100, 200, 400, 800]
emb_metrics = args.emb_metrics
ds_metrics = args.ds_metrics
total_df = gather_all_tasks(algo=args.algo, optimal=not args.default,
emb_metrics=emb_metrics, ds_metrics=ds_metrics, dims=dims, datadir=args.datadir)
if not args.default:
total_df.to_csv(f'{args.resultdir}/{args.algo}_optimal_no_emb_norm_top_10000.csv', index=False)
else:
total_df.to_csv(f'{args.resultdir}/{args.algo}_no_emb_norm_top_10000.csv', index=False)
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/analysis/gather_results.py |
"""
Computes the distance to the oracle when given a selection of pairs at
the same memory budget and must select the pair which attains the lowest
embedding distance measure.
"""
import pandas as pd
import argparse
import csv
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--csv-file', type=str, required=True,
help='CSV file with pair results for emb metrics and DS results')
parser.add_argument('--out-file', type=str, required=True,
help='File to write results to')
parser.add_argument('--emb_metrics', type=str, nargs='+', required=True,
help='List of embedding metrics')
parser.add_argument('--ds_metrics', type=str, nargs='+', required=True,
help='List of downstream metrics')
return parser.parse_args()
def compute_diff_to_oracle(df, summary_file, emb_metrics, ds_metrics):
"""
Compute the average difference to the oracle across space budgets and
seeds for each emb_metric and ds_metric combination and write to
the summary_file.
"""
space_vals = set(df['space'])
seeds = set(df['seed'])
space_vals = sorted(space_vals)
all_results = {}
for metric in emb_metrics:
metric_dict = {}
for space in space_vals:
# average over seeds
for seed in seeds:
vals = {}
# get rows for space
# need to match over seed as well
subset = df.loc[(df['space'] == space) & (df['seed'] == seed)]
# there is a vote to make (at least two things)
if len(subset.index) > 1:
for dist in ds_metrics:
oracle = subset.loc[[subset[(dist)].idxmin()]]
oracle_val = oracle[(dist)].values[0]
if 'baseline' not in metric:
# high means more stable
if 'knn' in metric or metric == 'eigen_overlap_top_10000':
predicted = subset.loc[[subset[(metric)].idxmax()]]
# low means more stable
else:
predicted = subset.loc[[subset[(metric)].idxmin()]]
else:
if 'fp' in metric:
# select highest bitrate
predicted = subset.loc[[subset['bitrate'].idxmax()]]
else:
# select lowest bitrate
predicted = subset.loc[[subset['bitrate'].idxmin()]]
predicted_val = predicted[(dist)].values[0]
diff = predicted_val-oracle_val
if dist in metric_dict:
metric_dict[dist].append(diff)
else:
metric_dict[dist] = [diff]
all_results[metric] = metric_dict
# write averages
with open(summary_file, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['metric'] + ds_metrics)
# write average values
for metric in emb_metrics:
writer.writerow([metric] + [str(np.mean(all_results[metric][ds_metric]))
for ds_metric in ds_metrics])
writer.writerow([])
# write max values
for metric in emb_metrics:
writer.writerow([metric] + [str(np.max(all_results[metric][ds_metric]))
for ds_metric in ds_metrics])
def main():
args = parse_args()
# Read in pandas dataframe
df = pd.read_csv(args.csv_file)
compute_diff_to_oracle(df=df, summary_file=args.out_file,
emb_metrics=args.emb_metrics, ds_metrics=args.ds_metrics)
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/analysis/diff_to_oracle.py |
"""
Generates the Spearman correlations between the embedding and downstream measures.
"""
import argparse
import csv
import numpy as np
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--csv-file', type=str, required=True,
help='CSV file with pair results for emb metrics and DS results')
parser.add_argument('--out-file', type=str, required=True,
help='File to write results to')
parser.add_argument('--emb_metrics', type=str, nargs='+', required=True,
help='List of embedding metrics')
parser.add_argument('--ds_metrics', type=str, nargs='+', required=True,
help='List of downstream metrics')
return parser.parse_args()
def get_corr(df, emb_metrics, ds_metrics, summary_file):
"""
Writes the Spearman correlations for all emb_metrics and ds_metrics pairs
to summary_file.
"""
with open(summary_file, 'w') as f:
writer = csv.writer(f)
writer.writerow(['metric'] + ds_metrics)
for em in emb_metrics:
corr = []
for dm in ds_metrics:
# higher is more stable for these metrics, reverse to maintain consistent
# correlation value meaning
if 'knn' in em or em == 'eigen_overlap_top_10000':
emb_val = 1-df[em]
else:
emb_val = df[em]
# get spearman corr of column (aggregates over seeds and space budgets)
correlation = pd.Series.corr(emb_val, df[dm], method='spearman')
corr.append(correlation)
writer.writerow([em] + corr)
def main():
args = parse_args()
# Read in pandas dataframe
df = pd.read_csv(args.csv_file)
get_corr(df=df, summary_file=args.out_file,
emb_metrics=args.emb_metrics, ds_metrics=args.ds_metrics)
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/analysis/get_correlation.py |
"""
Run the embedding compression using smallfry's implementation.
"""
from smallfry.compress import compress_uniform
from smallfry.utils import load_embeddings, save_embeddings
import argparse
import io
import numpy as np
import os
import sys
from anchor.embedding import Embedding
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--emb_path', type=str, required=True, help='Embedding to compress')
parser.add_argument('--base_emb_path', type=str, help='Base embedding to use for alignment and compression intervals')
parser.add_argument('--bitrate', type=int, default=1, required=True, help='Precision of embedding')
parser.add_argument('--out', type=str, required=True, help='Where to save compressed embedding')
parser.add_argument('--seed', type=int, required=True)
return parser.parse_args()
def main():
args = parse_args()
os.makedirs(os.path.dirname(args.out), exist_ok=True)
emb2 = Embedding(args.emb_path)
    if args.base_emb_path is not None:
        emb1 = Embedding(args.base_emb_path)
        # align embeddings first
        print('Aligning!')
        emb2.align(emb1)
        Xq, frob_squared_error, elapsed = compress_uniform(X=emb2.m, bit_rate=args.bitrate,
            adaptive_range=True, X_0=emb1.m)
    else:
        Xq, frob_squared_error, elapsed = compress_uniform(X=emb2.m, bit_rate=args.bitrate,
            adaptive_range=True)
print(frob_squared_error, elapsed)
print(Xq.shape)
# save compressed embedding
save_embeddings(path=args.out, embeds=Xq, wordlist=emb2.iw)
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/embedding_generation/gen_compressed.py |
"""
Generate commands for compressing embeddings.
"""
import argparse
import pickle
def parse_args():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--resultdir1",
type=str,
required=True,
)
parser.add_argument(
"--resultdir2",
type=str,
required=True,
)
parser.add_argument('--algo',
type=str,
required=True,
help='Embedding algorithm')
parser.add_argument('--bitrate',
type=int,
help='Run specific bitrate')
return parser.parse_args()
def main():
args = parse_args()
algo = args.algo
bitrates = [1,2,4,8,16,32]
if args.bitrate:
bitrates = [args.bitrate]
seeds = range(1234, 1237)
dims = [25, 50, 100, 200, 400, 800]
lrs = {'mc': 0.2, 'w2v_cbow': 0.05, 'glove': 0.01}
for seed in seeds:
for dim in dims:
for bitrate in bitrates:
if algo != 'ft_sg':
lr = lrs[algo]
if algo == "w2v_cbow":
end_tag = "50.w.txt"
elif algo == 'mc':
end_tag = "50.txt"
elif algo == 'glove':
end_tag = "050.w.txt"
path1 = f"{args.resultdir1}/{algo}_wiki.en.txt_2017_seed_{seed}_dim_{dim}_lr_{lr}.{end_tag}"
path2 = f"{args.resultdir2}/{algo}_wiki.en.txt_2018_seed_{seed}_dim_{dim}_lr_{lr}.{end_tag}"
else:
path1 = f"{args.resultdir1}/ft_sg_wiki.en.txt_2017_seed_{seed}_dim_{dim}.vec"
path2 = f"{args.resultdir2}/ft_sg_wiki.en.txt_2018_seed_{seed}_dim_{dim}.vec"
# gen compressed 2017
print(f'python scripts/embedding_generation/gen_compressed.py --emb_path {path1} --out {path1}_br_{bitrate} --bitrate {bitrate} --seed {seed}')
# gen compressed 2018, align to 2017 embedding
print(f'python scripts/embedding_generation/gen_compressed.py --emb_path {path2} --base_emb_path {path1} --out {path2}_br_{bitrate}_same_range --bitrate {bitrate} --seed {seed}')
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/embedding_generation/compression_experiment_wiki.py |
"""
Downstream model training/prediction on top of an embedding; called from gen_model_cmds.
"""
import argparse
import logging
import glob
import numpy as np
import os
import random
import sys
import torch
from third_party.sentence_classification.train_classifier import train_sentiment
from third_party.flair.ner import train_ner, eval_ner
def parse_args():
parser = argparse.ArgumentParser(description="")
parser.add_argument("--embed_path", type=str, required=True)
parser.add_argument("--data_path", type=str)
parser.add_argument("--seed", type=int, required=True)
parser.add_argument("--model", type=str)
parser.add_argument("--resultdir", type=str)
parser.add_argument("--dataset", type=str)
parser.add_argument("--task", type=str, required=True)
parser.add_argument("--no_normalize", action='store_true', help="Do not normalize embeddings")
parser.add_argument("--dim", type=int)
parser.add_argument("--predict", action='store_true')
parser.add_argument("--lr", type=float)
parser.add_argument("--crf", action="store_true", help="Use CRF for NER")
parser.add_argument("--finetune", action='store_true', help='Finetune embeddings')
parser.add_argument("--model_seed", type=int, help="Seed of the model")
parser.add_argument("--data_seed", type=int, help="Seed of the data")
return parser.parse_args()
def evaluate_ner(embed_path, resultdir, datadir, use_crf, lr):
train_ner(embed_path, resultdir, datadir, use_crf, lr)
def predict_ner(embed_path, resultdir, datadir, use_crf):
eval_ner(embed_path, resultdir, datadir, use_crf)
def evaluate_sentiment(
embed_path, data_path, result_dir, seed, model, dataset="sst", epochs=100, lr=0.001,
no_normalize=False, load_mdl=None, finetune=False, data_seed=None, model_seed=None
):
# use same seed if not provided
if data_seed is None:
data_seed = seed
if model_seed is None:
model_seed = seed
cmdlines = [
"--dataset",
dataset,
"--path",
data_path + "/",
"--embedding",
embed_path,
"--max_epoch",
str(epochs),
"--model_seed",
str(model_seed),
"--data_seed",
str(data_seed),
"--seed",
str(seed),
"--lr",
str(lr),
"--out",
str(result_dir)
]
if model == "la":
cmdlines += ["--la"]
elif model == "cnn":
cmdlines += ["--cnn"]
elif model == "lstm":
cmdlines += ["--lstm"]
if no_normalize:
cmdlines += ["--no_normalize"]
if load_mdl is not None:
cmdlines += ['--load_mdl', load_mdl, '--eval']
if finetune:
cmdlines += ['--finetune']
err_valid, err_test = train_sentiment(cmdlines)
def main():
args = parse_args()
# set seeds -- need to set again per app if otherwise changed to defaults in apps
seed = args.seed
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic=True
np.random.seed(seed)
random.seed(seed)
if args.resultdir is not None:
os.makedirs(args.resultdir, exist_ok=True)
if args.task == 'sentiment':
if not args.predict:
evaluate_sentiment(
embed_path=args.embed_path,
data_path=args.data_path,
seed=args.seed,
model=args.model,
result_dir=args.resultdir,
dataset=args.dataset,
no_normalize=args.no_normalize,
lr=args.lr,
finetune=args.finetune,
model_seed=args.model_seed,
data_seed=args.data_seed
)
else:
pretrained = glob.glob(f'{args.resultdir}/*ckpt')
assert len(pretrained) == 1, "More than one model available"
pretrained = pretrained[0]
print(pretrained)
evaluate_sentiment(
embed_path=args.embed_path,
data_path=args.data_path,
seed=args.seed,
model=args.model,
result_dir=args.resultdir,
dataset=args.dataset,
no_normalize=args.no_normalize,
lr=args.lr,
load_mdl=pretrained,
model_seed=args.model_seed,
data_seed=args.data_seed
)
elif args.task == 'ner':
if args.predict:
predict_ner(embed_path=args.embed_path,
resultdir=args.resultdir,
datadir=args.data_path,
use_crf=args.crf)
else:
evaluate_ner(embed_path=args.embed_path,
resultdir=args.resultdir,
datadir=args.data_path,
use_crf=args.crf,
lr=args.lr)
if __name__ == "__main__":
main()
| anchor-stability-master | scripts/model_training/train_downstream.py |
"""
Generate commands for model training for a list of embeddings.
"""
import argparse
import glob
import os
import sys
def parse_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--homedir', type=str, help='Root directory of code', required=True)
parser.add_argument('--python', type=str, help='Python version', required=True)
parser.add_argument('--resultdir', type=str, help='Directory to save results', required=True)
parser.add_argument('--dataset', type=str, help='Dataset for sentiment analysis')
parser.add_argument('--model', type=str, default='la', choices=['cnn', 'lstm', 'la'])
parser.add_argument('--task', type=str, choices=['sentiment', 'ner'], required=True)
parser.add_argument('--gpu', type=int, help='GPU id', default=0)
parser.add_argument('--predict', action='store_true')
parser.add_argument('--lr', type=float)
parser.add_argument('--crf', action='store_true', help='Use CRF for NER')
parser.add_argument('--finetune', action='store_true', help='Finetune embeddings')
parser.add_argument('--model_seed', type=int, help='Seed of the model')
parser.add_argument('--data_seed', type=int, help='Seed of the data')
parser.add_argument('--seed_test', action='store_true', help='Used for testing different model and data seeds')
return parser.parse_args()
def main():
args = parse_args()
resultdir = args.resultdir
homedir = args.homedir
# read in list of embeddings from stdin
embs = [line.strip() for line in sys.stdin.readlines()]
os.makedirs(resultdir, exist_ok=True)
if args.dataset == 'sst2':
dataset = 'sst'
else:
dataset = args.dataset
assert len(embs) > 0, 'Must provide embs with stdin'
for emb in embs:
terms = os.path.basename(emb).split('_')
seed = terms[terms.index('seed') + 1].split('.')[0]
dim = terms[terms.index('dim') + 1].split('.')[0]
if args.task == 'sentiment':
assert args.lr is not None, 'Must provide lr'
assert args.model is not None and args.dataset is not None, 'Must provide model and dataset for sentiment task!'
line = f'CUDA_VISIBLE_DEVICES={args.gpu} {args.python} {homedir}/scripts/model_training/train_downstream.py --dataset {dataset} --embed_path {emb} --data_path {homedir}/third_party/sentence_classification/data --seed {seed} --model {args.model} --task sentiment --lr {args.lr} --no_normalize'
resultpath = f'{resultdir}/{os.path.basename(emb)}/{args.dataset}'
if args.predict:
line += ' --predict'
if args.finetune:
line += ' --finetune'
if args.model_seed is not None:
line += f' --model_seed {args.model_seed}'
if args.data_seed is not None:
line += f' --data_seed {args.data_seed}'
if args.seed_test:
line += f' --model_seed {int(seed)+1000} --data_seed {int(seed)+1000}'
print(f'{line} --resultdir {resultpath}')
elif args.task == 'ner':
assert (args.lr is not None or args.predict), 'Must provide a lr for training'
if not args.crf:
line = f'{args.python} {homedir}/scripts/model_training/train_downstream.py --embed_path {emb} --data_path {homedir}/third_party/flair/resources/tasks --seed {seed} --resultdir {resultdir}/{os.path.basename(emb)}/ner_no_crf_lr_{args.lr} --task ner --lr {args.lr}'
else:
line = f'{args.python} {homedir}/scripts/model_training/train_downstream.py --embed_path {emb} --data_path {homedir}/third_party/flair/resources/tasks --seed {seed} --resultdir {resultdir}/{os.path.basename(emb)}/ner_crf_lr_{args.lr} --crf --task ner --lr {args.lr}'
if args.predict:
line += ' --predict'
print(line)
if __name__ == '__main__':
main()
| anchor-stability-master | scripts/model_training/gen_model_cmds.py |
"""
Embedding class with embedding distance measures.
modified from https://github.com/williamleif/histwords/blob/master/representations/embedding.py
"""
import copy
import io
import logging
import numpy as np
import os
from scipy import spatial
from scipy.linalg import orthogonal_procrustes
from sklearn.neighbors import NearestNeighbors
logger = logging.getLogger(__name__)
class Embedding:
"""
Embedding class to load embeddings and compute distances between embeddings.
"""
def __init__(
self, emb_path=None, vecs=None, vocab=None, header=False):
# Use lazy loading of the embedding if not provided
if (vecs is not None and vocab is None) or (vecs is None and vocab is not None):
raise ValueError("vecs and vocab must be provided together")
self.emb_path = emb_path
self._iw = vocab
self._wi = None
self._dim = None
self._m = None
if vecs is not None:
self._m = copy.deepcopy(vecs)
if self._iw is not None:
self._wi = {w: i for i, w in enumerate(self._iw)}
if self._m is not None:
self._dim = self._m.shape[1]
self.header = header # bool, whether there is a header in emb file
@property
def wi(self):
if self._wi is None:
# Load all embedding info
self._load()
return self._wi
@property
def iw(self):
if self._iw is None:
# Load all embedding info
self._load()
return self._iw
@property
def m(self):
if self._m is None:
# Load all embedding info
self._load()
return self._m
@property
def dim(self):
if self._dim is None:
# Load all embedding info
self._load()
return self._dim
def __getitem__(self, key):
if self.oov(key):
raise KeyError
else:
return self.represent(key)
def __iter__(self):
return self.iw.__iter__()
def __contains__(self, key):
return not self.oov(key)
def oov(self, w):
return not (w in self.wi)
def represent(self, w):
if w in self.wi:
return self.m[self.wi[w], :]
return np.zeros(self.dim)
def _load(self):
"""Load embeddings from a text file."""
logger.info("Loading embedding...")
f = open(self.emb_path, "r")
dat = [_.rstrip() for _ in f]
        # Skip the header line (vocab size and dimension) if present
        if len(dat[0].split()) == 2:
            dat = dat[1:]
        # Dimension excludes the leading word token on each line
        self._dim = len(dat[0].split()) - 1
self._m = np.zeros((len(dat), self._dim))
vocab = []
cnt = 0
for i, _ in enumerate(dat):
d = _.split(' ')
if len(d) != self._dim + 1:
cnt += 1
w = ' '.join(d[:-self._dim])
self._m[i] = d[-self._dim:]
vocab.append(w)
self._wi, self._iw = dict([(a, i) for i, a in enumerate(vocab)]), vocab
if cnt > 0:
logger.debug(f"Found {cnt} empty word(s)")
f.close()
def save(self, path=None):
"""
Save embeddings to a text file. If path is None, then overwrites the embeddings at the original
path used to load the embeddings.
Uses code from: https://github.com/facebookresearch/fastText/blob/99f23802d4572ba50417b062137fbd20aa03a794/alignment/utils.py
"""
if path is None:
path = self.emb_path
assert path is not None
# Write current vecs to file
logger.info(f"Writing embedding to {path}")
if os.path.exists(path):
logger.warning(f"Overwriting existing embedding file: {path}")
n = len(self._iw)
fout = io.open(path, "w", encoding="utf-8")
if self.header:
fout.write("%d %d\n" % (n, self.dim))
for i in range(n):
fout.write(
self._iw[i]
+ " "
+ " ".join(map(lambda a: "%.6f" % a, self._m[i, :]))
+ "\n"
)
fout.close()
self.emb_path = path
def align(self, reference):
"""
Aligns the embedding to the reference Embedding object in-place via orthogonal Procrustes.
:param reference: Embedding object to align current instance to. Computes the rotation matrix
based on the shared vocabulary between the current instance and reference object.
"""
if not isinstance(reference, Embedding):
raise ValueError("Argument must be an embedding")
shared_words = list(set(reference.iw) & set(self.iw))
num_shared = len(shared_words)
logger.info(f"{num_shared} words are shared with the reference matrix.")
sub_emb1 = self.get_subembed(shared_words)
sub_emb2 = reference.get_subembed(shared_words)
R, _ = orthogonal_procrustes(sub_emb1, sub_emb2)
# Rotate entire matrix, not just new words
self._m = np.dot(self._m, R)
def normalize(self):
"""Row normalizes the embedding in-place"""
norm = np.sqrt(np.sum(self.m * self.m, axis=1))
norm[norm == 0.0] = 1.0
self._m = self.m / norm[:, np.newaxis]
def get_subembed(self, word_list):
"""
Extracts the sub-embedding for a given vocabulary.
:param word_list: list of words to extract word vectors for. Word vectors will
be returned matching the order of the words in this list.
:return: numpy array of sub-embedding
"""
assert isinstance(
word_list, list
), "Must be list to use subembed for consistent orderings"
keep_indices = [self.wi[word] for word in word_list]
return self.m[keep_indices, :]
def get_subembeds_same_vocab(self, other, n=-1, random=False, return_vocab=False):
"""
Extracts the sub-embeddings for the current instance and other Embedding object for
distance computation.
:param other: other Embedding object to compare
:param n (optional): int, number of words to sample from the current instance to form the
sub-embeddings. Samples from the shared list of words in the current instance and the other
Embedding object. Takes the top 'n' most frequent words from the current instance (assumes
current instance is sorted by frequency) if 'random' is not True.
:param random: (optional): Randomly sample the 'n' words from the full shared vocabulary
rather than taking the top 'n' words sorted by frequency.
:param return_vocab (optional): bool , return the vocabulary used to generate the sub-embeddings
:return: numpy arrays of embeddings for current instance and other Embedding object, and optionally,
the vocabulary used to generate the sub-embeddings
"""
shared_vocab = list(set(self.iw) & set(other.iw))
num_shared = len(shared_vocab)
logger.info(f"{num_shared} words are shared between the embeddings.")
if n > 0:
if not random:
# sort words by original order in self
# assuming this is by frequency, then the most frequent words are first
vocab_new = {word: self.wi[word] for word in shared_vocab}
sorted_shared_vocab , _ = zip(*sorted(vocab_new.items(), key=lambda kv: kv[1]))
shared_vocab = list(sorted_shared_vocab)[:n]
else:
print('Randomly sampling vocab')
# choose n random words
shared_vocab = [shared_vocab[i] for i in np.random.choice(len(shared_vocab), n, replace=False)]
emb1 = self.get_subembed(shared_vocab)
emb2 = other.get_subembed(shared_vocab)
if return_vocab:
return emb1, emb2, shared_vocab
return emb1, emb2
def _eigen_overlap(self, X1, X2):
"""
Helper function to implement the eigenspace overlap score as described in May et al., NeurIPS, 2019.
The values will range from 0 to 1, where 1 is more stable.
:param X1: numpy array of the first embedding matrix
:param X2: numpy array of the second embedding matrix, must have the same shape
and corresponding vocabulary order as X1.
:return: float distance
"""
# X1 and X2 are n x d where n is the dataset size, d is the feature dimensionality
assert X1.shape[0] == X2.shape[0]
U1, _, _ = np.linalg.svd(X1, full_matrices=False)
U2, _, _ = np.linalg.svd(X2, full_matrices=False)
normalizer = max(X1.shape[1], X2.shape[1])
return np.linalg.norm(np.matmul(U1.T, U2), ord="fro") ** 2 / normalizer
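    # In matrix form the unweighted score above is
    #   EOS(X1, X2) = ||U1^T U2||_F^2 / max(d1, d2),
    # where U1 and U2 hold the left singular vectors of X1 and X2; it reaches 1
    # when the embeddings have the same dimension and span the same subspace.
    # Callers normally go through the public wrapper, e.g. (hypothetical objects):
    #
    #   score = emb_2018.eigen_overlap(emb_2017, n=10000)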
def _weighted_eigen_overlap(self, X1, X2, exp=1, normalize=True):
"""
Helper function to implement a weighted variant of the eigenspace overlap score
from May et al., NeurIPS, 2019. If normalized, the values will range from 0 to 1.
:param X1: numpy array of the first embedding matrix
:param X2: numpy array of the second embedding matrix, must have the same shape
and corresponding vocabulary order as X1.
:param exp: int, scales the eigenvalues by this factor to weight their importance
:param normalize: bool, whether to normalize this measure , needed to compare across dimensions
:return: float distance
"""
# X1 and X2 are n x d where n is the dataset size, d is the feature dimensionality
assert X1.shape[0] == X2.shape[0]
U1, S1, _ = np.linalg.svd(X1, full_matrices=False)
U2, S2, _ = np.linalg.svd(X2, full_matrices=False)
if normalize:
normalizer = np.sum(np.diag(S2**(exp*2)))
return np.linalg.norm(np.matmul(np.matmul(U1.T, U2), np.diag(S2**exp)), ord="fro")**2 / normalizer
return np.linalg.norm(np.matmul(np.matmul(U1.T, U2), np.diag(S2**exp)), ord="fro")**2
def eigen_overlap(self, other, weighted=False, exp=1, normalize=True, n=-1):
"""
Computes the eigenspace overlap score between the current instance and another Embedding.
:param other: other Embedding object to compare
        :param weighted (optional): bool, whether to use the weighted variant of this measure (weights by singular values)
:param exp (optional): int, scalar hyperparameter for the weighted variant
:param normalize (optional): bool, whether to normalize the weighted variant, needed to compare across dimensions
:param n: (optional) int value representing the number of words to use to compute the distance,
where the words are assumed to be sorted by frequency in curr_anchor.
:return: float distance
"""
emb1, emb2 = self.get_subembeds_same_vocab(other, n=n)
if not weighted:
norm = self._eigen_overlap(emb1, emb2)
else:
norm = self._weighted_eigen_overlap(emb1, emb2, exp=exp, normalize=normalize)
return norm
def eis(self, other, curr_anchor, other_anchor, vocab=None, exp=3, n=-1):
"""
Computes the eigenspace instability measure between the current instance and another Embedding.
See details in Appendix B.1 of the MLSys 2020 paper "Understanding the Downstream Instability
of Word Embeddings" for the derivation of this implementation.
:param other: other Embedding object to compare
:param curr_anchor: 'anchor' embedding corresponding to current instance to compute distance
relative to, Embedding object or numpy array. Corresponds to E in the EIS equation.
:param other_anchor: 'anchor' embedding corresponding to other instance to compute distance
relative to, Embedding object or numpy array. Corresponds to E_tilde in the EIS equation.
:param vocab: (optional, required if curr_anchor is a numpy array) list of words to use to
compute the measure, if not provided computed from curr_anchor for the top 'n' most frequent
words
:param exp: (optional) int value to scale eigenvalues by for weighting measure.
Corresponds to alpha in the EIS equation.
:param n: (optional) int value representing the number of words to use to compute the distance,
where the words are assumed to be sorted by frequency in curr_anchor.
:return: float value of the distance
"""
if vocab is None:
# get vocab from anchor embs
curr_anchor, other_anchor, vocab = curr_anchor.get_subembeds_same_vocab(other_anchor, n=n, return_vocab=True)
curr_emb = self.get_subembed(vocab)
other_emb = other.get_subembed(vocab)
V1, R1, _ = np.linalg.svd(curr_anchor, full_matrices=False)
V2, R2, _ = np.linalg.svd(other_anchor, full_matrices=False)
U1, _, _ = np.linalg.svd(curr_emb, full_matrices=False)
U2, _, _ = np.linalg.svd(other_emb, full_matrices=False)
R1_a = np.diag(R1**exp)
R2_a = np.diag(R2**exp)
t1 = np.linalg.norm(np.matmul(np.matmul(U1.T, V1), R1_a), ord='fro')**2
t2 = np.linalg.norm(np.matmul(np.matmul(U2.T, V1), R1_a), ord='fro')**2
t3_1 = np.matmul(R1_a, np.matmul(V1.T, U2))
t3_2 = np.matmul(np.matmul(U2.T, U1), np.matmul(U1.T, V1))
t3 = np.trace(np.matmul(np.matmul(t3_1, t3_2), R1_a))
t4 = np.linalg.norm(np.matmul(np.matmul(U1.T, V2), R2_a), ord='fro')**2
t5 = np.linalg.norm(np.matmul(np.matmul(U2.T, V2), R2_a), ord='fro')**2
t6_1 = np.matmul(R2_a, np.matmul(V2.T, U2))
t6_2 = np.matmul(np.matmul(U2.T, U1), np.matmul(U1.T, V2))
t6 = np.trace(np.matmul(np.matmul(t6_1, t6_2), R2_a))
normalizer = np.trace(np.diag(R1**(exp*2))) + np.trace(np.diag(R2**(exp*2)))
return (t1 + t2 - 2*t3 + t4 + t5 - 2*t6) / normalizer
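    # Usage sketch: curr_anchor/other_anchor are the full-precision,
    # 800-dimensional embeddings saved by scripts/analysis/create_anchors.py,
    # e.g. (anchor_pkl and the Embedding objects are hypothetical):
    #
    #   curr_anchor, other_anchor, vocab = pickle.load(open(anchor_pkl, 'rb'))
    #   dist = emb_2018.eis(emb_2017, curr_anchor=curr_anchor,
    #                       other_anchor=other_anchor, vocab=vocab, exp=3, n=10000)
    #
    # Lower values correspond to embedding pairs that are expected to be more
    # stable downstream.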
def sem_disp(self, other, average=True, align=True, n=-1):
"""
Computes the semantic displacement measure between the current instance and another Embedding.
:param other: other Embedding object to compare
        :param average: (optional) bool, if True, average the cosine distances over the word vectors of the
        two embeddings; otherwise return the per-word distances.
:param align: (optional) bool value, use orthogonal Procrustes to align the current instance
to the other Embedding prior to computing the measure.
:param n: (optional) int value representing the number of words to use to compute the distance,
where the words are assumed to be sorted by frequency in the current instance.
:return: float distance
"""
if not isinstance(other, Embedding):
raise ValueError("Only Embedding objects supported.")
emb1, emb2 = self.get_subembeds_same_vocab(other, n=n)
if align:
R, _ = orthogonal_procrustes(emb1, emb2)
emb1 = np.dot(emb1, R)
rcos_dist = np.array(
[spatial.distance.cosine(emb1[k], emb2[k]) for k in range(emb2.shape[0])]
)
if average:
rcos_dist = np.nanmean(rcos_dist, axis=0)
return rcos_dist
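    # Design note: this is the mean cosine distance between corresponding word
    # vectors after an orthogonal Procrustes alignment, so the measure is
    # invariant to rotations of either embedding. Typical call (hypothetical
    # objects): emb_2018.sem_disp(other=emb_2017, n=10000).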
def fro_norm(self, other, n=-1):
"""
Computes the Frobenius norm between the current instance and another Embedding.
:param other: other Embedding object to compare
:param n: (optional) int value representing the number of words to use to compute the distance,
where the words are assumed to be sorted by frequency in the current instance.
:return: float distance
"""
if not isinstance(other, Embedding):
raise ValueError("Only Embedding objects supported.")
emb1, emb2 = self.get_subembeds_same_vocab(other, n=n)
return float(np.linalg.norm(emb1 - emb2, ord='fro'))
def pip_loss(self, other, n=10000, random=False):
"""
Computes the PIP loss between the current instance and another Embedding as described
in Yin et al., NeurIPS, 2018.
:param other: other Embedding object to compare
:param n: (optional) int value representing the number of words to use to compute the distance,
where the words are assumed to be sorted by frequency in the current instance.
This measure scales with O(n^2), so it may be slow if 'n' is large.
:param random: (optional): randomly sample the 'n' words from the full vocabulary
rather than taking the top 'n' words sorted by frequency.
:return: float distance
"""
assert n > 0, "Truncation required for pip loss"
X, Xq = self.get_subembeds_same_vocab(other, n=n, random=random)
K = X @ X.T
Kq = Xq @ Xq.T
return float(np.linalg.norm(K - Kq))
def knn(self, other, n=10000, nquery=1000, nneighbors=5):
"""
Computes the k-nearest neighbors measure between the current instance and another Embedding.
        Based on the measure described in Wendlandt et al., NAACL-HLT, 2018.
:param other: other Embedding object to compare
:param n (optional): int, number of words to consider for query word selection and
for ranking nearest neighbors. Words are assumed to be sorted by frequency in the current instance,
and we take the 'n' most frequent words from the current instance to form sub-embeddings
for distance computation.
:param nquery (optional): int, number of words to query from the list of 'n' words. The nearest
neighbors of each query word are compared between the current instance and the other Embedding.
:param nneighbors (optional): int, number of nearest neighbors to compare for each query word
:return: float distance
"""
np.random.seed(1234)
assert n > 0 and nquery > 0 and nneighbors > 0, "N, nquery, nneighbors must be > 0"
emb1, emb2 = self.get_subembeds_same_vocab(other, n=n)
# randomly sample queries from n
rand_indices = np.random.choice(n, nquery, replace=False)
query1 = emb1[rand_indices]
query2 = emb2[rand_indices]
        neigh1 = NearestNeighbors(n_neighbors=nneighbors + 1, metric='cosine')
        neigh1.fit(emb1)
        _, neighbors1 = neigh1.kneighbors(X=query1)
        neigh2 = NearestNeighbors(n_neighbors=nneighbors + 1, metric='cosine')
neigh2.fit(emb2)
_, neighbors2 = neigh2.kneighbors(X=query2)
def _filter_nn(neighbors):
actual_neighbors = np.zeros((len(neighbors), nneighbors))
for i, nn in enumerate(neighbors):
nn = np.array(nn)
# Delete query itself from neighbors
try:
actual_neighbors[i] = np.delete(nn, np.where(nn == rand_indices[i]))
                except ValueError:
                    # Cut last neighbor if query not in list (for example all distances are small)
                    actual_neighbors[i] = nn[:-1]
return actual_neighbors
neighbors1 = _filter_nn(neighbors1)
neighbors2 = _filter_nn(neighbors2)
assert neighbors1.shape[1] == nneighbors and neighbors2.shape[1] == nneighbors, 'Dimensions not correct for nearest neighbors'
count = 0.
for n1, n2 in zip(neighbors1, neighbors2):
count += len(set(n1).intersection(n2)) / len(n1)
return count / len(neighbors1)
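# A minimal standalone sketch (not part of the original class) of the PIP-loss
# computation implemented in the pip_loss method above: the loss is the Frobenius
# norm of the difference between the two Gram matrices X X^T and X~ X~^T. The
# array shapes and noise level below are illustrative only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(100, 25)               # embedding matrix: 100 words x 25 dims
    Xq = X + 0.01 * rng.randn(100, 25)   # a slightly perturbed embedding
    pip = np.linalg.norm(X @ X.T - Xq @ Xq.T)
    print("PIP loss between the two embeddings: %.4f" % pip)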
| anchor-stability-master | anchor/embedding.py |
anchor-stability-master | anchor/__init__.py |
|
"""
Utils file.
"""
import logging
import time
from datetime import timedelta
def load_vocab_list(vocab_file):
    vocab = []
    with open(vocab_file, "r", encoding="utf-8") as fin:
        for line in fin:
            try:
                w, _ = line.rstrip().split(' ')
                vocab.append(w)
            except ValueError:
                # report and skip malformed lines (e.g., words containing spaces)
                print(line)
    return vocab
def load_vocab(vocab_file):
    vocab = {}
    with open(vocab_file, "r", encoding="utf-8") as fin:
        for line in fin:
            w, count = line.rstrip().split(' ')
            vocab[w] = int(count)
    return vocab
# https://github.com/facebookresearch/DME
class LogFormatter(object):
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = round(record.created - self.start_time)
prefix = "%s - %s - %s" % (
record.levelname,
time.strftime("%x %X"),
timedelta(seconds=elapsed_seconds),
)
message = record.getMessage()
message = message.replace("\n", "\n" + " " * (len(prefix) + 3))
return "%s - %s" % (prefix, message)
# https://github.com/facebookresearch/DME
def create_logger(filepath):
"""
Create a logger.
"""
# create log formatter
log_formatter = LogFormatter()
# create file handler and set level to debug
file_handler = logging.FileHandler(filepath, "a")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
# create console handler and set level to info
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
# create logger and set level to debug
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.propagate = False
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger
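# Minimal usage sketch (the log-file name below is illustrative): create_logger
# configures the root logger to write to the given file and to the console.
if __name__ == "__main__":
    create_logger("example_run.log")
    logging.getLogger().info("logger initialized")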
| anchor-stability-master | anchor/utils.py |
import matplotlib
import matplotlib.pyplot as plt
import pandas
markers = ['s', 'v', '^', '<', '>', 'o']
def plt_single(df, vals, val_tag, xtag, dist, ylog=False, ylabel='', xlabel='', title='', val_tag_label='', legend=False, color='C4', marker='s', line_label=None):
if val_tag_label == '':
val_tag_label=val_tag
for i, val in enumerate(vals):
df_sub = df.loc[df[val_tag] == val]
xval = df_sub[xtag]
yval = df_sub[(dist, 'mean')]
if ('overlap' in dist and 'sym' not in dist and 'anchor' not in dist) or 'knn' in dist:
yval = 1 - yval
yerr = df_sub[(dist, 'std')]
if len(vals) == 1:
if line_label is None:
line_label = f'{val_tag_label}={val}'
plt.errorbar(xval, yval, yerr, label=line_label, capsize=5, marker=marker, linestyle='--', color=color)
else:
plt.errorbar(xval, yval, yerr, label=f'{val_tag_label}={val}', capsize=5, marker=markers[i], linestyle='--')
if xtag == 'dim':
plt.xscale('log')
plt.minorticks_off()
plt.xticks([25, 50, 100, 200, 400, 800])
plt.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
elif xtag == 'bitrate':
plt.xscale('log', basex=2)
plt.minorticks_off()
plt.xticks([1, 2, 4, 8, 16, 32])
plt.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
elif xtag == 'space':
plt.xscale('log')
plt.title(title)
if legend:
plt.legend()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if ylog:
plt.yscale('log')
def plt_correlations(df, values, val_tag, metrics, xmetric, ylog=False, ylabel='', title='', xlabel='', xlog=False, legend=False):
if len(metrics) > 1:
fig = plt.figure(figsize=(20,30))
plt.subplots_adjust(wspace=0.2, hspace=0.2)
num = 321
for dist in metrics:
if len(metrics) > 1:
plt.subplot(num)
for i,val in enumerate(values):
df_sub = df.loc[df[val_tag] == val]
xval = df_sub[(xmetric, 'mean')]
yval = df_sub[(dist, 'mean')]
yerr = df_sub[(dist, 'std')]
if ('overlap' in xmetric and 'sym' not in xmetric and 'anchor' not in xmetric) or 'knn' in xmetric:
xval = 1 - xval
plt.errorbar(xval, yval, yerr, label=f'b={val}', capsize=5, marker=markers[i], linestyle='--')
if xlog:
plt.xscale('log')
plt.minorticks_off()
plt.title(title)
if legend:
plt.legend(ncol=2)
if xlabel == '':
xlabel = xmetric
plt.xlabel(xlabel)
plt.ylabel('% Disagreement')
num += 1
if ylog:
plt.yscale('log')
# load csv results
def plt_csv(xlabel, filepath):
df = pandas.read_csv(filepath)
plt.errorbar(df['Disagreement|x'], df['Disagreement|y'], df['Disagreement|y_std'], capsize=5, marker='s', linestyle='--', color='C4')
if xlabel == 'Precision':
plt.xscale('log', basex=2)
plt.minorticks_off()
plt.xticks([1, 2, 4, 8, 16, 32])
plt.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
else:
plt.xscale('log')
plt.minorticks_off()
plt.xticks([192, 384, 768, 1536, 3072])
plt.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.xlabel(xlabel)
plt.ylabel('% Disagreement')
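# Minimal sketch of the CSV layout expected by plt_csv (the column names are
# taken from the function body; the numbers and the x-axis label are made up
# for illustration).
if __name__ == "__main__":
    demo = pandas.DataFrame({
        'Disagreement|x': [192, 384, 768, 1536, 3072],
        'Disagreement|y': [12.0, 10.5, 9.8, 9.1, 8.7],
        'Disagreement|y_std': [0.4, 0.3, 0.3, 0.2, 0.2],
    })
    demo.to_csv('demo_disagreement.csv', index=False)
    plt_csv('Memory (MB)', 'demo_disagreement.csv')
    plt.show()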
| anchor-stability-master | notebooks/plot_utils.py |
"""Train Knowledge Graph embeddings for link prediction."""
import argparse
import json
import logging
import os
import torch
import torch.optim
import models
import optimizers.regularizers as regularizers
from datasets.kg_dataset import KGDataset
from models import all_models
from optimizers.kg_optimizer import KGOptimizer
from utils.train import get_savedir, avg_both, format_metrics, count_params
parser = argparse.ArgumentParser(
description="Knowledge Graph Embedding"
)
parser.add_argument(
"--dataset", default="WN18RR", choices=["FB15K", "WN", "WN18RR", "FB237", "YAGO3-10"],
help="Knowledge Graph dataset"
)
parser.add_argument(
"--model", default="RotE", choices=all_models, help="Knowledge Graph embedding model"
)
parser.add_argument(
"--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer"
)
parser.add_argument(
"--reg", default=0, type=float, help="Regularization weight"
)
parser.add_argument(
"--optimizer", choices=["Adagrad", "Adam", "SparseAdam"], default="Adagrad",
help="Optimizer"
)
parser.add_argument(
"--max_epochs", default=50, type=int, help="Maximum number of epochs to train for"
)
parser.add_argument(
"--patience", default=10, type=int, help="Number of epochs before early stopping"
)
parser.add_argument(
"--valid", default=3, type=float, help="Number of epochs before validation"
)
parser.add_argument(
"--rank", default=1000, type=int, help="Embedding dimension"
)
parser.add_argument(
"--batch_size", default=1000, type=int, help="Batch size"
)
parser.add_argument(
"--neg_sample_size", default=50, type=int, help="Negative sample size, -1 to not use negative sampling"
)
parser.add_argument(
"--dropout", default=0, type=float, help="Dropout rate"
)
parser.add_argument(
"--init_size", default=1e-3, type=float, help="Initial embeddings' scale"
)
parser.add_argument(
"--learning_rate", default=1e-1, type=float, help="Learning rate"
)
parser.add_argument(
"--gamma", default=0, type=float, help="Margin for distance-based losses"
)
parser.add_argument(
"--bias", default="constant", type=str, choices=["constant", "learn", "none"], help="Bias type (none for no bias)"
)
parser.add_argument(
"--dtype", default="double", type=str, choices=["single", "double"], help="Machine precision"
)
parser.add_argument(
"--double_neg", action="store_true",
    help="Whether to sample negatives for both head and tail entities"
)
parser.add_argument(
"--debug", action="store_true",
help="Only use 1000 examples for debugging"
)
parser.add_argument(
"--multi_c", action="store_true", help="Multiple curvatures per relation"
)
def train(args):
save_dir = get_savedir(args.model, args.dataset)
# file logger
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
filename=os.path.join(save_dir, "train.log")
)
# stdout logger
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
logging.info("Saving logs in: {}".format(save_dir))
# create dataset
dataset_path = os.path.join(os.environ["DATA_PATH"], args.dataset)
dataset = KGDataset(dataset_path, args.debug)
args.sizes = dataset.get_shape()
# load data
logging.info("\t " + str(dataset.get_shape()))
train_examples = dataset.get_examples("train")
valid_examples = dataset.get_examples("valid")
test_examples = dataset.get_examples("test")
filters = dataset.get_filters()
# save config
with open(os.path.join(save_dir, "config.json"), "w") as fjson:
json.dump(vars(args), fjson)
# create model
model = getattr(models, args.model)(args)
total = count_params(model)
logging.info("Total number of parameters {}".format(total))
device = "cuda"
model.to(device)
# get optimizer
regularizer = getattr(regularizers, args.regularizer)(args.reg)
optim_method = getattr(torch.optim, args.optimizer)(model.parameters(), lr=args.learning_rate)
optimizer = KGOptimizer(model, regularizer, optim_method, args.batch_size, args.neg_sample_size,
bool(args.double_neg))
counter = 0
best_mrr = None
best_epoch = None
logging.info("\t Start training")
for step in range(args.max_epochs):
# Train step
model.train()
train_loss = optimizer.epoch(train_examples)
logging.info("\t Epoch {} | average train loss: {:.4f}".format(step, train_loss))
# Valid step
model.eval()
valid_loss = optimizer.calculate_valid_loss(valid_examples)
logging.info("\t Epoch {} | average valid loss: {:.4f}".format(step, valid_loss))
if (step + 1) % args.valid == 0:
valid_metrics = avg_both(*model.compute_metrics(valid_examples, filters))
logging.info(format_metrics(valid_metrics, split="valid"))
valid_mrr = valid_metrics["MRR"]
if not best_mrr or valid_mrr > best_mrr:
best_mrr = valid_mrr
counter = 0
best_epoch = step
logging.info("\t Saving model at epoch {} in {}".format(step, save_dir))
torch.save(model.cpu().state_dict(), os.path.join(save_dir, "model.pt"))
model.cuda()
else:
counter += 1
if counter == args.patience:
logging.info("\t Early stopping")
break
elif counter == args.patience // 2:
pass
# logging.info("\t Reducing learning rate")
# optimizer.reduce_lr()
logging.info("\t Optimization finished")
if not best_mrr:
torch.save(model.cpu().state_dict(), os.path.join(save_dir, "model.pt"))
else:
logging.info("\t Loading best model saved at epoch {}".format(best_epoch))
model.load_state_dict(torch.load(os.path.join(save_dir, "model.pt")))
model.cuda()
model.eval()
# Validation metrics
valid_metrics = avg_both(*model.compute_metrics(valid_examples, filters))
logging.info(format_metrics(valid_metrics, split="valid"))
# Test metrics
test_metrics = avg_both(*model.compute_metrics(test_examples, filters))
logging.info(format_metrics(test_metrics, split="test"))
if __name__ == "__main__":
train(parser.parse_args())
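# Example invocation (paths and hyperparameters are placeholders; DATA_PATH must
# contain the pre-processed pickles produced by datasets/process.py and LOG_DIR
# must be a writable directory):
#   DATA_PATH=data/ LOG_DIR=logs/ python run.py --dataset WN18RR --model RotH \
#       --rank 32 --multi_c --optimizer Adam --learning_rate 1e-3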
| KGEmb-master | run.py |
"""Evaluation script."""
import argparse
import json
import os
import torch
import models
from datasets.kg_dataset import KGDataset
from utils.train import avg_both, format_metrics
parser = argparse.ArgumentParser(description="Test")
parser.add_argument(
'--model_dir',
help="Model path"
)
def test(model_dir):
# load config
with open(os.path.join(model_dir, "config.json"), "r") as f:
config = json.load(f)
args = argparse.Namespace(**config)
# create dataset
dataset_path = os.path.join(os.environ["DATA_PATH"], args.dataset)
dataset = KGDataset(dataset_path, False)
test_examples = dataset.get_examples("test")
filters = dataset.get_filters()
# load pretrained model weights
model = getattr(models, args.model)(args)
device = 'cuda'
model.to(device)
model.load_state_dict(torch.load(os.path.join(model_dir, 'model.pt')))
# eval
test_metrics = avg_both(*model.compute_metrics(test_examples, filters))
return test_metrics
if __name__ == "__main__":
args = parser.parse_args()
test_metrics = test(args.model_dir)
print(format_metrics(test_metrics, split='test'))
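# Example invocation (the model directory is a placeholder; it is a directory
# created by run.py and must contain config.json and model.pt):
#   DATA_PATH=data/ python test.py --model_dir logs/04_01/WN18RR/RotH_12_00_00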
| KGEmb-master | test.py |
"""Dataset class for loading and processing KG datasets."""
import os
import pickle as pkl
import numpy as np
import torch
class KGDataset(object):
"""Knowledge Graph dataset class."""
def __init__(self, data_path, debug):
"""Creates KG dataset object for data loading.
Args:
data_path: Path to directory containing train/valid/test pickle files produced by process.py
debug: boolean indicating whether to use debug mode or not
if true, the dataset will only contain 1000 examples for debugging.
"""
self.data_path = data_path
self.debug = debug
self.data = {}
for split in ["train", "test", "valid"]:
file_path = os.path.join(self.data_path, split + ".pickle")
with open(file_path, "rb") as in_file:
self.data[split] = pkl.load(in_file)
filters_file = open(os.path.join(self.data_path, "to_skip.pickle"), "rb")
self.to_skip = pkl.load(filters_file)
filters_file.close()
max_axis = np.max(self.data["train"], axis=0)
self.n_entities = int(max(max_axis[0], max_axis[2]) + 1)
self.n_predicates = int(max_axis[1] + 1) * 2
def get_examples(self, split, rel_idx=-1):
"""Get examples in a split.
Args:
split: String indicating the split to use (train/valid/test)
            rel_idx: integer for relation index to keep (-1 to keep all relations)
Returns:
examples: torch.LongTensor containing KG triples in a split
"""
examples = self.data[split]
if split == "train":
copy = np.copy(examples)
tmp = np.copy(copy[:, 0])
copy[:, 0] = copy[:, 2]
copy[:, 2] = tmp
copy[:, 1] += self.n_predicates // 2
examples = np.vstack((examples, copy))
if rel_idx >= 0:
examples = examples[examples[:, 1] == rel_idx]
if self.debug:
examples = examples[:1000]
return torch.from_numpy(examples.astype("int64"))
def get_filters(self, ):
"""Return filter dict to compute ranking metrics in the filtered setting."""
return self.to_skip
def get_shape(self):
"""Returns KG dataset shape."""
return self.n_entities, self.n_predicates, self.n_entities
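# Toy illustration (separate from the class above) of the reciprocal-triple
# augmentation applied in get_examples for the training split: every
# (head, rel, tail) triple also yields (tail, rel + n_relations, head).
if __name__ == "__main__":
    triples = np.array([[0, 0, 1], [2, 1, 0]])
    n_predicates = 4  # 2 raw relations, doubled for the reciprocal direction
    copy = np.copy(triples)
    tmp = np.copy(copy[:, 0])
    copy[:, 0] = copy[:, 2]
    copy[:, 2] = tmp
    copy[:, 1] += n_predicates // 2
    print(np.vstack((triples, copy)))  # the two originals plus their reciprocals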
| KGEmb-master | datasets/kg_dataset.py |
KGEmb-master | datasets/__init__.py |
|
"""Knowledge Graph dataset pre-processing functions."""
import collections
import os
import pickle
import numpy as np
def get_idx(path):
"""Map entities and relations to unique ids.
Args:
path: path to directory with raw dataset files (tab-separated train/valid/test triples)
Returns:
ent2idx: Dictionary mapping raw entities to unique ids
rel2idx: Dictionary mapping raw relations to unique ids
"""
entities, relations = set(), set()
for split in ["train", "valid", "test"]:
with open(os.path.join(path, split), "r") as lines:
for line in lines:
lhs, rel, rhs = line.strip().split("\t")
entities.add(lhs)
entities.add(rhs)
relations.add(rel)
ent2idx = {x: i for (i, x) in enumerate(sorted(entities))}
rel2idx = {x: i for (i, x) in enumerate(sorted(relations))}
return ent2idx, rel2idx
def to_np_array(dataset_file, ent2idx, rel2idx):
"""Map raw dataset file to numpy array with unique ids.
Args:
dataset_file: Path to file containing raw triples in a split
ent2idx: Dictionary mapping raw entities to unique ids
rel2idx: Dictionary mapping raw relations to unique ids
Returns:
Numpy array of size n_examples x 3 mapping the raw dataset file to ids
"""
examples = []
with open(dataset_file, "r") as lines:
for line in lines:
lhs, rel, rhs = line.strip().split("\t")
try:
examples.append([ent2idx[lhs], rel2idx[rel], ent2idx[rhs]])
            except (KeyError, ValueError):
                # skip triples whose entities or relations are missing from the id maps
                continue
return np.array(examples).astype("int64")
def get_filters(examples, n_relations):
"""Create filtering lists for evaluation.
Args:
examples: Numpy array of size n_examples x 3 containing KG triples
n_relations: Int indicating the total number of relations in the KG
Returns:
lhs_final: Dictionary mapping queries (entity, relation) to filtered entities for left-hand-side prediction
rhs_final: Dictionary mapping queries (entity, relation) to filtered entities for right-hand-side prediction
"""
lhs_filters = collections.defaultdict(set)
rhs_filters = collections.defaultdict(set)
for lhs, rel, rhs in examples:
rhs_filters[(lhs, rel)].add(rhs)
lhs_filters[(rhs, rel + n_relations)].add(lhs)
lhs_final = {}
rhs_final = {}
for k, v in lhs_filters.items():
lhs_final[k] = sorted(list(v))
for k, v in rhs_filters.items():
rhs_final[k] = sorted(list(v))
return lhs_final, rhs_final
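# Worked example: for the single triple (0, 0, 1) with n_relations=1, get_filters
# returns rhs_final = {(0, 0): [1]} and lhs_final = {(1, 1): [0]}, where the lhs
# query uses the reciprocal relation id 0 + n_relations.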
def process_dataset(path):
"""Map entities and relations to ids and saves corresponding pickle arrays.
Args:
path: Path to dataset directory
Returns:
examples: Dictionary mapping splits to with Numpy array containing corresponding KG triples.
filters: Dictionary containing filters for lhs and rhs predictions.
"""
    ent2idx, rel2idx = get_idx(path)
examples = {}
splits = ["train", "valid", "test"]
for split in splits:
dataset_file = os.path.join(path, split)
examples[split] = to_np_array(dataset_file, ent2idx, rel2idx)
all_examples = np.concatenate([examples[split] for split in splits], axis=0)
lhs_skip, rhs_skip = get_filters(all_examples, len(rel2idx))
filters = {"lhs": lhs_skip, "rhs": rhs_skip}
return examples, filters
if __name__ == "__main__":
data_path = os.environ["DATA_PATH"]
for dataset_name in os.listdir(data_path):
dataset_path = os.path.join(data_path, dataset_name)
dataset_examples, dataset_filters = process_dataset(dataset_path)
for dataset_split in ["train", "valid", "test"]:
save_path = os.path.join(dataset_path, dataset_split + ".pickle")
with open(save_path, "wb") as save_file:
pickle.dump(dataset_examples[dataset_split], save_file)
with open(os.path.join(dataset_path, "to_skip.pickle"), "wb") as save_file:
pickle.dump(dataset_filters, save_file)
| KGEmb-master | datasets/process.py |
KGEmb-master | utils/__init__.py |
|
"""Hyperbolic operations utils functions."""
import torch
MIN_NORM = 1e-15
BALL_EPS = {torch.float32: 4e-3, torch.float64: 1e-5}
# ################# MATH FUNCTIONS ########################
class Artanh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
x = x.clamp(-1 + 1e-5, 1 - 1e-5)
ctx.save_for_backward(x)
dtype = x.dtype
x = x.double()
return (torch.log_(1 + x).sub_(torch.log_(1 - x))).mul_(0.5).to(dtype)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output / (1 - input ** 2)
def artanh(x):
return Artanh.apply(x)
def tanh(x):
return x.clamp(-15, 15).tanh()
# ################# HYP OPS ########################
def expmap0(u, c):
"""Exponential map taken at the origin of the Poincare ball with curvature c.
Args:
        u: torch.Tensor of size B x d with tangent points
        c: torch.Tensor of size 1 or B x 1 with absolute hyperbolic curvatures
    Returns:
        torch.Tensor with hyperbolic points.
"""
sqrt_c = c ** 0.5
u_norm = u.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
gamma_1 = tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
return project(gamma_1, c)
def logmap0(y, c):
"""Logarithmic map taken at the origin of the Poincare ball with curvature c.
Args:
        y: torch.Tensor of size B x d with hyperbolic points
        c: torch.Tensor of size 1 or B x 1 with absolute hyperbolic curvatures
    Returns:
        torch.Tensor with tangent points.
"""
sqrt_c = c ** 0.5
y_norm = y.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
return y / y_norm / sqrt_c * artanh(sqrt_c * y_norm)
def project(x, c):
"""Project points to Poincare ball with curvature c.
Args:
x: torch.Tensor of size B x d with hyperbolic points
c: torch.Tensor of size 1 or B x 1 with absolute hyperbolic curvatures
Returns:
torch.Tensor with projected hyperbolic points.
"""
norm = x.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
eps = BALL_EPS[x.dtype]
maxnorm = (1 - eps) / (c ** 0.5)
cond = norm > maxnorm
projected = x / norm * maxnorm
return torch.where(cond, projected, x)
def mobius_add(x, y, c):
"""Mobius addition of points in the Poincare ball with curvature c.
Args:
x: torch.Tensor of size B x d with hyperbolic points
y: torch.Tensor of size B x d with hyperbolic points
c: torch.Tensor of size 1 or B x 1 with absolute hyperbolic curvatures
Returns:
Tensor of shape B x d representing the element-wise Mobius addition of x and y.
"""
x2 = torch.sum(x * x, dim=-1, keepdim=True)
y2 = torch.sum(y * y, dim=-1, keepdim=True)
xy = torch.sum(x * y, dim=-1, keepdim=True)
num = (1 + 2 * c * xy + c * y2) * x + (1 - c * x2) * y
denom = 1 + 2 * c * xy + c ** 2 * x2 * y2
return num / denom.clamp_min(MIN_NORM)
# ################# HYP DISTANCES ########################
def hyp_distance(x, y, c, eval_mode=False):
"""Hyperbolic distance on the Poincare ball with curvature c.
Args:
x: torch.Tensor of size B x d with hyperbolic queries
y: torch.Tensor with hyperbolic queries, shape n_entities x d if eval_mode is true else (B x d)
c: torch.Tensor of size 1 with absolute hyperbolic curvature
    Returns: torch.Tensor with hyperbolic distances, size B x 1 if eval_mode is False
else B x n_entities matrix with all pairs distances
"""
sqrt_c = c ** 0.5
x2 = torch.sum(x * x, dim=-1, keepdim=True)
if eval_mode:
y2 = torch.sum(y * y, dim=-1, keepdim=True).transpose(0, 1)
xy = x @ y.transpose(0, 1)
else:
y2 = torch.sum(y * y, dim=-1, keepdim=True)
xy = torch.sum(x * y, dim=-1, keepdim=True)
c1 = 1 - 2 * c * xy + c * y2
c2 = 1 - c * x2
num = torch.sqrt((c1 ** 2) * x2 + (c2 ** 2) * y2 - (2 * c1 * c2) * xy)
denom = 1 - 2 * c * xy + c ** 2 * x2 * y2
pairwise_norm = num / denom.clamp_min(MIN_NORM)
dist = artanh(sqrt_c * pairwise_norm)
return 2 * dist / sqrt_c
def hyp_distance_multi_c(x, v, c, eval_mode=False):
"""Hyperbolic distance on Poincare balls with varying curvatures c.
Args:
x: torch.Tensor of size B x d with hyperbolic queries
        v: torch.Tensor with hyperbolic queries, shape n_entities x d if eval_mode is true else (B x d)
        c: torch.Tensor of size B x d with absolute hyperbolic curvatures
    Returns: torch.Tensor with hyperbolic distances, size B x 1 if eval_mode is False
else B x n_entities matrix with all pairs distances
"""
sqrt_c = c ** 0.5
if eval_mode:
vnorm = torch.norm(v, p=2, dim=-1, keepdim=True).transpose(0, 1)
xv = x @ v.transpose(0, 1) / vnorm
else:
vnorm = torch.norm(v, p=2, dim=-1, keepdim=True)
xv = torch.sum(x * v / vnorm, dim=-1, keepdim=True)
gamma = tanh(sqrt_c * vnorm) / sqrt_c
x2 = torch.sum(x * x, dim=-1, keepdim=True)
c1 = 1 - 2 * c * gamma * xv + c * gamma ** 2
c2 = 1 - c * x2
num = torch.sqrt((c1 ** 2) * x2 + (c2 ** 2) * (gamma ** 2) - (2 * c1 * c2) * gamma * xv)
denom = 1 - 2 * c * gamma * xv + (c ** 2) * (gamma ** 2) * x2
pairwise_norm = num / denom.clamp_min(MIN_NORM)
dist = artanh(sqrt_c * pairwise_norm)
return 2 * dist / sqrt_c
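# Quick numerical sanity check (illustrative, not part of the library): the
# exponential and logarithmic maps at the origin should invert each other for
# points well inside the ball.
if __name__ == "__main__":
    torch.manual_seed(0)
    u = 0.1 * torch.randn(4, 8, dtype=torch.float64)
    c = torch.ones(1, dtype=torch.float64)
    u_rec = logmap0(expmap0(u, c), c)
    print(torch.allclose(u, u_rec, atol=1e-10))  # expected: True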
| KGEmb-master | utils/hyperbolic.py |
"""Training utils."""
import datetime
import os
def get_savedir(model, dataset):
"""Get unique saving directory name."""
dt = datetime.datetime.now()
date = dt.strftime("%m_%d")
save_dir = os.path.join(
os.environ["LOG_DIR"], date, dataset,
model + dt.strftime('_%H_%M_%S')
)
os.makedirs(save_dir)
return save_dir
def avg_both(mrs, mrrs, hits):
"""Aggregate metrics for missing lhs and rhs.
Args:
mrs: Dict[str, float]
mrrs: Dict[str, float]
hits: Dict[str, torch.FloatTensor]
Returns:
Dict[str, torch.FloatTensor] mapping metric name to averaged score
"""
mr = (mrs['lhs'] + mrs['rhs']) / 2.
mrr = (mrrs['lhs'] + mrrs['rhs']) / 2.
h = (hits['lhs'] + hits['rhs']) / 2.
return {'MR': mr, 'MRR': mrr, 'hits@[1,3,10]': h}
def format_metrics(metrics, split):
"""Format metrics for logging."""
result = "\t {} MR: {:.2f} | ".format(split, metrics['MR'])
result += "MRR: {:.3f} | ".format(metrics['MRR'])
result += "H@1: {:.3f} | ".format(metrics['hits@[1,3,10]'][0])
result += "H@3: {:.3f} | ".format(metrics['hits@[1,3,10]'][1])
result += "H@10: {:.3f}".format(metrics['hits@[1,3,10]'][2])
return result
def write_metrics(writer, step, metrics, split):
"""Write metrics to tensorboard logs."""
writer.add_scalar('{}_MR'.format(split), metrics['MR'], global_step=step)
writer.add_scalar('{}_MRR'.format(split), metrics['MRR'], global_step=step)
writer.add_scalar('{}_H1'.format(split), metrics['hits@[1,3,10]'][0], global_step=step)
writer.add_scalar('{}_H3'.format(split), metrics['hits@[1,3,10]'][1], global_step=step)
writer.add_scalar('{}_H10'.format(split), metrics['hits@[1,3,10]'][2], global_step=step)
def count_params(model):
"""Count total number of trainable parameters in model"""
total = 0
for x in model.parameters():
if x.requires_grad:
res = 1
for y in x.shape:
res *= y
total += res
return total
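# Small illustration of count_params on a toy module (torch is imported locally
# because this utils module does not otherwise need it).
if __name__ == "__main__":
    from torch import nn
    toy = nn.Linear(10, 5)
    print(count_params(toy))  # 10 * 5 weights + 5 biases = 55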
| KGEmb-master | utils/train.py |
"""Euclidean operations utils functions."""
import torch
def euc_sqdistance(x, y, eval_mode=False):
"""Compute euclidean squared distance between tensors.
Args:
x: torch.Tensor of shape (N1 x d)
y: torch.Tensor of shape (N2 x d)
eval_mode: boolean
Returns:
        torch.Tensor of shape N1 x 1 with squared distances between corresponding rows if eval_mode is false
        else torch.Tensor of shape N1 x N2 with all-pairs squared distances
"""
x2 = torch.sum(x * x, dim=-1, keepdim=True)
y2 = torch.sum(y * y, dim=-1, keepdim=True)
if eval_mode:
y2 = y2.t()
xy = x @ y.t()
else:
assert x.shape[0] == y.shape[0]
xy = torch.sum(x * y, dim=-1, keepdim=True)
return x2 + y2 - 2 * xy
def givens_rotations(r, x):
"""Givens rotations.
Args:
r: torch.Tensor of shape (N x d), rotation parameters
x: torch.Tensor of shape (N x d), points to rotate
Returns:
        torch.Tensor of shape (N x d) representing rotation of x by r
"""
givens = r.view((r.shape[0], -1, 2))
givens = givens / torch.norm(givens, p=2, dim=-1, keepdim=True).clamp_min(1e-15)
x = x.view((r.shape[0], -1, 2))
x_rot = givens[:, :, 0:1] * x + givens[:, :, 1:] * torch.cat((-x[:, :, 1:], x[:, :, 0:1]), dim=-1)
return x_rot.view((r.shape[0], -1))
def givens_reflection(r, x):
"""Givens reflections.
Args:
r: torch.Tensor of shape (N x d), rotation parameters
x: torch.Tensor of shape (N x d), points to reflect
Returns:
        torch.Tensor of shape (N x d) representing reflection of x by r
"""
givens = r.view((r.shape[0], -1, 2))
givens = givens / torch.norm(givens, p=2, dim=-1, keepdim=True).clamp_min(1e-15)
x = x.view((r.shape[0], -1, 2))
x_ref = givens[:, :, 0:1] * torch.cat((x[:, :, 0:1], -x[:, :, 1:]), dim=-1) + givens[:, :, 1:] * torch.cat(
(x[:, :, 1:], x[:, :, 0:1]), dim=-1)
return x_ref.view((r.shape[0], -1))
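# Small check (illustrative): with rotation parameters r = (cos, sin) = (0, 1),
# givens_rotations rotates each 2D block by 90 degrees, sending [1, 0] to [0, 1].
if __name__ == "__main__":
    r = torch.tensor([[0.0, 1.0]])
    x = torch.tensor([[1.0, 0.0]])
    print(givens_rotations(r, x))  # tensor([[0., 1.]])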
| KGEmb-master | utils/euclidean.py |
from .complex import *
from .euclidean import *
from .hyperbolic import *
all_models = EUC_MODELS + HYP_MODELS + COMPLEX_MODELS
| KGEmb-master | models/__init__.py |
"""Hyperbolic Knowledge Graph embedding models where all parameters are defined in tangent spaces."""
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from models.base import KGModel
from utils.euclidean import givens_rotations, givens_reflection
from utils.hyperbolic import mobius_add, expmap0, project, hyp_distance_multi_c
HYP_MODELS = ["RotH", "RefH", "AttH"]
class BaseH(KGModel):
"""Trainable curvature for each relationship."""
def __init__(self, args):
super(BaseH, self).__init__(args.sizes, args.rank, args.dropout, args.gamma, args.dtype, args.bias,
args.init_size)
self.entity.weight.data = self.init_size * torch.randn((self.sizes[0], self.rank), dtype=self.data_type)
self.rel.weight.data = self.init_size * torch.randn((self.sizes[1], 2 * self.rank), dtype=self.data_type)
self.rel_diag = nn.Embedding(self.sizes[1], self.rank)
self.rel_diag.weight.data = 2 * torch.rand((self.sizes[1], self.rank), dtype=self.data_type) - 1.0
self.multi_c = args.multi_c
if self.multi_c:
c_init = torch.ones((self.sizes[1], 1), dtype=self.data_type)
else:
c_init = torch.ones((1, 1), dtype=self.data_type)
self.c = nn.Parameter(c_init, requires_grad=True)
def get_rhs(self, queries, eval_mode):
"""Get embeddings and biases of target entities."""
if eval_mode:
return self.entity.weight, self.bt.weight
else:
return self.entity(queries[:, 2]), self.bt(queries[:, 2])
def similarity_score(self, lhs_e, rhs_e, eval_mode):
"""Compute similarity scores or queries against targets in embedding space."""
lhs_e, c = lhs_e
return - hyp_distance_multi_c(lhs_e, rhs_e, c, eval_mode) ** 2
class RotH(BaseH):
"""Hyperbolic 2x2 Givens rotations"""
def get_queries(self, queries):
"""Compute embedding and biases of queries."""
c = F.softplus(self.c[queries[:, 1]])
head = expmap0(self.entity(queries[:, 0]), c)
rel1, rel2 = torch.chunk(self.rel(queries[:, 1]), 2, dim=1)
rel1 = expmap0(rel1, c)
rel2 = expmap0(rel2, c)
lhs = project(mobius_add(head, rel1, c), c)
res1 = givens_rotations(self.rel_diag(queries[:, 1]), lhs)
res2 = mobius_add(res1, rel2, c)
return (res2, c), self.bh(queries[:, 0])
class RefH(BaseH):
"""Hyperbolic 2x2 Givens reflections"""
def get_queries(self, queries):
"""Compute embedding and biases of queries."""
c = F.softplus(self.c[queries[:, 1]])
rel, _ = torch.chunk(self.rel(queries[:, 1]), 2, dim=1)
rel = expmap0(rel, c)
lhs = givens_reflection(self.rel_diag(queries[:, 1]), self.entity(queries[:, 0]))
lhs = expmap0(lhs, c)
res = project(mobius_add(lhs, rel, c), c)
return (res, c), self.bh(queries[:, 0])
class AttH(BaseH):
"""Hyperbolic attention model combining translations, reflections and rotations"""
def __init__(self, args):
super(AttH, self).__init__(args)
self.rel_diag = nn.Embedding(self.sizes[1], 2 * self.rank)
self.rel_diag.weight.data = 2 * torch.rand((self.sizes[1], 2 * self.rank), dtype=self.data_type) - 1.0
self.context_vec = nn.Embedding(self.sizes[1], self.rank)
self.context_vec.weight.data = self.init_size * torch.randn((self.sizes[1], self.rank), dtype=self.data_type)
self.act = nn.Softmax(dim=1)
if args.dtype == "double":
self.scale = torch.Tensor([1. / np.sqrt(self.rank)]).double().cuda()
else:
self.scale = torch.Tensor([1. / np.sqrt(self.rank)]).cuda()
def get_queries(self, queries):
"""Compute embedding and biases of queries."""
c = F.softplus(self.c[queries[:, 1]])
head = self.entity(queries[:, 0])
rot_mat, ref_mat = torch.chunk(self.rel_diag(queries[:, 1]), 2, dim=1)
rot_q = givens_rotations(rot_mat, head).view((-1, 1, self.rank))
ref_q = givens_reflection(ref_mat, head).view((-1, 1, self.rank))
cands = torch.cat([ref_q, rot_q], dim=1)
context_vec = self.context_vec(queries[:, 1]).view((-1, 1, self.rank))
att_weights = torch.sum(context_vec * cands * self.scale, dim=-1, keepdim=True)
att_weights = self.act(att_weights)
att_q = torch.sum(att_weights * cands, dim=1)
lhs = expmap0(att_q, c)
rel, _ = torch.chunk(self.rel(queries[:, 1]), 2, dim=1)
rel = expmap0(rel, c)
res = project(mobius_add(lhs, rel, c), c)
return (res, c), self.bh(queries[:, 0])
| KGEmb-master | models/hyperbolic.py |
"""Euclidean Knowledge Graph embedding models where embeddings are in complex space."""
import torch
from torch import nn
from models.base import KGModel
COMPLEX_MODELS = ["ComplEx", "RotatE"]
class BaseC(KGModel):
"""Complex Knowledge Graph Embedding models.
Attributes:
embeddings: complex embeddings for entities and relations
"""
def __init__(self, args):
"""Initialize a Complex KGModel."""
super(BaseC, self).__init__(args.sizes, args.rank, args.dropout, args.gamma, args.dtype, args.bias,
args.init_size)
assert self.rank % 2 == 0, "Complex models require even embedding dimension"
self.rank = self.rank // 2
self.embeddings = nn.ModuleList([
nn.Embedding(s, 2 * self.rank, sparse=True)
for s in self.sizes[:2]
])
self.embeddings[0].weight.data = self.init_size * self.embeddings[0].weight.to(self.data_type)
self.embeddings[1].weight.data = self.init_size * self.embeddings[1].weight.to(self.data_type)
def get_rhs(self, queries, eval_mode):
"""Get embeddings and biases of target entities."""
if eval_mode:
return self.embeddings[0].weight, self.bt.weight
else:
return self.embeddings[0](queries[:, 2]), self.bt(queries[:, 2])
def similarity_score(self, lhs_e, rhs_e, eval_mode):
"""Compute similarity scores or queries against targets in embedding space."""
lhs_e = lhs_e[:, :self.rank], lhs_e[:, self.rank:]
rhs_e = rhs_e[:, :self.rank], rhs_e[:, self.rank:]
if eval_mode:
return lhs_e[0] @ rhs_e[0].transpose(0, 1) + lhs_e[1] @ rhs_e[1].transpose(0, 1)
else:
return torch.sum(
lhs_e[0] * rhs_e[0] + lhs_e[1] * rhs_e[1],
1, keepdim=True
)
def get_complex_embeddings(self, queries):
"""Get complex embeddings of queries."""
head_e = self.embeddings[0](queries[:, 0])
rel_e = self.embeddings[1](queries[:, 1])
rhs_e = self.embeddings[0](queries[:, 2])
head_e = head_e[:, :self.rank], head_e[:, self.rank:]
rel_e = rel_e[:, :self.rank], rel_e[:, self.rank:]
rhs_e = rhs_e[:, :self.rank], rhs_e[:, self.rank:]
return head_e, rel_e, rhs_e
def get_factors(self, queries):
"""Compute factors for embeddings' regularization."""
head_e, rel_e, rhs_e = self.get_complex_embeddings(queries)
head_f = torch.sqrt(head_e[0] ** 2 + head_e[1] ** 2)
rel_f = torch.sqrt(rel_e[0] ** 2 + rel_e[1] ** 2)
rhs_f = torch.sqrt(rhs_e[0] ** 2 + rhs_e[1] ** 2)
return head_f, rel_f, rhs_f
class ComplEx(BaseC):
"""Simple complex model http://proceedings.mlr.press/v48/trouillon16.pdf"""
def get_queries(self, queries):
"""Compute embedding and biases of queries."""
head_e, rel_e, _ = self.get_complex_embeddings(queries)
lhs_e = torch.cat([
head_e[0] * rel_e[0] - head_e[1] * rel_e[1],
head_e[0] * rel_e[1] + head_e[1] * rel_e[0]
], 1)
return lhs_e, self.bh(queries[:, 0])
class RotatE(BaseC):
"""Rotations in complex space https://openreview.net/pdf?id=HkgEQnRqYQ"""
def get_queries(self, queries):
"""Compute embedding and biases of queries."""
head_e, rel_e, _ = self.get_complex_embeddings(queries)
rel_norm = torch.sqrt(rel_e[0] ** 2 + rel_e[1] ** 2)
cos = rel_e[0] / rel_norm
sin = rel_e[1] / rel_norm
lhs_e = torch.cat([
head_e[0] * cos - head_e[1] * sin,
head_e[0] * sin + head_e[1] * cos
], 1)
return lhs_e, self.bh(queries[:, 0])
| KGEmb-master | models/complex.py |
"""Base Knowledge Graph embedding model."""
from abc import ABC, abstractmethod
import torch
from torch import nn
class KGModel(nn.Module, ABC):
"""Base Knowledge Graph Embedding model class.
Attributes:
sizes: Tuple[int, int, int] with (n_entities, n_relations, n_entities)
rank: integer for embedding dimension
dropout: float for dropout rate
gamma: torch.nn.Parameter for margin in ranking-based loss
data_type: torch.dtype for machine precision (single or double)
bias: string for whether to learn or fix bias (none for no bias)
init_size: float for embeddings' initialization scale
entity: torch.nn.Embedding with entity embeddings
rel: torch.nn.Embedding with relation embeddings
bh: torch.nn.Embedding with head entity bias embeddings
bt: torch.nn.Embedding with tail entity bias embeddings
"""
def __init__(self, sizes, rank, dropout, gamma, data_type, bias, init_size):
"""Initialize KGModel."""
super(KGModel, self).__init__()
if data_type == 'double':
self.data_type = torch.double
else:
self.data_type = torch.float
self.sizes = sizes
self.rank = rank
self.dropout = dropout
self.bias = bias
self.init_size = init_size
self.gamma = nn.Parameter(torch.Tensor([gamma]), requires_grad=False)
self.entity = nn.Embedding(sizes[0], rank)
self.rel = nn.Embedding(sizes[1], rank)
self.bh = nn.Embedding(sizes[0], 1)
self.bh.weight.data = torch.zeros((sizes[0], 1), dtype=self.data_type)
self.bt = nn.Embedding(sizes[0], 1)
self.bt.weight.data = torch.zeros((sizes[0], 1), dtype=self.data_type)
@abstractmethod
def get_queries(self, queries):
"""Compute embedding and biases of queries.
Args:
queries: torch.LongTensor with query triples (head, relation, tail)
Returns:
lhs_e: torch.Tensor with queries' embeddings (embedding of head entities and relations)
lhs_biases: torch.Tensor with head entities' biases
"""
pass
@abstractmethod
def get_rhs(self, queries, eval_mode):
"""Get embeddings and biases of target entities.
Args:
queries: torch.LongTensor with query triples (head, relation, tail)
eval_mode: boolean, true for evaluation, false for training
Returns:
rhs_e: torch.Tensor with targets' embeddings
if eval_mode=False returns embedding of tail entities (n_queries x rank)
else returns embedding of all possible entities in the KG dataset (n_entities x rank)
rhs_biases: torch.Tensor with targets' biases
if eval_mode=False returns biases of tail entities (n_queries x 1)
else returns biases of all possible entities in the KG dataset (n_entities x 1)
"""
pass
@abstractmethod
def similarity_score(self, lhs_e, rhs_e, eval_mode):
"""Compute similarity scores or queries against targets in embedding space.
Args:
lhs_e: torch.Tensor with queries' embeddings
rhs_e: torch.Tensor with targets' embeddings
eval_mode: boolean, true for evaluation, false for training
Returns:
scores: torch.Tensor with similarity scores of queries against targets
"""
pass
def score(self, lhs, rhs, eval_mode):
"""Scores queries against targets
Args:
lhs: Tuple[torch.Tensor, torch.Tensor] with queries' embeddings and head biases
returned by get_queries(queries)
rhs: Tuple[torch.Tensor, torch.Tensor] with targets' embeddings and tail biases
returned by get_rhs(queries, eval_mode)
eval_mode: boolean, true for evaluation, false for training
Returns:
score: torch.Tensor with scores of queries against targets
if eval_mode=True, returns scores against all possible tail entities, shape (n_queries x n_entities)
else returns scores for triples in batch (shape n_queries x 1)
"""
lhs_e, lhs_biases = lhs
rhs_e, rhs_biases = rhs
score = self.similarity_score(lhs_e, rhs_e, eval_mode)
if self.bias == 'constant':
return self.gamma.item() + score
elif self.bias == 'learn':
if eval_mode:
return lhs_biases + rhs_biases.t() + score
else:
return lhs_biases + rhs_biases + score
else:
return score
def get_factors(self, queries):
"""Computes factors for embeddings' regularization.
Args:
queries: torch.LongTensor with query triples (head, relation, tail)
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor] with embeddings to regularize
"""
head_e = self.entity(queries[:, 0])
rel_e = self.rel(queries[:, 1])
rhs_e = self.entity(queries[:, 2])
return head_e, rel_e, rhs_e
def forward(self, queries, eval_mode=False):
"""KGModel forward pass.
Args:
queries: torch.LongTensor with query triples (head, relation, tail)
eval_mode: boolean, true for evaluation, false for training
Returns:
predictions: torch.Tensor with triples' scores
shape is (n_queries x 1) if eval_mode is false
else (n_queries x n_entities)
factors: embeddings to regularize
"""
# get embeddings and similarity scores
lhs_e, lhs_biases = self.get_queries(queries)
# queries = F.dropout(queries, self.dropout, training=self.training)
rhs_e, rhs_biases = self.get_rhs(queries, eval_mode)
# candidates = F.dropout(candidates, self.dropout, training=self.training)
predictions = self.score((lhs_e, lhs_biases), (rhs_e, rhs_biases), eval_mode)
# get factors for regularization
factors = self.get_factors(queries)
return predictions, factors
def get_ranking(self, queries, filters, batch_size=1000):
"""Compute filtered ranking of correct entity for evaluation.
Args:
queries: torch.LongTensor with query triples (head, relation, tail)
filters: filters[(head, relation)] gives entities to ignore (filtered setting)
batch_size: int for evaluation batch size
Returns:
            ranks: torch.Tensor with ranks of correct entities
"""
ranks = torch.ones(len(queries))
with torch.no_grad():
b_begin = 0
candidates = self.get_rhs(queries, eval_mode=True)
while b_begin < len(queries):
these_queries = queries[b_begin:b_begin + batch_size].cuda()
q = self.get_queries(these_queries)
rhs = self.get_rhs(these_queries, eval_mode=False)
scores = self.score(q, candidates, eval_mode=True)
targets = self.score(q, rhs, eval_mode=False)
# set filtered and true scores to -1e6 to be ignored
for i, query in enumerate(these_queries):
filter_out = filters[(query[0].item(), query[1].item())]
filter_out += [queries[b_begin + i, 2].item()]
scores[i, torch.LongTensor(filter_out)] = -1e6
ranks[b_begin:b_begin + batch_size] += torch.sum(
(scores >= targets).float(), dim=1
).cpu()
b_begin += batch_size
return ranks
def compute_metrics(self, examples, filters, batch_size=500):
"""Compute ranking-based evaluation metrics.
Args:
examples: torch.LongTensor of size n_examples x 3 containing triples' indices
filters: Dict with entities to skip per query for evaluation in the filtered setting
batch_size: integer for batch size to use to compute scores
Returns:
            Evaluation metrics (mean rank, mean reciprocal rank and hits)
"""
mean_rank = {}
mean_reciprocal_rank = {}
hits_at = {}
for m in ["rhs", "lhs"]:
q = examples.clone()
if m == "lhs":
tmp = torch.clone(q[:, 0])
q[:, 0] = q[:, 2]
q[:, 2] = tmp
q[:, 1] += self.sizes[1] // 2
ranks = self.get_ranking(q, filters[m], batch_size=batch_size)
mean_rank[m] = torch.mean(ranks).item()
mean_reciprocal_rank[m] = torch.mean(1. / ranks).item()
hits_at[m] = torch.FloatTensor((list(map(
lambda x: torch.mean((ranks <= x).float()).item(),
(1, 3, 10)
))))
return mean_rank, mean_reciprocal_rank, hits_at
| KGEmb-master | models/base.py |