import time
import torch
import comet.utils as utils
import comet.data.config as cfg
class Evaluator(object):
def __init__(self, opt, model, data_loader):
super(Evaluator, self).__init__()
self.data_loader = data_loader
self.model = model
self.batch_variables = {
"model": model,
"data": data_loader
}
self.opt = opt
    def validate(self, epoch, split="dev", losses=None, keyset=None):
        if losses is None:
            losses = {}
self.batch_variables["split"] = split
print("Evaluating {}".format(split))
epoch_losses = self.epoch(
self.opt, self.model, self.data_loader, split, keyset)
self.print_result(split, epoch_losses)
for loss_name, loss_val in epoch_losses.items():
losses.setdefault(loss_name, {})
            losses[loss_name][epoch] = loss_val
def epoch(self, opt, model, data_loader, split, keyset=None):
average_loss, nums = self.initialize_losses()
data_loader.reset_offsets(splits=split, shuffle=False)
# Set evaluation mode
model.eval()
        eval_start = time.time()
# Initialize progress bar
bar = utils.set_progress_bar(
data_loader.total_size[split])
reset = False
with torch.no_grad():
while not reset:
start = data_loader.offset_summary(split)
outputs = self.batch(
opt, nums, average_loss,
self.batch_variables, eval_mode=True)
end = data_loader.offset_summary(split)
reset = outputs["reset"]
if not reset:
bar.update(end - start)
else:
print(end)
if cfg.toy and self.counter(nums) > 100:
break
if (opt.eval.es != "full" and
(self.counter(nums) > opt.eval.es)):
break
nums = outputs["nums"]
        if torch.cuda.is_available():
            torch.cuda.synchronize()
print("{} evaluation completed in: {} s".format(
split.capitalize(), time.time() - start))
average_loss = self.compute_final_scores(
average_loss, nums)
return average_loss
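# Usage sketch (illustrative): a concrete evaluator subclass supplies
# initialize_losses/batch/print_result; `opt`, `model`, and `data_loader`
# come from the surrounding training pipeline, and the loss values shown
# are made-up placeholders.
#
#   evaluator = make_evaluator(opt, model, data_loader)  # e.g. from comet.evaluate.atomic_evaluate
#   losses = {}
#   evaluator.validate(0, split="dev", losses=losses)
#   # losses now maps each loss name to {epoch: value}, e.g.
#   # {"total_micro": {0: 3.41}, "total_macro": {0: 3.57}}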
| comet-public-master | comet/evaluate/evaluate.py |
import numpy as np
import torch
import torch.nn.functional as F
import comet.data.data as data
import comet.data.config as cfg
import comet.models.utils as model_utils
def make_sampler(sampler_type, opt, *args, **kwargs):
    print("Initializing {} sampler".format(sampler_type))
    if sampler_type == "greedy":
        return GreedySampler(opt, *args, **kwargs)
    elif sampler_type == "topk":
        return TopKSampler(opt, *args, **kwargs)
    return BeamSampler(opt, *args, **kwargs)
class Sampler():
def __init__(self, opt, data_loader, batch_mode=False):
# Token on which to end sampling
self.end_token = data_loader.vocab_encoder[data.end_token]
self.opt = opt
    def generate_sequence(self, batch, model):
        raise NotImplementedError
class GreedySampler(Sampler):
def __init__(self, opt, data_loader, batch_mode=True):
super(GreedySampler, self).__init__(opt, data_loader)
def append_batch(self, X, next_idx, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((next_idx, next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
values, indices = lm_probs[:, -1, :].max(dim=-1)
seqs = indices.clone().unsqueeze(1)
loss = values
counts = 1
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((indices.view(-1, 1), next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
        # Greedy decoding: repeatedly append the argmax token
        for step in range(self.opt.eval.smax):
            lm_probs = F.log_softmax(model(
                XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
            # Take the highest-probability next token
            values, next_idx = lm_probs[:, -1, :].max(dim=-1)
            loss += values
            counts += 1
            next_idx = next_idx.unsqueeze(1)
            seqs = torch.cat([seqs, next_idx], 1)
            if (next_idx.item() == self.end_token) or (step == end_len - 1):
                break
            XMB, MMB = self.append_batch(XMB, next_idx, MMB)
beams = []
for beam in seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": [loss.item()],
"loss": loss.item(),
"beam_lengths": [counts],
"length": counts
}
return sampling_result
class TopKSampler(Sampler):
def __init__(self, opt, data_loader, batch_mode=True):
super(TopKSampler, self).__init__(opt, data_loader)
def append_batch(self, X, next_idx, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((next_idx, next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
# start_idx = context_size_event + 1
# start_idx = max_e1 + max_r
# end_idx = context_size_effect - 1
# end_idx = max_e2
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
values, indices = lm_probs[:, -1, :].topk(self.opt.eval.k)
seqs = indices.t().clone()
losses = - values.view(-1, 1)
ended = (seqs == self.end_token).float()
counts = (1 - ended)
XMB = XMB.repeat(self.opt.eval.k, 1, 1)
MMB = MMB.repeat(self.opt.eval.k, 1)
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((indices.view(self.opt.eval.k, -1), next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
# Sample from top k
for _ in range(end_len):
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
# Sample from top k
values, indices = lm_probs[:, -1, :].topk(self.opt.eval.k)
choice = torch.multinomial(values.exp(), 1)
next_idx = indices.gather(-1, choice)
ended = ended + (next_idx == self.end_token).float() * (1 - ended)
next_idx = next_idx * (1 - ended).long() + ended.long() * self.end_token
counts += (1 - ended)
seqs = torch.cat([seqs, next_idx], 1)
if ended.sum().item() == self.opt.eval.k:
break
losses -= values.gather(-1, choice) * (1 - ended)
XMB, MMB = self.append_batch(XMB, next_idx, MMB)
beams = []
for beam in seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": losses.squeeze().tolist(),
"loss": losses[0].item(),
"beam_lengths": counts.long().squeeze().tolist(),
"length": counts[0].long().item()
}
return sampling_result
class BeamSampler(TopKSampler):
def __init__(self, opt, data_loader, batch_mode=True, scorer=None):
super(BeamSampler, self).__init__(opt, data_loader, batch_mode)
self.kill_mask = torch.ones(opt.eval.bs, opt.eval.bs).to(cfg.device) * 9000
self.kill_mask[:, 0] = 0
    def make_batch(self, X, n_vocab, n_special):
        # Pair each token id with a position id; position ids occupy the
        # vocabulary range starting at n_vocab + n_special. Callers must
        # supply n_vocab and n_special, which are not sampler attributes.
        X = np.array(X)
        assert X.ndim in [1, 2]
        if X.ndim == 1:
            X = np.expand_dims(X, axis=0)
        pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
        pos_enc = np.expand_dims(pos_enc, axis=0)
        batch = np.stack([X, pos_enc], axis=-1)
        batch = torch.tensor(batch, dtype=torch.long).to(cfg.device)
        return batch
def append_batch(self, X, beam_toks, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((beam_toks.unsqueeze(1), next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
# start_idx = context_size_event + 1
# start_idx = max_e1 + max_r
# end_idx = context_size_effect - 1
# end_idx = max_e2
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
tokens = []
beam_losses = []
# Beam Search
beam_lls, beam_toks, beam_seqs = None, None, None
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
dist = lm_probs[:, -1, :].squeeze()
beam_lls, beam_toks = dist.topk(self.opt.eval.bs)
beam_losses.append(beam_lls)
ended = (beam_toks == self.end_token).float()
counts = (2 - ended)
beam_toks = beam_toks.unsqueeze(1)
beam_seqs = beam_toks.clone()
XMB = XMB.repeat(self.opt.eval.bs, 1, 1)
MMB = MMB.repeat(self.opt.eval.bs, 1)
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((beam_toks, next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
for _ in range(end_len):
# Compute distribution for current beam
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
dist = lm_probs[:, -1, :].squeeze()
# get hypothesis tokens for distribution
hyp_beam_lls, hyp_beam_toks = dist.topk(self.opt.eval.bs)
# Compute masks and expand beam
expanded_ended = ended.unsqueeze(1).repeat(1, self.opt.eval.bs)
hypothesis_mask = expanded_ended * self.kill_mask + (1 - expanded_ended)
current_beam_lls = beam_lls.unsqueeze(1).repeat(
1, self.opt.eval.bs).view(self.opt.eval.bs**2)
# Compute losses of hypotheses, masking those that have ended
hyp_beam_lls = (hyp_beam_lls.view(self.opt.eval.bs**2) *
hypothesis_mask.view(-1)) + current_beam_lls
# Get normalizer for sequences
temp_counts = counts.unsqueeze(1).repeat(1, self.opt.eval.bs).view(
self.opt.eval.bs ** 2)
# Select best beams with lowest aggregate loss
beam_lls, top_beam_idxs = (hyp_beam_lls / temp_counts).topk(self.opt.eval.bs)
            # Update placements in beam based on selection
beam_losses = [i.index_select(0, top_beam_idxs // self.opt.eval.bs)
for i in beam_losses]
ended = ended.index_select(0, top_beam_idxs // self.opt.eval.bs)
counts = temp_counts.index_select(0, top_beam_idxs)
# Save beam losses
beam_losses.append(beam_lls * counts)
# Update beam tokens
ended_mask = (1 - ended).long()
end_replacement = (self.end_token * ended).long()
next_toks = hyp_beam_toks.view(-1)[top_beam_idxs]
beam_toks = next_toks * ended_mask + end_replacement
# Update ended and counts
ended = ended + (beam_toks == self.end_token).float() * (1 - ended)
counts = counts + (1 - ended)
# Update beam sequences
beam_seqs = beam_seqs.t().repeat(self.opt.eval.bs, 1).t().contiguous().view(
self.opt.eval.bs**2, -1)[top_beam_idxs]
beam_seqs = torch.cat((beam_seqs, beam_toks.unsqueeze(1)), dim=1)
            # Expand each beam's context bs times so the flattened bs**2
            # hypothesis contexts line up with top_beam_idxs for selection
XMB = XMB.transpose(0, 1).transpose(1, 2).repeat(
self.opt.eval.bs, 1, 1).transpose(2, 1).transpose(
1, 0).contiguous().view(
self.opt.eval.bs**2, XMB.size(1), XMB.size(2))[top_beam_idxs]
XMB, MMB = self.append_batch(XMB, beam_toks, MMB)
if (beam_toks == self.end_token).sum().item() == self.opt.eval.bs:
break
beams = []
for beam in beam_seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": beam_lls.tolist(),
"loss": beam_lls[0].item(),
"beam_lengths": counts.tolist(),
"length": counts[0].item()
}
return sampling_result
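# Usage sketch (illustrative): given a trained model, a data_loader, and an
# opt with opt.eval.{smax,k,bs} populated, all samplers share one interface.
#
#   sampler = make_sampler("topk", opt, data_loader)
#   result = sampler.generate_sequence(
#       batch, model, data_loader,
#       data_loader.max_e1 + data_loader.max_r,   # start_idx: end of context
#       data_loader.max_e2)                       # end_len: max tokens to emit
#   result["sequence"]   # best decoded string
#   result["beams"]      # all decoded candidates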
| comet-public-master | comet/evaluate/sampler.py |
import torch
import torch.nn.functional as F
import comet.train.utils as train_utils
import comet.models.utils as model_utils
import comet.evaluate.utils as eval_utils
##############################################################################
# BATCH
##############################################################################
def batch_atomic_generate(opt, nums, losses, batch_variables, eval_mode=False):
data_loader = batch_variables["data"]
model = batch_variables["model"]
split = batch_variables["split"]
batch, reset = data_loader.sample_batch(split, bs=opt.train.dynamic.bs)
input_ = model_utils.prepare_position_embeddings(
opt, data_loader.vocab_encoder, batch["sequences"].unsqueeze(-1))
attention_mask = batch["attention_mask"]
loss_mask = batch["loss_mask"]
targets = input_.squeeze(0)[:, 1:, 0].contiguous().view(-1)
loss, dist = mle_steps(
opt.net.model, model, input_[:, :-1, :], targets,
attention_mask[:, :-1], loss_reduction="none")
# Set loss name
micro_name = "total_micro"
macro_name = "total_macro"
length = loss_mask.sum(1)
bs = input_.size(0)
final_loss = (loss * loss_mask).sum(1)
update_generation_losses(losses, nums, micro_name, macro_name, bs,
length, (loss * loss_mask).sum(1), split)
final_loss = final_loss / length
outputs = {"loss": final_loss.sum(), "nums": nums, "reset": reset}
return outputs
def batch_conceptnet_generate(opt, nums, losses, batch_variables,
eval_mode=False, tracking_mode=False):
data_loader = batch_variables["data"]
model = batch_variables["model"]
split = batch_variables["split"]
category = batch_variables["category"]
batch, reset = data_loader.sample_batch(
split, bs=opt.train.dynamic.bs, cat=category)
input_ = model_utils.prepare_position_embeddings(
opt, data_loader.vocab_encoder, batch["sequences"].unsqueeze(-1))
attention_mask = batch["attention_mask"]
loss_mask = batch["loss_mask"]
targets = input_.squeeze(0)[:, 1:, 0].contiguous().view(-1)
loss, dist = mle_steps(
opt.net.model, model, input_[:, :-1, :], targets,
attention_mask[:, :-1], loss_reduction="none")
# Set loss name
if not eval_mode or batch_variables["category"] == "positive":
micro_name = "total_micro"
macro_name = "total_macro"
else:
micro_name = "negative_micro"
macro_name = "negative_macro"
length = loss_mask.sum(1)
bs = input_.size(0)
final_loss = (loss * loss_mask).sum(1)
update_generation_losses(losses, nums, micro_name, macro_name, bs,
length, (loss * loss_mask).sum(1), split)
final_loss = final_loss / length
outputs = {"loss": final_loss.sum(), "nums": nums, "reset": reset}
if tracking_mode:
outputs["tracking"] = final_loss.squeeze().tolist()
return outputs
def mle_steps(key, model, input_, targets, attention_mask,
loss_reduction="mean", i=None):
word_acts = decode(model, input_.unsqueeze(1),
attention_mask, i)
word_dist = train_utils.modify_output_for_loss_fn(
"nll", word_acts, dim=-1)
# Compute losses
loss = F.nll_loss(
word_dist.view(-1, word_dist.size(-1)),
targets, reduction=loss_reduction)
if loss_reduction != "mean":
return loss.view(word_dist.size(0), -1), word_dist
else:
return loss, word_dist
def decode(model, input_, attention_mask, i=None):
return model(input_, sequence_mask=attention_mask)
def update_generation_losses(losses, nums, micro, macro, bs,
length, loss, split):
if split == "train":
train_utils.update_generation_losses(
losses, nums, micro, macro, bs, length, loss)
else:
eval_utils.update_generation_losses(
losses, nums, micro, macro, bs, length, loss)
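# Shape sketch for mle_steps above (illustrative), assuming inputs built by
# prepare_position_embeddings, i.e. (token id, position id) pairs:
#   input_          (bs, seq_len, 2)   context tokens with position ids
#   targets         (bs * seq_len,)    flattened next-token ids
#   attention_mask  (bs, seq_len)      1.0 for real tokens, 0.0 for padding
# With loss_reduction="none" it returns a per-token loss of shape
# (bs, seq_len) alongside the log-probability distribution word_dist.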
| comet-public-master | comet/train/batch.py |
import comet.data.config as cfg
import comet.train.atomic_train as base_train
import comet.train.batch as batch_utils
import comet.evaluate.conceptnet_evaluate as evaluate
import comet.evaluate.conceptnet_generate as gen
def make_trainer(opt, *args):
return ConceptNetGenerationIteratorTrainer(opt, *args)
class ConceptNetGenerationIteratorTrainer(
base_train.AtomicGenerationIteratorTrainer):
def set_evaluator(self, opt, model, data_loader):
self.evaluator = evaluate.make_evaluator(
opt, model, data_loader)
def set_generator(self, opt, model, data_loader):
self.generator = gen.make_generator(
opt, model, data_loader)
def batch(self, opt, *args):
outputs = batch_utils.batch_atomic_generate(opt, *args)
token_loss = outputs["loss"]
nums = outputs["nums"]
reset = outputs["reset"]
return token_loss, nums, reset
def update_top_score(self, opt):
print(self.top_score)
tracked_scores = self.get_tracked_score()
        if self.top_score is None:
            self.top_score = {"epoch": {}, "score": {}}
self.top_score["epoch"]["total_micro"] = self.opt.train.dynamic.epoch
self.top_score["score"]["total_micro"] = tracked_scores["total_micro"]
else:
if tracked_scores["total_micro"] < self.top_score["score"]["total_micro"]:
self.top_score["epoch"]["total_micro"] = self.opt.train.dynamic.epoch
self.top_score["score"]["total_micro"] = tracked_scores["total_micro"]
print(self.top_score)
def get_tracked_score(self):
return {
"total_micro": self.losses["dev"]["total_micro"][self.opt.train.dynamic.epoch]
}
def decide_to_save(self):
to_save = cfg.save and not cfg.toy
curr_epoch = self.opt.train.dynamic.epoch
to_save = to_save or cfg.test_save
print(cfg.save_strategy)
if cfg.save_strategy == "best":
            if self.top_score["epoch"]["total_micro"] != curr_epoch:
to_save = False
return to_save
| comet-public-master | comet/train/conceptnet_train.py |
import comet.train.train as base_train
import comet.train.batch as batch
import comet.evaluate.atomic_evaluate as evaluate
import comet.evaluate.sampler as sampling
# import comet.evaluate.atomic_generate as gen
def make_trainer(opt, *args):
return AtomicGenerationIteratorTrainer(opt, *args)
class AtomicGenerationIteratorTrainer(base_train.IteratorTrainer):
def __init__(self, opt, *args):
super(AtomicGenerationIteratorTrainer, self).__init__(opt, *args)
self.initialize_losses(opt.data.get("categories", []))
def set_evaluator(self, opt, model, data_loader):
self.evaluator = evaluate.make_evaluator(
opt, model, data_loader)
# def set_generator(self, opt, model, data_loader, scores, reward=None):
# self.generator = gen.make_generator(
# opt, model, data_loader, scores, reward)
def set_sampler(self, opt):
if opt.train.static.samp not in self.samplers:
self.samplers[opt.train.static.samp] = sampling.make_sampler(
opt.train.static.samp, opt, self.data_loader, batch_mode=True)
self.batch_variables["sampler"] = self.samplers
def batch(self, opt, *args):
outputs = batch.batch_atomic_generate(opt, *args)
token_loss = outputs["loss"]
nums = outputs["nums"]
reset = outputs["reset"]
return token_loss, nums, reset
def initialize_losses(self, categories):
self.losses["train"] = {
"total_micro": [0],
"total_macro": [0]
}
nums = {"total_micro": 0, "total_macro": 0}
for category in categories:
micro_name = "{}_micro".format(category)
macro_name = "{}_macro".format(category)
self.losses["train"][micro_name] = [0]
self.losses["train"][macro_name] = [0]
nums[micro_name] = 0
nums[macro_name] = 0
return nums
def update_top_score(self, opt):
print(self.top_score)
if self.top_score is None:
self.top_score = (self.opt.train.dynamic.epoch,
self.get_tracked_score())
elif self.get_tracked_score() < self.top_score[-1]:
self.top_score = (self.opt.train.dynamic.epoch,
self.get_tracked_score())
print(self.top_score)
def get_tracked_score(self):
return self.losses["dev"]["total_micro"][self.opt.train.dynamic.epoch]
def counter(self, nums):
return nums["total_macro"]
| comet-public-master | comet/train/atomic_train.py |
comet-public-master | comet/train/__init__.py |
|
'''TAKEN from OpenAI LM Code by HuggingFace'''
import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
    s = 1 if x <= warmup else 0
    return s * (x / warmup) + (1 - s) * (0.5 * (1 + math.cos(math.pi * x)))
def warmup_constant(x, warmup=0.002):
s = 1 if x <= warmup else 0
return s*(x/warmup) + (1-s)*1
def warmup_linear(x, warmup=0.002):
    s = 1 if x <= warmup else 0
    return (s * (x / warmup) + (1 - s)) * (1 - x)
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
class OpenAIAdam(Optimizer):
"""Implements Open AI version of Adam algorithm with weight decay fix.
"""
def __init__(self, params, lr, schedule, warmup, t_total,
b1=0.9, b2=0.999, e=1e-8, l2=0,
vector_l2=False, max_grad_norm=-1, **kwargs):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0 <= warmup:
raise ValueError("Invalid warmup: {}".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {}".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {}".format(b2))
if not 0.0 <= e:
raise ValueError("Invalid epsilon value: {}".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, l2=l2, vector_l2=vector_l2,
max_grad_norm=max_grad_norm)
super(OpenAIAdam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
        for group in self.param_groups:
            for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, '
                        'please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['b1'], group['b2']
state['step'] += 1
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['e'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = (group['lr'] * schedule_fct(state['step'] /
group['t_total'], group['warmup']))
step_size = (lr_scheduled * math.sqrt(bias_correction2) /
bias_correction1)
p.data.addcdiv_(-step_size, exp_avg, denom)
# Add weight decay at the end (fixed version)
if (len(p.size()) > 1 or group['vector_l2']) and group['l2'] > 0:
p.data.add_(-lr_scheduled * group['l2'], p.data)
return loss
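if __name__ == "__main__":
    # Minimal sanity check of the schedule shapes, assuming a toy run of
    # 1000 updates; x is the fraction of total training progress.
    for name, fct in SCHEDULES.items():
        samples = [round(fct(t / 1000, warmup=0.002), 4)
                   for t in (1, 2, 100, 500, 999)]
        print(name, samples)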
| comet-public-master | comet/train/opt.py |
import torch
import torch.nn.functional as F
import copy
def update_generation_losses(losses, nums, micro, macro, bs, length, loss):
# Update Losses
losses[micro] += \
[copy.deepcopy(losses[micro][-1])]
losses[macro] += \
[copy.deepcopy(losses[macro][-1])]
losses[micro][-1] *= nums[micro]
losses[macro][-1] *= nums[macro]
nums[macro] += bs
if isinstance(length, int):
update_indiv_generation_losses(
losses, nums, micro, macro, bs, length, loss)
else:
update_tensor_generation_losses(
losses, nums, micro, macro, bs, length, loss)
def update_indiv_generation_losses(losses, nums, micro,
macro, bs, length, loss):
nums[micro] += (bs * length)
batch_loss = loss * bs
losses[micro][-1] += batch_loss
losses[micro][-1] /= nums[micro]
losses[macro][-1] += batch_loss / length
losses[macro][-1] /= nums[macro]
def update_tensor_generation_losses(losses, nums, micro,
macro, bs, length, loss):
nums[micro] += length.sum().item()
losses[micro][-1] += loss.sum().item()
losses[micro][-1] /= nums[micro]
losses[macro][-1] += (loss / length.float()).sum().item()
losses[macro][-1] /= nums[macro]
def modify_output_for_loss_fn(loss_fn, output, dim):
if loss_fn == "ce":
return output
if loss_fn == "mse":
return F.softmax(output, dim=dim)
if loss_fn == "nll":
return F.log_softmax(output, dim=dim)
    if loss_fn in ["bce", "wbce", "wbce1"]:
        return torch.sigmoid(output)
    raise ValueError("Unknown loss function: {}".format(loss_fn))
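if __name__ == "__main__":
    # Toy check of the running averages (illustrative values): two sequences
    # of lengths 3 and 5 with summed token losses 1.2 and 2.0. Micro averages
    # per token, macro averages per sequence; both come out to about 0.4 here.
    losses = {"total_micro": [0], "total_macro": [0]}
    nums = {"total_micro": 0, "total_macro": 0}
    update_generation_losses(
        losses, nums, "total_micro", "total_macro", bs=2,
        length=torch.tensor([3.0, 5.0]), loss=torch.tensor([1.2, 2.0]))
    print(losses["total_micro"][-1])  # ~0.4 = 3.2 / 8 tokens
    print(losses["total_macro"][-1])  # ~0.4 = (0.4 + 0.4) / 2 sequences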
| comet-public-master | comet/train/utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import comet.data.config as cfg
import comet.data.data as data
import comet.train.utils as train_utils
import comet.train.batch as batch
import comet.evaluate.evaluate as evaluate
import comet.evaluate.generate as gen
import comet.evaluate.sampler as sampling
import comet.utils as utils
from tensorboardX import SummaryWriter
class Trainer(object):
def __init__(self, opt, meta, data_loader, model, optimizer):
self.optimizer = optimizer
self.model = model
if opt.trainer == "epoch":
self.epochs = meta.epochs
self.data_loader = data_loader
self.opt = opt
self.losses = {"dev": {}, "test": {}, "train": {}}
self.top_score = None
self.lrs = {}
self.batch_variables = {
"data": self.data_loader,
"model": self.model,
"split": "train"
}
self.do_gen = cfg.do_gen
self.samplers = {}
def decide_to_save(self):
to_save = cfg.save and not cfg.toy
to_save = to_save or cfg.test_save
print(cfg.save_strategy)
if cfg.save_strategy == "best":
if self.top_score[0] != self.opt.train.dynamic.epoch:
print("DOING IT RIGHT")
to_save = False
return to_save
def save_model(self, tracked_score):
lrs = {}
for i, param_group in enumerate(self.optimizer.param_groups):
lrs[i] = param_group['lr']
self.lrs[self.opt.train.dynamic.epoch] = lrs
to_save = self.decide_to_save()
if to_save:
data.save_step(
self.model, self.data_loader.vocab_encoder,
self.optimizer, self.opt,
self.opt.train.dynamic.epoch, self.lrs)
def log_losses(self, opt, losses):
if (not cfg.toy and cfg.save) or cfg.test_save:
data.save_eval_file(opt, losses["train"], "losses", split="train")
data.save_eval_file(opt, losses['dev'], "losses", split="dev")
data.save_eval_file(opt, losses['test'], "losses", split="test")
def set_logger(self):
if cfg.toy:
self.logger = SummaryWriter(utils.make_name(
self.opt, prefix="garbage/logs/", eval_=True, do_epoch=False))
else:
self.logger = SummaryWriter(utils.make_name(
self.opt, prefix="logs/", eval_=True, do_epoch=False))
print("Logging Tensorboard Files at: {}".format(self.logger.log_dir))
def stop_logger(self):
self.logger.close()
def run(self):
self.set_logger()
self.count = 0
for epoch in range(self.epochs):
self.model.train()
self.opt.train.dynamic.epoch += 1
self.epoch()
self.stop_logger()
def epoch(self):
nums = self.reset_losses()
# Initialize progress bar
bar = utils.initialize_progress_bar(
self.data_loader.sequences["train"])
reset = False
while not reset:
loss, nums, reset = self.do_forward_pass(nums)
self.do_backward_pass(loss)
self.update_parameters()
bar.update(self.opt.train.dynamic.bs)
self.count += 1
for loss_name in self.losses["train"]:
self.logger.add_scalar(
"train/{}".format(loss_name),
loss.item() / self.opt.train.dynamic.bs,
self.count)
if cfg.toy and self.counter(nums) > 300:
break
with torch.no_grad():
self.run_evaluation_cycle()
self.log_losses(self.opt, self.losses)
self.update_top_score(self.opt)
self.save_model(self.get_tracked_score())
self.data_loader.reset_offsets("train")
def run_evaluation_cycle(self):
for split in ["dev", "test"]:
self.evaluator.validate(
self.opt.train.dynamic.epoch, split,
self.losses[split])
if self.do_gen:
gen.do_gen_run(
self.opt, self.generator, self.opt.train.dynamic.epoch,
split, self.losses[split])
iter_num = self.opt.train.dynamic.epoch
for loss_name in self.losses[split]:
self.logger.add_scalar(
"{}/{}".format(split, loss_name),
self.losses[split][loss_name][iter_num],
iter_num)
def clip_gradients(self):
if self.opt.train.static.clip:
torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.opt.train.static.clip)
def do_forward_pass(self, nums):
token_loss, nums, reset = self.batch(
self.opt, nums, self.losses["train"],
self.batch_variables)
return token_loss, nums, reset
def do_backward_pass(self, loss):
loss.backward()
def update_parameters(self):
if self.opt.model == "lstm":
self.clip_gradients()
self.optimizer.step()
self.optimizer.zero_grad()
def reset_losses(self):
        # Strip trailing "_micro"/"_macro" to recover the base loss names
        loss_names = set([i.rstrip("maicro").rstrip("_") for
                          i in self.losses["train"].keys()])
        return self.initialize_losses(list(loss_names))
class IteratorTrainer(Trainer):
def __init__(self, opt, meta, data_loader, model, optimizer):
super(IteratorTrainer, self).__init__(
opt, meta, data_loader, model, optimizer)
self.iters = meta.cycle
self.total_iters = meta.iterations
def run(self):
self.set_logger()
# Initialize progress bar
bar = utils.set_progress_bar(self.total_iters)
for cycle_num in range(int(self.total_iters / self.iters)):
self.model.train()
self.cycle(bar, cycle_num)
with torch.no_grad():
self.run_evaluation_cycle()
self.log_losses(self.opt, self.losses)
self.update_top_score(self.opt)
self.save_model(self.get_tracked_score())
self.stop_logger()
def cycle(self, bar, cycle_num):
nums = self.reset_losses()
print(self.losses["train"])
for i in range(1, self.iters + 1):
loss, nums, reset = self.do_forward_pass(nums)
self.do_backward_pass(loss)
self.update_parameters()
self.opt.train.dynamic.epoch += 1
for loss_name in self.losses["train"]:
self.logger.add_scalar(
"train/{}".format(loss_name),
loss.item() / self.opt.train.dynamic.bs,
self.opt.train.dynamic.epoch)
bar.update(1)
if cfg.toy and i > 10:
break
if reset:
self.data_loader.reset_offsets("train")
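# Lifecycle sketch (illustrative): concrete trainers are constructed through
# their module's make_trainer (see atomic_train.py / conceptnet_train.py)
# and driven by run(), which alternates training cycles with evaluation.
#
#   trainer = make_trainer(opt, meta, data_loader, model, optimizer)
#   trainer.set_evaluator(opt, model, data_loader)
#   trainer.run()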
| comet-public-master | comet/train/train.py |
import torch
import sys
import os
import time
from comet.data.utils import TextEncoder
import comet.data.config as cfg
import comet.data.data as data
import comet.models.models as models
from comet.evaluate.sampler import BeamSampler, GreedySampler, TopKSampler
import comet.utils as utils
def set_compute_mode(mode):
if mode == "auto" and torch.cuda.is_available():
cfg.device = "cuda"
elif mode.isdigit():
cfg.device = int(mode)
elif mode in ["cpu", "cuda"]:
cfg.device = mode
    else:
        raise ValueError("Unrecognized compute mode: {}".format(mode))
print("Pushing units to: {}".format(cfg.device))
class BaseDataLoader(object):
def __init__(self, opt, vocab):
self.vocab_encoder = vocab
self.vocab_decoder = {j: i for i, j in self.vocab_encoder.items()}
class ConceptNetBaseDataLoader(BaseDataLoader):
def __init__(self, opt, vocab):
super(ConceptNetBaseDataLoader, self).__init__(opt, vocab)
self.max_e1 = opt.data.get("maxe1", 10)
self.max_e2 = opt.data.get("maxe2", 15) + 1
self.max_r = opt.data.get("maxr", 5)
class AtomicBaseDataLoader(BaseDataLoader):
def __init__(self, opt, vocab):
super(AtomicBaseDataLoader, self).__init__(opt, vocab)
self.max_event = opt.data.get("maxe1", 17)
self.max_effect = opt.data.get("maxe2", 34) + 1
def load_model_file(model_file):
model_stuff = data.load_checkpoint(model_file)
opt = utils.convert_nested_dict_to_DD(model_stuff["opt"])
state_dict = model_stuff["state_dict"]
vocab = model_stuff['vocab']
return opt, state_dict, vocab
def load_data(dataset, opt, vocab, vocabulary_path):
    if dataset == "atomic":
        data_loader = load_atomic_data(opt, vocab)
    elif dataset == "conceptnet":
        data_loader = load_conceptnet_data(opt, vocab)
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
# Initialize TextEncoder
    encoder_path = os.path.join(vocabulary_path, "encoder_bpe_40000.json")
    bpe_path = os.path.join(vocabulary_path, "vocab_40000.bpe")
text_encoder = TextEncoder(encoder_path, bpe_path)
text_encoder.encoder = data_loader.vocab_encoder
text_encoder.decoder = data_loader.vocab_decoder
return data_loader, text_encoder
def load_atomic_data(opt, vocab):
    data_loader = AtomicBaseDataLoader(opt, vocab)
    return data_loader
def load_conceptnet_data(opt, vocab):
    data_loader = ConceptNetBaseDataLoader(opt, vocab)
    return data_loader
def make_model(opt, n_vocab, n_ctx, state_dict):
model = models.make_model(
opt, n_vocab, n_ctx, None, load=False,
return_acts=True, return_probs=False)
model.load_state_dict(state_dict)
model.to(cfg.device)
model.eval()
return model
def set_sampler(opt, sampling_algorithm, data_loader):
if "beam" in sampling_algorithm:
opt.eval.bs = int(sampling_algorithm.split("-")[1])
sampler = BeamSampler(opt, data_loader)
elif "topk" in sampling_algorithm:
# print("Still bugs in the topk sampler. Use beam or greedy instead")
# raise NotImplementedError
opt.eval.k = int(sampling_algorithm.split("-")[1])
sampler = TopKSampler(opt, data_loader)
else:
sampler = GreedySampler(opt, data_loader)
return sampler
def get_atomic_sequence(input_event, model, sampler, data_loader, text_encoder, category):
if isinstance(category, list):
outputs = {}
for cat in category:
new_outputs = get_atomic_sequence(
input_event, model, sampler, data_loader, text_encoder, cat)
outputs.update(new_outputs)
return outputs
elif category == "all":
outputs = {}
# start = time.time()
for category in data.atomic_data.all_categories:
new_outputs = get_atomic_sequence(
input_event, model, sampler, data_loader, text_encoder, category)
outputs.update(new_outputs)
# end = time.time()
# print("total time for all categories: {} s".format(end - start))
return outputs
else:
sequence_all = {}
sequence_all["event"] = input_event
sequence_all["effect_type"] = category
        with torch.no_grad():
            batch = set_atomic_inputs(
                input_event, category, data_loader, text_encoder)
            sampling_result = sampler.generate_sequence(
                batch, model, data_loader, data_loader.max_event +
                data.atomic_data.num_delimiter_tokens["category"],
                data_loader.max_effect -
                data.atomic_data.num_delimiter_tokens["category"])
sequence_all['beams'] = sampling_result["beams"]
print_atomic_sequence(sequence_all)
return {category: sequence_all}
def print_atomic_sequence(sequence_object):
input_event = sequence_object["event"]
category = sequence_object["effect_type"]
print("Input Event: {}".format(input_event))
print("Target Effect: {}".format(category))
print("")
print("Candidate Sequences:")
for beam in sequence_object["beams"]:
print(beam)
print("")
print("====================================================")
print("")
def set_atomic_inputs(input_event, category, data_loader, text_encoder):
XMB = torch.zeros(1, data_loader.max_event + 1).long().to(cfg.device)
prefix, suffix = data.atomic_data.do_example(text_encoder, input_event, None, True, None)
XMB[:, :len(prefix)] = torch.LongTensor(prefix)
XMB[:, -1] = torch.LongTensor([text_encoder.encoder["<{}>".format(category)]])
batch = {}
batch["sequences"] = XMB
batch["attention_mask"] = data.atomic_data.make_attention_mask(XMB)
return batch
def get_conceptnet_sequence(e1, model, sampler, data_loader, text_encoder, relation, force=False):
if isinstance(relation, list):
outputs = {}
for rel in relation:
new_outputs = get_conceptnet_sequence(
e1, model, sampler, data_loader, text_encoder, rel)
outputs.update(new_outputs)
return outputs
elif relation == "all":
outputs = {}
for relation in data.conceptnet_data.conceptnet_relations:
new_outputs = get_conceptnet_sequence(
e1, model, sampler, data_loader, text_encoder, relation)
outputs.update(new_outputs)
return outputs
else:
sequence_all = {}
sequence_all["e1"] = e1
sequence_all["relation"] = relation
with torch.no_grad():
if data_loader.max_r != 1:
relation_sequence = data.conceptnet_data.split_into_words[relation]
else:
relation_sequence = "<{}>".format(relation)
batch, abort = set_conceptnet_inputs(
e1, relation_sequence, text_encoder,
data_loader.max_e1, data_loader.max_r, force)
if abort:
return {relation: sequence_all}
sampling_result = sampler.generate_sequence(
batch, model, data_loader,
data_loader.max_e1 + data_loader.max_r,
data_loader.max_e2)
sequence_all['beams'] = sampling_result["beams"]
print_conceptnet_sequence(sequence_all)
return {relation: sequence_all}
def set_conceptnet_inputs(input_event, relation, text_encoder, max_e1, max_r, force):
abort = False
e1_tokens, rel_tokens, _ = data.conceptnet_data.do_example(text_encoder, input_event, relation, None)
if len(e1_tokens) > max_e1:
if force:
XMB = torch.zeros(1, len(e1_tokens) + max_r).long().to(cfg.device)
else:
XMB = torch.zeros(1, max_e1 + max_r).long().to(cfg.device)
return {}, True
else:
XMB = torch.zeros(1, max_e1 + max_r).long().to(cfg.device)
XMB[:, :len(e1_tokens)] = torch.LongTensor(e1_tokens)
XMB[:, max_e1:max_e1 + len(rel_tokens)] = torch.LongTensor(rel_tokens)
batch = {}
batch["sequences"] = XMB
batch["attention_mask"] = data.conceptnet_data.make_attention_mask(XMB)
return batch, abort
def print_conceptnet_sequence(sequence_object):
e1 = sequence_object["e1"]
relation = sequence_object["relation"]
print("Input Entity: {}".format(e1))
print("Target Relation: {}".format(relation))
print("")
print("Candidate Sequences:")
for beam in sequence_object["beams"]:
print(beam)
print("")
print("====================================================")
print("")
def print_help(data):
print("")
if data == "atomic":
print("Provide a seed event such as \"PersonX goes to the mall\"")
print("Don't include names, instead replacing them with PersonX, PersonY, etc.")
print("The event should always have PersonX included")
if data == "conceptnet":
print("Provide a seed entity such as \"go to the mall\"")
print("Because the model was trained on lemmatized entities,")
print("it works best if the input entities are also lemmatized")
print("")
def print_relation_help(data):
print_category_help(data)
def print_category_help(data):
print("")
if data == "atomic":
print("Enter a possible effect type from the following effect types:")
print("all - compute the output for all effect types {{oEffect, oReact, oWant, xAttr, xEffect, xIntent, xNeed, xReact, xWant}}")
print("oEffect - generate the effect of the event on participants other than PersonX")
print("oReact - generate the reactions of participants other than PersonX to the event")
print("oEffect - generate what participants other than PersonX may want after the event")
elif data == "conceptnet":
print("Enter a possible relation from the following list:")
print("")
print('AtLocation')
print('CapableOf')
print('Causes')
print('CausesDesire')
print('CreatedBy')
print('DefinedAs')
print('DesireOf')
print('Desires')
print('HasA')
print('HasFirstSubevent')
print('HasLastSubevent')
print('HasPainCharacter')
print('HasPainIntensity')
print('HasPrerequisite')
print('HasProperty')
print('HasSubevent')
print('InheritsFrom')
print('InstanceOf')
print('IsA')
print('LocatedNear')
print('LocationOfAction')
print('MadeOf')
print('MotivatedByGoal')
print('NotCapableOf')
print('NotDesires')
print('NotHasA')
print('NotHasProperty')
print('NotIsA')
print('NotMadeOf')
print('PartOf')
print('ReceivesAction')
print('RelatedTo')
print('SymbolOf')
print('UsedFor')
print("")
print("NOTE: Capitalization is important")
    else:
        raise ValueError("Unknown dataset: {}".format(data))
print("")
def print_sampling_help():
print("")
print("Provide a sampling algorithm to produce the sequence with from the following:")
print("")
print("greedy")
print("beam-# where # is the beam size")
print("topk-# where # is k")
print("")
| comet-public-master | comet/interactive/functions.py |
import sys
import os
import argparse
import comet.interactive.functions as interactive
sampling_mapping = {
"b10": "beam-10",
"b5": "beam-5",
"g": "greedy"
}
def parse_input_string(string):
objects = string.split("|")
relations = objects[1]
    if not relations or relations == "all":
        final_relations = [
            'AtLocation', 'CapableOf', 'Causes', 'CausesDesire',
            'CreatedBy', 'DefinedAs', 'Desires', 'HasA', 'HasFirstSubevent',
            'HasLastSubevent', 'HasPrerequisite', 'HasProperty', 'HasSubevent',
            'IsA', 'MadeOf', 'MotivatedByGoal', 'PartOf', 'ReceivesAction',
            'SymbolOf', 'UsedFor'
        ]
    else:
        final_relations = relations.split(",")
sampling = sampling_mapping[objects[2]]
sequence = objects[0]
return sequence, final_relations, sampling
def format_output_string(text_sequence, sequences):
print_string = []
print_string.append("<h3>{}</h3>".format(text_sequence))
for relation, stuff in sequences.items():
print_string.append("<b>{}</b>".format(relation))
for i, sequence in enumerate(stuff["beams"]):
print_string.append("({}) {}".format(i + 1, sequence))
print_string.append("")
print_string.append("")
return "<br>".join(print_string)
class DemoModel(object):
def __init__(self, model_file, vocabulary_path="model/"):
opt, state_dict, vocab = interactive.load_model_file(model_file)
data_loader, text_encoder = interactive.load_data(
"conceptnet", opt, vocab, vocabulary_path)
self.opt = opt
self.data_loader = data_loader
self.text_encoder = text_encoder
n_ctx = data_loader.max_e1 + data_loader.max_e2 + data_loader.max_r
n_vocab = len(text_encoder.encoder) + n_ctx
model = interactive.make_model(opt, n_vocab, n_ctx, state_dict)
self.model = model
def predict(self, text_sequence, relations, sampling_algorithm, verbose=True):
sampler = interactive.set_sampler(
self.opt, sampling_algorithm, self.data_loader)
sequences = interactive.get_conceptnet_sequence(
text_sequence, self.model, sampler, self.data_loader,
self.text_encoder, relations)
return sequences
def getOutput(self, text_string):
text_sequence, relations, sampling_algorithm = parse_input_string(text_string)
model_output_sequences = self.predict(
text_sequence, relations, sampling_algorithm, verbose=True)
return format_output_string(text_sequence, model_output_sequences)
if __name__ == "__main__":
from server import run
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=str, default="cpu")
parser.add_argument("--model_file", type=str, default="models/conceptnet-generation/iteration-500-100000/transformer/rel_language-trainsize_100-devversion_12-maxe1_10-maxe2_15/model_transformer-nL_12-nH_12-hSize_768-edpt_0.1-adpt_0.1-rdpt_0.1-odpt_0.1-pt_gpt-afn_gelu-init_pt-vSize_40545/exp_generation-seed_123-l2_0.01-vl2_T-lrsched_warmup_linear-lrwarm_0.002-clip_1-loss_nll-b2_0.999-b1_0.9-e_1e-08/bs_1-smax_40-sample_greedy-numseq_1-gs_full-es_full-categories_None/1e-05_adam_64_15500.pickle")
parser.add_argument("--sampling_algorithm", type=str, default="help")
args = parser.parse_args()
interactive.set_compute_mode(args.device)
myNLGmodel = DemoModel(args.model_file)
run(nlg=myNLGmodel, port=8001)
| comet-public-master | comet/interactive/conceptnet_demo.py |
comet-public-master | comet/interactive/__init__.py |
|
import sys
import os
import argparse
import comet.interactive.functions as interactive
descriptions = {
"oEffect": "The effect of the event on participants besides PersonX might be: ",
"oReact": "Other participants may react to the event in this way: ",
"oWant": "After the event, other participants may want: ",
"xAttr": "Because of the event, we can say that PersonX is: ",
"xEffect": "The effect of the event on PersonX might be: ",
"xIntent": "The intent of PersonX in participating in this event is: ",
"xNeed": "To participate in the event, PersonX needs: ",
"xReact": "PersonX may react to the event in this way: ",
"xWant": "After the event, PersonX may want: ",
}
sampling_mapping = {
"b10": "beam-10",
"b5": "beam-5",
"g": "greedy"
}
def parse_input_string(string):
objects = string.split("|")
categories = objects[1]
if not categories or categories == "all":
final_categories = list(descriptions.keys())
else:
final_categories = categories.split(",")
sampling = sampling_mapping[objects[2]]
sequence = objects[0]
return sequence, final_categories, sampling
def format_output_string(text_sequence, sequences):
print_string = []
print_string.append("<h3>{}</h3>".format(text_sequence))
for category, stuff in sequences.items():
print_string.append("<b>{}</b>".format(descriptions[category]))
for i, sequence in enumerate(stuff["beams"]):
print_string.append("({}) {}".format(i + 1, sequence))
print_string.append("")
print_string.append("")
return "<br>".join(print_string)
class DemoModel(object):
def __init__(self, model_file, vocabulary_path="model/"):
opt, state_dict, vocab = interactive.load_model_file(model_file)
data_loader, text_encoder = interactive.load_data(
"atomic", opt, vocab, vocabulary_path)
self.opt = opt
self.data_loader = data_loader
self.text_encoder = text_encoder
n_ctx = data_loader.max_event + data_loader.max_effect
n_vocab = len(text_encoder.encoder) + n_ctx
model = interactive.make_model(opt, n_vocab, n_ctx, state_dict)
self.model = model
def predict(self, text_sequence, categories, sampling_algorithm, verbose=True):
sampler = interactive.set_sampler(
self.opt, sampling_algorithm, self.data_loader)
sequences = interactive.get_atomic_sequence(
text_sequence, self.model, sampler, self.data_loader,
self.text_encoder, categories)
return sequences
def getOutput(self, text_string):
print(text_string)
text_sequence, categories, sampling_algorithm = parse_input_string(text_string)
model_output_sequences = self.predict(
text_sequence, categories, sampling_algorithm, verbose=True)
return format_output_string(text_sequence, model_output_sequences)
if __name__ == "__main__":
    from server import run
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=str, default="cpu")
parser.add_argument("--model_file", type=str, default="models/atomic-generation/iteration-500-50000/transformer/categories_oEffect#oReact#oWant#xAttr#xEffect#xIntent#xNeed#xReact#xWant/model_transformer-nL_12-nH_12-hSize_768-edpt_0.1-adpt_0.1-rdpt_0.1-odpt_0.1-pt_gpt-afn_gelu-init_pt-vSize_40542/exp_generation-seed_123-l2_0.01-vl2_T-lrsched_warmup_linear-lrwarm_0.002-clip_1-loss_nll-b2_0.999-b1_0.9-e_1e-08/bs_1-smax_40-sample_greedy-numseq_1-gs_1000-es_1000-categories_oEffect#oReact#oWant#xAttr#xEffect#xIntent#xNeed#xReact#xWant/6.25e-05_adam_64_22000.pickle")
args = parser.parse_args()
interactive.set_compute_mode(args.device)
myNLGmodel = DemoModel(args.model_file)
run(nlg=myNLGmodel)
| comet-public-master | comet/interactive/atomic_demo.py |
import json
from comet.utils import DD
device = "cuda"
save = False
test_save = False
toy = False
do_gen = False
save_strategy = "all"
def get_parameters(opt, exp_type="model"):
params = DD()
params.net = DD()
params.mle = 0
params.dataset = opt.dataset
params.net = get_net_parameters(opt)
params.train = get_training_parameters(opt)
params.model = params.net.model
params.exp = opt.exp
params.data = get_data_parameters(opt, params.exp, params.dataset)
params.eval = get_eval_parameters(opt, params.data.get("categories", None))
meta = DD()
params.trainer = opt.trainer
meta.iterations = int(opt.iterations)
meta.cycle = opt.cycle
params.cycle = opt.cycle
params.iters = int(opt.iterations)
global toy
toy = opt.toy
global do_gen
do_gen = opt.do_gen
global save
save = opt.save
global test_save
test_save = opt.test_save
global save_strategy
save_strategy = opt.save_strategy
print(params)
return params, meta
def get_eval_parameters(opt, force_categories=None):
evaluate = DD()
if opt.eval_sampler == "beam":
evaluate.bs = opt.beam_size
elif opt.eval_sampler == "greedy":
evaluate.bs = 1
elif opt.eval_sampler == "topk":
evaluate.k = opt.topk_size
evaluate.smax = opt.gen_seqlength
evaluate.sample = opt.eval_sampler
evaluate.numseq = opt.num_sequences
evaluate.gs = opt.generate_sequences
evaluate.es = opt.evaluate_sequences
if opt.dataset == "atomic":
if "eval_categories" in opt and force_categories is None:
evaluate.categories = opt.eval_categories
else:
evaluate.categories = force_categories
return evaluate
def get_data_parameters(opt, experiment, dataset):
data = DD()
if dataset == "atomic":
data.categories = sorted(opt.categories)
elif dataset == "conceptnet":
data.rel = opt.relation_format
data.trainsize = opt.training_set_size
data.devversion = opt.development_set_versions_to_use
data.maxe1 = opt.max_event_1_size
data.maxe2 = opt.max_event_2_size
return data
def get_training_parameters(opt):
train = DD()
static = DD()
static.exp = opt.exp
static.seed = opt.random_seed
# weight decay
static.l2 = opt.l2
static.vl2 = True
static.lrsched = opt.learning_rate_schedule # 'warmup_linear'
static.lrwarm = opt.learning_rate_warmup # 0.002
# gradient clipping
static.clip = opt.clip
# what loss function to use
static.loss = opt.loss
dynamic = DD()
dynamic.lr = opt.learning_rate # learning rate
dynamic.bs = opt.batch_size # batch size
# optimizer to use {adam, rmsprop, etc.}
dynamic.optim = opt.optimizer
# rmsprop
# alpha is interpolation average
static.update(opt[dynamic.optim])
train.static = static
train.dynamic = dynamic
return train
def get_net_parameters(opt):
net = DD()
net.model = opt.model
net.nL = opt.num_layers
net.nH = opt.num_heads
net.hSize = opt.hidden_dim
net.edpt = opt.embedding_dropout
net.adpt = opt.attention_dropout
net.rdpt = opt.residual_dropout
net.odpt = opt.output_dropout
net.pt = opt.pretrain
net.afn = opt.activation
# how to intialize parameters
# format is gauss+{}+{}.format(mean, std)
# n = the default initialization pytorch
net.init = opt.init
return net
def read_config(file_):
config = DD()
print(file_)
for k, v in file_.items():
if v == "True" or v == "T" or v == "true":
config[k] = True
elif v == "False" or v == "F" or v == "false":
config[k] = False
elif type(v) == dict:
config[k] = read_config(v)
else:
config[k] = v
return config
def load_config(name):
with open(name, "r") as f:
config = json.load(f)
return config
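if __name__ == "__main__":
    # Quick check of read_config's string-to-boolean coercion, using a toy
    # nested dict in place of a loaded JSON config.
    toy_config = read_config({"toy": "False", "net": {"pretrain": "T"}, "lr": 1e-4})
    assert toy_config["toy"] is False
    assert toy_config["net"]["pretrain"] is True
    assert toy_config["lr"] == 1e-4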
| comet-public-master | comet/data/config.py |
import comet.data.utils as data_utils
import comet.data.atomic as adata
import comet.data.config as cfg
import torch
import random
from tqdm import tqdm
def map_name(name, opt):
if name == "train":
return "train{}k.txt".format(opt.trainsize)
elif name == "test":
return "test.txt"
else:
return "dev{}.txt".format(opt.devversion)
conceptnet_relations = [
'AtLocation', 'CapableOf', 'Causes', 'CausesDesire',
'CreatedBy', 'DefinedAs', 'DesireOf', 'Desires', 'HasA',
'HasFirstSubevent', 'HasLastSubevent', 'HasPainCharacter',
'HasPainIntensity', 'HasPrerequisite', 'HasProperty',
'HasSubevent', 'InheritsFrom', 'InstanceOf', 'IsA',
'LocatedNear', 'LocationOfAction', 'MadeOf', 'MotivatedByGoal',
'NotCapableOf', 'NotDesires', 'NotHasA', 'NotHasProperty',
'NotIsA', 'NotMadeOf', 'PartOf', 'ReceivesAction', 'RelatedTo',
'SymbolOf', 'UsedFor'
]
split_into_words = {
'AtLocation': "at location",
'CapableOf': "capable of",
'Causes': "causes",
'CausesDesire': "causes desire",
'CreatedBy': "created by",
'DefinedAs': "defined as",
'DesireOf': "desire of",
'Desires': "desires",
'HasA': "has a",
'HasFirstSubevent': "has first subevent",
'HasLastSubevent': "has last subevent",
'HasPainCharacter': "has pain character",
'HasPainIntensity': "has pain intensity",
'HasPrerequisite': "has prequisite",
'HasProperty': "has property",
'HasSubevent': "has subevent",
'InheritsFrom': "inherits from",
'InstanceOf': 'instance of',
'IsA': "is a",
'LocatedNear': "located near",
'LocationOfAction': "location of action",
'MadeOf': "made of",
'MotivatedByGoal': "motivated by goal",
'NotCapableOf': "not capable of",
'NotDesires': "not desires",
'NotHasA': "not has a",
'NotHasProperty': "not has property",
'NotIsA': "not is a",
'NotMadeOf': "not made of",
'PartOf': "part of",
'ReceivesAction': "receives action",
'RelatedTo': "related to",
'SymbolOf': "symbol of",
'UsedFor': "used for"
}
class GenerationDataLoader(adata.DataLoader):
def __init__(self, opt, categories=None):
super(GenerationDataLoader, self).__init__(opt)
self.opt = opt
for split in self.data:
self.data[split] = {"total": []}
self.offsets[split] = {"total": 0}
self.vocab_encoder = None
self.vocab_decoder = None
self.special_chars = None
self.max_e1 = None
self.max_e2 = None
self.max_r = None
def offset_summary(self, split):
return sum(self.offsets[split].values())
def load_data(self, path):
if ".pickle" in path:
print("Loading data from: {}".format(path))
data_utils.load_existing_data_loader(self, path)
return True
for split in self.data:
file_name = map_name(split, self.opt.data)
if split != "dev" or self.opt.data.devversion != "12":
string_tuples = open("{}/{}".format(
path, file_name), "r").read().split("\n")
tuples = [x.split("\t") for x in string_tuples if x]
else:
string_tuples = open("{}/{}".format(
path, "dev1.txt"), "r").read().split("\n")
tuples = [x.split("\t") for x in string_tuples if x]
string_tuples = open("{}/{}".format(
path, "dev2.txt"), "r").read().split("\n")
tuples += [x.split("\t") for x in string_tuples if x]
if split in ["dev", "test"]:
if self.opt.data.rel == "language":
self.data[split]["total"] = \
[(i[1].lower().strip(), split_into_words[i[0]],
i[2].lower().strip(), int(i[3])) for i in tuples]
self.data[split]["positive"] = \
[(i[1].lower().strip(), split_into_words[i[0]],
i[2].lower().strip(), int(i[3])) for i in tuples if int(i[3])]
self.data[split]["negative"] = \
[(i[1].lower().strip(), split_into_words[i[0]],
i[2].lower().strip(), int(i[3])) for i in tuples if not int(i[3])]
elif self.opt.data.rel == "relation":
self.data[split]["total"] = \
[(i[1].lower().strip(), "<{}>".format(i[0]),
i[2].lower().strip(), int(i[3])) for i in tuples]
self.data[split]["positive"] = \
[(i[1].lower().strip(), "<{}>".format(i[0]),
i[2].lower().strip(), int(i[3])) for i in tuples if int(i[3])]
self.data[split]["negative"] = \
[(i[1].lower().strip(), "<{}>".format(i[0]),
i[2].lower().strip(), int(i[3])) for i in tuples if not int(i[3])]
else:
if self.opt.data.rel == "language":
self.data[split]["total"] = \
[(i[1].lower().strip(), split_into_words[i[0]],
i[2].lower().strip(), i[3]) for i in tuples]
elif self.opt.data.rel == "relation":
self.data[split]["total"] = \
[(i[1].lower().strip(), "<{}>".format(i[0]),
i[2].lower().strip(), i[3]) for i in tuples]
return False
def make_tensors(self, text_encoder, special,
splits=["train", "dev", "test"], test=False):
self.vocab_encoder = text_encoder.encoder
self.vocab_decoder = text_encoder.decoder
self.special_chars = special
sequences = {}
for split in splits:
sequences[split], discarded = get_generation_sequences(
self.data, split, text_encoder, test, self.opt.data.maxe1,
self.opt.data.maxe2)
if split == "train":
self.data[split]["total"] = [j for i, j in enumerate(
self.data[split]["total"]) if i not in set(discarded)]
self.masks[split]["total"] = [(len(i[0]), len(i[1]), len(i[2])) for
i in sequences[split]]
self.max_e1 = max([max([l[0] for l in self.masks[split]["total"]])
for split in self.masks])
self.max_r = max([max([l[1] for l in self.masks[split]["total"]])
for split in self.masks])
self.max_e2 = max([max([l[2] for l in self.masks[split]["total"]])
for split in self.masks])
        print("max_e1: {} | max_r: {} | max_e2: {}".format(
            self.max_e1, self.max_r, self.max_e2))
for split in splits:
num_elements = len(sequences[split])
self.sequences[split]["total"] = torch.LongTensor(
num_elements, self.max_e1 + self.max_e2 + self.max_r).fill_(0)
for i, seq in enumerate(sequences[split]):
self.sequences[split]["total"][i, :len(seq[0])] = \
torch.LongTensor(seq[0])
start_r = self.max_e1
end_r = self.max_e1 + len(seq[1])
self.sequences[split]["total"][i, start_r:end_r] = \
torch.LongTensor(seq[1])
start_e2 = self.max_e1 + self.max_r
end_e2 = self.max_e1 + self.max_r + len(seq[2])
self.sequences[split]["total"][i, start_e2:end_e2] = \
torch.LongTensor(seq[2])
if split in ["test", "dev"]:
print(split)
self.sequences[split]["negative"] = \
self.sequences[split]["total"].index_select(
0, torch.LongTensor([i for i, j in enumerate(
self.data[split]['total']) if not j[3]]))
self.sequences[split]["positive"] = \
self.sequences[split]["total"].index_select(
0, torch.LongTensor([i for i, j in enumerate(
self.data[split]['total']) if j[3]]))
def sample_batch(self, split, bs, cat="total", idxs=None):
offset = self.offsets[split][cat]
batch = {}
# Decided not to reduce computation on here because it's all parallel
# anyway and we don't want to run out of memory in cases where we
# don't see the longest version quickly enough
if idxs:
seqs = self.sequences[split][cat].index_select(
0, torch.LongTensor(idxs).to(
self.sequences[split][cat].device))
else:
seqs = self.sequences[split][cat][offset:offset + bs]
batch["sequences"] = seqs.to(cfg.device)
batch["attention_mask"] = make_attention_mask(seqs)
batch["loss_mask"] = make_loss_mask(seqs, self.max_e1 + self.max_r)
batch["key"] = (cat, offset, offset + bs)
offset += seqs.size(0)
self.offsets[split][cat] = offset
if split == "train" and offset + bs > len(self.sequences[split][cat]):
return batch, True
elif offset >= len(self.sequences[split][cat]):
return batch, True
else:
return batch, False
def reset_offsets(self, splits=["train", "test", "dev"],
shuffle=True, keys=None):
if isinstance(splits, str):
splits = [splits]
for split in splits:
if keys is None:
keys = ["total", "positive", "negative"]
for key in keys:
self.offsets[split][key] = 0
if shuffle:
self.shuffle_sequences(split, keys)
def shuffle_sequences(self, split="train", keys=None):
if keys is None:
# print(type(self.data))
# print(type(self.data.keys()))
keys = self.data[split].keys()
for key in keys:
if key in ["positive", "negative"]:
continue
idxs = list(range(len(self.data[split][key])))
random.shuffle(idxs)
self.sequences[split][key] = \
self.sequences[split][key].index_select(
0, torch.LongTensor(idxs))
temp = [self.data[split][key][i] for i in idxs]
self.data[split][key] = temp
temp = [self.masks[split][key][i] for i in idxs]
self.masks[split][key] = temp
def make_attention_mask(sequences):
return (sequences != 0).float().to(cfg.device)
def make_loss_mask(sequences, max_event):
# print(sequences.size())
mask = (sequences != 0).float()
mask[:, :max_event] = 0
return mask[:, 1:].to(cfg.device)
def get_generation_sequences(data, split, text_encoder, test,
max_e1=10, max_e2=15):
sequences = []
count = 0
final_event1 = None
final_event2 = None
final_relation = None
discarded = []
for event1, relation, event2, _ in tqdm(data[split]["total"]):
e1, r, e2 = do_example(text_encoder, event1, relation, event2)
        # only filter over-long examples for train; dev/test must keep every
        # example so self.data and self.sequences stay index-aligned
        if split == "train" and (len(e1) > max_e1 or
                                 len(e2) > max_e2):
discarded.append(count)
count += 1
continue
final = compile_final_sequence(
e1, e2, r, text_encoder)
sequences.append(final)
count += 1
if count > 10 and test:
break
return sequences, discarded
def do_example(text_encoder, event1, relation, event2):
final_event1 = text_encoder.encode([event1], verbose=False)[0]
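    # relations that are stored as special vocab tokens (e.g. "<AtLocation>")
    # contain uppercase characters; plain-text relations are BPE-encoded instead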
if relation.lower() != relation:
final_relation = [text_encoder.encoder[relation]]
else:
final_relation = text_encoder.encode(
[relation], verbose=False)[0]
if event2 is not None:
final_event2 = text_encoder.encode([event2], verbose=False)[0]
else:
final_event2 = None
return final_event1, final_relation, final_event2
def compile_final_sequence(final_event1, final_event2, final_relation, text_encoder):
final = []
final.append(final_event1)
final.append(final_relation)
final.append(final_event2)
final[-1].append(text_encoder.encoder["<END>"])
return final
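# Editor's note (not in the original file): a compiled example is a list of
# three token-id lists [e1, relation, e2 + <END>]; make_tensors later copies
# them into the fixed windows [0:max_e1], [max_e1:max_e1 + max_r], and
# [max_e1 + max_r:] of a single zero-padded LongTensor row.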
| comet-public-master | comet/data/conceptnet.py |
comet-public-master | comet/data/__init__.py |
|
import re
import ftfy
import json
import spacy
import torch
from tqdm import tqdm
def load_existing_data_loader(data_loader, path):
old_data_loader = torch.load(path)
for attr in data_loader.__dict__.keys():
if attr not in old_data_loader.__dict__.keys():
continue
setattr(data_loader, attr, getattr(old_data_loader, attr))
################################################################################
#
# Code Below taken from HuggingFace pytorch-openai-lm repository
#
################################################################################
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
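# Editor's illustration (not in the original file): the BPE word for "low" is
# the tuple ('l', 'o', 'w</w>'), whose adjacent symbol pairs drive the merges.
def _demo_get_pairs():
    # -> {('l', 'o'), ('o', 'w</w>')}
    return get_pairs(('l', 'o', 'w</w>'))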
def text_standardize(text):
"""
fixes some issues the spacy tokenizer had on books corpus
also does some whitespace standardization
"""
text = text.replace('—', '-')
text = text.replace('–', '-')
text = text.replace('―', '-')
text = text.replace('…', '...')
text = text.replace('´', "'")
text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
text = re.sub(r'\s*\n\s*', ' \n ', text)
text = re.sub(r'[^\S\n]+', ' ', text)
return text.strip()
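# Editor's illustration (not in the original file): exotic dashes and ellipses
# are normalized and punctuation is padded with spaces.
def _demo_text_standardize():
    # -> 'hello... world - ok !'
    return text_standardize('hello… world—ok!')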
class TextEncoder(object):
"""
mostly a wrapper for a public python bpe tokenizer
"""
def __init__(self, encoder_path, bpe_path):
self.nlp = spacy.load(
'en', disable=['parser', 'tagger', 'ner', 'textcat'])
self.encoder = json.load(open(encoder_path))
self.decoder = {v: k for k, v in self.encoder.items()}
merges = open(bpe_path, encoding='utf-8').read().split('\n')[1:-1]
merges = [tuple(merge.split()) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + '</w>',)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
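        # greedily apply the learned merges: always merge the adjacent symbol
        # pair with the lowest (earliest-learned) rank until none remain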
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(
pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
                    # `first` does not occur again; copy the tail and stop
new_word.extend(word[i:])
break
if (word[i] == first and i < len(word) - 1 and
word[i+1] == second):
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
    def encode(self, texts, verbose=True):
        texts_tokens = []
        # identical pipeline either way; verbose only adds a progress bar
        iterator = tqdm(texts, ncols=80, leave=False) if verbose else texts
        for text in iterator:
            text = self.nlp(text_standardize(ftfy.fix_text(text)))
            text_tokens = []
            for token in text:
                text_tokens.extend(
                    [self.encoder.get(t, 0) for t in
                     self.bpe(token.text.lower()).split(' ')])
            texts_tokens.append(text_tokens)
        return texts_tokens
| comet-public-master | comet/data/utils.py |
import comet.utils as utils
import comet.data.utils as data_utils
import comet.data.config as cfg
import pandas
import json
import random
import math
import torch
from tqdm import tqdm
all_categories = ["oEffect", "oReact", "oWant", "xAttr", "xEffect", "xIntent", "xNeed", "xReact", "xWant"]
def map_name(name):
if name == "train":
return "trn"
elif name == "test":
return "tst"
else:
return "dev"
class DataLoader(object):
def __init__(self, opt):
self.data = {}
self.data["train"] = {}
self.data["dev"] = {}
self.data["test"] = {}
self.sequences = {}
self.sequences["train"] = {}
self.sequences["dev"] = {}
self.sequences["test"] = {}
self.masks = {}
self.masks["train"] = {}
self.masks["dev"] = {}
self.masks["test"] = {}
self.offsets = {}
self.offsets["train"] = {}
self.offsets["dev"] = {}
self.offsets["test"] = {}
def offset_summary(self, split):
return self.offsets[split]["total"]
def do_take_partial_dataset(data_opts):
if data_opts.get("kr", None) is None:
return False
if data_opts.kr == 1:
return False
return True
def select_partial_dataset(data_opts, data):
num_selections = math.ceil(data_opts.kr * len(data))
return random.sample(data, num_selections)
class GenerationDataLoader(DataLoader):
def __init__(self, opt, categories):
super(GenerationDataLoader, self).__init__(opt)
self.categories = categories
self.opt = opt
for split in self.data:
self.data[split] = {"total": []}
self.offsets[split] = {"total": 0}
self.vocab_encoder = None
self.vocab_decoder = None
self.special_chars = None
self.max_event = None
self.max_effect = None
def load_data(self, path):
if ".pickle" in path:
print("Loading data from: {}".format(path))
data_utils.load_existing_data_loader(self, path)
return True
for split in self.data:
file_name = "v4_atomic_{}.csv".format(map_name(split))
df = pandas.read_csv("{}/{}".format(path, file_name), index_col=0)
df.iloc[:, :9] = df.iloc[:, :9].apply(
lambda col: col.apply(json.loads))
for cat in self.categories:
attr = df[cat]
self.data[split]["total"] += utils.zipped_flatten(zip(
attr.index, ["<{}>".format(cat)] * len(attr), attr.values))
if do_take_partial_dataset(self.opt.data):
self.data["train"]["total"] = select_partial_dataset(
self.opt.data, self.data["train"]["total"])
return False
def make_tensors(self, text_encoder, special,
splits=["train", "dev", "test"], test=False):
self.vocab_encoder = text_encoder.encoder
self.vocab_decoder = text_encoder.decoder
self.special_chars = special
sequences = {}
for split in splits:
sequences[split] = get_generation_sequences(
self.opt, self.data, split, text_encoder, test)
self.masks[split]["total"] = [(len(i[0]), len(i[1])) for
i in sequences[split]]
self.max_event = max([max([l[0] for l in self.masks[split]["total"]])
for split in self.masks])
self.max_effect = max([max([l[1] for l in self.masks[split]["total"]])
for split in self.masks])
print(self.max_event)
print(self.max_effect)
for split in splits:
num_elements = len(sequences[split])
self.sequences[split]["total"] = torch.LongTensor(
num_elements, self.max_event + self.max_effect).fill_(0)
for i, seq in enumerate(sequences[split]):
# print(self.sequences[split]["total"][i, :len(seq[0])].size())
# print(torch.FloatTensor(seq[0]).size())
self.sequences[split]["total"][i, :len(seq[0])] = \
torch.LongTensor(seq[0])
self.sequences[split]["total"][i, self.max_event:self.max_event + len(seq[1])] = \
torch.LongTensor(seq[1])
def sample_batch(self, split, bs, idxs=None):
offset = self.offsets[split]["total"]
batch = {}
# Decided not to reduce computation on here because it's all parallel
# anyway and we don't want to run out of memory in cases where we
# don't see the longest version quickly enough
if idxs:
seqs = self.sequences[split]["total"].index_select(
0, torch.LongTensor(idxs).to(
self.sequences[split]["total"].device))
else:
seqs = self.sequences[split]["total"][offset:offset + bs]
batch["sequences"] = seqs.to(cfg.device)
batch["attention_mask"] = make_attention_mask(seqs)
batch["loss_mask"] = make_loss_mask(
seqs, self.max_event, 1)
batch["key"] = ("total", offset, offset + bs)
offset += seqs.size(0)
self.offsets[split]["total"] = offset
if split == "train" and offset + bs > len(self.sequences[split]["total"]):
return batch, True
elif offset >= len(self.sequences[split]["total"]):
return batch, True
else:
return batch, False
def reset_offsets(self, splits=["train", "test", "dev"],
shuffle=True, keys=None):
if isinstance(splits, str):
splits = [splits]
for split in splits:
if keys is None:
keys = ["total"]
for key in keys:
self.offsets[split][key] = 0
if shuffle:
self.shuffle_sequences(split, keys)
def shuffle_sequences(self, split="train", keys=None):
if keys is None:
# print(type(self.data))
# print(type(self.data.keys()))
keys = self.data[split].keys()
for key in keys:
idxs = list(range(len(self.data[split][key])))
random.shuffle(idxs)
self.sequences[split][key] = \
self.sequences[split][key].index_select(
0, torch.LongTensor(idxs))
temp = [self.data[split][key][i] for i in idxs]
self.data[split][key] = temp
temp = [self.masks[split][key][i] for i in idxs]
self.masks[split][key] = temp
def prune_data_for_evaluation(data_loader, categories, split):
indices = []
for i, example in enumerate(data_loader.data[split]["total"]):
if example[1] in categories:
indices.append(i)
data_loader.masks[split]["total"] = [data_loader.masks[split]["total"][i]
for i in indices]
data_loader.sequences[split]["total"] = \
data_loader.sequences[split]["total"].index_select(
0, torch.LongTensor(indices))
data_loader.data[split]["total"] = [data_loader.data[split]["total"][i]
for i in indices]
def make_attention_mask(sequences):
return (sequences != 0).float().to(cfg.device)
def make_loss_mask(sequences, max_event, num_delim_tokens):
# print(num_delim_tokens)
# print(sequences.size())
mask = (sequences != 0).float()
mask[:, :max_event + num_delim_tokens] = 0
return mask[:, 1:].to(cfg.device)
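# Editor's illustration (not in the original file): with max_event = 3 and one
# delimiter token, only the effect tokens contribute to the loss, and the mask
# is shifted left by one to align with next-token prediction targets.
def _demo_make_loss_mask():
    seqs = torch.LongTensor([[5, 6, 0, 2, 7, 8, 0]])
    # -> tensor([[0., 0., 0., 1., 1., 0.]]) (on cfg.device)
    return make_loss_mask(seqs, max_event=3, num_delim_tokens=1)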
def find_underscore_length(seq):
start = "_"
while start in seq:
start += "_"
return start[:-1]
def handle_underscores(suffix, text_encoder, prefix=False):
encoder = text_encoder.encoder
if prefix:
tok = "___"
else:
tok = find_underscore_length(suffix)
suffix_parts = [i.strip() for i in suffix.split("{}".format(tok))]
to_flatten = []
for i, part in enumerate(suffix_parts):
if part:
to_flatten.append(text_encoder.encode([part], verbose=False)[0])
if i != len(suffix_parts) - 1 and suffix_parts[i+1]:
to_flatten.append([encoder["<blank>"]])
else:
to_flatten.append([encoder["<blank>"]])
final_suffix = utils.flatten(to_flatten)
return final_suffix
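# Editor's illustration (not in the original file): handle_underscores splices
# the <blank> token id between the BPE-encoded text pieces surrounding the
# underscore marker. The stub encoder below stands in for the real TextEncoder.
def _demo_handle_underscores():
    class _StubEncoder:
        encoder = {"<blank>": 0}
        def encode(self, texts, verbose=False):
            return [[hash(t) % 100 + 1] for t in texts]
    # -> [<id of "PersonX buys">, 0, <id of "at the store">]
    return handle_underscores("PersonX buys ___ at the store",
                              _StubEncoder(), prefix=True)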
def get_generation_sequences(opt, data, split, text_encoder, test):
sequences = []
count = 0
final_prefix = None
final_suffix = None
for prefix, category, suffix in tqdm(data[split]["total"]):
final_prefix, final_suffix = do_example(
text_encoder, prefix, suffix, True, True)
final = compile_final_sequence(
opt, final_prefix, final_suffix, category, text_encoder)
sequences.append(final)
count += 1
if count > 10 and test:
break
return sequences
def do_example(text_encoder, prefix, suffix, do_prefix, do_suffix):
final_prefix = None
final_suffix = None
if do_prefix:
if "___" in prefix:
final_prefix = handle_underscores(prefix, text_encoder, True)
else:
final_prefix = text_encoder.encode([prefix], verbose=False)[0]
if do_suffix:
if "_" in suffix:
final_suffix = handle_underscores(suffix, text_encoder)
else:
final_suffix = text_encoder.encode([suffix], verbose=False)[0]
return final_prefix, final_suffix
def compile_final_sequence(opt, final_prefix, final_suffix, category, text_encoder):
final = []
final.append(final_prefix)
final.append(
[text_encoder.encoder[category]]
+ final_suffix)
final[-1].append(text_encoder.encoder["<END>"])
return final
num_delimiter_tokens = {
"category": 1,
"hierarchy": 3,
"hierarchy+label": 4,
"category+hierarchy": 4,
"category+hierarchy+label": 5
}
| comet-public-master | comet/data/atomic.py |
import os
import comet.data.atomic as atomic_data
import comet.data.conceptnet as conceptnet_data
import comet.data.config as cfg
import comet.utils as utils
import pickle
import torch
import json
start_token = "<START>"
end_token = "<END>"
blank_token = "<blank>"
def save_checkpoint(state, filename):
print("Saving model to {}".format(filename))
torch.save(state, filename)
def save_step(model, vocab, optimizer, opt, length, lrs):
if cfg.test_save:
name = "{}.pickle".format(utils.make_name(
opt, prefix="garbage/models/", is_dir=False, eval_=True))
else:
name = "{}.pickle".format(utils.make_name(
opt, prefix="models/", is_dir=False, eval_=True))
save_checkpoint({
"epoch": length, "state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(), "opt": opt,
"vocab": vocab, "epoch_learning_rates": lrs},
name)
def save_eval_file(opt, stats, eval_type="losses", split="dev", ext="pickle"):
if cfg.test_save:
name = "{}/{}.{}".format(utils.make_name(
opt, prefix="garbage/{}/".format(eval_type),
is_dir=True, eval_=True), split, ext)
else:
name = "{}/{}.{}".format(utils.make_name(
opt, prefix="results/{}/".format(eval_type),
is_dir=True, eval_=True), split, ext)
print("Saving {} {} to {}".format(split, eval_type, name))
if ext == "pickle":
with open(name, "wb") as f:
pickle.dump(stats, f)
elif ext == "txt":
with open(name, "w") as f:
f.write(stats)
elif ext == "json":
with open(name, "w") as f:
json.dump(stats, f)
    else:
        raise ValueError("Unsupported file extension: {}".format(ext))
def load_checkpoint(filename, gpu=True):
    if os.path.exists(filename):
        checkpoint = torch.load(
            filename, map_location=lambda storage, loc: storage)
    else:
        print("No model found at {}".format(filename))
        checkpoint = None
    return checkpoint
def make_data_loader(opt, *args):
if opt.dataset == "atomic":
return atomic_data.GenerationDataLoader(opt, *args)
elif opt.dataset == "conceptnet":
return conceptnet_data.GenerationDataLoader(opt, *args)
def set_max_sizes(data_loader, force_split=None):
data_loader.total_size = {}
if force_split is not None:
data_loader.total_size[force_split] = \
data_loader.sequences[force_split]["total"].size(0)
return
for split in data_loader.sequences:
data_loader.total_size[split] = \
data_loader.sequences[split]["total"].size(0)
| comet-public-master | comet/data/data.py |
__version__ = "0.0.1a"
| allentune-master | __init__.py |
"""
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py and setup.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory.
(this will build a wheel for the python version you use to build it - make sure you use python 3.x).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
   (pypi suggests using twine as other methods upload files via plaintext.)
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi allennlp
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
from setuptools import find_packages, setup
setup(
name="allentune",
version="0.0.1a",
author="Suchin Gururangan",
author_email="[email protected]",
description="Hyperparameter tuning for AllenNLP",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP hyperparameter tuning",
license="Apache",
url="https://github.com/kernelmachine/allentune",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=["ray==0.8.6",
"allennlp==1.0.0",
"tabulate",
"seaborn",
"pandas",
"pytest",
"pytest-cov"],
entry_points={"console_scripts": ["allentune=allentune.__main__:main"]},
python_requires=">=3.6.0",
tests_require=["pytest"],
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| allentune-master | setup.py |
allentune-master | allentune/__init__.py |
|
#!/usr/bin/env python
import logging
import os
import sys
if os.environ.get("ALLENTUNE_DEBUG"):
LEVEL = logging.DEBUG
else:
LEVEL = logging.INFO
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=LEVEL)
from allentune.commands import main # pylint: disable=wrong-import-position
def run():
main(prog="allentune")
if __name__ == "__main__":
run() | allentune-master | allentune/__main__.py |
allentune-master | allentune/util/__init__.py |
|
import logging
import os
from typing import Any, Dict, List, Union
import numpy as np
import ray
# Create a custom logger
logger = logging.getLogger(__name__)
class RandomSearch:
@staticmethod
def random_choice(args: List[Any], n: int = 1):
"""
pick a random element from a set.
Example:
>> sampler = RandomSearch.random_choice(1,2,3)
>> sampler()
2
"""
choices = []
for arg in args:
choices.append(arg)
if n == 1:
return lambda: np.random.choice(choices, replace=False)
else:
return lambda: np.random.choice(choices, n, replace=False)
@staticmethod
def random_integer(low: Union[int, float], high: Union[int, float]):
"""
pick a random integer between two bounds
Example:
>> sampler = RandomSearch.random_integer(1, 10)
>> sampler()
9
"""
return lambda: int(np.random.randint(low, high))
@staticmethod
def random_loguniform(low: Union[float, int], high: Union[float, int]):
"""
pick a random float between two bounds, using loguniform distribution
Example:
>> sampler = RandomSearch.random_loguniform(1e-5, 1e-2)
>> sampler()
0.0004
"""
return lambda: np.exp(np.random.uniform(np.log(low), np.log(high)))
@staticmethod
def random_uniform(low: Union[float, int], high: Union[float, int]):
"""
pick a random float between two bounds, using uniform distribution
Example:
>> sampler = RandomSearch.random_uniform(0, 1)
>> sampler()
0.01
"""
return lambda: np.random.uniform(low, high)
class HyperparameterSearch:
def __init__(self, **kwargs):
self.search_space = {}
self.lambda_ = lambda: 0
for key, val in kwargs.items():
self.search_space[key] = val
def parse(self, val: Any):
if isinstance(val, type(lambda x: x)):
val = val()
if isinstance(val, (int, np.int)):
return int(val)
elif isinstance(val, (float, np.float)):
return val
elif isinstance(val, (np.ndarray, list)):
return " ".join(val)
else:
return val
elif isinstance(val, (int, np.int)):
return int(val)
elif isinstance(val, (float, np.float)):
return val
elif isinstance(val, (np.ndarray, list)):
return " ".join(val)
elif val is None:
return None
else:
return val
def sample(self) -> Dict:
res = {}
for key, val in self.search_space.items():
try:
res[key] = self.parse(val)
except TypeError as error:
logger.error(f"Could not parse key {key} with value {val}. {error}")
return res
def update_environment(self, sample) -> None:
for key, val in sample.items():
os.environ[key] = str(val)
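# Editor's illustration (not in the original file): a search space mixing fixed
# values with RandomSearch samplers; sample() resolves every entry to a
# concrete assignment, which update_environment can export as env variables.
def _demo_hyperparameter_search():
    space = HyperparameterSearch(
        LEARNING_RATE=RandomSearch.random_loguniform(1e-5, 1e-2),
        NUM_LAYERS=RandomSearch.random_integer(1, 4),
        DROPOUT=0.5,
    )
    # e.g. {'LEARNING_RATE': 0.0004..., 'NUM_LAYERS': 2, 'DROPOUT': 0.5}
    return space.sample()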
| allentune-master | allentune/util/random_search.py |
from allentune.runners.runner import Runner
from allentune.runners.allennlp_runner import AllenNlpRunner
| allentune-master | allentune/runners/__init__.py |
import argparse
import glob
import json
import logging
import os
import re
import sys
from collections import ChainMap
from typing import Optional
import pandas as pd
from allentune.commands.subcommand import Subcommand
logger = logging.getLogger(__name__)
class Merge(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
subparser = parser.add_parser(
name, description="generate report from experiment", help='Generate a report from hyperparameter search experiments.')
subparser.add_argument(
"--input-files",
nargs="+",
type=str,
required=True,
)
subparser.add_argument(
'--output-file',
type=str,
required=True,
)
subparser.set_defaults(func=merge_reports)
return subparser
def merge_reports(args: argparse.Namespace):
dfs = []
for file in args.input_files:
dfs.append(pd.read_json(file, lines=True))
    master = pd.concat(dfs, axis=0)
    os.makedirs(os.path.dirname(args.output_file) or ".", exist_ok=True)
    if os.path.exists(args.output_file):
        logger.error(f"{args.output_file} already exists, aborting.")
        return
    master.to_json(args.output_file, lines=True, orient='records')
logger.info(f"Merged files in {args.output_file}.") | allentune-master | allentune/commands/merge.py |
import argparse
import datetime
import glob
import json
import os
from collections import ChainMap
from typing import Dict, List, Optional, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
from matplotlib.ticker import ScalarFormatter
from allentune.commands.subcommand import Subcommand
sns.set_style("white")
class Plot(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
subparser = parser.add_parser(
name, description="generate report from experiment", help='Plot expected validation accuracy curves.')
subparser.add_argument(
"--result-file",
type=str,
required=True
)
subparser.add_argument(
"--output-file",
type=str,
required=True
)
subparser.add_argument(
"--linestyle",
type=str,
required=False,
default="-"
)
subparser.add_argument(
"--logx",
action="store_true"
)
subparser.add_argument(
"--duration-field",
type=str,
required=False,
default="training_duration"
)
subparser.add_argument(
"--performance-metric-field",
type=str,
required=False,
default="best_validation_accuracy"
)
subparser.add_argument(
"--model-field",
type=str,
required=False,
default="model"
)
subparser.add_argument(
"--plot-errorbar",
action="store_true"
)
subparser.add_argument(
"--show-xticks",
action="store_true"
)
subparser.add_argument(
"--legend-location",
type=str,
required=False,
default="lower right"
)
subparser.add_argument(
"--x-axis-time",
action="store_true"
)
subparser.add_argument(
"--linewidth",
type=int,
required=False,
default=3
)
subparser.add_argument(
"--relabel-logx-scalar",
type=list,
required=False,
default=None
)
subparser.add_argument(
"--x-axis-rot",
type=float,
required=False,
default=0.0
)
subparser.add_argument(
"--data-name",
type=str,
required=True,
)
subparser.add_argument(
'--performance-metric',
required=False,
type=str,
default="accuracy"
)
subparser.add_argument(
"--fontsize",
type=int,
required=False,
default=24
)
subparser.add_argument(
"--subplots",
nargs=2,
type=int,
required=True
)
subparser.add_argument(
"--figsize",
nargs=2,
type=int,
required=True
)
subparser.set_defaults(func=plotter)
return subparser
def _cdf_with_replacement(i,n,N):
return (i/N)**n
def _cdf_without_replacement(i,n,N):
return scipy.special.comb(i,n) / scipy.special.comb(N,n)
def _compute_variance(N, cur_data, expected_max_cond_n, pdfs):
"""
this computes the standard error of the max.
this is what the std dev of the bootstrap estimates of the mean of the max converges to, as
is stated in the last sentence of the summary on page 10 of http://www.stat.cmu.edu/~larry/=stat705/Lecture13.pdf
uses equation
"""
variance_of_max_cond_n = []
for n in range(N):
# for a given n, estimate variance with \sum(p(x) * (x-mu)^2), where mu is \sum(p(x) * x).
cur_var = 0
for i in range(N):
cur_var += (cur_data[i] - expected_max_cond_n[n])**2 * pdfs[n][i]
cur_var = np.sqrt(cur_var)
variance_of_max_cond_n.append(cur_var)
return variance_of_max_cond_n
# this implementation assumes sampling with replacement for computing the empirical cdf
def samplemax(validation_performance, with_replacement=True):
validation_performance = list(validation_performance)
validation_performance.sort()
N = len(validation_performance)
pdfs = []
for n in range(1,N+1):
# the CDF of the max
F_Y_of_y = []
for i in range(1,N+1):
if with_replacement:
F_Y_of_y.append(_cdf_with_replacement(i,n,N))
else:
F_Y_of_y.append(_cdf_without_replacement(i,n,N))
f_Y_of_y = []
cur_cdf_val = 0
for i in range(len(F_Y_of_y)):
f_Y_of_y.append(F_Y_of_y[i] - cur_cdf_val)
cur_cdf_val = F_Y_of_y[i]
pdfs.append(f_Y_of_y)
expected_max_cond_n = []
for n in range(N):
# for a given n, estimate expected value with \sum(x * p(x)), where p(x) is prob x is max.
cur_expected = 0
for i in range(N):
cur_expected += validation_performance[i] * pdfs[n][i]
expected_max_cond_n.append(cur_expected)
var_of_max_cond_n = _compute_variance(N, validation_performance, expected_max_cond_n, pdfs)
return {"mean":expected_max_cond_n, "var":var_of_max_cond_n, "max": np.max(validation_performance)}
def td_format(td_object):
seconds = int(td_object.total_seconds())
periods = [
('yr', 60*60*24*365),
('mo', 60*60*24*30),
('d', 60*60*24),
('h', 60*60),
('min', 60),
('sec', 1)
]
strings=[]
for period_name, period_seconds in periods:
if seconds > period_seconds:
period_value , seconds = divmod(seconds, period_seconds)
has_s = 's' if period_value > 1 and period_name not in ['min', 'sec', 'd', 'h'] else ''
strings.append("%s%s%s" % (period_value, period_name, has_s))
res = ", ".join(strings)
if res == '60min':
res = '1h'
elif res == '24h':
res = '1d'
elif res == '30d':
res = '1mo'
return res
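# Editor's illustration (not in the original file):
def _demo_td_format():
    # 1 day + 2 hours + 3 minutes -> '1d, 2h, 3min'
    return td_format(datetime.timedelta(days=1, hours=2, minutes=3))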
def _one_plot(
data: pd.DataFrame,
avg_time: pd.DataFrame,
data_size: int,
cur_ax: matplotlib.axis,
data_name: str = "SST5",
linestyle: str = "-",
linewidth: int = 3,
logx: bool = False,
plot_errorbar: bool = False,
errorbar_kind: str = 'shade',
errorbar_alpha: float = 0.1,
x_axis_time: bool = False,
legend_location: str = 'lower right',
relabel_logx_scalar: List[int] = None,
rename_labels: Dict[str, str] = None,
reported_accuracy: List[float] = None,
encoder_name: str = None,
show_xticks: bool = False,
fontsize: int = 16,
xlim: List[int] = None,
model_order: List[str] = None,
performance_metric: str = "accuracy",
x_axis_rot: int = 0,
line_colors: List[str] = ["#8c564b", '#1f77b4', '#ff7f0e', '#17becf'],
errorbar_colors: List[str] = ['#B22222', "#089FFF", "#228B22"]):
cur_ax.set_title(data_name, fontsize=fontsize)
if model_order:
models = model_order
else:
models = data.index.levels[0].tolist()
models.sort()
max_first_point = 0
cur_ax.set_ylabel("Expected validation " + performance_metric, fontsize=fontsize)
if x_axis_time:
cur_ax.set_xlabel("Training duration",fontsize=fontsize)
else:
cur_ax.set_xlabel("Hyperparameter assignments",fontsize=fontsize)
if logx:
cur_ax.set_xscale('log')
for ix, model in enumerate(models):
means = data[model]['mean']
vars = data[model]['var']
max_acc = data[model]['max']
if x_axis_time:
x_axis = [avg_time[model] * (i+1) for i in range(len(means))]
else:
x_axis = [i+1 for i in range(len(means))]
if rename_labels:
model_name = rename_labels.get(model, model)
else:
model_name = model
if reported_accuracy:
cur_ax.plot([0, 6.912e+6],
[reported_accuracy[model],
reported_accuracy[model]],
linestyle='--',
linewidth=linewidth,
color=line_colors[ix])
plt.text(6.912e+6-3600000,
reported_accuracy[model] + 0.01,
f'reported {model_name} {performance_metric}',
ha='right',
style='italic',
fontsize=fontsize-5,
color=line_colors[ix])
if encoder_name:
model_name = encoder_name + " " + model_name
if plot_errorbar:
if errorbar_kind == 'shade':
minus_vars = np.array(means)-np.array(vars)
plus_vars = [x + y if (x + y) <= max_acc else max_acc for x,y in zip(means, vars)]
plt.fill_between(x_axis,
minus_vars,
plus_vars,
alpha=errorbar_alpha,
facecolor=errorbar_colors[ix])
else:
line = cur_ax.errorbar(x_axis,
means,
yerr=vars,
label=model_name,
linestyle=linestyle,
linewidth=linewidth,
color=line_colors[ix])
line = cur_ax.plot(x_axis,
means,
label=model_name,
linestyle=linestyle,
linewidth=linewidth,
color=line_colors[ix])
left, right = cur_ax.get_xlim()
if xlim:
cur_ax.set_xlim(xlim)
# cur_ax.xaxis.set_ticks(np.arange(xlim[0], xlim[1]+5, 10))
for tick in cur_ax.xaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
for tick in cur_ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
plt.locator_params(axis='y', nbins=10)
if relabel_logx_scalar:
for axis in [cur_ax.xaxis]:
axis.set_ticks(relabel_logx_scalar)
axis.set_major_formatter(ScalarFormatter())
plt.xticks(rotation=x_axis_rot)
if show_xticks:
cur_ax.tick_params(which="both", bottom=True)
if x_axis_time:
def timeTicks(x, pos):
d = datetime.timedelta(seconds=float(x))
d = td_format(d)
return str(d)
formatter = matplotlib.ticker.FuncFormatter(timeTicks)
cur_ax.xaxis.set_major_formatter(formatter)
cur_ax.legend(loc=legend_location, fontsize=fontsize)
plt.tight_layout()
def plotter(args: argparse.Namespace):
config = vars(args)
subplots = tuple(config.pop("subplots"))
figsize = tuple(config.pop("figsize"))
_ = config.pop('func')
expected_max_performance_data = {}
average_times = {}
output_file = config.pop("output_file")
config = {config.pop("result_file"): config}
f, axes = plt.subplots(subplots[0], subplots[1], figsize=figsize)
if subplots != (1, 1):
axes_iter = zip(config.items(), np.ndenumerate(axes))
else:
axes_iter = zip(config.items(), enumerate([axes]))
for ((data_file, _), (index, _)) in axes_iter:
duration_field = config[data_file].pop('duration_field')
model_field = config[data_file].pop('model_field')
performance_metric_field = config[data_file].pop('performance_metric_field')
master = pd.read_json(data_file, lines=True)
data_sizes = [10000]
for data_size in data_sizes:
df = master
avg_time = df.groupby(model_field)[duration_field].mean()
sample_maxes = df.groupby(model_field)[performance_metric_field].apply(samplemax)
expected_max_performance_data[data_file] = {data_size: sample_maxes}
average_times[data_file] = {data_size: avg_time}
if subplots == (1,1):
axis = axes
elif subplots[1] > 1:
axis = axes[index[0], index[1]]
else:
axis = axes[index[0]]
_one_plot(sample_maxes,
avg_time,
data_size,
axis,
**config[data_file])
print("saving to {}".format(output_file))
plt.savefig(output_file, dpi=300)
| allentune-master | allentune/commands/plot.py |
from typing import Dict
import argparse
import logging
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allentune.commands.report import Report
from allentune.commands.search import Search
from allentune.commands.plot import Plot
from allentune.commands.merge import Merge
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ArgumentParserWithDefaults(argparse.ArgumentParser):
"""
Custom argument parser that will display the default value for an argument
in the help message.
"""
_action_defaults_to_ignore = {"help", "store_true", "store_false", "store_const"}
@staticmethod
def _is_empty_default(default):
if default is None:
return True
if isinstance(default, (str, list, tuple, set)):
return not bool(default)
return False
@overrides
def add_argument(self, *args, **kwargs):
# pylint: disable=arguments-differ
# Add default value to the help message when the default is meaningful.
default = kwargs.get("default")
if kwargs.get("action") not in self._action_defaults_to_ignore and not self._is_empty_default(default):
description = kwargs.get("help") or ""
kwargs["help"] = f"{description} (default = {default})"
super().add_argument(*args, **kwargs)
def main(prog: str = None) -> None:
"""
The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp``
codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't
work for them, unless you use the ``--include-package`` flag.
"""
# pylint: disable=dangerous-default-value
parser = ArgumentParserWithDefaults(description="Run AllenTune", usage='%(prog)s', prog=prog)
subparsers = parser.add_subparsers(title='Commands', metavar='')
    subcommands = {
        # Default commands
        "search": Search(),
        "report": Report(),
        "plot": Plot(),
        "merge": Merge()
    }
for name, subcommand in subcommands.items():
subparser = subcommand.add_subparser(name, subparsers)
args = parser.parse_args()
# If a subparser is triggered, it adds its work as `args.func`.
# So if no such attribute has been added, no subparser was triggered,
# so give the user some help.
if 'func' in dir(args):
args.func(args)
else:
parser.print_help() | allentune-master | allentune/commands/__init__.py |
#!/usr/bin/env python
import sys
import logging
import os
import argparse
from allentune.modules import AllenNlpRunner
from allentune.modules import RayExecutor
from allentune.commands.subcommand import Subcommand
if os.environ.get("ALLENTUNE_DEBUG"):
LEVEL = logging.DEBUG
else:
LEVEL = logging.INFO
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=LEVEL
)
class Search(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
subparser = parser.add_parser(
name, description="search with RayTune", help='Perform hyperparameter search')
subparser.add_argument(
"--experiment-name",
type=str,
required=True,
help="a name for the experiment",
)
subparser.add_argument(
"--num-cpus",
type=int,
default=1,
help="number of CPUs available to the experiment",
)
subparser.add_argument(
"--num-gpus",
type=int,
default=1,
help="number of GPUs available to the experiment",
)
subparser.add_argument(
"--cpus-per-trial",
type=int,
default=1,
help="number of CPUs dedicated to a single trial",
)
subparser.add_argument(
"--gpus-per-trial",
type=int,
default=1,
help="number of GPUs dedicated to a single trial",
)
subparser.add_argument(
"--log-dir",
type=str,
default="./logs",
help="directory in which to store trial logs and results",
)
subparser.add_argument(
"--with-server",
action="store_true",
default=False,
help="start the Ray server",
)
subparser.add_argument(
"--server-port",
type=int,
default=10000,
help="port for Ray server to listens on",
)
subparser.add_argument(
"--search-strategy",
type=str,
default="variant-generation",
help="hyperparameter search strategy used by Ray-Tune",
)
subparser.add_argument(
"--search-space",
"-e",
type=os.path.abspath,
required=True,
help="name of dict describing the hyperparameter search space",
)
subparser.add_argument(
"--num-samples",
type=int,
default=1,
help="Number of times to sample from the hyperparameter space. "
+ "If grid_search is provided as an argument, the grid will be "
+ "repeated num_samples of times.",
)
subparser.add_argument(
"--base-config",
dest='base_config',
required=True,
type=os.path.abspath,
help="path to parameter file describing the model to be trained",
)
subparser.add_argument(
"--include-package",
type=str,
action="append",
default=[],
help="additional packages to include",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help="a JSON structure used to override the experiment configuration",
)
subparser.set_defaults(func=search_from_args)
return subparser
def search_from_args(args: argparse.Namespace):
runner = AllenNlpRunner()
executor = RayExecutor(runner)
executor.run(args)
| allentune-master | allentune/commands/search.py |
"""
Base class for subcommands under ``allentune.run``.
"""
import argparse
class Subcommand:
"""
An abstract class representing subcommands for allentune.run.
If you wanted to (for example) create your own custom `special-evaluate` command to use like
``allentune special-evaluate ...``
    you would create a ``Subcommand`` subclass and then pass it as an override to
    :func:`~allentune.commands.main` .
"""
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
# pylint: disable=protected-access
raise NotImplementedError
| allentune-master | allentune/commands/subcommand.py |
import argparse
import glob
import json
import logging
import os
import re
import sys
from collections import ChainMap
from typing import Optional
import pandas as pd
from tabulate import tabulate
from allentune.commands.subcommand import Subcommand
logger = logging.getLogger(__name__)
class Report(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
subparser = parser.add_parser(
name, description="generate report from experiment", help='Generate a report from hyperparameter search experiments.')
subparser.add_argument(
"--log-dir",
required=True,
)
subparser.add_argument(
'--performance-metric',
required=False,
type=str
)
subparser.add_argument(
'--model',
required=False,
type=str
)
subparser.set_defaults(func=generate_report)
return subparser
def generate_report(args: argparse.Namespace):
experiment_dir = os.path.abspath(args.log_dir)
dirs = glob.glob(experiment_dir + '/run_*/trial/')
master = []
for dir in dirs:
try:
with open(os.path.join(dir, "metrics.json"), 'r') as metrics_file:
metric = json.load(metrics_file)
with open(os.path.join(dir, "config.json"), 'r') as config_file:
config = json.load(config_file)
with open(os.path.join(dir, "stdout.log"), 'r') as stdout_file:
stdout = stdout_file.read()
            random_seed = re.search(r"random_seed = (\d+)", stdout)
            pytorch_seed = re.search(r"pytorch_seed = (\d+)", stdout)
            numpy_seed = re.search(r"numpy_seed = (\d+)", stdout)
if random_seed:
seeds = {"random_seed": random_seed.group(1), "pytorch_seed": pytorch_seed.group(1), "numpy_seed": numpy_seed.group(1)}
else:
seeds = {"random_seed": None, "pytorch_seed": None, "numpy_seed": None}
directory = {"directory": dir}
master.append((metric, config, seeds, directory))
        except (IOError, json.JSONDecodeError, AttributeError):
            # skip trials with missing or incomplete logs/metrics
            continue
master_dicts = [dict(ChainMap(*item)) for item in master]
df = pd.json_normalize(master_dicts)
try:
df['training_duration'] = pd.to_timedelta(df['training_duration']).dt.total_seconds()
except KeyError:
logger.error(f"No finished experiments found in {args.log_dir}")
sys.exit(0)
if args.model:
df['model'] = args.model
output_file = os.path.join(experiment_dir, "results.jsonl")
df.to_json(output_file, lines=True, orient='records')
logger.info("results written to {}".format(output_file))
try:
best_performance = df[args.performance_metric].max()
median_performance = df[args.performance_metric].median()
worst_performance = df[args.performance_metric].min()
mean_performance = df[args.performance_metric].mean()
std_performance = df[args.performance_metric].std()
iqr_performance = df[args.performance_metric].quantile(0.75) - df[args.performance_metric].quantile(0.25)
except KeyError:
logger.error(f"No performance metric {args.performance_metric} found in results of {args.log_dir}")
sys.exit(0)
results = [
["Model Name", args.model],
["Performance Metric", args.performance_metric],
['Total Experiments', f"{df.shape[0]}"],
["Best Performance", f"{best_performance}"],
["Min Performance", f"{median_performance} +- {iqr_performance}"],
["Mean +- STD Performance", f"{mean_performance} +- {std_performance}"],
["Median +- IQR Performance", f"{median_performance} +- {iqr_performance}"],
["Best Model Directory Path", f"{df.iloc[df[args.performance_metric].idxmax()]['directory']}"],
]
logger.info('\n' + tabulate(results))
| allentune-master | allentune/commands/report.py |
import argparse
import json
import logging
import os
import random
from typing import Any, Callable, Dict, Optional
import numpy as np
import ray
from ray.tune import function, register_trainable, run_experiments, sample_from
from ray.tune.function_runner import StatusReporter
from allentune.modules.allennlp_runner import AllenNlpRunner
from allentune.util.random_search import RandomSearch
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RayExecutor(object):
name = "Ray"
def __init__(self, runner: AllenNlpRunner) -> None:
self._runner = runner
def parse_search_config(self, search_config: Dict) -> Dict:
for hyperparameter, val in search_config.items():
if not isinstance(val, dict):
ray_sampler = val
elif val['sampling strategy'] == 'loguniform':
low, high = val['bounds'][0], val['bounds'][1]
ray_sampler = RandomSearch.random_loguniform(low, high)
elif val['sampling strategy'] == 'integer':
low, high = val['bounds'][0], val['bounds'][1]
ray_sampler = RandomSearch.random_integer(low, high)
elif val['sampling strategy'] == 'choice':
ray_sampler = RandomSearch.random_choice(val['choices'])
elif val['sampling strategy'] == 'uniform':
low, high = val['bounds'][0], val['bounds'][1]
ray_sampler = RandomSearch.random_uniform(low, high)
else:
raise KeyError(f"sampling strategy {val['sampling strategy']} does not exist")
search_config[hyperparameter] = ray_sampler
return search_config
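    # Editor's illustration (not in the original file): parse_search_config
    # expects a search-space dict of this shape, mapping each hyperparameter
    # either to a fixed value or to a sampling-strategy spec:
    #
    #   {
    #     "LEARNING_RATE": {"sampling strategy": "loguniform", "bounds": [1e-5, 1e-2]},
    #     "HIDDEN_SIZE": {"sampling strategy": "integer", "bounds": [64, 512]},
    #     "ACTIVATION": {"sampling strategy": "choice", "choices": ["relu", "tanh"]},
    #     "DROPOUT": 0.5
    #   }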
def run_distributed(
self,
run_func: Callable[[Dict[str, Any], StatusReporter], None],
args: argparse.Namespace,
) -> None:
logger.info(
f"Init Ray with {args.num_cpus} CPUs "
+ f"and {args.num_gpus} GPUs."
)
ray.init(num_cpus=args.num_cpus, num_gpus=args.num_gpus)
        register_trainable("run", run_func)
with open(args.search_space) as f:
search_config = json.load(f)
search_config = self.parse_search_config(search_config)
experiments_config = {
args.experiment_name: {
"run": "run",
"resources_per_trial": {
"cpu": args.cpus_per_trial,
"gpu": args.gpus_per_trial,
},
"config": search_config,
"local_dir": args.log_dir,
"num_samples": args.num_samples,
}
}
logger.info(f"Run Configuration: {experiments_config}")
try:
run_experiments(
experiments=experiments_config,
scheduler=None,
with_server=args.with_server,
server_port=args.server_port,
)
except ray.tune.TuneError as e:
logger.error(
f"Error during run of experiment '{args.experiment_name}': {e}"
)
def run(self, args: argparse.Namespace) -> None:
setattr(args, "cwd", os.getcwd())
run_func = self._runner.get_run_func(args)
self.run_distributed(run_func, args) | allentune-master | allentune/modules/ray_executor.py |
from allentune.modules.allennlp_runner import AllenNlpRunner
from allentune.modules.ray_executor import RayExecutor | allentune-master | allentune/modules/__init__.py |
import argparse
import glob
import json
import logging
import os
from collections import ChainMap
from datetime import datetime
from typing import Optional
import pandas as pd
import torch
from allennlp.commands.train import train_model
from allennlp.common.params import Params, parse_overrides, with_fallback
from allennlp.common.util import import_module_and_submodules
import _jsonnet
from allentune.util.random_search import HyperparameterSearch
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class AllenNlpRunner(object):
name = "AllenNLP"
def get_run_func(
self,
args: argparse.Namespace,
):
if args is None:
raise ValueError("No run arguments found for AllenNLP runner.")
with open(args.base_config, "r") as parameter_f:
parameter_file_snippet = parameter_f.read()
def train_func(config, reporter):
logger.debug(f"CUDA_VISIBLE_DEVICES: {os.environ['CUDA_VISIBLE_DEVICES']}")
for package_name in getattr(args, "include_package", ()):
import_module_and_submodules(package_name)
search_space = HyperparameterSearch(**config)
sample = search_space.sample()
for k, v in sample.items():
config[k] = str(v)
params_dict = json.loads(
_jsonnet.evaluate_snippet(
"config", parameter_file_snippet, tla_codes={}, ext_vars=config
)
)
if args.num_gpus == 0:
logger.warning(f"No GPU specified, using CPU.")
params_dict["trainer"]["cuda_device"] = -1
if args.cpus_per_trial > 0:
torch.set_num_threads(args.cpus_per_trial)
params = Params(params_dict)
logger.debug(f"AllenNLP Configuration: {params.as_dict()}")
train_model(params=params, serialization_dir="trial")
reporter(done=True)
return train_func
| allentune-master | allentune/modules/allennlp_runner.py |
from allentune.modules import AllenNlpRunner, RayExecutor
import pytest
import argparse
import os
import shutil
import pathlib
class TestExampleRun(object):
def test_run(self):
runner = AllenNlpRunner()
executor = RayExecutor(runner)
args = argparse.Namespace()
PROJECT_ROOT = (pathlib.Path(__file__).parent / ".." / "..").resolve() # pylint: disable=no-member
MODULE_ROOT = PROJECT_ROOT / "allentune"
TESTS_ROOT = MODULE_ROOT / "tests"
FIXTURES_ROOT = TESTS_ROOT / "fixtures"
args.experiment_name = "test"
args.num_cpus = 1
args.num_gpus = 0
args.cpus_per_trial = 1
args.gpus_per_trial = 0
args.base_config = FIXTURES_ROOT / "classifier.jsonnet"
args.search_space = FIXTURES_ROOT / "search_space.json"
args.log_dir = TESTS_ROOT / "logs"
args.num_samples = 1
args.with_server = False
args.server_port = 1000
args.search_strategy = "variant-generation"
executor.run(args)
assert os.path.isdir(TESTS_ROOT / "logs")
shutil.rmtree(TESTS_ROOT / "logs/") | allentune-master | tests/test_example_run.py |
from allentune.util.random_search import RandomSearch
import pytest
import numpy as np
import string
class TestRandomSearch(object):
def test_random_choice(self):
random_search = RandomSearch()
items = range(100)
sampler = random_search.random_choice(items)
for _ in range(3):
res = sampler()
assert res in items
def test_random_integer(self):
random_search = RandomSearch()
lower_bound = np.random.choice(range(100))
upper_bound = np.random.choice(range(100, 200))
sampler = random_search.random_integer(lower_bound, upper_bound)
for _ in range(3):
res = sampler()
assert res >= lower_bound and res <= upper_bound
def test_random_loguniform(self):
random_search = RandomSearch()
sampler = random_search.random_loguniform(1e-5, 1e-1)
for _ in range(3):
res = sampler()
assert res >= 1e-5 and res <= 1e-1
def test_random_uniform(self):
random_search = RandomSearch()
sampler = random_search.random_uniform(0, 1)
for _ in range(3):
res = sampler()
assert res >= 0 and res <= 1 | allentune-master | tests/test_random_search.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
GITHUB_REPO = 'allenai/alexafsm'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| alexafsm-master | travis_pypi_setup.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
with open('HISTORY.md') as history_file:
history = history_file.read()
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
readme = ''
requirements = [
'transitions==0.5.0',
'voicelabs==0.0.10'
]
test_requirements = [
'elasticsearch==5.1.0',
'elasticsearch-dsl==5.1.0'
]
setup(
name='alexafsm',
version='0.1.11',
description="Finite-state machine library for building complex Alexa conversations",
long_description=readme + '\n\n' + history,
author="Allen AI",
author_email='[email protected]',
url='https://github.com/allenai/alexafsm',
packages=[
'alexafsm',
],
package_dir={'alexafsm':
'alexafsm'},
include_package_data=True,
install_requires=requirements,
license="Apache Software License 2.0",
zip_safe=False,
keywords='alexafsm, alexa skill, finite-state machine, fsm, dialog, dialog state management',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
| alexafsm-master | setup.py |
import pytest
import json
from tests.skillsearch.policy import Policy
from alexafsm.utils import validate, events_states_transitions, unused_events_states_transitions
from alexafsm.test_helpers import get_requests_responses
from tests.skillsearch.skill_settings import SkillSettings
def test_validate_policy():
policy = Policy.initialize()
validate(policy=policy,
schema_file='./tests/skillsearch/speech/alexa-schema.json',
ignore_intents={'DontUnderstand'})
policy_states = policy.machine.states
policy_stop_states = \
policy.states.EXIT_ON_STOP_STATES + \
policy.states.CONTINUE_ON_STOP_STATES + \
policy.states.PROMPT_ON_STOP_STATES
# "exiting" state does not need any outgoing transitions
missing = set(policy_states) - set(policy_stop_states) - {'exiting'}
assert not missing, f'Some states do not handle STOP/CANCEL intents: {missing}'
def the_test_playback(measure_coverage: bool = False):
"""Play back recorded responses to check that the system is still behaving the same
Change to test_playback to actually run this test once a recording is made."""
policy = Policy.initialize()
SkillSettings().playback = True
record_file = SkillSettings().get_record_file()
for request, expected_response in get_requests_responses(record_file):
actual_response = json.loads(json.dumps(policy.handle(request)))
assert expected_response == actual_response
if measure_coverage:
policy = SkillSettings().get_policy()
all_events, all_states, all_transitions = events_states_transitions(policy)
unused_events, unused_states, unused_transitions = \
unused_events_states_transitions(policy, get_requests_responses(record_file))
print(f"Summary: "
f"{len(unused_events)}/{len(all_events)} unused events, "
f"{len(unused_states)}/{len(all_states)} unused states, "
f"{len(unused_transitions)}/{len(all_transitions)} unused transitions ")
print(f"Unused events: {unused_events}")
print(f"Unused states: {unused_states}")
print(f"Unused transitions: {unused_transitions}")
if __name__ == '__main__':
pytest.main([__file__])
| alexafsm-master | tests/test_skillsearch.py |
from collections import namedtuple
from alexafsm.session_attributes import SessionAttributes as ISessionAttributes, INITIAL_STATE
import alexafsm.make_json_serializable # NOQA
Slots = namedtuple('Slots', ['love', 'money'])
class SessionAttributes(ISessionAttributes):
slots_cls = Slots
not_sent_fields = ['intent']
request = {
'session': {
'attributes': {
'state': 'blissful',
'slots': ['loving', 'null']
},
},
'request': {
'type': 'IntentRequest',
'intent': {
'name': 'Search',
'slots': {
'Love': {
'name': 'Love'
},
'Money': {
'name': 'Money',
'value': 'lots'
}
}
}
}
}
def test_none_request():
s = SessionAttributes.from_request(None)
assert s.intent is None
assert s.state == INITIAL_STATE
assert s.slots == Slots(love=None, money=None)
def test_request():
s = SessionAttributes.from_request(request)
assert s.intent == 'Search'
assert s.slots == Slots(love=None, money='lots')
assert s.state == 'blissful'
def test_json_to_alexa():
s = SessionAttributes.from_request(request)
js = s.to_json()
assert 'intent' not in js
assert js['state'] == 'blissful'
assert js['slots'] == Slots(love=None, money='lots')
def test_json_to_alexa_and_back():
import json
s = SessionAttributes.from_request(request)
js = json.dumps(s)
request2 = {
'request': {'intent': {'name': 'foo'}},
'session': {'attributes': json.loads(js)}
}
s2 = SessionAttributes.from_request(request2)
assert s2.intent == 'foo'
assert s2.state == s.state
assert s2.slots == Slots(love=None, money=None) # new request has no slots
def test_empty_attributes():
import json
empty_attrs_request = {
'session': {
'attributes': {},
},
'request': {
'type': 'IntentRequest',
'intent': {
'name': 'Search',
'slots': {
'Love': {
'name': 'Love'
},
'Money': {
'name': 'Money',
'value': 'lots'
}
}
}
}
}
s = SessionAttributes.from_request(empty_attrs_request)
js = json.dumps(s)
request2 = {
'request': {'intent': {'name': 'foo'}},
'session': {'attributes': json.loads(js)}
}
s2 = SessionAttributes.from_request(request2)
assert s2.intent == 'foo'
assert s2.state == s.state
assert s2.slots == Slots(love=None, money=None) # new request has no slots
| alexafsm-master | tests/test_session_attributes.py |
# -*- coding: utf-8 -*-
| alexafsm-master | tests/__init__.py |
"""
Representation of the Skill type in Elasticsearch
"""
from elasticsearch_dsl import DocType, Text, Keyword, Double, Integer
INDEX = 'chat_prod'
class Skill(DocType):
"""
Representation of a skill inside ES
"""
name = Text(fields={'raw': Keyword()})
creator = Keyword()
category = Keyword()
url = Text()
description = Text()
short_description = Text()
avg_rating = Double()
num_ratings = Integer()
html = Text()
usages = Text(fields={'raw': Keyword()})
image_url = Text()
keyphrases = Text(fields={'raw': Keyword()})
class Meta:
"""
Metadata about where this data type resides
"""
index = INDEX
@classmethod
def set_index(cls, new_index: str):
cls._doc_type.index = new_index
@classmethod
def get_index(cls):
return cls._doc_type.index
def to_json(self):
"""
Provide a JSON representation of this Skill
"""
doc = self.meta.to_dict()
doc['_source'] = self.to_dict()
return doc
| alexafsm-master | tests/skillsearch/skill.py |
"""
Intent constants for this skill
"""
NEW_SEARCH = 'NewSearch'
NTH_SKILL = 'NthSkill'
PREVIOUS_SKILL = 'PreviousSkill'
NEXT_SKILL = 'NextSkill'
DESCRIBE_RATINGS = 'DescribeRatings'
| alexafsm-master | tests/skillsearch/intent.py |
"""This demonstrates a Flask server that uses alexafsm-based skill search"""
import getopt
import json
import logging
import sys
from elasticsearch_dsl.connections import connections
from flask import Flask, request as flask_request
from livereload import Server
from voicelabs.voicelabs import VoiceInsights
from tests.skillsearch.policy import Policy
from tests.skillsearch.skill_settings import SkillSettings
app = Flask(__name__)
logger = logging.getLogger(__name__)
settings = SkillSettings()
port = 8888
@app.route('/', methods=['POST'])
def main():
req = flask_request.json
policy = Policy.initialize()
return json.dumps(policy.handle(req, settings.vi)).encode('utf-8')
def _usage():
print(f"Usage: alexa-listener.py"
f" -s --es-server <ES cluster address [your.es_server]>"
f" -i --use-voice-insight <use-voice-insight? (y/[N])>")
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "hs:i",
["help", "es-server=", "use-voice-insight"])
except getopt.GetoptError:
_usage()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
_usage()
sys.exit()
if opt in ('-s', '--es-server'):
settings.es_server = arg
if opt in ('-i', '--use-voice-insight'):
print("Activating VoiceInsight")
settings.vi = VoiceInsights()
log_file = f"alexa.log"
print(f"Logging to {log_file} (append)")
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s %(message)s',
filename=log_file,
filemode='a',
level=logging.INFO)
print(f"Connecting to elasticsearch server on {settings.es_server}")
connections.create_connection(hosts=[settings.es_server])
print(f"Now listening for Alexa requests on port #: {port}")
server = Server(app.wsgi_app)
server.serve(host='0.0.0.0', port=port)
| alexafsm-master | tests/skillsearch/server.py |
"""Interface to DynamoDB"""
import boto3
class DynamoDB:
table = None
def __init__(self, table_name: str = None):
if not DynamoDB.table:
assert table_name is not None, 'Using DynamoDB without initializing it!'
DynamoDB.table = boto3.resource('dynamodb').Table(table_name)
def register_new_user(self, user_id: str):
DynamoDB.table.put_item(Item={
'userId': user_id
})
def get_user_info(self, user_id: str) -> dict:
return DynamoDB.table.get_item(Key={'userId': user_id}).get('Item')
def set_user_info(self, user_id: str, **kwargs):
DynamoDB.table.update_item(
Key={
'userId': user_id
},
UpdateExpression='SET ' + ', '.join([f'{k} = :{k}' for k in kwargs.keys()]),
ExpressionAttributeValues=dict((':' + k, v) for k, v in kwargs.items())
)
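# A minimal usage sketch (attribute names below are hypothetical, not part of this
# module): DynamoDB('chat-dev').set_user_info('u1', visits=3, last_query='pizza')
# would issue UpdateExpression='SET visits = :visits, last_query = :last_query'
# with ExpressionAttributeValues={':visits': 3, ':last_query': 'pizza'}.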
| alexafsm-master | tests/skillsearch/dynamodb.py |
"""Settings for Alexa skills app"""
class SkillSettings:
"""Singleton settings for app"""
settings = None
class SkillSettingsImpl:
# how far back in time a request can be, in seconds; cannot be greater than 150 according to
# https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/developing-an-alexa-skill-as-a-web-service#timestamp
REQUEST_TIMEOUT = 100
es_server = 'ES_SERVER'
dynamodb = 'chat-dev'
vi = None
record = False
playback = False
def get_record_dir(self):
"""Get the directory where replays should be saved"""
return 'tests/skillsearch/playback'
def get_record_file(self):
"""Get the file where replays should be saved"""
return f'{self.get_record_dir()}/recordings.json'
def __init__(self):
if not SkillSettings.settings:
SkillSettings.settings = SkillSettings.SkillSettingsImpl()
def __getattr__(self, name):
return getattr(self.settings, name)
def __setattr__(self, key, value):
return setattr(self.settings, key, value)
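# Usage sketch: because every instance proxies the one shared SkillSettingsImpl,
# a write through any handle is visible through every other handle (the names
# below are illustrative only):
# a, b = SkillSettings(), SkillSettings()
# a.record = True
# assert b.record is True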
| alexafsm-master | tests/skillsearch/skill_settings.py |
import logging
from alexafsm.policy import Policy as PolicyBase
from tests.skillsearch.clients import get_es_skills, get_user_info, register_new_user
from tests.skillsearch.states import States, MAX_SKILLS
logger = logging.getLogger(__name__)
class Policy(PolicyBase):
def __init__(self, states: States, request: dict, with_graph: bool = False):
super().__init__(states, request, with_graph)
if request:
user_id = request['session']['user']['userId']
user_info = get_user_info(user_id, request['request']['requestId'])
self.states.attributes.first_time = not bool(user_info)
if self.attributes.first_time:
register_new_user(user_id)
states_cls = States
def _valid_search(self) -> bool:
if not self.attributes.slots.query:
return False
current_query = self.attributes.slots.query
if current_query == 'find':
self.states.attributes.slots = self.states.attributes.slots._replace(query='')
elif current_query.startswith('find '):
self.states.attributes.slots = \
self.states.attributes.slots._replace(query=current_query[5:])
return bool(self.attributes.slots.query) and not self.m_searching_for_exit()
def m_searching_for_exit(self) -> bool:
# sometimes Amazon misinterprets "exit" as a search intent for the term "exit" instead of
# the exit intent. Let's take care of that on behalf of the user
return self.attributes.slots.query == 'exit'
def m_search(self) -> None:
"""Search for skills matching user's query"""
attributes = self.states.attributes
if attributes.searched:
return # don't search more than once
if not self._valid_search():
return
self.states.attributes.query = attributes.slots.query
es_query = self.states.attributes.query
if self.states.attributes.query == 'skills':
es_query = 'search for skills' # get our own skill
number_of_hits, skills = get_es_skills(es_query, MAX_SKILLS)
logger.info(f"Searching for {self.attributes.query}, got {number_of_hits} hits.")
attributes.skills = skills
attributes.number_of_hits = number_of_hits
attributes.skill_cursor = 0 if skills else None
attributes.searched = True
attributes.first_time_presenting_results = True
def m_no_query_search(self) -> bool:
"""Amazon sent us a search intent without a query
or maybe the user said "I want to find ..." and took too long to finish"""
return not self.attributes.slots.query or self.attributes.slots.query == 'find'
def m_no_result(self) -> bool:
return self.attributes.query and not self.m_has_result()
def m_has_result(self) -> bool:
return self.attributes.query is not None and self.attributes.skills is not None and len(
self.attributes.skills) > 0
def m_has_result_and_query(self) -> bool:
return self.m_has_result() and not self.m_no_query_search()
def m_has_nth(self) -> bool:
return self.m_has_result() and \
len(self.attributes.skills) > self.attributes.nth_as_index >= 0
def m_set_nth(self) -> None:
self.attributes.skill_cursor = self.attributes.nth_as_index
self.attributes.first_time_presenting_results = False
def m_set_next(self) -> None:
"""Go to next skill"""
self.attributes.skill_cursor += 1
self.attributes.first_time_presenting_results = False
def m_has_next(self) -> bool:
return self.m_has_result() and \
self.attributes.skill_cursor + 1 < len(self.attributes.skills)
def m_set_previous(self) -> None:
"""Go to previous skill"""
self.attributes.skill_cursor -= 1
self.attributes.first_time_presenting_results = False
def m_has_previous(self) -> bool:
return self.m_has_result() and self.attributes.skill_cursor > 0
| alexafsm-master | tests/skillsearch/policy.py |
# -*- coding: utf-8 -*-
| alexafsm-master | tests/skillsearch/__init__.py |
"""Client that handles query to elasticsearch"""
import string
from typing import List, Tuple
from elasticsearch_dsl import Search
from alexafsm.test_helpers import recordable as rec
from elasticsearch_dsl.response import Response
from tests.skillsearch.skill_settings import SkillSettings
from tests.skillsearch.skill import Skill, INDEX
from tests.skillsearch.dynamodb import DynamoDB
es_search: Search = Search(index=INDEX).source(excludes=['html'])
def get_es_skills(query: str, top_n: int, category: str = None, keyphrase: str = None) -> Tuple[int, List[Skill]]:
"""Return the total number of hits and the top_n skills"""
result = get_es_results(query, category, keyphrase).to_dict()
return result['hits']['total'], [Skill.from_es(h) for h in result['hits']['hits'][:top_n]]
def recordable(func):
def _get_record_dir():
return SkillSettings().get_record_dir()
def _is_playback():
return SkillSettings().playback
def _is_record():
return SkillSettings().record
return rec(_get_record_dir, _is_playback, _is_record)(func)
@recordable
def get_es_results(query: str, category: str, keyphrase: str) -> Response:
results = _get_es_results(query, category, keyphrase, strict=True)
if len(results.hits) == 0:
# relax constraints a little
return _get_es_results(query, category, keyphrase, strict=False)
else:
return results
def _get_es_results(query: str, category: str, keyphrase: str, strict: bool) -> Response:
skill_search = es_search
if category:
skill_search = skill_search.query('match',
category=string.capwords(category)
.replace(' And ', ' & ')
.replace('Movies & Tv', 'Movies & TV'))
if keyphrase:
skill_search = skill_search.query('match', keyphrases=keyphrase)
if query:
operator = 'and' if strict else 'or'
skill_search = skill_search.query('multi_match',
query=query,
fields=['name', 'description', 'usages', 'keyphrases'],
minimum_should_match='50%',
operator=operator) \
.highlight('description', order='score', pre_tags=['*'], post_tags=['*']) \
.highlight('title', order='score', pre_tags=['*'], post_tags=['*']) \
.highlight('usages', order='score', pre_tags=['*'], post_tags=['*'])
return skill_search.execute()
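# Query sketch: with strict=True the multi_match uses the 'and' operator, so a
# hypothetical query such as 'order pizza' must match both terms somewhere across
# name/description/usages/keyphrases; get_es_results falls back to the looser 'or'
# operator only when the strict pass returns no hits, and matched snippets come
# back wrapped in '*' markers via the highlight clauses.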
@recordable
def get_user_info(user_id: str, request_id: str) -> dict: # NOQA
"""Get information of user with user_id from dynamodb. request_id is simply there so that we can
record different responses from dynamodb for the same user during playback"""
return DynamoDB().get_user_info(user_id)
@recordable
def register_new_user(user_id: str):
DynamoDB().register_new_user(user_id)
| alexafsm-master | tests/skillsearch/clients.py |
from alexafsm.states import with_transitions, States as StatesBase
from alexafsm import response
from alexafsm import amazon_intent
from tests.skillsearch.skill import Skill
from tests.skillsearch.intent import NTH_SKILL, PREVIOUS_SKILL, NEXT_SKILL, NEW_SEARCH, \
DESCRIBE_RATINGS
from tests.skillsearch.session_attributes import SessionAttributes, ENGLISH_NUMBERS
MAX_SKILLS = 6
SKILL_NAME = "Skill Search"
DEFAULT_PROMPT = "What skill would you like to find?"
HELP = f"{SKILL_NAME} helps you search for skills. You can ask questions such as:" \
f" how do i order pizza, or, I want to meditate." \
f" For each question, {SKILL_NAME} only retrieves the most relevant skills." \
f" In order to use a skill you find, you must first exit {SKILL_NAME} and then tell Alexa" \
f" to open that skill." \
f" {DEFAULT_PROMPT}"
HEAR_MORE = "Would you like to hear more about it?"
IS_THAT_ALL = "Will that be all?"
def _you_asked_for(query: str):
return f"You asked for {query}. "
def _get_verbal_skill(skill: Skill) -> str:
"""Get the natural language representation of a skill """
return skill.name
def _get_verbal_ratings(skill: Skill, say_no_reviews: bool = True) -> str:
"""Get a verbal description of the rating for a skill
say_no_reviews: if there are no reviews, this will mention that explicitly
"""
if skill.num_ratings > 0:
return f"has an average rating of {skill.avg_rating} from {skill.num_ratings} reviews"
if say_no_reviews: # there are no reviews, and we want to tell the user that explicitly
return "has no reviews at this time"
return "" # there are no reviews, but we don't need to tell the user that
def _get_highlights(skill: Skill):
"""Get highlights for a skill"""
if 'highlight' in skill.meta:
return '\n'.join([h for _, hs in skill.meta.highlight.to_dict().items() for h in hs])
return skill.description
class States(StatesBase):
"""
A collection of methods that generate responses based on the current session attributes.
Each method corresponds to a state of the FSM.
"""
session_attributes_cls = SessionAttributes
skill_name = SKILL_NAME
default_prompt = DEFAULT_PROMPT
# states to exit on when user requests Alexa to stop talking
EXIT_ON_STOP_STATES = ['no_result', 'search_prompt', 'is_that_all', 'bad_navigate',
'no_query_search']
# states to continue on when user requests Alexa to stop talking
CONTINUE_ON_STOP_STATES = ['describing', 'has_result', 'describe_ratings']
# states to prompt user for new search when user requests Alexa to stop talking
PROMPT_ON_STOP_STATES = ['initial', 'helping']
# initial is its own special thing -- don't exit when interrupting the initial help message
def initial(self) -> response.Response:
if self.attributes.first_time:
welcome_speech = f"Welcome to {self.skill_name}. {HELP}"
else:
welcome_speech = f"Welcome to {self.skill_name}, {self.default_prompt}"
return response.Response(
speech=welcome_speech,
reprompt=self.default_prompt
)
@with_transitions({'trigger': amazon_intent.HELP, 'source': '*'})
def helping(self) -> response.Response:
return response.Response(
speech=HELP,
reprompt=DEFAULT_PROMPT
)
@with_transitions(
{
'trigger': NEW_SEARCH,
'source': '*',
'conditions': 'm_no_query_search'
}
)
def no_query_search(self) -> response.Response:
"""No query specified, ask for query"""
return response.Response(
speech=f"Please say what it is that you want to do. For example, 'I want to buy "
f"flowers'. Or, 'I want to get a ride.'",
reprompt=DEFAULT_PROMPT
)
@with_transitions(
{
'trigger': NEW_SEARCH,
'source': '*',
'prepare': 'm_search',
'conditions': 'm_no_result'
})
def no_result(self) -> response.Response:
"""No results, ask for rephrase or help"""
return response.Response(
speech=f"{_you_asked_for(self.attributes.query)},"
f" I could not find any such skills. Please rephrase, or say"
f" help me, for help.",
reprompt=DEFAULT_PROMPT
)
@with_transitions(
{
'trigger': NEW_SEARCH,
'source': '*',
'prepare': 'm_search',
'conditions': 'm_has_result_and_query'
},
{
'trigger': NTH_SKILL,
'source': '*',
'conditions': 'm_has_nth',
'after': 'm_set_nth'
},
{
'trigger': PREVIOUS_SKILL,
'source': '*',
'conditions': 'm_has_previous',
'after': 'm_set_previous'
},
{
'trigger': NEXT_SKILL,
'source': '*',
'conditions': 'm_has_next',
'after': 'm_set_next'
},
{
'trigger': amazon_intent.NO,
'source': 'has_result',
'conditions': 'm_has_next',
'after': 'm_set_next'
}
)
def has_result(self) -> response.Response:
"""Offer a preview of a skill"""
attributes = self.attributes
query = attributes.query
skill = attributes.skill
asked_for_speech = ''
if attributes.first_time_presenting_results:
asked_for_speech = _you_asked_for(query)
if attributes.number_of_hits == 1:
skill_position_speech = 'The only skill I found is'
else:
skill_position_speech = f'The {ENGLISH_NUMBERS[attributes.skill_cursor]} skill is'
if attributes.first_time_presenting_results:
if attributes.number_of_hits > MAX_SKILLS:
num_hits = f'Here are the top {MAX_SKILLS} results.'
else:
num_hits = f'I found {len(attributes.skills)} skills.'
skill_position_speech = f'{num_hits} {skill_position_speech}'
return response.Response(
speech=f"{asked_for_speech} "
f" {skill_position_speech} {_get_verbal_skill(skill)}."
f" {HEAR_MORE}",
card=f"Search for {query}",
card_content=f"""
Top result: {skill.name}
{_get_highlights(skill)}
""",
reprompt=DEFAULT_PROMPT
)
@with_transitions(
{
'trigger': NTH_SKILL,
'source': '*',
'unless': 'm_has_nth'
},
{
'trigger': PREVIOUS_SKILL,
'source': '*',
'unless': 'm_has_previous'
},
{
'trigger': NEXT_SKILL,
'source': '*',
'unless': 'm_has_next'
},
{
'trigger': amazon_intent.NO,
'source': 'has_result',
'unless': 'm_has_next'
}
)
def bad_navigate(self) -> response.Response:
"""Bad navigation (first, second, third, previous, next)"""
attributes = self.attributes
if not attributes.skills:
if attributes.query:
speech = f"I did not find any skills for query {attributes.query}."
else:
speech = f"To navigate to a skill, please search first. {HELP}"
elif attributes.intent == PREVIOUS_SKILL:
speech = "There is no previous skill. I am currently at skill number one."
elif attributes.intent == NEXT_SKILL:
speech = f"Sorry, there is no next skill. How else can I help you?"
elif attributes.intent == amazon_intent.NO:
speech = f"There are no more results. Please try a different search phrase."
else: # nth skill
nth = attributes.nth_as_index
if nth >= 0:
speech = f"You asked for skill {nth + 1}. I found only " \
f"{len(attributes.skills)} skills for the query {attributes.query}."
else:
speech = f"Sorry, I'm not sure which skill you want to go to. Please rephrase. " \
f"For example, tell me about skill 3."
return response.Response(
speech=speech,
reprompt=DEFAULT_PROMPT
)
@with_transitions(
{
'trigger': DESCRIBE_RATINGS,
'source': '*',
'conditions': 'm_has_result'
}
)
def describe_ratings(self):
"""
when we've found a skill that the user might like and the user wants to know how
well-liked it is
"""
skill = self.attributes.skill
return response.Response(
speech=f"{skill.name} {_get_verbal_ratings(skill)}."
f" Would you like to hear more about this skill?",
reprompt="Would you like to hear more about this skill?"
)
@with_transitions(
{
'trigger': amazon_intent.NO,
'source': ['describing', 'is_that_all'],
},
{
'trigger': amazon_intent.CANCEL,
'source': PROMPT_ON_STOP_STATES
},
{
'trigger': amazon_intent.STOP,
'source': PROMPT_ON_STOP_STATES
}
)
def search_prompt(self) -> response.Response:
"""when we're asking the user to conduct a new search"""
return response.Response(
speech=DEFAULT_PROMPT,
reprompt=DEFAULT_PROMPT
)
@with_transitions(
{
'trigger': amazon_intent.YES,
'source': ['has_result', 'describe_ratings']
}
)
def describing(self) -> response.Response:
"""Describe a skill, used in response generator"""
skill = self.attributes.skill
if skill.num_ratings > 0:
rating_str = f"{skill.avg_rating} (from {skill.num_ratings} reviews)"
else:
rating_str = "No reviews yet"
interrupt_hint = ""
if not self.attributes.said_interrupt:
interrupt_hint = "Okay, interrupt me anytime by saying 'Alexa.'"
self.attributes.said_interrupt = True
return response.Response(
speech=f"{interrupt_hint} {skill.name}."
f" {skill.short_description}",
card=skill.name,
card_content=f"""
Creator: {skill.creator}
Category: {skill.category}
Average rating: {rating_str}
{skill.description}
""",
image=skill.image_url,
reprompt=IS_THAT_ALL
)
@with_transitions(
{
'trigger': amazon_intent.NO,
'source': 'describe_ratings'
},
{
'trigger': amazon_intent.CANCEL,
'source': CONTINUE_ON_STOP_STATES
},
{
'trigger': amazon_intent.STOP,
'source': CONTINUE_ON_STOP_STATES
}
)
def is_that_all(self) -> response.Response:
"""when we want to see if the user is done with the skill"""
return response.Response(
speech=f"Okay, {IS_THAT_ALL}",
reprompt=IS_THAT_ALL
)
@with_transitions(
{
'trigger': amazon_intent.YES,
'source': ['describing', 'is_that_all']
},
{
'trigger': amazon_intent.CANCEL,
'source': EXIT_ON_STOP_STATES
},
{
'trigger': amazon_intent.STOP,
'source': EXIT_ON_STOP_STATES
},
{
'trigger': NEW_SEARCH,
'source': '*',
'conditions': 'm_searching_for_exit'
}
)
def exiting(self) -> response.Response:
return response.end(SKILL_NAME)
| alexafsm-master | tests/skillsearch/states.py |
from collections import namedtuple
from typing import List
from alexafsm.session_attributes import SessionAttributes as SessionAttributesBase, INITIAL_STATE
from tests.skillsearch.skill import Skill
Slots = namedtuple('Slots', ['query', 'nth'])
NUMBER_SUFFIXES = {'st', 'nd', 'rd', 'th'}
ENGLISH_NUMBERS = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth',
'ninth', 'tenth']
class SessionAttributes(SessionAttributesBase):
slots_cls = Slots
not_sent_fields = ['searched', 'first_time']
def __init__(self,
intent: str = None,
slots=None,
state: str = INITIAL_STATE,
query: str = None,
skills: List[Skill] = None,
number_of_hits: int = None,
skill_cursor: int = None,
searched: bool = False,
first_time: bool = True,
first_time_presenting_results: bool = False,
said_interrupt: bool = False):
super().__init__(intent, slots, state)
self.query = query
if skills:
self.skills = [Skill.from_es(skill) for skill in skills]
else:
self.skills = None
self.number_of_hits = number_of_hits
self.skill_cursor = skill_cursor
self.searched = searched
self.first_time = first_time
self.first_time_presenting_results = first_time_presenting_results
self.said_interrupt = said_interrupt
@property
def nth_as_index(self):
"""Return -1 if we cannot figure out what index was actually meant by the user"""
if not self.slots.nth: # Amazon's intent system might not give us anything
return -1
elif self.slots.nth in ENGLISH_NUMBERS:
return ENGLISH_NUMBERS.index(self.slots.nth)
else:
try:
# Amazon SOMETIMES gives us "5th" instead of "fifth", so we can try to parse it!
# this is not always the case -- it gives us "second" instead of 2nd
if self.slots.nth[-2:] in NUMBER_SUFFIXES:
return int(self.slots.nth[:-2]) - 1
else:
# otherwise probably directly a number in string format
return int(self.slots.nth) - 1
except ValueError:
return -1
@property
def skill(self):
return self.skills[self.skill_cursor]
| alexafsm-master | tests/skillsearch/session_attributes.py |
from alexafsm.utils import print_machine
from tests.skillsearch.policy import Policy
if __name__ == '__main__':
print_machine(Policy.initialize())
| alexafsm-master | tests/skillsearch/bin/print_machine.py |
import sys
from alexafsm.utils import graph
from tests.skillsearch.policy import Policy
if __name__ == '__main__':
png_file = sys.argv[1]
print(f"Drawing FSM graph for {Policy} to {png_file}")
graph(Policy, png_file)
| alexafsm-master | tests/skillsearch/bin/graph.py |
"""
Module that monkey-patches json module when it's imported so JSONEncoder.default() automatically
checks for a special "to_json()" method and uses it to encode the object if found.
See http://stackoverflow.com/a/18561055/257583
"""
from json import JSONEncoder
def _default(self, obj):
return getattr(obj.__class__, "to_json", _default.default)(obj)
def nested_get_obj_or_json(obj):
if hasattr(obj, 'to_json'):
return nested_get_obj_or_json(obj.to_json())
elif isinstance(obj, (list, tuple)):
return [nested_get_obj_or_json(e) for e in obj]
elif isinstance(obj, dict):
return {k: nested_get_obj_or_json(v) for k, v in obj.items()}
else:
return obj
def _iterencode(self, obj, _one_shot=False):
gen = _iterencode.iterencode(nested_get_obj_or_json(obj), _one_shot)
for chunk in gen:
yield chunk
_default.default = JSONEncoder().default # Save unmodified default.
_iterencode.iterencode = JSONEncoder().iterencode
JSONEncoder.default = _default # replacement
JSONEncoder.iterencode = _iterencode # replacement
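# Effect sketch (the Point class below is illustrative, not part of alexafsm):
# import json
# class Point:
#     def __init__(self, x): self.x = x
#     def to_json(self): return {'x': self.x}
# json.dumps(Point(1))  # -> '{"x": 1}' instead of raising TypeError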
| alexafsm-master | alexafsm/make_json_serializable.py |
import importlib
import logging
import os
import json
from transitions import MachineError
from voicelabs import VoiceInsights
from alexafsm import response
from alexafsm.session_attributes import SessionAttributes
from alexafsm.states import States
logger = logging.getLogger(__name__)
class Policy:
"""
Finite state machine that describes how to interact with the user.
Uses the lightweight FSM library at https://github.com/tyarkoni/transitions
"""
# "Abstract" class properties to be overwritten/set in inherited classes.
states_cls = None
def __init__(self, states: States, request: dict = None, with_graph: bool = False):
self.states = states
self.state = states.attributes.state
state_names, transitions = type(states).get_states_transitions()
machine_cls = \
importlib.import_module('transitions.extensions').GraphMachine if with_graph else \
importlib.import_module('transitions').Machine
self.machine = machine_cls(
model=self,
states=state_names,
initial=states.attributes.state,
auto_transitions=False
)
for transition in transitions:
self.machine.add_transition(**transition)
@property
def attributes(self) -> SessionAttributes:
return self.states.attributes
@classmethod
def initialize(cls, request: dict = None, with_graph: bool = False):
"""Construct a policy in initial state"""
states = cls.states_cls.from_request(request=request)
return cls(states, request, with_graph)
def get_current_state_response(self) -> response.Response:
resp_function = getattr(type(self.states), self.state)
return resp_function(self.states)
def execute(self) -> response.Response:
"""Called when the user specifies an intent for this skill"""
intent = self.attributes.intent
previous_state = self.state
# backup attributes in case of invalid FSM transition
attributes_backup = self.attributes
try:
# trigger is added by transitions library
self.trigger(intent)
current_state = self.state
logger.info(f"Changed from {previous_state} to {current_state} through {intent}")
self.attributes.state = current_state
return self.get_current_state_response()
except MachineError as exception:
logger.error(str(exception))
# reset attributes
self.states.attributes = attributes_backup
return response.NOT_UNDERSTOOD
def handle(self, request: dict, voice_insights: VoiceInsights = None,
record_filename: str = None):
"""
Handle an Alexa POST request in JSON format.
If record_filename is specified, the request/response pair is appended to that file
for later playback in tests
"""
(req, session) = (request['request'], request['session'])
logger.info(f"applicationId = {session['application']['applicationId']}")
request_type = req['type']
logger.info(
f"{request_type}, requestId: {req['requestId']}, sessionId: {session['sessionId']}")
if voice_insights:
app_token = os.environ['VOICELABS_API_KEY']
voice_insights.initialize(app_token, session)
if request_type == 'LaunchRequest':
resp = self.get_current_state_response()
elif request_type == 'IntentRequest':
intent = req['intent']
self.states.attributes = type(self.states.attributes).from_request(request)
self.state = self.attributes.state
resp = self.execute()
resp = resp._replace(session_attributes=self.states.attributes)
if voice_insights:
voice_insights.track(intent_name=intent['name'], intent_request=req,
response=resp.to_json())
elif request_type == 'SessionEndedRequest':
resp = response.end(self.states.skill_name)
else:
raise Exception(f'Unknown request type {request_type}')
if record_filename:
with open(record_filename, 'a') as record_file:
record_file.write(json.dumps([request, resp]) + '\n')
return resp
| alexafsm-master | alexafsm/policy.py |
# -*- coding: utf-8 -*-
__author__ = """Allen AI"""
__email__ = '[email protected]'
__version__ = '0.1.11'
| alexafsm-master | alexafsm/__init__.py |
from collections import namedtuple
from alexafsm.session_attributes import SessionAttributes
class Response(namedtuple('Response', ['speech', 'card', 'card_content', 'reprompt', 'should_end',
'image', 'session_attributes'])):
"""Pythonic representation of the response to be sent to Alexa"""
def __new__(cls, speech: str, reprompt: str, card: str = None, should_end: bool = False,
card_content: str = None, image: str = None,
session_attributes: SessionAttributes = SessionAttributes()):
if not card_content:
card_content = speech
return super(Response, cls) \
.__new__(cls, speech=speech, card=card, reprompt=reprompt, should_end=should_end,
card_content=card_content.strip(), image=image,
session_attributes=session_attributes)
def to_json(self):
"""Build entire Alexa response as a JSON-serializable dictionary"""
card = None
if self.card:
if self.image:
card = {
'type': 'Standard',
'image': {
'largeImageUrl': self.image
},
'title': self.card,
'text': self.card_content
}
else:
card = {
'type': 'Simple',
'title': self.card,
'content': self.card_content
}
resp = {
'outputSpeech': {
'type': 'PlainText',
'text': self.speech
},
'card': card,
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': self.reprompt
}
},
'shouldEndSession': self.should_end
}
if not resp['card']:
del resp['card']
return {
'version': '1.0',
'sessionAttributes': self.session_attributes,
'response': resp
}
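# Shape sketch of the dictionary to_json() returns (values illustrative):
# {'version': '1.0',
#  'sessionAttributes': <serialized via SessionAttributes.to_json()>,
#  'response': {'outputSpeech': {'type': 'PlainText', 'text': '...'},
#               'card': {...},  # omitted entirely when self.card is None
#               'reprompt': {'outputSpeech': {...}},
#               'shouldEndSession': False}}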
def end(skill_name: str) -> Response:
return Response(
speech=f"Thank you for using {skill_name}",
reprompt="",
should_end=True)
NOT_UNDERSTOOD = Response(
speech="I did not understand your response, please say it differently.",
reprompt="Please respond in a different way."
)
| alexafsm-master | alexafsm/response.py |
"""
Amazon built-in intents
"""
YES = 'AMAZON.YesIntent'
NO = 'AMAZON.NoIntent'
HELP = 'AMAZON.HelpIntent'
STOP = 'AMAZON.StopIntent'
CANCEL = 'AMAZON.CancelIntent'
| alexafsm-master | alexafsm/amazon_intent.py |
import inspect
import json
from typing import Set
from alexafsm.policy import Policy
from alexafsm.session_attributes import INITIAL_STATE
def validate(policy: Policy, schema_file: str, ignore_intents: Set[str] = ()):
"""Check for inconsistencies in policy definition"""
with open(schema_file, mode='r') as f:
schema = json.load(f)
intents = [intent['intent'] for intent in schema['intents']
if intent['intent'] not in ignore_intents]
states = policy.machine.states
events = []
states_have_out_transitions = set()
states_have_in_transitions = set()
funcs = [func for func, _ in inspect.getmembers(type(policy), predicate=inspect.isfunction)]
def _validate_transition(tran):
assert tran.source in states, f"Invalid source state: {tran.source}!!"
assert tran.dest in states, f"Invalid dest state: {tran.dest}!!"
assert all(prep in funcs for prep in tran.prepare), \
f"Invalid prepare function: {tran.prepare}!!"
assert all(cond.func in funcs for cond in tran.conditions), \
f"Invalid conditions function: {tran.conditions}!!"
assert all(after in funcs for after in tran.after), \
f"Invalid after function: {tran.after}!!"
states_have_in_transitions.add(tran.dest)
states_have_out_transitions.add(tran.source)
def _validate_ambiguous_transition(event, source, trans):
unconditional_trans = [tran for tran in trans if not tran.conditions]
assert len(unconditional_trans) < 2,\
f"Event {event} for source {source} has multiple unconditional out-bound transitions:" \
f" {', '.join([tran.dest for tran in trans])}"
for _, event in policy.machine.events.items():
assert event.name in intents, f"Invalid event/trigger: {event.name}!"
events.append(event.name)
for source, trans in event.transitions.items():
for transition in trans:
assert source in states, f"Invalid source state: {source}!!"
_validate_transition(transition)
_validate_ambiguous_transition(event.name, source, trans)
intent_diff = set(intents) - set(events)
assert not intent_diff, f"Some intents are not handled: {intent_diff}"
in_diff = set(states) - states_have_in_transitions - {INITIAL_STATE}
out_diff = set(states) - states_have_out_transitions - {'exiting'}
assert not in_diff, f"Some states have no inbound transitions: {in_diff}"
assert not out_diff, f"Some states have no outbound transitions: {out_diff}"
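# Usage sketch (policy class and schema path are hypothetical): run validate
# against the intent schema uploaded to the Alexa developer console to catch
# drift between code and schema:
# validate(MyPolicy.initialize(), 'speech_assets/intent_schema.json',
#          ignore_intents={'AMAZON.PauseIntent'})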
def print_machine(policy: Policy):
def _print_transition(tran):
print(f"\t\t{tran.source} -> {tran.dest}", end='')
if tran.prepare:
print(f", prepare: {tran.prepare}", end='')
if tran.conditions:
print(f", conditions: {[cond.func for cond in tran.conditions]}", end='')
print()
print(f"Machine states:\n\t{', '.join(policy.machine.states)}")
print("\nEvents and transitions:\n")
for _, event in policy.machine.events.items():
print(f"Event: {event.name}")
for source, trans in event.transitions.items():
print(f"\tSource: {source}")
for transition in trans:
_print_transition(transition)
def graph(policy_cls, png_file):
policy = policy_cls.initialize(with_graph=True)
policy.graph.draw(png_file, prog='dot')
def get_dialogs(request, response):
"""Return key information about a conversation turn as stored in a pair of request & response"""
request_id = request['request']['requestId']
# there are no attributes when starting a new conversation from the alexa device
session_attributes = request['session'].get('attributes', {})
from_state = session_attributes.get('state', INITIAL_STATE)
intent = response['sessionAttributes'].get('intent', None)
slots = response['sessionAttributes'].get('slots', None)
to_state = response['sessionAttributes'].get('state', None)
speech = response['response']['outputSpeech']['text']
return request_id, from_state, intent, slots, to_state, speech
def events_states_transitions(policy: Policy):
"""
Return events, states, and transitions of a policy.
Initial and exiting states are excluded
"""
all_states = set(policy.machine.states.keys())
all_states.remove(INITIAL_STATE)
all_states.remove('exiting')
all_events = set()
all_transitions = set()
for _, e in policy.machine.events.items():
all_events.add(e.name)
for source, transitions in e.transitions.items():
for transition in transitions:
all_transitions.add((source, transition.dest))
return all_events, all_states, all_transitions
def used_events_states_transitions(recorded_requests_responses):
"""Based on recorded data, compute and return the used events, states, and transitions"""
used_events = set()
used_states = set()
used_transitions = set()
dialog_data = [get_dialogs(request, response)
for request, response in recorded_requests_responses]
for request_id, from_state, intent, slots, to_state, speech in dialog_data:
used_events.add(intent)
used_states.add(from_state)
used_states.add(to_state)
used_transitions.add((from_state, to_state))
used_states.discard(INITIAL_STATE)
return used_events, used_states, used_transitions
def unused_events_states_transitions(policy, recorded_requests_responses):
"""Based on recorded data and a policy, compute and return the unused events, states, and
transitions"""
all_events, all_states, all_transitions = events_states_transitions(policy)
used_events, used_states, used_transitions = used_events_states_transitions(recorded_requests_responses)
unused_states = all_states - used_states
unused_events = all_events - used_events
unused_transitions = all_transitions - used_transitions
return unused_events, unused_states, unused_transitions
| alexafsm-master | alexafsm/utils.py |
import hashlib
import pickle
import json
import inspect
def recordable(record_dir_function, is_playback, is_record):
"""
Record the results of functions that depend on external resources.
record_dir_function returns the directory to save results to / read results from.
is_record() and is_playback() are consulted on every call: when is_record() is
true, the result is pickled to disk; when is_playback() is true, the previously
pickled result is loaded instead of calling the real function.
"""
def real_decorator(external_resource_function):
def cache_filename(args, kwargs):
args_as_str = str(args)
kwargs_as_str = str(sorted(kwargs.items()))
full_args = f"{args_as_str}{kwargs_as_str}"
hashed_args = hashlib.md5(full_args.encode('utf-8')).hexdigest()
return f'{external_resource_function.__name__}_{hashed_args}.pickle'
def wrapper(*args, **kwargs):
# handle default kwargs where some kwarg may or may not be set with default values
fullargspec = inspect.getfullargspec(external_resource_function)
arguments, defaults = fullargspec.args, fullargspec.defaults
if defaults:
default_kwargs = {k: v for k, v in zip(arguments[-len(defaults):], defaults)}
full_kwargs = {**default_kwargs, **kwargs}
else:
full_kwargs = kwargs
filename = f'{record_dir_function()}/{cache_filename(args, full_kwargs)}'
if is_playback():
# pickle should already exist, read from disk
with open(filename, 'rb') as pickle_file:
return pickle.load(pickle_file)
elif is_record():
# pickle doesn't yet exist, cache it
result = external_resource_function(*args, **kwargs)
with open(filename, 'wb') as pickle_file:
pickle.dump(result, pickle_file)
return result
else:
return external_resource_function(*args, **kwargs)
return wrapper
return real_decorator
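# Usage sketch (all names hypothetical): wrap a function that touches an external
# service, then flip the record/playback flags to capture and replay its results:
# @recordable(lambda: 'recordings', lambda: PLAYBACK, lambda: RECORD)
# def fetch(query: str): ...
# With RECORD true, each distinct (args, kwargs) result is pickled to
# 'recordings/fetch_<md5-of-args>.pickle'; with PLAYBACK true it is unpickled
# from that file instead of calling the real service.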
def get_requests_responses(record_file: str):
"""
Return the (json) requests and expected responses from previous recordings.
These are returned in the same order they were recorded in.
"""
with open(record_file) as f:
lines = f.readlines()
return [tuple(json.loads(line)) for line in lines]
| alexafsm-master | alexafsm/test_helpers.py |
import inspect
from alexafsm.session_attributes import SessionAttributes, INITIAL_STATE
TRANSITIONS = 'transitions'
def with_transitions(*transitions):
"""
Attach the provided transitions to the decorated state: transitions that specify a 'dest' are out-bound (this state becomes the source); otherwise they are in-bound (this state becomes the dest)
"""
def decorate(state):
def transition_enabled_state(*args):
return state(*args)
full_transitions = []
for transition in transitions:
if 'dest' in transition:
assert 'source' not in transition, f"Expected no source to be specified:" \
f" {transition['source']}"
transition['source'] = state.__name__
else:
assert 'source' in transition, f"Expected a source to be specified: {transition}"
transition['dest'] = state.__name__
full_transitions.append(transition)
setattr(transition_enabled_state, TRANSITIONS, full_transitions)
return transition_enabled_state
return decorate
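# Usage sketch (trigger and state names illustrative): omitting 'dest' makes the
# decorated state the destination of an in-bound transition:
# @with_transitions({'trigger': 'AMAZON.YesIntent', 'source': 'asking'})
# def confirmed(self) -> response.Response: ...
# Policy later collects these declarations via States.get_states_transitions().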
class States:
"""
A collection of methods that generate responses based on the current session attributes
Each method corresponds to a state of the FSM
"""
# "Abstract" class property to be overwritten/set in inherited classes.
session_attributes_cls = None
skill_name = "Allen A.I."
default_prompt = "How can I help?"
def __init__(self, attributes: SessionAttributes):
self.attributes = attributes
@classmethod
def from_request(cls, request):
"""
Factory constructor from an Alexa request.
"""
attributes = cls.session_attributes_cls.from_request(request)
return cls(attributes)
@classmethod
def get_states_transitions(cls):
"""
Get all states & transitions specified in the states via with_transitions decoration
"""
states = []
transitions = []
for state, method in inspect.getmembers(cls, predicate=inspect.isfunction):
if state != '__init__':
states.append(state)
transitions += getattr(method, TRANSITIONS, [])
states.append(INITIAL_STATE)
return states, transitions
| alexafsm-master | alexafsm/states.py |
INITIAL_STATE = 'initial'
class SessionAttributes:
"""Base class for all session attributes that keep track of the state of conversation"""
# "Abstract" class properties to be overridden/set in inherited classes
# Inherited classes should override this like so:
# Slots = namedtuple('Slots', ['foo', 'bar'])
#
# slots_cls = Slots
slots_cls = None
# List of (big) fields we don't want to send back to Alexa
not_sent_fields = []
def __init__(self, intent: str = None, slots=None, state: str = INITIAL_STATE):
self.intent = intent
self.slots = slots
self.state = state
@classmethod
def from_request(cls, request: dict) -> 'SessionAttributes':
"""Construct session attributes object from request"""
slots_cls = cls.slots_cls
none_slots = _slots_from_dict(slots_cls, slots=None)
if not request:
return cls(slots=none_slots)
res = cls(**(request['session'].get('attributes', {})))
if 'intent' not in request['request']: # e.g., when starting skill at beginning of session
return res
intent = request['request']['intent']
res.intent = intent['name']
if res.state is None:
res.state = INITIAL_STATE
# namedtuple deserialization from list of values
old_slots = slots_cls._make(res.slots) if res.slots else none_slots
# Construct new slots from the request
new_slots = _slots_from_dict(slots_cls, intent.get('slots'))
# Update the slots attribute, using new slot values regardless if they exist or not (skill
# should be able to tell if Amazon successfully extracted the intent slot or not)
def _extract(f):
v = getattr(new_slots, f)
return v
res.slots = slots_cls(**{f: _extract(f) for f in old_slots._fields})
return res
def to_json(self) -> dict:
"""
When sending the payload to Alexa, do not send fields that are too big.
"""
return {k: v for k, v in self.__dict__.items()
if k not in self.not_sent_fields and v is not None}
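# Round-trip sketch: to_json() drops not_sent_fields and None values; whatever
# remains is echoed back by Alexa in request['session']['attributes'] on the next
# turn, and from_request() feeds that dict back into __init__ as keyword arguments.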
def _slots_from_dict(slots_cls, slots: dict):
"""
Given the definition for Slots that Amazon gives us, return the Slots tuple
>>> from collections import namedtuple
>>> Slots = namedtuple('Slots', ['love', 'money'])
>>> slots = {'Love': {'name': 'Love'}, 'Money': {'name': 'Money', 'value': 'lots'}}
>>> _slots_from_dict(Slots, slots)
Slots(love=None, money='lots')
>>> _slots_from_dict(Slots, None)
Slots(love=None, money=None)
>>> _slots_from_dict(Slots, {})
Slots(love=None, money=None)
"""
def _value_of(some_dict: dict) -> str:
return some_dict['value'] if some_dict and 'value' in some_dict else None
# Construct a dict with lower-cased slotnames as keys and values as values
kwargs = dict((k.lower(), _value_of(v)) for k, v in slots.items()) if slots else {}
# Construct a not-None namedtuple Slot object where attributes can be None
return slots_cls(**{field: kwargs.get(field, None) for field in slots_cls._fields})
| alexafsm-master | alexafsm/session_attributes.py |
from datetime import datetime
from pathlib import Path
with open("VERSION") as version_file:
VERSION = version_file.read().strip()
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int = -1
for i in range(len(lines)):
line = lines[i]
if line.startswith("## Unreleased"):
insert_index = i + 1
elif line.startswith(f"## [v{VERSION}]"):
print("CHANGELOG already up-to-date")
return
elif line.startswith("## [v"):
break
if insert_index < 0:
raise RuntimeError("Couldn't find 'Unreleased' section")
lines.insert(insert_index, "\n")
lines.insert(
insert_index + 1,
f"## [v{VERSION}](https://github.com/allenai/beaker-action/releases/tag/v{VERSION}) - "
f"{datetime.now().strftime('%Y-%m-%d')}\n",
)
with changelog.open("w") as f:
f.writelines(lines)
if __name__ == "__main__":
main()
| beaker-action-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List, Optional
import packaging.version
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change_log_notes() -> str:
in_current_section = False
current_section_notes: List[str] = []
with open("CHANGELOG.md") as changelog:
for line in changelog:
if line.startswith("## "):
if line.startswith("## Unreleased"):
continue
if line.startswith(f"## [{TAG}]"):
in_current_section = True
continue
break
if in_current_section:
if line.startswith("### Added"):
line = ADDED_HEADER + "\n"
elif line.startswith("### Changed"):
line = CHANGED_HEADER + "\n"
elif line.startswith("### Fixed"):
line = FIXED_HEADER + "\n"
elif line.startswith("### Removed"):
line = REMOVED_HEADER + "\n"
current_section_notes.append(line)
assert current_section_notes
return "## What's new\n\n" + "".join(current_section_notes).strip() + "\n"
def get_commit_history() -> str:
new_version = packaging.version.parse(TAG)
# Get all tags sorted by version, latest first.
all_tags = os.popen("git tag -l --sort=-version:refname 'v*'").read().split("\n")
# Out of `all_tags`, find the latest previous version so that we can collect all
# commits between that version and the new version we're about to publish.
# Note that we ignore pre-releases unless the new version is also a pre-release.
last_tag: Optional[str] = None
for tag in all_tags:
if not tag.strip(): # could be blank line
continue
version = packaging.version.parse(tag)
if new_version.pre is None and version.pre is not None:
continue
if version < new_version:
last_tag = tag
break
if last_tag is not None:
commits = os.popen(f"git log {last_tag}..{TAG}^ --oneline --first-parent").read()
else:
commits = os.popen("git log --oneline --first-parent").read()
return "## Commits\n\n" + commits
def main():
print(get_change_log_notes())
print(get_commit_history())
if __name__ == "__main__":
main()
| beaker-action-main | scripts/release_notes.py |
import numpy as np
from collections import Counter
import string
import re
import argparse
import os
import json
import nltk
from matplotlib_venn import venn2
from matplotlib import pyplot as plt
class Question:
def __init__(self, id, question_text, ground_truth, model_names):
self.id = id
self.question_text = self.normalize_answer(question_text)
self.question_head_ngram = []
self.question_tokens = nltk.word_tokenize(self.question_text)
for nc in range(3):
self.question_head_ngram.append(' '.join(self.question_tokens[0:nc]))
self.ground_truth = ground_truth
self.model_names = model_names
self.em = np.zeros(2)
self.f1 = np.zeros(2)
self.answer_text = []
def add_answers(self, answer_model_1, answer_model_2):
self.answer_text.append(answer_model_1)
self.answer_text.append(answer_model_2)
self.eval()
def eval(self):
for model_count in range(2):
self.em[model_count] = self.metric_max_over_ground_truths(self.exact_match_score, self.answer_text[model_count], self.ground_truth)
self.f1[model_count] = self.metric_max_over_ground_truths(self.f1_score, self.answer_text[model_count], self.ground_truth)
def normalize_answer(self, s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(self, prediction, ground_truth):
prediction_tokens = self.normalize_answer(prediction).split()
ground_truth_tokens = self.normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(self, prediction, ground_truth):
return (self.normalize_answer(prediction) == self.normalize_answer(ground_truth))
def metric_max_over_ground_truths(self, metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
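# Worked example of the token-level F1 above (inputs illustrative): prediction
# 'a red car' vs ground truth 'red bus' normalizes to ['red', 'car'] vs
# ['red', 'bus']; one shared token gives precision = recall = 1/2, so
# f1 = 2 * 0.5 * 0.5 / (0.5 + 0.5) = 0.5.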
def safe_dict_access(in_dict, in_key, default_string='some junk string'):
if in_key in in_dict:
return in_dict[in_key]
else:
return default_string
def aggregate_metrics(questions):
total = len(questions)
exact_match = np.zeros(2)
f1_scores = np.zeros(2)
for mc in range(2):
exact_match[mc] = 100 * np.sum(np.array([questions[x].em[mc] for x in questions])) / total
f1_scores[mc] = 100 * np.sum(np.array([questions[x].f1[mc] for x in questions])) / total
model_names = questions[list(questions.keys())[0]].model_names
print('\nAggregate Scores:')
for model_count in range(2):
print('Model {0} EM = {1:.2f}'.format(model_names[model_count], exact_match[model_count]))
print('Model {0} F1 = {1:.2f}'.format(model_names[model_count], f1_scores[model_count]))
def venn_diagram(questions, output_dir):
em_model1_ids = [x for x in questions if questions[x].em[0] == 1]
em_model2_ids = [x for x in questions if questions[x].em[1] == 1]
model_names = questions[list(questions.keys())[0]].model_names
print('\nVenn diagram')
correct_model1 = em_model1_ids
correct_model2 = em_model2_ids
correct_model1_and_model2 = list(set(em_model1_ids).intersection(set(em_model2_ids)))
correct_model1_and_not_model2 = list(set(em_model1_ids) - set(em_model2_ids))
correct_model2_and_not_model1 = list(set(em_model2_ids) - set(em_model1_ids))
print('{0} answers correctly = {1}'.format(model_names[0], len(correct_model1)))
print('{0} answers correctly = {1}'.format(model_names[1], len(correct_model2)))
print('Both answer correctly = {0}'.format(len(correct_model1_and_model2)))
print('{0} correct & {1} incorrect = {2}'.format(model_names[0], model_names[1], len(correct_model1_and_not_model2)))
print('{0} correct & {1} incorrect = {2}'.format(model_names[1], model_names[0], len(correct_model2_and_not_model1)))
plt.clf()
venn_diagram_plot = venn2(
subsets=(len(correct_model1_and_not_model2), len(correct_model2_and_not_model1), len(correct_model1_and_model2)),
set_labels=('{0} correct'.format(model_names[0]), '{0} correct'.format(model_names[1]), 'Both correct'),
set_colors=('r', 'b'),
alpha=0.3,
normalize_to=1
)
plt.savefig(os.path.join(output_dir, 'venn_diagram.png'))
plt.close()
return correct_model1, correct_model2, correct_model1_and_model2, correct_model1_and_not_model2, correct_model2_and_not_model1
def get_head_ngrams(questions, num_grams):
head_ngrams = []
for question in questions.values():
head_ngrams.append(question.question_head_ngram[num_grams])
return head_ngrams
def get_head_ngram_frequencies(questions, head_ngrams, num_grams):
head_ngram_frequencies = {}
for current_ngram in head_ngrams:
head_ngram_frequencies[current_ngram] = 0
for question in questions.values():
head_ngram_frequencies[question.question_head_ngram[num_grams]] += 1
return head_ngram_frequencies
def get_head_ngram_statistics(questions, correct_model1, correct_model2, correct_model1_and_model2, correct_model1_and_not_model2, correct_model2_and_not_model1, output_dir, num_grams=2, top_count=25):
# Head ngram statistics
head_ngrams = get_head_ngrams(questions, num_grams)
# Get head_ngram_frequencies (hnf)
hnf_all = get_head_ngram_frequencies(questions, head_ngrams, num_grams)
hnf_correct_model1 = get_head_ngram_frequencies({qid: questions[qid] for qid in correct_model1}, head_ngrams, num_grams)
hnf_correct_model2 = get_head_ngram_frequencies({qid: questions[qid] for qid in correct_model2}, head_ngrams, num_grams)
hnf_correct_model1_and_model2 = get_head_ngram_frequencies({qid: questions[qid] for qid in correct_model1_and_model2}, head_ngrams, num_grams)
hnf_correct_model1_and_not_model2 = get_head_ngram_frequencies({qid: questions[qid] for qid in correct_model1_and_not_model2}, head_ngrams, num_grams)
hnf_correct_model2_and_not_model1 = get_head_ngram_frequencies({qid: questions[qid] for qid in correct_model2_and_not_model1}, head_ngrams, num_grams)
sorted_bigrams_all = sorted(hnf_all.items(), key=lambda x: x[1], reverse=True)
top_bigrams = [x[0] for x in sorted_bigrams_all[0:top_count]]
counts_total = [hnf_all[x] for x in top_bigrams]
counts_model1 = [hnf_correct_model1[x] for x in top_bigrams]
counts_model2 = [hnf_correct_model2[x] for x in top_bigrams]
counts_model1_and_model2 = [hnf_correct_model1_and_model2[x] for x in top_bigrams]
counts_model1_and_not_model2 = [hnf_correct_model1_and_not_model2[x] for x in top_bigrams]
counts_model2_and_not_model1 = [hnf_correct_model2_and_not_model1[x] for x in top_bigrams]
top_bigrams_with_counts = []
for cc in range(len(top_bigrams)):
top_bigrams_with_counts.append('{0} ({1})'.format(top_bigrams[cc], counts_total[cc]))
plt.clf()
fig, ax = plt.subplots(figsize=(6, 10))
ylocs = list(range(top_count))
counts_model1_percent = 100 * np.array(counts_model1) / np.array(counts_total)
plt.barh([top_count - x for x in ylocs], counts_model1_percent, height=0.4, alpha=0.5, color='#EE3224', label=top_bigrams)
counts_model2_percent = 100 * np.array(counts_model2) / np.array(counts_total)
plt.barh([top_count - x+0.4 for x in ylocs], counts_model2_percent, height=0.4, alpha=0.5, color='#2432EE', label=top_bigrams )
ax.set_yticks([top_count - x + 0.4 for x in ylocs])
ax.set_yticklabels(top_bigrams_with_counts)
ax.set_ylim([0.5, top_count+1])
ax.set_xlim([0, 100])
plt.subplots_adjust(left=0.28, right=0.9, top=0.9, bottom=0.1)
plt.xlabel('Percentage of questions with correct answers')
plt.ylabel('Top N-grams')
plt.savefig(os.path.join(output_dir, 'ngram_stats_{0}.png'.format(num_grams)))
plt.close()
def read_json(filename):
with open(filename) as filepoint:
data = json.load(filepoint)
return data
def compare_models(dataset_file, predictions_m1_file, predictions_m2_file, output_dir, name_m1='Model 1', name_m2='Model 2'):
dataset = read_json(dataset_file)['data']
predictions_m1 = read_json(predictions_m1_file)
predictions_m2 = read_json(predictions_m2_file)
# Read in data
total = 0
questions = {}
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
current_question = Question(id=qa['id'], question_text=qa['question'], ground_truth=list(map(lambda x: x['text'], qa['answers'])), model_names=[name_m1, name_m2])
current_question.add_answers(answer_model_1=safe_dict_access(predictions_m1, qa['id']), answer_model_2=safe_dict_access(predictions_m2, qa['id']))
questions[current_question.id] = current_question
total += 1
model_names = questions[list(questions.keys())[0]].model_names
print('Read in {0} questions'.format(total))
# Aggregate scores
aggregate_metrics(questions)
# Venn diagram
correct_model1, correct_model2, correct_model1_and_model2, correct_model1_and_not_model2, correct_model2_and_not_model1 = venn_diagram(questions, output_dir=output_dir)
# Head Unigram statistics
get_head_ngram_statistics(questions, correct_model1, correct_model2, correct_model1_and_model2, correct_model1_and_not_model2,
correct_model2_and_not_model1, output_dir, num_grams=1, top_count=10)
# Head Bigram statistics
get_head_ngram_statistics(questions, correct_model1, correct_model2, correct_model1_and_model2, correct_model1_and_not_model2,
correct_model2_and_not_model1, output_dir, num_grams=2, top_count=10)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compare two QA models')
parser.add_argument('-dataset', action='store', dest='dataset', required=True, help='Dataset file')
parser.add_argument('-model1', action='store', dest='predictions_m1', required=True, help='Prediction file for model 1')
parser.add_argument('-model2', action='store', dest='predictions_m2', required=True, help='Prediction file for model 2')
parser.add_argument('-name1', action='store', dest='name_m1', help='Name for model 1')
parser.add_argument('-name2', action='store', dest='name_m2', help='Name for model 2')
parser.add_argument('-output', action='store', dest='output_dir', help='Output directory for visualizations')
results = parser.parse_args()
if results.name_m1 is not None and results.name_m2 is not None:
compare_models(dataset_file=results.dataset, predictions_m1_file=results.predictions_m1, predictions_m2_file=results.predictions_m2, output_dir=results.output_dir, name_m1=results.name_m1, name_m2=results.name_m2)
else:
compare_models(dataset_file=results.dataset, predictions_m1_file=results.predictions_m1, predictions_m2_file=results.predictions_m2, output_dir=results.output_dir)
| bi-att-flow-master | visualization/compare_models.py |
import json
from json import encoder
import os
import tensorflow as tf
from tree.evaluator import Evaluation
from my.utils import short_floats
class GraphHandler(object):
def __init__(self, config):
self.config = config
self.saver = tf.train.Saver()
self.writer = None
self.save_path = os.path.join(config.save_dir, config.model_name)
def initialize(self, sess):
if self.config.load:
self._load(sess)
else:
sess.run(tf.initialize_all_variables())
if self.config.mode == 'train':
self.writer = tf.train.SummaryWriter(self.config.log_dir, graph=tf.get_default_graph())
def save(self, sess, global_step=None):
self.saver.save(sess, self.save_path, global_step=global_step)
def _load(self, sess):
config = self.config
if config.load_step > 0:
save_path = os.path.join(config.save_dir, "{}-{}".format(config.model_name, config.load_step))
else:
save_dir = config.save_dir
checkpoint = tf.train.get_checkpoint_state(save_dir)
assert checkpoint is not None, "cannot load checkpoint at {}".format(save_dir)
save_path = checkpoint.model_checkpoint_path
print("Loading saved model from {}".format(save_path))
self.saver.restore(sess, save_path)
def add_summary(self, summary, global_step):
self.writer.add_summary(summary, global_step)
def add_summaries(self, summaries, global_step):
for summary in summaries:
self.add_summary(summary, global_step)
def dump_eval(self, e, precision=2):
assert isinstance(e, Evaluation)
path = os.path.join(self.config.eval_dir, "{}-{}.json".format(e.data_type, str(e.global_step).zfill(6)))
with open(path, 'w') as fh:
json.dump(short_floats(e.dict, precision), fh)
| bi-att-flow-master | tree/graph_handler.py |
bi-att-flow-master | tree/__init__.py |
|
import nltk
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import BasicLSTMCell
from my.nltk_utils import tree2matrix, find_max_f1_subtree, load_compressed_tree, set_span
from tree.read_data import DataSet
from my.tensorflow import exp_mask, get_initializer
from my.tensorflow.nn import linear
from my.tensorflow.rnn import bidirectional_dynamic_rnn, dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, NoOpCell, TreeRNNCell
class Model(object):
def __init__(self, config):
self.config = config
self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
# Define forward inputs here
N, M, JX, JQ, VW, VC, W, H = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size, config.max_tree_height
self.x = tf.placeholder('int32', [None, M, JX], name='x')
self.cx = tf.placeholder('int32', [None, M, JX, W], name='cx')
self.q = tf.placeholder('int32', [None, JQ], name='q')
self.cq = tf.placeholder('int32', [None, JQ, W], name='cq')
self.tx = tf.placeholder('int32', [None, M, H, JX], name='tx')
self.tx_edge_mask = tf.placeholder('bool', [None, M, H, JX, JX], name='tx_edge_mask')
self.y = tf.placeholder('bool', [None, M, H, JX], name='y')
self.is_train = tf.placeholder('bool', [], name='is_train')
# Define misc
# Forward outputs / loss inputs
self.logits = None
self.yp = None
self.var_list = None
# Loss outputs
self.loss = None
self._build_forward()
self._build_loss()
self.ema_op = self._get_ema_op()
self.summary = tf.merge_all_summaries()
def _build_forward(self):
config = self.config
N, M, JX, JQ, VW, VC, d, dc, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, \
config.char_emb_size, config.max_word_size
H = config.max_tree_height
x_mask = self.x > 0
q_mask = self.q > 0
tx_mask = self.tx > 0 # [N, M, H, JX]
with tf.variable_scope("char_emb"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, M, JX, W, dc]
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq) # [N, JQ, W, dc]
filter = tf.get_variable("filter", shape=[1, config.char_filter_height, dc, d], dtype='float')
bias = tf.get_variable("bias", shape=[d], dtype='float')
strides = [1, 1, 1, 1]
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
xxc = tf.nn.conv2d(Acx, filter, strides, "VALID") + bias # [N*M, JX, W/filter_stride, d]
qqc = tf.nn.conv2d(Acq, filter, strides, "VALID") + bias # [N, JQ, W/filter_stride, d]
xxc = tf.reshape(tf.reduce_max(tf.nn.relu(xxc), 2), [-1, M, JX, d])
qqc = tf.reshape(tf.reduce_max(tf.nn.relu(qqc), 2), [-1, JQ, d])
with tf.variable_scope("word_emb"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, config.word_emb_size], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, config.word_emb_size], dtype='float')
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, M, JX, d]
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q) # [N, JQ, d]
# Ax = linear([Ax], d, False, scope='Ax_reshape')
# Aq = linear([Aq], d, False, scope='Aq_reshape')
xx = tf.concat(3, [xxc, Ax]) # [N, M, JX, 2d]
qq = tf.concat(2, [qqc, Aq]) # [N, JQ, 2d]
D = d + config.word_emb_size
with tf.variable_scope("pos_emb"):
pos_emb_mat = tf.get_variable("pos_emb_mat", shape=[config.pos_vocab_size, d], dtype='float')
Atx = tf.nn.embedding_lookup(pos_emb_mat, self.tx) # [N, M, H, JX, d]
cell = BasicLSTMCell(D, state_is_tuple=True)
cell = SwitchableDropoutWrapper(cell, self.is_train, input_keep_prob=config.input_keep_prob)
x_len = tf.reduce_sum(tf.cast(x_mask, 'int32'), 2) # [N, M]
q_len = tf.reduce_sum(tf.cast(q_mask, 'int32'), 1) # [N]
with tf.variable_scope("rnn"):
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, xx, x_len, dtype='float', scope='start') # [N, M, JX, 2d]
tf.get_variable_scope().reuse_variables()
(fw_us, bw_us), (_, (fw_u, bw_u)) = bidirectional_dynamic_rnn(cell, cell, qq, q_len, dtype='float', scope='start') # [N, J, d], [N, d]
u = (fw_u + bw_u) / 2.0
h = (fw_h + bw_h) / 2.0
with tf.variable_scope("h"):
no_op_cell = NoOpCell(D)
tree_rnn_cell = TreeRNNCell(no_op_cell, d, tf.reduce_max)
initial_state = tf.reshape(h, [N*M*JX, D]) # [N*M*JX, D]
inputs = tf.concat(4, [Atx, tf.cast(self.tx_edge_mask, 'float')]) # [N, M, H, JX, d+JX]
inputs = tf.reshape(tf.transpose(inputs, [0, 1, 3, 2, 4]), [N*M*JX, H, d + JX]) # [N*M*JX, H, d+JX]
length = tf.reshape(tf.reduce_sum(tf.cast(tx_mask, 'int32'), 2), [N*M*JX])
# length = tf.reshape(tf.reduce_sum(tf.cast(tf.transpose(tx_mask, [0, 1, 3, 2]), 'float'), 3), [-1])
h, _ = dynamic_rnn(tree_rnn_cell, inputs, length, initial_state=initial_state) # [N*M*JX, H, D]
h = tf.transpose(tf.reshape(h, [N, M, JX, H, D]), [0, 1, 3, 2, 4]) # [N, M, H, JX, D]
u = tf.expand_dims(tf.expand_dims(tf.expand_dims(u, 1), 1), 1) # [N, 1, 1, 1, 4d]
dot = linear(h * u, 1, True, squeeze=True, scope='dot') # [N, M, H, JX]
# self.logits = tf.reshape(dot, [N, M * H * JX])
self.logits = tf.reshape(exp_mask(dot, tx_mask), [N, M * H * JX]) # [N, M, H, JX]
self.yp = tf.reshape(tf.nn.softmax(self.logits), [N, M, H, JX])
def _build_loss(self):
config = self.config
N, M, JX, JQ, VW, VC = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size
H = config.max_tree_height
ce_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
self.logits, tf.cast(tf.reshape(self.y, [N, M * H * JX]), 'float')))
tf.add_to_collection('losses', ce_loss)
self.loss = tf.add_n(tf.get_collection('losses'), name='loss')
tf.scalar_summary(self.loss.op.name, self.loss)
tf.add_to_collection('ema/scalar', self.loss)
def _get_ema_op(self):
ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema_op = ema.apply(tf.get_collection("ema/scalar") + tf.get_collection("ema/histogram"))
for var in tf.get_collection("ema/scalar"):
ema_var = ema.average(var)
tf.scalar_summary(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/histogram"):
ema_var = ema.average(var)
tf.histogram_summary(ema_var.op.name, ema_var)
return ema_op
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self):
return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
assert isinstance(batch, DataSet)
config = self.config
N, M, JX, JQ, VW, VC, d, W, H = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size, \
config.max_tree_height
feed_dict = {}
x = np.zeros([N, M, JX], dtype='int32')
cx = np.zeros([N, M, JX, W], dtype='int32')
q = np.zeros([N, JQ], dtype='int32')
cq = np.zeros([N, JQ, W], dtype='int32')
tx = np.zeros([N, M, H, JX], dtype='int32')
tx_edge_mask = np.zeros([N, M, H, JX, JX], dtype='bool')
feed_dict[self.x] = x
feed_dict[self.cx] = cx
feed_dict[self.q] = q
feed_dict[self.cq] = cq
feed_dict[self.tx] = tx
feed_dict[self.tx_edge_mask] = tx_edge_mask
feed_dict[self.is_train] = is_train
def _get_word(word):
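            # Fall back through case variants; unseen words map to index 1 (UNK).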
d = batch.shared['word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d:
return d[each]
return 1
def _get_char(char):
d = batch.shared['char2idx']
if char in d:
return d[char]
return 1
def _get_pos(tree):
d = batch.shared['pos2idx']
if tree.label() in d:
return d[tree.label()]
return 1
for i, xi in enumerate(batch.data['x']):
for j, xij in enumerate(xi):
for k, xijk in enumerate(xij):
x[i, j, k] = _get_word(xijk)
for i, cxi in enumerate(batch.data['cx']):
for j, cxij in enumerate(cxi):
for k, cxijk in enumerate(cxij):
for l, cxijkl in enumerate(cxijk):
cx[i, j, k, l] = _get_char(cxijkl)
if l + 1 == config.max_word_size:
break
for i, qi in enumerate(batch.data['q']):
for j, qij in enumerate(qi):
q[i, j] = _get_word(qij)
for i, cqi in enumerate(batch.data['cq']):
for j, cqij in enumerate(cqi):
for k, cqijk in enumerate(cqij):
cq[i, j, k] = _get_char(cqijk)
if k + 1 == config.max_word_size:
break
for i, txi in enumerate(batch.data['stx']):
for j, txij in enumerate(txi):
txij_mat, txij_mask = tree2matrix(nltk.tree.Tree.fromstring(txij), _get_pos, row_size=H, col_size=JX)
tx[i, j, :, :], tx_edge_mask[i, j, :, :, :] = txij_mat, txij_mask
if supervised:
y = np.zeros([N, M, H, JX], dtype='bool')
feed_dict[self.y] = y
for i, yi in enumerate(batch.data['y']):
start_idx, stop_idx = yi
sent_idx = start_idx[0]
if start_idx[0] == stop_idx[0]:
span = [start_idx[1], stop_idx[1]]
else:
                    span = [start_idx[1], len(batch.data['x'][i][sent_idx])]
tree = nltk.tree.Tree.fromstring(batch.data['stx'][i][sent_idx])
set_span(tree)
best_subtree = find_max_f1_subtree(tree, span)
def _get_y(t):
return t == best_subtree
yij, _ = tree2matrix(tree, _get_y, H, JX, dtype='bool')
y[i, sent_idx, :, :] = yij
return feed_dict
| bi-att-flow-master | tree/model.py |
import os
from pprint import pprint
import tensorflow as tf
from tree.main import main as m
flags = tf.app.flags
flags.DEFINE_string("model_name", "tree", "Model name [tree]")
flags.DEFINE_string("data_dir", "data/squad", "Data dir [data/squad]")
flags.DEFINE_integer("run_id", 0, "Run ID [0]")
flags.DEFINE_integer("batch_size", 128, "Batch size [128]")
flags.DEFINE_float("init_lr", 0.5, "Initial learning rate [0.5]")
flags.DEFINE_integer("num_epochs", 50, "Total number of epochs for training [50]")
flags.DEFINE_integer("num_steps", 0, "Number of steps [0]")
flags.DEFINE_integer("eval_num_batches", 100, "eval num batches [100]")
flags.DEFINE_integer("load_step", 0, "load step [0]")
flags.DEFINE_integer("early_stop", 4, "early stop [4]")
flags.DEFINE_string("mode", "test", "train | test | forward [test]")
flags.DEFINE_boolean("load", True, "load saved data? [True]")
flags.DEFINE_boolean("progress", True, "Show progress? [True]")
flags.DEFINE_integer("log_period", 100, "Log period [100]")
flags.DEFINE_integer("eval_period", 1000, "Eval period [1000]")
flags.DEFINE_integer("save_period", 1000, "Save Period [1000]")
flags.DEFINE_float("decay", 0.9, "Exponential moving average decay [0.9]")
flags.DEFINE_boolean("draft", False, "Draft for quick testing? [False]")
flags.DEFINE_integer("hidden_size", 32, "Hidden size [32]")
flags.DEFINE_float("input_keep_prob", 0.5, "Input keep prob [0.5]")
flags.DEFINE_integer("char_emb_size", 8, "Char emb size [8]")
flags.DEFINE_integer("char_filter_height", 5, "Char filter height [5]")
flags.DEFINE_float("wd", 0.0001, "Weight decay [0.001]")
flags.DEFINE_bool("lower_word", True, "lower word [True]")
flags.DEFINE_bool("dump_eval", True, "dump eval? [True]")
flags.DEFINE_integer("word_count_th", 100, "word count th [100]")
flags.DEFINE_integer("char_count_th", 500, "char count th [500]")
flags.DEFINE_integer("sent_size_th", 64, "sent size th [64]")
flags.DEFINE_integer("num_sents_th", 8, "num sents th [8]")
flags.DEFINE_integer("ques_size_th", 64, "ques size th [64]")
flags.DEFINE_integer("word_size_th", 16, "word size th [16]")
flags.DEFINE_integer("tree_height_th", 16, "tree height th [16]")
def main(_):
config = flags.FLAGS
config.out_dir = os.path.join("out", config.model_name, str(config.run_id).zfill(2))
m(config)
if __name__ == "__main__":
tf.app.run()
| bi-att-flow-master | tree/cli.py |
import json
import os
import random
import itertools
import math
import nltk
from my.nltk_utils import load_compressed_tree
from my.utils import index
class DataSet(object):
def __init__(self, data, data_type, shared=None, valid_idxs=None):
total_num_examples = len(next(iter(data.values())))
self.data = data # e.g. {'X': [0, 1, 2], 'Y': [2, 3, 4]}
self.data_type = data_type
self.shared = shared
self.valid_idxs = range(total_num_examples) if valid_idxs is None else valid_idxs
self.num_examples = len(self.valid_idxs)
def get_batches(self, batch_size, num_batches=None, shuffle=False):
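        # Build an index stream long enough to cover num_batches batches
        # (re-shuffling at each epoch boundary when shuffle=True), then slice
        # it into batch_size-sized groups below.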
num_batches_per_epoch = int(math.ceil(self.num_examples / batch_size))
if num_batches is None:
num_batches = num_batches_per_epoch
num_epochs = int(math.ceil(num_batches / num_batches_per_epoch))
idxs = itertools.chain.from_iterable(random.sample(self.valid_idxs, len(self.valid_idxs))
if shuffle else self.valid_idxs
for _ in range(num_epochs))
for _ in range(num_batches):
batch_idxs = tuple(itertools.islice(idxs, batch_size))
batch_data = {}
for key, val in self.data.items():
if key.startswith('*'):
assert self.shared is not None
shared_key = key[1:]
batch_data[shared_key] = [index(self.shared[shared_key], val[idx]) for idx in batch_idxs]
else:
batch_data[key] = list(map(val.__getitem__, batch_idxs))
batch_ds = DataSet(batch_data, self.data_type, shared=self.shared)
yield batch_idxs, batch_ds
class SquadDataSet(DataSet):
def __init__(self, data, data_type, shared=None, valid_idxs=None):
super(SquadDataSet, self).__init__(data, data_type, shared=shared, valid_idxs=valid_idxs)
def load_metadata(config, data_type):
metadata_path = os.path.join(config.data_dir, "metadata_{}.json".format(data_type))
with open(metadata_path, 'r') as fh:
metadata = json.load(fh)
for key, val in metadata.items():
config.__setattr__(key, val)
return metadata
def read_data(config, data_type, ref, data_filter=None):
data_path = os.path.join(config.data_dir, "data_{}.json".format(data_type))
shared_path = os.path.join(config.data_dir, "shared_{}.json".format(data_type))
with open(data_path, 'r') as fh:
data = json.load(fh)
with open(shared_path, 'r') as fh:
shared = json.load(fh)
num_examples = len(next(iter(data.values())))
if data_filter is None:
valid_idxs = range(num_examples)
else:
mask = []
keys = data.keys()
values = data.values()
for vals in zip(*values):
each = {key: val for key, val in zip(keys, vals)}
mask.append(data_filter(each, shared))
valid_idxs = [idx for idx in range(len(mask)) if mask[idx]]
print("Loaded {}/{} examples from {}".format(len(valid_idxs), num_examples, data_type))
shared_path = os.path.join(config.out_dir, "shared.json")
if not ref:
word_counter = shared['lower_word_counter'] if config.lower_word else shared['word_counter']
char_counter = shared['char_counter']
pos_counter = shared['pos_counter']
shared['word2idx'] = {word: idx + 2 for idx, word in
enumerate(word for word, count in word_counter.items()
if count > config.word_count_th)}
shared['char2idx'] = {char: idx + 2 for idx, char in
enumerate(char for char, count in char_counter.items()
if count > config.char_count_th)}
shared['pos2idx'] = {pos: idx + 2 for idx, pos in enumerate(pos_counter.keys())}
NULL = "-NULL-"
UNK = "-UNK-"
shared['word2idx'][NULL] = 0
shared['word2idx'][UNK] = 1
shared['char2idx'][NULL] = 0
shared['char2idx'][UNK] = 1
shared['pos2idx'][NULL] = 0
shared['pos2idx'][UNK] = 1
json.dump({'word2idx': shared['word2idx'], 'char2idx': shared['char2idx'],
'pos2idx': shared['pos2idx']}, open(shared_path, 'w'))
else:
new_shared = json.load(open(shared_path, 'r'))
for key, val in new_shared.items():
shared[key] = val
data_set = DataSet(data, data_type, shared=shared, valid_idxs=valid_idxs)
return data_set
def get_squad_data_filter(config):
def data_filter(data_point, shared):
assert shared is not None
rx, rcx, q, cq, y = (data_point[key] for key in ('*x', '*cx', 'q', 'cq', 'y'))
x, cx, stx = shared['x'], shared['cx'], shared['stx']
if len(q) > config.ques_size_th:
return False
xi = x[rx[0]][rx[1]]
if len(xi) > config.num_sents_th:
return False
if any(len(xij) > config.sent_size_th for xij in xi):
return False
stxi = stx[rx[0]][rx[1]]
if any(nltk.tree.Tree.fromstring(s).height() > config.tree_height_th for s in stxi):
return False
return True
return data_filter
def update_config(config, data_sets):
config.max_num_sents = 0
config.max_sent_size = 0
config.max_ques_size = 0
config.max_word_size = 0
config.max_tree_height = 0
for data_set in data_sets:
data = data_set.data
shared = data_set.shared
for idx in data_set.valid_idxs:
rx = data['*x'][idx]
q = data['q'][idx]
sents = shared['x'][rx[0]][rx[1]]
trees = map(nltk.tree.Tree.fromstring, shared['stx'][rx[0]][rx[1]])
config.max_tree_height = max(config.max_tree_height, max(tree.height() for tree in trees))
config.max_num_sents = max(config.max_num_sents, len(sents))
config.max_sent_size = max(config.max_sent_size, max(map(len, sents)))
config.max_word_size = max(config.max_word_size, max(len(word) for sent in sents for word in sent))
if len(q) > 0:
config.max_ques_size = max(config.max_ques_size, len(q))
config.max_word_size = max(config.max_word_size, max(len(word) for word in q))
config.max_word_size = min(config.max_word_size, config.word_size_th)
config.char_vocab_size = len(data_sets[0].shared['char2idx'])
config.word_emb_size = len(next(iter(data_sets[0].shared['word2vec'].values())))
config.word_vocab_size = len(data_sets[0].shared['word2idx'])
config.pos_vocab_size = len(data_sets[0].shared['pos2idx'])
| bi-att-flow-master | tree/read_data.py |
import tensorflow as tf
from tree.model import Model
class Trainer(object):
def __init__(self, config, model):
assert isinstance(model, Model)
self.config = config
self.model = model
self.opt = tf.train.AdagradOptimizer(config.init_lr)
self.loss = model.get_loss()
self.var_list = model.get_var_list()
self.global_step = model.get_global_step()
self.ema_op = model.ema_op
self.summary = model.summary
self.grads = self.opt.compute_gradients(self.loss, var_list=self.var_list)
opt_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
# Define train op
with tf.control_dependencies([opt_op]):
self.train_op = tf.group(self.ema_op)
def get_train_op(self):
return self.train_op
def step(self, sess, batch, get_summary=False):
assert isinstance(sess, tf.Session)
feed_dict = self.model.get_feed_dict(batch, True)
if get_summary:
loss, summary, train_op = \
sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)
else:
loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
summary = None
return loss, summary, train_op
| bi-att-flow-master | tree/trainer.py |
import argparse
import json
import math
import os
import shutil
from pprint import pprint
import tensorflow as tf
from tqdm import tqdm
import numpy as np
from tree.evaluator import AccuracyEvaluator2, Evaluator
from tree.graph_handler import GraphHandler
from tree.model import Model
from tree.trainer import Trainer
from tree.read_data import load_metadata, read_data, get_squad_data_filter, update_config
def main(config):
set_dirs(config)
if config.mode == 'train':
_train(config)
elif config.mode == 'test':
_test(config)
elif config.mode == 'forward':
_forward(config)
else:
raise ValueError("invalid value for 'mode': {}".format(config.mode))
def _config_draft(config):
if config.draft:
config.num_steps = 10
config.eval_period = 10
config.log_period = 1
config.save_period = 10
config.eval_num_batches = 1
def _train(config):
# load_metadata(config, 'train') # this updates the config file according to metadata file
data_filter = get_squad_data_filter(config)
train_data = read_data(config, 'train', config.load, data_filter=data_filter)
dev_data = read_data(config, 'dev', True, data_filter=data_filter)
update_config(config, [train_data, dev_data])
_config_draft(config)
word2vec_dict = train_data.shared['lower_word2vec'] if config.lower_word else train_data.shared['word2vec']
word2idx_dict = train_data.shared['word2idx']
idx2vec_dict = {word2idx_dict[word]: vec for word, vec in word2vec_dict.items() if word in word2idx_dict}
print("{}/{} unique words have corresponding glove vectors.".format(len(idx2vec_dict), len(word2idx_dict)))
emb_mat = np.array([idx2vec_dict[idx] if idx in idx2vec_dict
else np.random.multivariate_normal(np.zeros(config.word_emb_size), np.eye(config.word_emb_size))
for idx in range(config.word_vocab_size)])
config.emb_mat = emb_mat
# construct model graph and variables (using default graph)
pprint(config.__flags, indent=2)
model = Model(config)
trainer = Trainer(config, model)
evaluator = AccuracyEvaluator2(config, model)
    graph_handler = GraphHandler(config) # controls all tensors and variables in the graph, including loading/saving
# Variables
sess = tf.Session()
graph_handler.initialize(sess)
# begin training
num_steps = config.num_steps or int(config.num_epochs * train_data.num_examples / config.batch_size)
max_acc = 0
noupdate_count = 0
global_step = 0
for _, batch in tqdm(train_data.get_batches(config.batch_size, num_batches=num_steps, shuffle=True), total=num_steps):
global_step = sess.run(model.global_step) + 1 # +1 because all calculations are done after step
get_summary = global_step % config.log_period == 0
loss, summary, train_op = trainer.step(sess, batch, get_summary=get_summary)
if get_summary:
graph_handler.add_summary(summary, global_step)
# Occasional evaluation and saving
if global_step % config.save_period == 0:
graph_handler.save(sess, global_step=global_step)
if global_step % config.eval_period == 0:
num_batches = math.ceil(dev_data.num_examples / config.batch_size)
if 0 < config.eval_num_batches < num_batches:
num_batches = config.eval_num_batches
e = evaluator.get_evaluation_from_batches(
sess, tqdm(dev_data.get_batches(config.batch_size, num_batches=num_batches), total=num_batches))
graph_handler.add_summaries(e.summaries, global_step)
if e.acc > max_acc:
max_acc = e.acc
noupdate_count = 0
else:
noupdate_count += 1
if noupdate_count == config.early_stop:
break
if config.dump_eval:
graph_handler.dump_eval(e)
if global_step % config.save_period != 0:
graph_handler.save(sess, global_step=global_step)
def _test(config):
test_data = read_data(config, 'test', True)
update_config(config, [test_data])
_config_draft(config)
pprint(config.__flags, indent=2)
model = Model(config)
evaluator = AccuracyEvaluator2(config, model)
    graph_handler = GraphHandler(config) # controls all tensors and variables in the graph, including loading/saving
sess = tf.Session()
graph_handler.initialize(sess)
num_batches = math.ceil(test_data.num_examples / config.batch_size)
if 0 < config.eval_num_batches < num_batches:
num_batches = config.eval_num_batches
e = evaluator.get_evaluation_from_batches(sess, tqdm(test_data.get_batches(config.batch_size, num_batches=num_batches), total=num_batches))
print(e)
if config.dump_eval:
graph_handler.dump_eval(e)
def _forward(config):
forward_data = read_data(config, 'forward', True)
_config_draft(config)
    pprint(config.__flags, indent=2)
model = Model(config)
evaluator = Evaluator(config, model)
    graph_handler = GraphHandler(config) # controls all tensors and variables in the graph, including loading/saving
sess = tf.Session()
graph_handler.initialize(sess)
num_batches = math.ceil(forward_data.num_examples / config.batch_size)
if 0 < config.eval_num_batches < num_batches:
num_batches = config.eval_num_batches
e = evaluator.get_evaluation_from_batches(sess, tqdm(forward_data.get_batches(config.batch_size, num_batches=num_batches), total=num_batches))
print(e)
if config.dump_eval:
graph_handler.dump_eval(e)
def set_dirs(config):
# create directories
if not config.load and os.path.exists(config.out_dir):
shutil.rmtree(config.out_dir)
config.save_dir = os.path.join(config.out_dir, "save")
config.log_dir = os.path.join(config.out_dir, "log")
config.eval_dir = os.path.join(config.out_dir, "eval")
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
if not os.path.exists(config.save_dir):
os.mkdir(config.save_dir)
    if not os.path.exists(config.log_dir):
        os.mkdir(config.log_dir)
    if not os.path.exists(config.eval_dir):
        os.mkdir(config.eval_dir)
def _get_args():
parser = argparse.ArgumentParser()
parser.add_argument("config_path")
return parser.parse_args()
class Config(object):
def __init__(self, **entries):
self.__dict__.update(entries)
def _run():
args = _get_args()
with open(args.config_path, 'r') as fh:
config = Config(**json.load(fh))
main(config)
if __name__ == "__main__":
_run()
| bi-att-flow-master | tree/main.py |
import numpy as np
import tensorflow as tf
from tree.read_data import DataSet
from my.nltk_utils import span_f1
class Evaluation(object):
def __init__(self, data_type, global_step, idxs, yp):
self.data_type = data_type
self.global_step = global_step
self.idxs = idxs
self.yp = yp
self.num_examples = len(yp)
self.dict = {'data_type': data_type,
'global_step': global_step,
'yp': yp,
'idxs': idxs,
'num_examples': self.num_examples}
self.summaries = None
def __repr__(self):
return "{} step {}".format(self.data_type, self.global_step)
def __add__(self, other):
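        # Returning self when other == 0 (together with __radd__) lets batch
        # evaluations be merged with a plain sum(), which starts from 0.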
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_yp = self.yp + other.yp
new_idxs = self.idxs + other.idxs
return Evaluation(self.data_type, self.global_step, new_idxs, new_yp)
def __radd__(self, other):
return self.__add__(other)
class LabeledEvaluation(Evaluation):
def __init__(self, data_type, global_step, idxs, yp, y):
super(LabeledEvaluation, self).__init__(data_type, global_step, idxs, yp)
self.y = y
self.dict['y'] = y
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_yp = self.yp + other.yp
new_y = self.y + other.y
new_idxs = self.idxs + other.idxs
return LabeledEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y)
class AccuracyEvaluation(LabeledEvaluation):
def __init__(self, data_type, global_step, idxs, yp, y, correct, loss):
super(AccuracyEvaluation, self).__init__(data_type, global_step, idxs, yp, y)
self.loss = loss
self.correct = correct
self.acc = sum(correct) / len(correct)
self.dict['loss'] = loss
self.dict['correct'] = correct
self.dict['acc'] = self.acc
loss_summary = tf.Summary(value=[tf.Summary.Value(tag='dev/loss', simple_value=self.loss)])
acc_summary = tf.Summary(value=[tf.Summary.Value(tag='dev/acc', simple_value=self.acc)])
self.summaries = [loss_summary, acc_summary]
def __repr__(self):
return "{} step {}: accuracy={}, loss={}".format(self.data_type, self.global_step, self.acc, self.loss)
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_idxs = self.idxs + other.idxs
new_yp = self.yp + other.yp
new_y = self.y + other.y
new_correct = self.correct + other.correct
new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
return AccuracyEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, new_correct, new_loss)
class Evaluator(object):
def __init__(self, config, model):
self.config = config
self.model = model
def get_evaluation(self, sess, batch):
idxs, data_set = batch
feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
global_step, yp = sess.run([self.model.global_step, self.model.yp], feed_dict=feed_dict)
yp = yp[:data_set.num_examples]
e = Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist())
return e
def get_evaluation_from_batches(self, sess, batches):
e = sum(self.get_evaluation(sess, batch) for batch in batches)
return e
class LabeledEvaluator(Evaluator):
def get_evaluation(self, sess, batch):
idxs, data_set = batch
feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
global_step, yp = sess.run([self.model.global_step, self.model.yp], feed_dict=feed_dict)
yp = yp[:data_set.num_examples]
y = feed_dict[self.model.y]
e = LabeledEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), y.tolist())
return e
class AccuracyEvaluator(LabeledEvaluator):
def get_evaluation(self, sess, batch):
idxs, data_set = batch
assert isinstance(data_set, DataSet)
feed_dict = self.model.get_feed_dict(data_set, False)
global_step, yp, loss = sess.run([self.model.global_step, self.model.yp, self.model.loss], feed_dict=feed_dict)
y = feed_dict[self.model.y]
yp = yp[:data_set.num_examples]
correct = [self.__class__.compare(yi, ypi) for yi, ypi in zip(y, yp)]
e = AccuracyEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), y.tolist(), correct, float(loss))
return e
@staticmethod
def compare(yi, ypi):
return int(np.argmax(yi)) == int(np.argmax(ypi))
class AccuracyEvaluator2(AccuracyEvaluator):
@staticmethod
def compare(yi, ypi):
i = int(np.argmax(yi.flatten()))
j = int(np.argmax(ypi.flatten()))
# print(i, j, i == j)
return i == j
class TempEvaluation(AccuracyEvaluation):
def __init__(self, data_type, global_step, idxs, yp, yp2, y, y2, correct, loss, f1s):
super(TempEvaluation, self).__init__(data_type, global_step, idxs, yp, y, correct, loss)
self.y2 = y2
self.yp2 = yp2
self.f1s = f1s
self.f1 = float(np.mean(f1s))
self.dict['y2'] = y2
self.dict['yp2'] = yp2
self.dict['f1s'] = f1s
self.dict['f1'] = self.f1
f1_summary = tf.Summary(value=[tf.Summary.Value(tag='dev/f1', simple_value=self.f1)])
self.summaries.append(f1_summary)
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_idxs = self.idxs + other.idxs
new_yp = self.yp + other.yp
new_yp2 = self.yp2 + other.yp2
new_y = self.y + other.y
new_y2 = self.y2 + other.y2
new_correct = self.correct + other.correct
new_f1s = self.f1s + other.f1s
new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
return TempEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_y, new_y2, new_correct, new_loss, new_f1s)
class TempEvaluator(LabeledEvaluator):
def get_evaluation(self, sess, batch):
idxs, data_set = batch
assert isinstance(data_set, DataSet)
feed_dict = self.model.get_feed_dict(data_set, False)
global_step, yp, yp2, loss = sess.run([self.model.global_step, self.model.yp, self.model.yp2, self.model.loss], feed_dict=feed_dict)
y, y2 = feed_dict[self.model.y], feed_dict[self.model.y2]
yp, yp2 = yp[:data_set.num_examples], yp2[:data_set.num_examples]
correct = [self.__class__.compare(yi, y2i, ypi, yp2i) for yi, y2i, ypi, yp2i in zip(y, y2, yp, yp2)]
f1s = [self.__class__.span_f1(yi, y2i, ypi, yp2i) for yi, y2i, ypi, yp2i in zip(y, y2, yp, yp2)]
e = TempEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), y.tolist(), y2.tolist(), correct, float(loss), f1s)
return e
@staticmethod
def compare(yi, y2i, ypi, yp2i):
i = int(np.argmax(yi.flatten()))
j = int(np.argmax(ypi.flatten()))
k = int(np.argmax(y2i.flatten()))
l = int(np.argmax(yp2i.flatten()))
# print(i, j, i == j)
return i == j and k == l
@staticmethod
def span_f1(yi, y2i, ypi, yp2i):
true_span = (np.argmax(yi.flatten()), np.argmax(y2i.flatten())+1)
pred_span = (np.argmax(ypi.flatten()), np.argmax(yp2i.flatten())+1)
f1 = span_f1(true_span, pred_span)
return f1
| bi-att-flow-master | tree/evaluator.py |
import shutil
from collections import OrderedDict
import http.server
import socketserver
import argparse
import json
import os
import numpy as np
from tqdm import tqdm
from jinja2 import Environment, FileSystemLoader
def bool_(string):
if string == 'True':
return True
elif string == 'False':
return False
else:
        raise ValueError("expected 'True' or 'False', got {!r}".format(string))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default='basic')
parser.add_argument("--data_type", type=str, default='dev')
parser.add_argument("--step", type=int, default=5000)
parser.add_argument("--template_name", type=str, default="visualizer.html")
parser.add_argument("--num_per_page", type=int, default=100)
parser.add_argument("--data_dir", type=str, default="data/squad")
parser.add_argument("--port", type=int, default=8000)
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--open", type=str, default='False')
parser.add_argument("--run_id", type=str, default="0")
args = parser.parse_args()
return args
def _decode(decoder, sent):
return " ".join(decoder[idx] for idx in sent)
def accuracy2_visualizer(args):
model_name = args.model_name
data_type = args.data_type
num_per_page = args.num_per_page
data_dir = args.data_dir
run_id = args.run_id.zfill(2)
step = args.step
    eval_path = os.path.join("out", model_name, run_id, "eval", "{}-{}.json".format(data_type, str(step).zfill(6)))
eval_ = json.load(open(eval_path, 'r'))
_id = 0
html_dir = "/tmp/list_results%d" % _id
while os.path.exists(html_dir):
_id += 1
html_dir = "/tmp/list_results%d" % _id
if os.path.exists(html_dir):
shutil.rmtree(html_dir)
os.mkdir(html_dir)
cur_dir = os.path.dirname(os.path.realpath(__file__))
templates_dir = os.path.join(cur_dir, 'templates')
env = Environment(loader=FileSystemLoader(templates_dir))
env.globals.update(zip=zip, reversed=reversed)
template = env.get_template(args.template_name)
data_path = os.path.join(data_dir, "data_{}.json".format(data_type))
shared_path = os.path.join(data_dir, "shared_{}.json".format(data_type))
data = json.load(open(data_path, 'r'))
shared = json.load(open(shared_path, 'r'))
rows = []
for i, (idx, yi, ypi) in enumerate(zip(*[eval_[key] for key in ('idxs', 'y', 'yp')])):
id_, q, rx = (data[key][idx] for key in ('ids', 'q', '*x'))
x = shared['x'][rx[0]][rx[1]]
ques = [" ".join(q)]
para = [[word for word in sent] for sent in x]
row = {
'id': id_,
'title': "Hello world!",
'ques': ques,
'para': para,
'y': yi,
'y2': yi,
'yp': ypi,
'yp2': ypi,
'a': ""
}
rows.append(row)
if i % num_per_page == 0:
html_path = os.path.join(html_dir, "%s.html" % str(i).zfill(8))
if (i + 1) % num_per_page == 0 or (i + 1) == len(eval_['y']):
var_dict = {'title': "Accuracy Visualization",
'rows': rows
}
with open(html_path, "wb") as f:
f.write(template.render(**var_dict).encode('UTF-8'))
rows = []
os.chdir(html_dir)
port = args.port
host = args.host
# Overriding to suppress log message
class MyHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
handler = MyHandler
httpd = socketserver.TCPServer((host, port), handler)
if args.open == 'True':
os.system("open http://%s:%d" % (args.host, args.port))
print("serving at %s:%d" % (host, port))
httpd.serve_forever()
if __name__ == "__main__":
ARGS = get_args()
accuracy2_visualizer(ARGS) | bi-att-flow-master | tree/visualizer.py |
import sys
import json
from collections import Counter, defaultdict
import re
def key_func(pair):
return pair[1]
def get_func(vals, probs):
counter = Counter(vals)
# return max(zip(vals, probs), key=lambda pair: pair[1])[0]
# return max(zip(vals, probs), key=lambda pair: pair[1] * counter[pair[0]] / len(counter) - 999 * (len(pair[0]) == 0) )[0]
# return max(zip(vals, probs), key=lambda pair: pair[1] + 0.7 * counter[pair[0]] / len(counter) - 999 * (len(pair[0]) == 0) )[0]
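    # Soft voting: accumulate each candidate answer's probability mass across
    # models, zero out the empty answer, and return the top-scoring string.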
d = defaultdict(float)
for val, prob in zip(vals, probs):
d[val] += prob
d[''] = 0
return max(d.items(), key=lambda pair: pair[1])[0]
third_path = sys.argv[1]
other_paths = sys.argv[2:]
others = [json.load(open(path, 'r')) for path in other_paths]
c = {}
assert min(map(len, others)) == max(map(len, others)), list(map(len, others))
for key in others[0].keys():
if key == 'scores':
continue
probs = [other['scores'][key] for other in others]
vals = [other[key] for other in others]
largest_val = get_func(vals, probs)
c[key] = largest_val
json.dump(c, open(third_path, 'w')) | bi-att-flow-master | basic/ensemble_fast.py |
import gzip
import json
from json import encoder
import os
import tensorflow as tf
from basic.evaluator import Evaluation, F1Evaluation
from my.utils import short_floats
import pickle
class GraphHandler(object):
def __init__(self, config, model):
self.config = config
self.model = model
self.saver = tf.train.Saver(max_to_keep=config.max_to_keep)
self.writer = None
self.save_path = os.path.join(config.save_dir, config.model_name)
def initialize(self, sess):
sess.run(tf.initialize_all_variables())
if self.config.load:
self._load(sess)
if self.config.mode == 'train':
self.writer = tf.train.SummaryWriter(self.config.log_dir, graph=tf.get_default_graph())
def save(self, sess, global_step=None):
saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)
saver.save(sess, self.save_path, global_step=global_step)
def _load(self, sess):
config = self.config
vars_ = {var.name.split(":")[0]: var for var in tf.all_variables()}
if config.load_ema:
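            # Map each trainable variable's EMA shadow name to the variable, so
            # restoring loads the averaged weights instead of the raw ones.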
ema = self.model.var_ema
for var in tf.trainable_variables():
del vars_[var.name.split(":")[0]]
vars_[ema.average_name(var)] = var
saver = tf.train.Saver(vars_, max_to_keep=config.max_to_keep)
if config.load_path:
save_path = config.load_path
elif config.load_step > 0:
save_path = os.path.join(config.save_dir, "{}-{}".format(config.model_name, config.load_step))
else:
save_dir = config.save_dir
checkpoint = tf.train.get_checkpoint_state(save_dir)
assert checkpoint is not None, "cannot load checkpoint at {}".format(save_dir)
save_path = checkpoint.model_checkpoint_path
print("Loading saved model from {}".format(save_path))
saver.restore(sess, save_path)
def add_summary(self, summary, global_step):
self.writer.add_summary(summary, global_step)
def add_summaries(self, summaries, global_step):
for summary in summaries:
self.add_summary(summary, global_step)
def dump_eval(self, e, precision=2, path=None):
assert isinstance(e, Evaluation)
if self.config.dump_pickle:
path = path or os.path.join(self.config.eval_dir, "{}-{}.pklz".format(e.data_type, str(e.global_step).zfill(6)))
with gzip.open(path, 'wb', compresslevel=3) as fh:
pickle.dump(e.dict, fh)
else:
path = path or os.path.join(self.config.eval_dir, "{}-{}.json".format(e.data_type, str(e.global_step).zfill(6)))
with open(path, 'w') as fh:
json.dump(short_floats(e.dict, precision), fh)
def dump_answer(self, e, path=None):
assert isinstance(e, Evaluation)
path = path or os.path.join(self.config.answer_dir, "{}-{}.json".format(e.data_type, str(e.global_step).zfill(6)))
with open(path, 'w') as fh:
json.dump(e.id2answer_dict, fh)
| bi-att-flow-master | basic/graph_handler.py |
bi-att-flow-master | basic/__init__.py |
|
import random
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import BasicLSTMCell
from basic.read_data import DataSet
from my.tensorflow import get_initializer
from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d
from my.tensorflow.rnn import bidirectional_dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
def get_multi_gpu_models(config):
models = []
for gpu_idx in range(config.num_gpus):
with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device("/{}:{}".format(config.device_type, gpu_idx)):
model = Model(config, scope, rep=gpu_idx == 0)
tf.get_variable_scope().reuse_variables()
models.append(model)
return models
class Model(object):
def __init__(self, config, scope, rep=True):
self.scope = scope
self.config = config
self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
# Define forward inputs here
N, M, JX, JQ, VW, VC, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
self.x = tf.placeholder('int32', [N, None, None], name='x')
self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
self.q = tf.placeholder('int32', [N, None], name='q')
self.cq = tf.placeholder('int32', [N, None, W], name='cq')
self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
self.y = tf.placeholder('bool', [N, None, None], name='y')
self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
self.is_train = tf.placeholder('bool', [], name='is_train')
self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')
# Define misc
self.tensor_dict = {}
# Forward outputs / loss inputs
self.logits = None
self.yp = None
self.var_list = None
# Loss outputs
self.loss = None
self._build_forward()
self._build_loss()
self.var_ema = None
if rep:
self._build_var_ema()
if config.mode == 'train':
self._build_ema()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
def _build_forward(self):
config = self.config
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, \
config.max_word_size
JX = tf.shape(self.x)[2]
JQ = tf.shape(self.q)[1]
M = tf.shape(self.x)[1]
dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size
with tf.variable_scope("emb"):
if config.use_char_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
with tf.variable_scope("char"):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, M, JX, W, dc]
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq) # [N, JQ, W, dc]
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco, (filter_sizes, dco)
with tf.variable_scope("conv"):
xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
else:
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="qq")
xx = tf.reshape(xx, [-1, M, JX, dco])
qq = tf.reshape(qq, [-1, JQ, dco])
if config.use_word_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(0, [word_emb_mat, self.new_emb_mat])
with tf.name_scope("word"):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, M, JX, d]
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q) # [N, JQ, d]
self.tensor_dict['x'] = Ax
self.tensor_dict['q'] = Aq
if config.use_char_emb:
xx = tf.concat(3, [xx, Ax]) # [N, M, JX, di]
qq = tf.concat(2, [qq, Aq]) # [N, JQ, di]
else:
xx = Ax
qq = Aq
# highway network
if config.highway:
with tf.variable_scope("highway"):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
tf.get_variable_scope().reuse_variables()
qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
self.tensor_dict['xx'] = xx
self.tensor_dict['qq'] = qq
cell = BasicLSTMCell(d, state_is_tuple=True)
d_cell = SwitchableDropoutWrapper(cell, self.is_train, input_keep_prob=config.input_keep_prob)
x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2) # [N, M]
q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1) # [N]
with tf.variable_scope("prepro"):
(fw_u, bw_u), ((_, fw_u_f), (_, bw_u_f)) = bidirectional_dynamic_rnn(d_cell, d_cell, qq, q_len, dtype='float', scope='u1') # [N, J, d], [N, d]
u = tf.concat(2, [fw_u, bw_u])
if config.share_lstm_weights:
tf.get_variable_scope().reuse_variables()
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, xx, x_len, dtype='float', scope='u1') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
else:
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, xx, x_len, dtype='float', scope='h1') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
self.tensor_dict['u'] = u
self.tensor_dict['h'] = h
with tf.variable_scope("main"):
if config.dynamic_att:
p0 = h
u = tf.reshape(tf.tile(tf.expand_dims(u, 1), [1, M, 1, 1]), [N * M, JQ, 2 * d])
q_mask = tf.reshape(tf.tile(tf.expand_dims(self.q_mask, 1), [1, M, 1]), [N * M, JQ])
first_cell = AttentionCell(cell, u, mask=q_mask, mapper='sim',
input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
else:
p0 = attention_layer(config, self.is_train, h, u, h_mask=self.x_mask, u_mask=self.q_mask, scope="p0", tensor_dict=self.tensor_dict)
first_cell = d_cell
(fw_g0, bw_g0), _ = bidirectional_dynamic_rnn(first_cell, first_cell, p0, x_len, dtype='float', scope='g0') # [N, M, JX, 2d]
g0 = tf.concat(3, [fw_g0, bw_g0])
(fw_g1, bw_g1), _ = bidirectional_dynamic_rnn(first_cell, first_cell, g0, x_len, dtype='float', scope='g1') # [N, M, JX, 2d]
g1 = tf.concat(3, [fw_g1, bw_g1])
logits = get_logits([g1, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=self.x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
a1i = softsel(tf.reshape(g1, [N, M * JX, 2 * d]), tf.reshape(logits, [N, M * JX]))
a1i = tf.tile(tf.expand_dims(tf.expand_dims(a1i, 1), 1), [1, M, JX, 1])
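            # a1i is the softmax-weighted summary of g1 under the start
            # distribution, broadcast so the end-pointer LSTM can condition on it.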
(fw_g2, bw_g2), _ = bidirectional_dynamic_rnn(d_cell, d_cell, tf.concat(3, [p0, g1, a1i, g1 * a1i]),
x_len, dtype='float', scope='g2') # [N, M, JX, 2d]
g2 = tf.concat(3, [fw_g2, bw_g2])
logits2 = get_logits([g2, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=self.x_mask,
is_train=self.is_train, func=config.answer_func, scope='logits2')
flat_logits = tf.reshape(logits, [-1, M * JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M*JX]
yp = tf.reshape(flat_yp, [-1, M, JX])
flat_logits2 = tf.reshape(logits2, [-1, M * JX])
flat_yp2 = tf.nn.softmax(flat_logits2)
yp2 = tf.reshape(flat_yp2, [-1, M, JX])
self.tensor_dict['g1'] = g1
self.tensor_dict['g2'] = g2
self.logits = flat_logits
self.logits2 = flat_logits2
self.yp = yp
self.yp2 = yp2
def _build_loss(self):
config = self.config
JX = tf.shape(self.x)[2]
M = tf.shape(self.x)[1]
JQ = tf.shape(self.q)[1]
loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
losses = tf.nn.softmax_cross_entropy_with_logits(
self.logits, tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float'))
ce_loss = tf.reduce_mean(loss_mask * losses)
tf.add_to_collection('losses', ce_loss)
ce_loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
self.logits2, tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float')))
tf.add_to_collection("losses", ce_loss2)
self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
tf.scalar_summary(self.loss.op.name, self.loss)
tf.add_to_collection('ema/scalar', self.loss)
def _build_ema(self):
self.ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema = self.ema
tensors = tf.get_collection("ema/scalar", scope=self.scope) + tf.get_collection("ema/vector", scope=self.scope)
ema_op = ema.apply(tensors)
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = ema.average(var)
tf.scalar_summary(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/vector", scope=self.scope):
ema_var = ema.average(var)
tf.histogram_summary(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def _build_var_ema(self):
self.var_ema = tf.train.ExponentialMovingAverage(self.config.var_decay)
ema = self.var_ema
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self):
return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
assert isinstance(batch, DataSet)
config = self.config
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size
feed_dict = {}
if config.len_opt:
"""
Note that this optimization results in variable GPU RAM usage (i.e. can cause OOM in the middle of training.)
First test without len_opt and make sure no OOM, and use len_opt
"""
if sum(len(sent) for para in batch.data['x'] for sent in para) == 0:
new_JX = 1
else:
new_JX = max(len(sent) for para in batch.data['x'] for sent in para)
JX = min(JX, new_JX)
if sum(len(ques) for ques in batch.data['q']) == 0:
new_JQ = 1
else:
new_JQ = max(len(ques) for ques in batch.data['q'])
JQ = min(JQ, new_JQ)
if config.cpu_opt:
if sum(len(para) for para in batch.data['x']) == 0:
new_M = 1
else:
new_M = max(len(para) for para in batch.data['x'])
M = min(M, new_M)
x = np.zeros([N, M, JX], dtype='int32')
cx = np.zeros([N, M, JX, W], dtype='int32')
x_mask = np.zeros([N, M, JX], dtype='bool')
q = np.zeros([N, JQ], dtype='int32')
cq = np.zeros([N, JQ, W], dtype='int32')
q_mask = np.zeros([N, JQ], dtype='bool')
feed_dict[self.x] = x
feed_dict[self.x_mask] = x_mask
feed_dict[self.cx] = cx
feed_dict[self.q] = q
feed_dict[self.cq] = cq
feed_dict[self.q_mask] = q_mask
feed_dict[self.is_train] = is_train
if config.use_glove_for_unk:
feed_dict[self.new_emb_mat] = batch.shared['new_emb_mat']
X = batch.data['x']
CX = batch.data['cx']
if supervised:
y = np.zeros([N, M, JX], dtype='bool')
y2 = np.zeros([N, M, JX], dtype='bool')
feed_dict[self.y] = y
feed_dict[self.y2] = y2
for i, (xi, cxi, yi) in enumerate(zip(X, CX, batch.data['y'])):
start_idx, stop_idx = random.choice(yi)
j, k = start_idx
j2, k2 = stop_idx
if config.single:
X[i] = [xi[j]]
CX[i] = [cxi[j]]
j, j2 = 0, 0
if config.squash:
offset = sum(map(len, xi[:j]))
j, k = 0, k + offset
offset = sum(map(len, xi[:j2]))
j2, k2 = 0, k2 + offset
y[i, j, k] = True
y2[i, j2, k2-1] = True
def _get_word(word):
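            # Fall back through case variants; out-of-vocab words resolve to the
            # GloVe-backed extra vocabulary (offset by len(d)) or to UNK (1).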
d = batch.shared['word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d:
return d[each]
if config.use_glove_for_unk:
d2 = batch.shared['new_word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d2:
return d2[each] + len(d)
return 1
def _get_char(char):
d = batch.shared['char2idx']
if char in d:
return d[char]
return 1
for i, xi in enumerate(X):
if self.config.squash:
xi = [list(itertools.chain(*xi))]
for j, xij in enumerate(xi):
if j == config.max_num_sents:
break
for k, xijk in enumerate(xij):
if k == config.max_sent_size:
break
each = _get_word(xijk)
assert isinstance(each, int), each
x[i, j, k] = each
x_mask[i, j, k] = True
for i, cxi in enumerate(CX):
if self.config.squash:
cxi = [list(itertools.chain(*cxi))]
for j, cxij in enumerate(cxi):
if j == config.max_num_sents:
break
for k, cxijk in enumerate(cxij):
if k == config.max_sent_size:
break
for l, cxijkl in enumerate(cxijk):
if l == config.max_word_size:
break
cx[i, j, k, l] = _get_char(cxijkl)
for i, qi in enumerate(batch.data['q']):
for j, qij in enumerate(qi):
q[i, j] = _get_word(qij)
q_mask[i, j] = True
for i, cqi in enumerate(batch.data['cq']):
for j, cqij in enumerate(cqi):
for k, cqijk in enumerate(cqij):
cq[i, j, k] = _get_char(cqijk)
if k + 1 == config.max_word_size:
break
return feed_dict
def bi_attention(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
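    # Context-to-query attention (u_a): each context position attends over the
    # question words. Query-to-context attention (h_a): attend over context
    # positions via each position's max similarity to any question word, then
    # tile the result across positions.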
with tf.variable_scope(scope or "bi_attention"):
JX = tf.shape(h)[2]
M = tf.shape(h)[1]
JQ = tf.shape(u)[1]
h_aug = tf.tile(tf.expand_dims(h, 3), [1, 1, 1, JQ, 1])
u_aug = tf.tile(tf.expand_dims(tf.expand_dims(u, 1), 1), [1, M, JX, 1, 1])
if h_mask is None:
hu_mask = None
else:
h_mask_aug = tf.tile(tf.expand_dims(h_mask, 3), [1, 1, 1, JQ])
u_mask_aug = tf.tile(tf.expand_dims(tf.expand_dims(u_mask, 1), 1), [1, M, JX, 1])
hu_mask = h_mask_aug & u_mask_aug
u_logits = get_logits([h_aug, u_aug], None, True, wd=config.wd, mask=hu_mask,
is_train=is_train, func=config.logit_func, scope='u_logits') # [N, M, JX, JQ]
u_a = softsel(u_aug, u_logits) # [N, M, JX, d]
h_a = softsel(h, tf.reduce_max(u_logits, 3)) # [N, M, d]
h_a = tf.tile(tf.expand_dims(h_a, 2), [1, 1, JX, 1])
if tensor_dict is not None:
a_u = tf.nn.softmax(u_logits) # [N, M, JX, JQ]
a_h = tf.nn.softmax(tf.reduce_max(u_logits, 3))
tensor_dict['a_u'] = a_u
tensor_dict['a_h'] = a_h
variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope=tf.get_variable_scope().name)
for var in variables:
tensor_dict[var.name] = var
return u_a, h_a
def attention_layer(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
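    # Builds the attended representation G = [h; u_a; h * u_a; h * h_a]
    # (dropping terms when C2Q or Q2C attention is ablated via config).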
with tf.variable_scope(scope or "attention_layer"):
JX = tf.shape(h)[2]
M = tf.shape(h)[1]
JQ = tf.shape(u)[1]
if config.q2c_att or config.c2q_att:
u_a, h_a = bi_attention(config, is_train, h, u, h_mask=h_mask, u_mask=u_mask, tensor_dict=tensor_dict)
if not config.c2q_att:
u_a = tf.tile(tf.expand_dims(tf.expand_dims(tf.reduce_mean(u, 1), 1), 1), [1, M, JX, 1])
if config.q2c_att:
p0 = tf.concat(3, [h, u_a, h * u_a, h * h_a])
else:
p0 = tf.concat(3, [h, u_a, h * u_a])
return p0
| bi-att-flow-master | basic/model.py |
import os
import tensorflow as tf
from basic.main import main as m
flags = tf.app.flags
# Names and directories
flags.DEFINE_string("model_name", "basic", "Model name [basic]")
flags.DEFINE_string("data_dir", "data/squad", "Data dir [data/squad]")
flags.DEFINE_string("run_id", "0", "Run ID [0]")
flags.DEFINE_string("out_base_dir", "out", "out base dir [out]")
flags.DEFINE_string("forward_name", "single", "Forward name [single]")
flags.DEFINE_string("answer_path", "", "Answer path []")
flags.DEFINE_string("eval_path", "", "Eval path []")
flags.DEFINE_string("load_path", "", "Load path []")
flags.DEFINE_string("shared_path", "", "Shared path []")
# Device placement
flags.DEFINE_string("device", "/cpu:0", "default device for summing gradients. [/cpu:0]")
flags.DEFINE_string("device_type", "gpu", "device for computing gradients (parallelization). cpu | gpu [gpu]")
flags.DEFINE_integer("num_gpus", 1, "num of gpus or cpus for computing gradients [1]")
# Essential training and test options
flags.DEFINE_string("mode", "test", "trains | test | forward [test]")
flags.DEFINE_boolean("load", True, "load saved data? [True]")
flags.DEFINE_bool("single", False, "supervise only the answer sentence? [False]")
flags.DEFINE_boolean("debug", False, "Debugging mode? [False]")
flags.DEFINE_bool('load_ema', True, "load exponential average of variables when testing? [True]")
flags.DEFINE_bool("eval", True, "eval? [True]")
# Training / test parameters
flags.DEFINE_integer("batch_size", 60, "Batch size [60]")
flags.DEFINE_integer("val_num_batches", 100, "validation num batches [100]")
flags.DEFINE_integer("test_num_batches", 0, "test num batches [0]")
flags.DEFINE_integer("num_epochs", 12, "Total number of epochs for training [12]")
flags.DEFINE_integer("num_steps", 20000, "Number of steps [20000]")
flags.DEFINE_integer("load_step", 0, "load step [0]")
flags.DEFINE_float("init_lr", 0.5, "Initial learning rate [0.5]")
flags.DEFINE_float("input_keep_prob", 0.8, "Input keep prob for the dropout of LSTM weights [0.8]")
flags.DEFINE_float("keep_prob", 0.8, "Keep prob for the dropout of Char-CNN weights [0.8]")
flags.DEFINE_float("wd", 0.0, "L2 weight decay for regularization [0.0]")
flags.DEFINE_integer("hidden_size", 100, "Hidden size [100]")
flags.DEFINE_integer("char_out_size", 100, "char-level word embedding size [100]")
flags.DEFINE_integer("char_emb_size", 8, "Char emb size [8]")
flags.DEFINE_string("out_channel_dims", "100", "Out channel dims of Char-CNN, separated by commas [100]")
flags.DEFINE_string("filter_heights", "5", "Filter heights of Char-CNN, separated by commas [5]")
flags.DEFINE_bool("finetune", False, "Finetune word embeddings? [False]")
flags.DEFINE_bool("highway", True, "Use highway? [True]")
flags.DEFINE_integer("highway_num_layers", 2, "highway num layers [2]")
flags.DEFINE_bool("share_cnn_weights", True, "Share Char-CNN weights [True]")
flags.DEFINE_bool("share_lstm_weights", True, "Share pre-processing (phrase-level) LSTM weights [True]")
flags.DEFINE_float("var_decay", 0.999, "Exponential moving average decay for variables [0.999]")
# Optimizations
flags.DEFINE_bool("cluster", False, "Cluster data for faster training [False]")
flags.DEFINE_bool("len_opt", False, "Length optimization? [False]")
flags.DEFINE_bool("cpu_opt", False, "CPU optimization? GPU computation can be slower [False]")
# Logging and saving options
flags.DEFINE_boolean("progress", True, "Show progress? [True]")
flags.DEFINE_integer("log_period", 100, "Log period [100]")
flags.DEFINE_integer("eval_period", 1000, "Eval period [1000]")
flags.DEFINE_integer("save_period", 1000, "Save Period [1000]")
flags.DEFINE_integer("max_to_keep", 20, "Max recent saves to keep [20]")
flags.DEFINE_bool("dump_eval", True, "dump eval? [True]")
flags.DEFINE_bool("dump_answer", True, "dump answer? [True]")
flags.DEFINE_bool("vis", False, "output visualization numbers? [False]")
flags.DEFINE_bool("dump_pickle", True, "Dump pickle instead of json? [True]")
flags.DEFINE_float("decay", 0.9, "Exponential moving average decay for logging values [0.9]")
# Thresholds for speed and less memory usage
flags.DEFINE_integer("word_count_th", 10, "word count th [100]")
flags.DEFINE_integer("char_count_th", 50, "char count th [500]")
flags.DEFINE_integer("sent_size_th", 400, "sent size th [64]")
flags.DEFINE_integer("num_sents_th", 8, "num sents th [8]")
flags.DEFINE_integer("ques_size_th", 30, "ques size th [32]")
flags.DEFINE_integer("word_size_th", 16, "word size th [16]")
flags.DEFINE_integer("para_size_th", 256, "para size th [256]")
# Advanced training options
flags.DEFINE_bool("lower_word", True, "lower word [True]")
flags.DEFINE_bool("squash", False, "squash the sentences into one? [False]")
flags.DEFINE_bool("swap_memory", True, "swap memory? [True]")
flags.DEFINE_string("data_filter", "max", "max | valid | semi [max]")
flags.DEFINE_bool("use_glove_for_unk", True, "use glove for unk [False]")
flags.DEFINE_bool("known_if_glove", True, "consider as known if present in glove [False]")
flags.DEFINE_string("logit_func", "tri_linear", "logit func [tri_linear]")
flags.DEFINE_string("answer_func", "linear", "answer logit func [linear]")
flags.DEFINE_string("sh_logit_func", "tri_linear", "sh logit func [tri_linear]")
# Ablation options
flags.DEFINE_bool("use_char_emb", True, "use char emb? [True]")
flags.DEFINE_bool("use_word_emb", True, "use word embedding? [True]")
flags.DEFINE_bool("q2c_att", True, "question-to-context attention? [True]")
flags.DEFINE_bool("c2q_att", True, "context-to-question attention? [True]")
flags.DEFINE_bool("dynamic_att", False, "Dynamic attention [False]")
def main(_):
config = flags.FLAGS
config.out_dir = os.path.join(config.out_base_dir, config.model_name, str(config.run_id).zfill(2))
m(config)
if __name__ == "__main__":
tf.app.run()
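# Example invocation (an illustrative sketch; values shown are the defaults, and
# required flags such as --model_name and --out_base_dir are referenced in main()
# and defined earlier in this file, outside this excerpt):
#   python -m basic.cli --num_epochs 12 --init_lr 0.5 --hidden_size 100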
| bi-att-flow-master | basic/cli.py |
import argparse
import functools
import gzip
import json
import pickle
from collections import defaultdict
from operator import mul
from tqdm import tqdm
from squad.utils import get_phrase, get_best_span
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('paths', nargs='+')
parser.add_argument('-o', '--out', default='ensemble.json')
parser.add_argument("--data_path", default="data/squad/data_test.json")
parser.add_argument("--shared_path", default="data/squad/shared_test.json")
args = parser.parse_args()
return args
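# Example invocation (paths are placeholders for pickled eval dumps, e.g. those
# produced with --dump_pickle): combine several models' predictions into one file:
#   python -m basic.ensemble out1.pklz out2.pklz -o ensemble.json \
#       --data_path data/squad/data_test.json --shared_path data/squad/shared_test.json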
def ensemble(args):
e_list = []
for path in tqdm(args.paths):
with gzip.open(path, 'r') as fh:
e = pickle.load(fh)
e_list.append(e)
with open(args.data_path, 'r') as fh:
data = json.load(fh)
with open(args.shared_path, 'r') as fh:
shared = json.load(fh)
out = {}
for idx, (id_, rx) in tqdm(enumerate(zip(data['ids'], data['*x'])), total=len(e['yp'])):
if idx >= len(e['yp']):
            # for debugging purposes
break
context = shared['p'][rx[0]][rx[1]]
wordss = shared['x'][rx[0]][rx[1]]
yp_list = [e['yp'][idx] for e in e_list]
yp2_list = [e['yp2'][idx] for e in e_list]
answer = ensemble3(context, wordss, yp_list, yp2_list)
out[id_] = answer
with open(args.out, 'w') as fh:
json.dump(out, fh)
def ensemble1(context, wordss, y1_list, y2_list):
"""
:param context: Original context
:param wordss: tokenized words (nested 2D list)
    :param y1_list: list of start index probs (each element corresponds to probs from a single model)
:param y2_list: list of stop index probs
:return:
"""
sum_y1 = combine_y_list(y1_list)
sum_y2 = combine_y_list(y2_list)
span, score = get_best_span(sum_y1, sum_y2)
return get_phrase(context, wordss, span)
def ensemble2(context, wordss, y1_list, y2_list):
start_dict = defaultdict(float)
stop_dict = defaultdict(float)
for y1, y2 in zip(y1_list, y2_list):
span, score = get_best_span(y1, y2)
start_dict[span[0]] += y1[span[0][0]][span[0][1]]
stop_dict[span[1]] += y2[span[1][0]][span[1][1]]
start = max(start_dict.items(), key=lambda pair: pair[1])[0]
stop = max(stop_dict.items(), key=lambda pair: pair[1])[0]
best_span = (start, stop)
return get_phrase(context, wordss, best_span)
def ensemble3(context, wordss, y1_list, y2_list):
d = defaultdict(float)
for y1, y2 in zip(y1_list, y2_list):
span, score = get_best_span(y1, y2)
phrase = get_phrase(context, wordss, span)
d[phrase] += score
return max(d.items(), key=lambda pair: pair[1])[0]
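# ensemble3 votes at the phrase level: each model adds its best span's score to the
# phrase that span extracts, and the phrase with the largest total wins. Illustrative
# numbers: if two models pick "New York" with scores 0.4 and 0.3 while one picks
# "York" with 0.6, "New York" wins with a total of 0.7.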
def combine_y_list(y_list, op='*'):
if op == '+':
func = sum
elif op == '*':
def func(l): return functools.reduce(mul, l)
else:
func = op
return [[func(yij_list) for yij_list in zip(*yi_list)] for yi_list in zip(*y_list)]
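# Worked example (illustrative): with the default op='*', probabilities are combined
# elementwise across models. For two models over one sentence of two words:
#   combine_y_list([[[0.2, 0.8]], [[0.5, 0.5]]])  ->  [[0.1, 0.4]]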
def main():
args = get_args()
ensemble(args)
if __name__ == "__main__":
main()
| bi-att-flow-master | basic/ensemble.py |
import json
import os
import random
import itertools
import math
from collections import defaultdict
import numpy as np
from my.tensorflow import grouper
from my.utils import index
class Data(object):
def get_size(self):
raise NotImplementedError()
def get_by_idxs(self, idxs):
"""
Efficient way to obtain a batch of items from filesystem
:param idxs:
:return dict: {'X': [,], 'Y', }
"""
data = defaultdict(list)
for idx in idxs:
each_data = self.get_one(idx)
for key, val in each_data.items():
data[key].append(val)
return data
def get_one(self, idx):
raise NotImplementedError()
def get_empty(self):
raise NotImplementedError()
def __add__(self, other):
raise NotImplementedError()
class DataSet(object):
def __init__(self, data, data_type, shared=None, valid_idxs=None):
self.data = data # e.g. {'X': [0, 1, 2], 'Y': [2, 3, 4]}
self.data_type = data_type
self.shared = shared
total_num_examples = self.get_data_size()
self.valid_idxs = range(total_num_examples) if valid_idxs is None else valid_idxs
self.num_examples = len(self.valid_idxs)
def _sort_key(self, idx):
rx = self.data['*x'][idx]
x = self.shared['x'][rx[0]][rx[1]]
return max(map(len, x))
def get_data_size(self):
if isinstance(self.data, dict):
return len(next(iter(self.data.values())))
elif isinstance(self.data, Data):
return self.data.get_size()
raise Exception()
def get_by_idxs(self, idxs):
if isinstance(self.data, dict):
out = defaultdict(list)
for key, val in self.data.items():
out[key].extend(val[idx] for idx in idxs)
return out
elif isinstance(self.data, Data):
return self.data.get_by_idxs(idxs)
raise Exception()
def get_batches(self, batch_size, num_batches=None, shuffle=False, cluster=False):
"""
:param batch_size:
:param num_batches:
:param shuffle:
:param cluster: cluster examples by their lengths; this might give performance boost (i.e. faster training).
:return:
"""
num_batches_per_epoch = int(math.ceil(self.num_examples / batch_size))
if num_batches is None:
num_batches = num_batches_per_epoch
num_epochs = int(math.ceil(num_batches / num_batches_per_epoch))
if shuffle:
random_idxs = random.sample(self.valid_idxs, len(self.valid_idxs))
if cluster:
sorted_idxs = sorted(random_idxs, key=self._sort_key)
sorted_grouped = lambda: list(grouper(sorted_idxs, batch_size))
grouped = lambda: random.sample(sorted_grouped(), num_batches_per_epoch)
else:
random_grouped = lambda: list(grouper(random_idxs, batch_size))
grouped = random_grouped
else:
raw_grouped = lambda: list(grouper(self.valid_idxs, batch_size))
grouped = raw_grouped
batch_idx_tuples = itertools.chain.from_iterable(grouped() for _ in range(num_epochs))
for _ in range(num_batches):
batch_idxs = tuple(i for i in next(batch_idx_tuples) if i is not None)
batch_data = self.get_by_idxs(batch_idxs)
shared_batch_data = {}
for key, val in batch_data.items():
if key.startswith('*'):
assert self.shared is not None
shared_key = key[1:]
shared_batch_data[shared_key] = [index(self.shared[shared_key], each) for each in val]
batch_data.update(shared_batch_data)
batch_ds = DataSet(batch_data, self.data_type, shared=self.shared)
yield batch_idxs, batch_ds
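    # Usage sketch (hedged; assumes `train_data` is a constructed DataSet):
    #   for batch_idxs, batch_ds in train_data.get_batches(60, shuffle=True):
    #       feed_dict = model.get_feed_dict(batch_ds, True)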
def get_multi_batches(self, batch_size, num_batches_per_step, num_steps=None, shuffle=False, cluster=False):
batch_size_per_step = batch_size * num_batches_per_step
batches = self.get_batches(batch_size_per_step, num_batches=num_steps, shuffle=shuffle, cluster=cluster)
multi_batches = (tuple(zip(grouper(idxs, batch_size, shorten=True, num_groups=num_batches_per_step),
data_set.divide(num_batches_per_step))) for idxs, data_set in batches)
return multi_batches
def get_empty(self):
if isinstance(self.data, dict):
data = {key: [] for key in self.data}
elif isinstance(self.data, Data):
data = self.data.get_empty()
else:
raise Exception()
return DataSet(data, self.data_type, shared=self.shared)
def __add__(self, other):
if isinstance(self.data, dict):
data = {key: val + other.data[key] for key, val in self.data.items()}
elif isinstance(self.data, Data):
data = self.data + other.data
else:
raise Exception()
valid_idxs = list(self.valid_idxs) + [valid_idx + self.num_examples for valid_idx in other.valid_idxs]
return DataSet(data, self.data_type, shared=self.shared, valid_idxs=valid_idxs)
def divide(self, integer):
batch_size = int(math.ceil(self.num_examples / integer))
idxs_gen = grouper(self.valid_idxs, batch_size, shorten=True, num_groups=integer)
data_gen = (self.get_by_idxs(idxs) for idxs in idxs_gen)
ds_tuple = tuple(DataSet(data, self.data_type, shared=self.shared) for data in data_gen)
return ds_tuple
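    # Example: a DataSet with 10 valid examples divided by 3 uses batch_size =
    # ceil(10 / 3) = 4, yielding sub-sets of (roughly) sizes 4, 4, and 2, assuming
    # grouper fills groups in order and drops padding.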
def load_metadata(config, data_type):
metadata_path = os.path.join(config.data_dir, "metadata_{}.json".format(data_type))
with open(metadata_path, 'r') as fh:
metadata = json.load(fh)
for key, val in metadata.items():
config.__setattr__(key, val)
return metadata
def read_data(config, data_type, ref, data_filter=None):
data_path = os.path.join(config.data_dir, "data_{}.json".format(data_type))
shared_path = os.path.join(config.data_dir, "shared_{}.json".format(data_type))
with open(data_path, 'r') as fh:
data = json.load(fh)
with open(shared_path, 'r') as fh:
shared = json.load(fh)
num_examples = len(next(iter(data.values())))
if data_filter is None:
valid_idxs = range(num_examples)
else:
mask = []
keys = data.keys()
values = data.values()
for vals in zip(*values):
each = {key: val for key, val in zip(keys, vals)}
mask.append(data_filter(each, shared))
valid_idxs = [idx for idx in range(len(mask)) if mask[idx]]
print("Loaded {}/{} examples from {}".format(len(valid_idxs), num_examples, data_type))
shared_path = config.shared_path or os.path.join(config.out_dir, "shared.json")
if not ref:
word2vec_dict = shared['lower_word2vec'] if config.lower_word else shared['word2vec']
word_counter = shared['lower_word_counter'] if config.lower_word else shared['word_counter']
char_counter = shared['char_counter']
if config.finetune:
shared['word2idx'] = {word: idx + 2 for idx, word in
enumerate(word for word, count in word_counter.items()
if count > config.word_count_th or (config.known_if_glove and word in word2vec_dict))}
else:
assert config.known_if_glove
assert config.use_glove_for_unk
shared['word2idx'] = {word: idx + 2 for idx, word in
enumerate(word for word, count in word_counter.items()
if count > config.word_count_th and word not in word2vec_dict)}
shared['char2idx'] = {char: idx + 2 for idx, char in
enumerate(char for char, count in char_counter.items()
if count > config.char_count_th)}
NULL = "-NULL-"
UNK = "-UNK-"
shared['word2idx'][NULL] = 0
shared['word2idx'][UNK] = 1
shared['char2idx'][NULL] = 0
shared['char2idx'][UNK] = 1
json.dump({'word2idx': shared['word2idx'], 'char2idx': shared['char2idx']}, open(shared_path, 'w'))
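        # Ids 0 and 1 are reserved for NULL (padding) and UNK in both vocabularies,
        # which is why known words and chars are enumerated starting from idx + 2.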
else:
new_shared = json.load(open(shared_path, 'r'))
for key, val in new_shared.items():
shared[key] = val
if config.use_glove_for_unk:
# create new word2idx and word2vec
word2vec_dict = shared['lower_word2vec'] if config.lower_word else shared['word2vec']
new_word2idx_dict = {word: idx for idx, word in enumerate(word for word in word2vec_dict.keys() if word not in shared['word2idx'])}
shared['new_word2idx'] = new_word2idx_dict
offset = len(shared['word2idx'])
word2vec_dict = shared['lower_word2vec'] if config.lower_word else shared['word2vec']
new_word2idx_dict = shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for word, idx in new_word2idx_dict.items()}
# print("{}/{} unique words have corresponding glove vectors.".format(len(idx2vec_dict), len(word2idx_dict)))
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
shared['new_emb_mat'] = new_emb_mat
data_set = DataSet(data, data_type, shared=shared, valid_idxs=valid_idxs)
return data_set
def get_squad_data_filter(config):
def data_filter(data_point, shared):
assert shared is not None
rx, rcx, q, cq, y = (data_point[key] for key in ('*x', '*cx', 'q', 'cq', 'y'))
x, cx = shared['x'], shared['cx']
if len(q) > config.ques_size_th:
return False
# x filter
xi = x[rx[0]][rx[1]]
if config.squash:
for start, stop in y:
stop_offset = sum(map(len, xi[:stop[0]]))
if stop_offset + stop[1] > config.para_size_th:
return False
return True
if config.single:
for start, stop in y:
if start[0] != stop[0]:
return False
if config.data_filter == 'max':
for start, stop in y:
if stop[0] >= config.num_sents_th:
return False
if start[0] != stop[0]:
return False
if stop[1] >= config.sent_size_th:
return False
elif config.data_filter == 'valid':
if len(xi) > config.num_sents_th:
return False
if any(len(xij) > config.sent_size_th for xij in xi):
return False
elif config.data_filter == 'semi':
"""
Only answer sentence needs to be valid.
"""
for start, stop in y:
if stop[0] >= config.num_sents_th:
return False
                if start[0] != stop[0]:
return False
if len(xi[start[0]]) > config.sent_size_th:
return False
else:
raise Exception()
return True
return data_filter
def update_config(config, data_sets):
config.max_num_sents = 0
config.max_sent_size = 0
config.max_ques_size = 0
config.max_word_size = 0
config.max_para_size = 0
for data_set in data_sets:
data = data_set.data
shared = data_set.shared
for idx in data_set.valid_idxs:
rx = data['*x'][idx]
q = data['q'][idx]
sents = shared['x'][rx[0]][rx[1]]
config.max_para_size = max(config.max_para_size, sum(map(len, sents)))
config.max_num_sents = max(config.max_num_sents, len(sents))
config.max_sent_size = max(config.max_sent_size, max(map(len, sents)))
config.max_word_size = max(config.max_word_size, max(len(word) for sent in sents for word in sent))
if len(q) > 0:
config.max_ques_size = max(config.max_ques_size, len(q))
config.max_word_size = max(config.max_word_size, max(len(word) for word in q))
if config.mode == 'train':
config.max_num_sents = min(config.max_num_sents, config.num_sents_th)
config.max_sent_size = min(config.max_sent_size, config.sent_size_th)
config.max_para_size = min(config.max_para_size, config.para_size_th)
config.max_word_size = min(config.max_word_size, config.word_size_th)
config.char_vocab_size = len(data_sets[0].shared['char2idx'])
config.word_emb_size = len(next(iter(data_sets[0].shared['word2vec'].values())))
config.word_vocab_size = len(data_sets[0].shared['word2idx'])
if config.single:
config.max_num_sents = 1
if config.squash:
config.max_sent_size = config.max_para_size
config.max_num_sents = 1
| bi-att-flow-master | basic/read_data.py |
import tensorflow as tf
from basic.model import Model
from my.tensorflow import average_gradients
class Trainer(object):
def __init__(self, config, model):
assert isinstance(model, Model)
self.config = config
self.model = model
self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
self.loss = model.get_loss()
self.var_list = model.get_var_list()
self.global_step = model.get_global_step()
self.summary = model.summary
self.grads = self.opt.compute_gradients(self.loss, var_list=self.var_list)
self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
def get_train_op(self):
return self.train_op
def step(self, sess, batch, get_summary=False):
assert isinstance(sess, tf.Session)
_, ds = batch
feed_dict = self.model.get_feed_dict(ds, True)
if get_summary:
loss, summary, train_op = \
sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)
else:
loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
summary = None
return loss, summary, train_op
class MultiGPUTrainer(object):
def __init__(self, config, models):
model = models[0]
assert isinstance(model, Model)
self.config = config
self.model = model
self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
self.var_list = model.get_var_list()
self.global_step = model.get_global_step()
self.summary = model.summary
self.models = models
losses = []
grads_list = []
for gpu_idx, model in enumerate(models):
with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
loss = model.get_loss()
grads = self.opt.compute_gradients(loss, var_list=self.var_list)
losses.append(loss)
grads_list.append(grads)
self.loss = tf.add_n(losses)/len(losses)
self.grads = average_gradients(grads_list)
self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
def step(self, sess, batches, get_summary=False):
assert isinstance(sess, tf.Session)
feed_dict = {}
for batch, model in zip(batches, self.models):
_, ds = batch
feed_dict.update(model.get_feed_dict(ds, True))
if get_summary:
loss, summary, train_op = \
sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)
else:
loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
summary = None
return loss, summary, train_op
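# Note: MultiGPUTrainer builds one loss and one gradient list per tower, averages the
# gradients with average_gradients, and applies a single update, so the step is
# equivalent to training on the concatenation of the per-GPU batches.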
| bi-att-flow-master | basic/trainer.py |
import argparse
import json
import math
import os
import shutil
from pprint import pprint
import tensorflow as tf
from tqdm import tqdm
import numpy as np
from basic.evaluator import ForwardEvaluator, MultiGPUF1Evaluator
from basic.graph_handler import GraphHandler
from basic.model import get_multi_gpu_models
from basic.trainer import MultiGPUTrainer
from basic.read_data import read_data, get_squad_data_filter, update_config
def main(config):
set_dirs(config)
with tf.device(config.device):
if config.mode == 'train':
_train(config)
elif config.mode == 'test':
_test(config)
elif config.mode == 'forward':
_forward(config)
else:
raise ValueError("invalid value for 'mode': {}".format(config.mode))
def set_dirs(config):
# create directories
assert config.load or config.mode == 'train', "config.load must be True if not training"
if not config.load and os.path.exists(config.out_dir):
shutil.rmtree(config.out_dir)
config.save_dir = os.path.join(config.out_dir, "save")
config.log_dir = os.path.join(config.out_dir, "log")
config.eval_dir = os.path.join(config.out_dir, "eval")
config.answer_dir = os.path.join(config.out_dir, "answer")
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
if not os.path.exists(config.save_dir):
os.mkdir(config.save_dir)
if not os.path.exists(config.log_dir):
os.mkdir(config.log_dir)
if not os.path.exists(config.answer_dir):
os.mkdir(config.answer_dir)
if not os.path.exists(config.eval_dir):
os.mkdir(config.eval_dir)
def _config_debug(config):
if config.debug:
config.num_steps = 2
config.eval_period = 1
config.log_period = 1
config.save_period = 1
config.val_num_batches = 2
config.test_num_batches = 2
def _train(config):
data_filter = get_squad_data_filter(config)
train_data = read_data(config, 'train', config.load, data_filter=data_filter)
dev_data = read_data(config, 'dev', True, data_filter=data_filter)
update_config(config, [train_data, dev_data])
_config_debug(config)
word2vec_dict = train_data.shared['lower_word2vec'] if config.lower_word else train_data.shared['word2vec']
word2idx_dict = train_data.shared['word2idx']
idx2vec_dict = {word2idx_dict[word]: vec for word, vec in word2vec_dict.items() if word in word2idx_dict}
emb_mat = np.array([idx2vec_dict[idx] if idx in idx2vec_dict
else np.random.multivariate_normal(np.zeros(config.word_emb_size), np.eye(config.word_emb_size))
for idx in range(config.word_vocab_size)])
config.emb_mat = emb_mat
# construct model graph and variables (using default graph)
pprint(config.__flags, indent=2)
models = get_multi_gpu_models(config)
model = models[0]
trainer = MultiGPUTrainer(config, models)
evaluator = MultiGPUF1Evaluator(config, models, tensor_dict=model.tensor_dict if config.vis else None)
graph_handler = GraphHandler(config, model) # controls all tensors and variables in the graph, including loading /saving
# Variables
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
graph_handler.initialize(sess)
# Begin training
num_steps = config.num_steps or int(math.ceil(train_data.num_examples / (config.batch_size * config.num_gpus))) * config.num_epochs
global_step = 0
for batches in tqdm(train_data.get_multi_batches(config.batch_size, config.num_gpus,
num_steps=num_steps, shuffle=True, cluster=config.cluster), total=num_steps):
global_step = sess.run(model.global_step) + 1 # +1 because all calculations are done after step
get_summary = global_step % config.log_period == 0
loss, summary, train_op = trainer.step(sess, batches, get_summary=get_summary)
if get_summary:
graph_handler.add_summary(summary, global_step)
# occasional saving
if global_step % config.save_period == 0:
graph_handler.save(sess, global_step=global_step)
if not config.eval:
continue
# Occasional evaluation
if global_step % config.eval_period == 0:
            eval_num_steps = math.ceil(dev_data.num_examples / (config.batch_size * config.num_gpus))  # separate name so the outer num_steps is not clobbered
            if 0 < config.val_num_batches < eval_num_steps:
                eval_num_steps = config.val_num_batches
            e_train = evaluator.get_evaluation_from_batches(
                sess, tqdm(train_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=eval_num_steps), total=eval_num_steps)
            )
            graph_handler.add_summaries(e_train.summaries, global_step)
            e_dev = evaluator.get_evaluation_from_batches(
                sess, tqdm(dev_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=eval_num_steps), total=eval_num_steps))
graph_handler.add_summaries(e_dev.summaries, global_step)
if config.dump_eval:
graph_handler.dump_eval(e_dev)
if config.dump_answer:
graph_handler.dump_answer(e_dev)
if global_step % config.save_period != 0:
graph_handler.save(sess, global_step=global_step)
def _test(config):
test_data = read_data(config, 'test', True)
update_config(config, [test_data])
_config_debug(config)
if config.use_glove_for_unk:
word2vec_dict = test_data.shared['lower_word2vec'] if config.lower_word else test_data.shared['word2vec']
new_word2idx_dict = test_data.shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for word, idx in new_word2idx_dict.items()}
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
config.new_emb_mat = new_emb_mat
pprint(config.__flags, indent=2)
models = get_multi_gpu_models(config)
model = models[0]
evaluator = MultiGPUF1Evaluator(config, models, tensor_dict=models[0].tensor_dict if config.vis else None)
graph_handler = GraphHandler(config, model)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
graph_handler.initialize(sess)
num_steps = math.ceil(test_data.num_examples / (config.batch_size * config.num_gpus))
if 0 < config.test_num_batches < num_steps:
num_steps = config.test_num_batches
e = None
for multi_batch in tqdm(test_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps, cluster=config.cluster), total=num_steps):
ei = evaluator.get_evaluation(sess, multi_batch)
e = ei if e is None else e + ei
if config.vis:
eval_subdir = os.path.join(config.eval_dir, "{}-{}".format(ei.data_type, str(ei.global_step).zfill(6)))
if not os.path.exists(eval_subdir):
os.mkdir(eval_subdir)
path = os.path.join(eval_subdir, str(ei.idxs[0]).zfill(8))
graph_handler.dump_eval(ei, path=path)
print(e)
if config.dump_answer:
print("dumping answer ...")
graph_handler.dump_answer(e)
if config.dump_eval:
print("dumping eval ...")
graph_handler.dump_eval(e)
def _forward(config):
assert config.load
test_data = read_data(config, config.forward_name, True)
update_config(config, [test_data])
_config_debug(config)
if config.use_glove_for_unk:
word2vec_dict = test_data.shared['lower_word2vec'] if config.lower_word else test_data.shared['word2vec']
new_word2idx_dict = test_data.shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for word, idx in new_word2idx_dict.items()}
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
config.new_emb_mat = new_emb_mat
pprint(config.__flags, indent=2)
models = get_multi_gpu_models(config)
model = models[0]
evaluator = ForwardEvaluator(config, model)
graph_handler = GraphHandler(config, model) # controls all tensors and variables in the graph, including loading /saving
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
graph_handler.initialize(sess)
num_batches = math.ceil(test_data.num_examples / config.batch_size)
if 0 < config.test_num_batches < num_batches:
num_batches = config.test_num_batches
e = evaluator.get_evaluation_from_batches(sess, tqdm(test_data.get_batches(config.batch_size, num_batches=num_batches), total=num_batches))
print(e)
if config.dump_answer:
print("dumping answer ...")
graph_handler.dump_answer(e, path=config.answer_path)
if config.dump_eval:
print("dumping eval ...")
graph_handler.dump_eval(e, path=config.eval_path)
def _get_args():
parser = argparse.ArgumentParser()
parser.add_argument("config_path")
return parser.parse_args()
class Config(object):
def __init__(self, **entries):
self.__dict__.update(entries)
def _run():
args = _get_args()
with open(args.config_path, 'r') as fh:
config = Config(**json.load(fh))
main(config)
if __name__ == "__main__":
_run()
| bi-att-flow-master | basic/main.py |
import numpy as np
import tensorflow as tf
from basic.read_data import DataSet
from my.nltk_utils import span_f1
from my.tensorflow import padded_reshape
from my.utils import argmax
from squad.utils import get_phrase, get_best_span
class Evaluation(object):
def __init__(self, data_type, global_step, idxs, yp, tensor_dict=None):
self.data_type = data_type
self.global_step = global_step
self.idxs = idxs
self.yp = yp
self.num_examples = len(yp)
self.tensor_dict = None
self.dict = {'data_type': data_type,
'global_step': global_step,
'yp': yp,
'idxs': idxs,
'num_examples': self.num_examples}
if tensor_dict is not None:
self.tensor_dict = {key: val.tolist() for key, val in tensor_dict.items()}
for key, val in self.tensor_dict.items():
self.dict[key] = val
self.summaries = None
def __repr__(self):
return "{} step {}".format(self.data_type, self.global_step)
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_yp = self.yp + other.yp
new_idxs = self.idxs + other.idxs
new_tensor_dict = None
if self.tensor_dict is not None:
new_tensor_dict = {key: val + other.tensor_dict[key] for key, val in self.tensor_dict.items()}
return Evaluation(self.data_type, self.global_step, new_idxs, new_yp, tensor_dict=new_tensor_dict)
def __radd__(self, other):
return self.__add__(other)
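# Defining __radd__ with the `other == 0` base case lets evaluations be merged with
# the built-in sum(), which starts its accumulation from 0:
#   e = sum(evaluations)   # equivalent to 0 + e1 + e2 + ...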
class LabeledEvaluation(Evaluation):
def __init__(self, data_type, global_step, idxs, yp, y, tensor_dict=None):
super(LabeledEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
self.y = y
self.dict['y'] = y
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_yp = self.yp + other.yp
new_y = self.y + other.y
new_idxs = self.idxs + other.idxs
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
        return LabeledEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, tensor_dict=new_tensor_dict)
class AccuracyEvaluation(LabeledEvaluation):
def __init__(self, data_type, global_step, idxs, yp, y, correct, loss, tensor_dict=None):
super(AccuracyEvaluation, self).__init__(data_type, global_step, idxs, yp, y, tensor_dict=tensor_dict)
self.loss = loss
self.correct = correct
self.acc = sum(correct) / len(correct)
self.dict['loss'] = loss
self.dict['correct'] = correct
self.dict['acc'] = self.acc
loss_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/loss'.format(data_type), simple_value=self.loss)])
acc_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/acc'.format(data_type), simple_value=self.acc)])
self.summaries = [loss_summary, acc_summary]
def __repr__(self):
return "{} step {}: accuracy={}, loss={}".format(self.data_type, self.global_step, self.acc, self.loss)
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_idxs = self.idxs + other.idxs
new_yp = self.yp + other.yp
new_y = self.y + other.y
new_correct = self.correct + other.correct
new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
        return AccuracyEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, new_correct, new_loss, tensor_dict=new_tensor_dict)
class Evaluator(object):
def __init__(self, config, model, tensor_dict=None):
self.config = config
self.model = model
self.global_step = model.global_step
self.yp = model.yp
self.tensor_dict = {} if tensor_dict is None else tensor_dict
def get_evaluation(self, sess, batch):
idxs, data_set = batch
feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
global_step, yp, vals = sess.run([self.global_step, self.yp, list(self.tensor_dict.values())], feed_dict=feed_dict)
yp = yp[:data_set.num_examples]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), tensor_dict=tensor_dict)
return e
def get_evaluation_from_batches(self, sess, batches):
e = sum(self.get_evaluation(sess, batch) for batch in batches)
return e
class LabeledEvaluator(Evaluator):
def __init__(self, config, model, tensor_dict=None):
super(LabeledEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
self.y = model.y
def get_evaluation(self, sess, batch):
idxs, data_set = batch
feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
global_step, yp, vals = sess.run([self.global_step, self.yp, list(self.tensor_dict.values())], feed_dict=feed_dict)
yp = yp[:data_set.num_examples]
y = feed_dict[self.y]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = LabeledEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), y.tolist(), tensor_dict=tensor_dict)
return e
class AccuracyEvaluator(LabeledEvaluator):
def __init__(self, config, model, tensor_dict=None):
super(AccuracyEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
self.loss = model.loss
def get_evaluation(self, sess, batch):
idxs, data_set = batch
assert isinstance(data_set, DataSet)
feed_dict = self.model.get_feed_dict(data_set, False)
global_step, yp, loss, vals = sess.run([self.global_step, self.yp, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
y = data_set.data['y']
yp = yp[:data_set.num_examples]
correct = [self.__class__.compare(yi, ypi) for yi, ypi in zip(y, yp)]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = AccuracyEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), y, correct, float(loss), tensor_dict=tensor_dict)
return e
@staticmethod
def compare(yi, ypi):
for start, stop in yi:
if start == int(np.argmax(ypi)):
return True
return False
class AccuracyEvaluator2(AccuracyEvaluator):
@staticmethod
def compare(yi, ypi):
for start, stop in yi:
para_start = int(np.argmax(np.max(ypi, 1)))
sent_start = int(np.argmax(ypi[para_start]))
if tuple(start) == (para_start, sent_start):
return True
return False
class ForwardEvaluation(Evaluation):
def __init__(self, data_type, global_step, idxs, yp, yp2, loss, id2answer_dict, tensor_dict=None):
super(ForwardEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
self.yp2 = yp2
self.loss = loss
self.dict['loss'] = loss
self.dict['yp2'] = yp2
self.id2answer_dict = id2answer_dict
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_idxs = self.idxs + other.idxs
new_yp = self.yp + other.yp
new_yp2 = self.yp2 + other.yp2
new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_yp)
new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
new_id2answer_dict['scores'] = new_id2score_dict
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
        return ForwardEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_loss, new_id2answer_dict, tensor_dict=new_tensor_dict)
def __repr__(self):
return "{} step {}: loss={:.4f}".format(self.data_type, self.global_step, self.loss)
class F1Evaluation(AccuracyEvaluation):
def __init__(self, data_type, global_step, idxs, yp, yp2, y, correct, loss, f1s, id2answer_dict, tensor_dict=None):
super(F1Evaluation, self).__init__(data_type, global_step, idxs, yp, y, correct, loss, tensor_dict=tensor_dict)
self.yp2 = yp2
self.f1s = f1s
self.f1 = float(np.mean(f1s))
self.dict['yp2'] = yp2
self.dict['f1s'] = f1s
self.dict['f1'] = self.f1
self.id2answer_dict = id2answer_dict
f1_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/f1'.format(data_type), simple_value=self.f1)])
self.summaries.append(f1_summary)
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_idxs = self.idxs + other.idxs
new_yp = self.yp + other.yp
new_yp2 = self.yp2 + other.yp2
new_y = self.y + other.y
new_correct = self.correct + other.correct
new_f1s = self.f1s + other.f1s
new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
new_id2answer_dict['scores'] = new_id2score_dict
return F1Evaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_y, new_correct, new_loss, new_f1s, new_id2answer_dict)
def __repr__(self):
return "{} step {}: accuracy={:.4f}, f1={:.4f}, loss={:.4f}".format(self.data_type, self.global_step, self.acc, self.f1, self.loss)
class F1Evaluator(LabeledEvaluator):
def __init__(self, config, model, tensor_dict=None):
super(F1Evaluator, self).__init__(config, model, tensor_dict=tensor_dict)
self.yp2 = model.yp2
self.loss = model.loss
def get_evaluation(self, sess, batch):
idxs, data_set = self._split_batch(batch)
assert isinstance(data_set, DataSet)
feed_dict = self._get_feed_dict(batch)
global_step, yp, yp2, loss, vals = sess.run([self.global_step, self.yp, self.yp2, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
y = data_set.data['y']
if self.config.squash:
new_y = []
for xi, yi in zip(data_set.data['x'], y):
new_yi = []
for start, stop in yi:
start_offset = sum(map(len, xi[:start[0]]))
stop_offset = sum(map(len, xi[:stop[0]]))
new_start = 0, start_offset + start[1]
new_stop = 0, stop_offset + stop[1]
new_yi.append((new_start, new_stop))
new_y.append(new_yi)
y = new_y
if self.config.single:
new_y = []
for yi in y:
new_yi = []
for start, stop in yi:
new_start = 0, start[1]
new_stop = 0, stop[1]
new_yi.append((new_start, new_stop))
new_y.append(new_yi)
y = new_y
yp, yp2 = yp[:data_set.num_examples], yp2[:data_set.num_examples]
spans, scores = zip(*[get_best_span(ypi, yp2i) for ypi, yp2i in zip(yp, yp2)])
def _get(xi, span):
if len(xi) <= span[0][0]:
return [""]
if len(xi[span[0][0]]) <= span[1][1]:
return [""]
return xi[span[0][0]][span[0][1]:span[1][1]]
def _get2(context, xi, span):
if len(xi) <= span[0][0]:
return ""
if len(xi[span[0][0]]) <= span[1][1]:
return ""
return get_phrase(context, xi, span)
id2answer_dict = {id_: _get2(context, xi, span)
for id_, xi, span, context in zip(data_set.data['ids'], data_set.data['x'], spans, data_set.data['p'])}
id2score_dict = {id_: score for id_, score in zip(data_set.data['ids'], scores)}
id2answer_dict['scores'] = id2score_dict
correct = [self.__class__.compare2(yi, span) for yi, span in zip(y, spans)]
f1s = [self.__class__.span_f1(yi, span) for yi, span in zip(y, spans)]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = F1Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), y,
correct, float(loss), f1s, id2answer_dict, tensor_dict=tensor_dict)
return e
def _split_batch(self, batch):
return batch
def _get_feed_dict(self, batch):
return self.model.get_feed_dict(batch[1], False)
@staticmethod
def compare(yi, ypi, yp2i):
for start, stop in yi:
aypi = argmax(ypi)
mask = np.zeros(yp2i.shape)
mask[aypi[0], aypi[1]:] = np.ones([yp2i.shape[1] - aypi[1]])
if tuple(start) == aypi and (stop[0], stop[1]-1) == argmax(yp2i * mask):
return True
return False
@staticmethod
def compare2(yi, span):
for start, stop in yi:
if tuple(start) == span[0] and tuple(stop) == span[1]:
return True
return False
@staticmethod
def span_f1(yi, span):
max_f1 = 0
for start, stop in yi:
if start[0] == span[0][0]:
true_span = start[1], stop[1]
pred_span = span[0][1], span[1][1]
f1 = span_f1(true_span, pred_span)
max_f1 = max(f1, max_f1)
return max_f1
class MultiGPUF1Evaluator(F1Evaluator):
def __init__(self, config, models, tensor_dict=None):
super(MultiGPUF1Evaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
self.models = models
with tf.name_scope("eval_concat"):
N, M, JX = config.batch_size, config.max_num_sents, config.max_sent_size
self.yp = tf.concat(0, [padded_reshape(model.yp, [N, M, JX]) for model in models])
self.yp2 = tf.concat(0, [padded_reshape(model.yp2, [N, M, JX]) for model in models])
self.loss = tf.add_n([model.loss for model in models])/len(models)
def _split_batch(self, batches):
idxs_list, data_sets = zip(*batches)
idxs = sum(idxs_list, ())
data_set = sum(data_sets, data_sets[0].get_empty())
return idxs, data_set
def _get_feed_dict(self, batches):
feed_dict = {}
for model, (_, data_set) in zip(self.models, batches):
feed_dict.update(model.get_feed_dict(data_set, False))
return feed_dict
class ForwardEvaluator(Evaluator):
def __init__(self, config, model, tensor_dict=None):
super(ForwardEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
self.yp2 = model.yp2
self.loss = model.loss
def get_evaluation(self, sess, batch):
idxs, data_set = batch
assert isinstance(data_set, DataSet)
feed_dict = self.model.get_feed_dict(data_set, False)
global_step, yp, yp2, loss, vals = sess.run([self.global_step, self.yp, self.yp2, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
yp, yp2 = yp[:data_set.num_examples], yp2[:data_set.num_examples]
spans, scores = zip(*[get_best_span(ypi, yp2i) for ypi, yp2i in zip(yp, yp2)])
def _get(xi, span):
if len(xi) <= span[0][0]:
return [""]
if len(xi[span[0][0]]) <= span[1][1]:
return [""]
return xi[span[0][0]][span[0][1]:span[1][1]]
def _get2(context, xi, span):
if len(xi) <= span[0][0]:
return ""
if len(xi[span[0][0]]) <= span[1][1]:
return ""
return get_phrase(context, xi, span)
id2answer_dict = {id_: _get2(context, xi, span)
for id_, xi, span, context in zip(data_set.data['ids'], data_set.data['x'], spans, data_set.data['p'])}
id2score_dict = {id_: score for id_, score in zip(data_set.data['ids'], scores)}
id2answer_dict['scores'] = id2score_dict
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = ForwardEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), float(loss), id2answer_dict, tensor_dict=tensor_dict)
return e
@staticmethod
def compare(yi, ypi, yp2i):
for start, stop in yi:
aypi = argmax(ypi)
mask = np.zeros(yp2i.shape)
mask[aypi[0], aypi[1]:] = np.ones([yp2i.shape[1] - aypi[1]])
if tuple(start) == aypi and (stop[0], stop[1]-1) == argmax(yp2i * mask):
return True
return False
@staticmethod
def compare2(yi, span):
for start, stop in yi:
if tuple(start) == span[0] and tuple(stop) == span[1]:
return True
return False
@staticmethod
def span_f1(yi, span):
max_f1 = 0
for start, stop in yi:
if start[0] == span[0][0]:
true_span = start[1], stop[1]
pred_span = span[0][1], span[1][1]
f1 = span_f1(true_span, pred_span)
max_f1 = max(f1, max_f1)
return max_f1
| bi-att-flow-master | basic/evaluator.py |
import shutil
from collections import OrderedDict
import http.server
import socketserver
import argparse
import json
import os
import numpy as np
from tqdm import tqdm
from jinja2 import Environment, FileSystemLoader
from squad.utils import get_best_span
def bool_(string):
if string == 'True':
return True
elif string == 'False':
return False
else:
raise Exception()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default='basic')
parser.add_argument("--data_type", type=str, default='dev')
parser.add_argument("--step", type=int, default=5000)
parser.add_argument("--template_name", type=str, default="visualizer.html")
parser.add_argument("--num_per_page", type=int, default=100)
parser.add_argument("--data_dir", type=str, default="data/squad")
parser.add_argument("--port", type=int, default=8000)
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--open", type=str, default='False')
parser.add_argument("--run_id", type=str, default="0")
args = parser.parse_args()
return args
def _decode(decoder, sent):
return " ".join(decoder[idx] for idx in sent)
def accuracy2_visualizer(args):
model_name = args.model_name
data_type = args.data_type
num_per_page = args.num_per_page
data_dir = args.data_dir
run_id = args.run_id.zfill(2)
step = args.step
    eval_path = os.path.join("out", model_name, run_id, "eval", "{}-{}.json".format(data_type, str(step).zfill(6)))
print("loading {}".format(eval_path))
eval_ = json.load(open(eval_path, 'r'))
_id = 0
html_dir = "/tmp/list_results%d" % _id
while os.path.exists(html_dir):
_id += 1
html_dir = "/tmp/list_results%d" % _id
if os.path.exists(html_dir):
shutil.rmtree(html_dir)
os.mkdir(html_dir)
cur_dir = os.path.dirname(os.path.realpath(__file__))
templates_dir = os.path.join(cur_dir, 'templates')
env = Environment(loader=FileSystemLoader(templates_dir))
env.globals.update(zip=zip, reversed=reversed)
template = env.get_template(args.template_name)
data_path = os.path.join(data_dir, "data_{}.json".format(data_type))
shared_path = os.path.join(data_dir, "shared_{}.json".format(data_type))
print("loading {}".format(data_path))
data = json.load(open(data_path, 'r'))
print("loading {}".format(shared_path))
shared = json.load(open(shared_path, 'r'))
rows = []
for i, (idx, yi, ypi, yp2i) in tqdm(enumerate(zip(*[eval_[key] for key in ('idxs', 'y', 'yp', 'yp2')])), total=len(eval_['idxs'])):
id_, q, rx, answers = (data[key][idx] for key in ('ids', 'q', '*x', 'answerss'))
x = shared['x'][rx[0]][rx[1]]
ques = [" ".join(q)]
para = [[word for word in sent] for sent in x]
span = get_best_span(ypi, yp2i)
ap = get_segment(para, span)
score = "{:.3f}".format(ypi[span[0][0]][span[0][1]] * yp2i[span[1][0]][span[1][1]-1])
row = {
'id': id_,
'title': "Hello world!",
'ques': ques,
'para': para,
'y': yi[0][0],
'y2': yi[0][1],
'yp': ypi,
'yp2': yp2i,
'a': answers,
'ap': ap,
'score': score
}
rows.append(row)
if i % num_per_page == 0:
html_path = os.path.join(html_dir, "%s.html" % str(i).zfill(8))
if (i + 1) % num_per_page == 0 or (i + 1) == len(eval_['y']):
var_dict = {'title': "Accuracy Visualization",
'rows': rows
}
with open(html_path, "wb") as f:
f.write(template.render(**var_dict).encode('UTF-8'))
rows = []
os.chdir(html_dir)
port = args.port
host = args.host
    # Overridden to suppress per-request log messages
class MyHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
handler = MyHandler
httpd = socketserver.TCPServer((host, port), handler)
if args.open == 'True':
os.system("open http://%s:%d" % (args.host, args.port))
print("serving at %s:%d" % (host, port))
httpd.serve_forever()
def get_segment(para, span):
return " ".join(para[span[0][0]][span[0][1]:span[1][1]])
if __name__ == "__main__":
ARGS = get_args()
accuracy2_visualizer(ARGS) | bi-att-flow-master | basic/visualizer.py |
bi-att-flow-master | cnn_dm/__init__.py |
|
import argparse
import json
import os
# data: q, cq, (dq), (pq), y, *x, *cx
# shared: x, cx, (dx), (px), word_counter, char_counter, word2vec
# no metadata
from collections import Counter
from tqdm import tqdm
from squad.utils import get_word_span, process_tokens
def bool_(arg):
if arg == 'True':
return True
elif arg == 'False':
return False
raise Exception(arg)
def main():
args = get_args()
prepro(args)
def get_args():
parser = argparse.ArgumentParser()
home = os.path.expanduser("~")
source_dir = os.path.join(home, "data", "cnn", 'questions')
target_dir = "data/cnn"
glove_dir = os.path.join(home, "data", "glove")
parser.add_argument("--source_dir", default=source_dir)
parser.add_argument("--target_dir", default=target_dir)
parser.add_argument("--glove_dir", default=glove_dir)
parser.add_argument("--glove_corpus", default='6B')
parser.add_argument("--glove_vec_size", default=100, type=int)
parser.add_argument("--debug", default=False, type=bool_)
parser.add_argument("--num_sents_th", default=200, type=int)
parser.add_argument("--ques_size_th", default=30, type=int)
parser.add_argument("--width", default=5, type=int)
# TODO : put more args here
return parser.parse_args()
def prepro(args):
prepro_each(args, 'train')
prepro_each(args, 'dev')
prepro_each(args, 'test')
def para2sents(para, width):
"""
    Turn para into a double array of words (wordss),
    where each sentence is up to `width` word neighbors on each side of an entity.
    :param para:
    :param width:
    :return:
"""
words = para.split(" ")
sents = []
for i, word in enumerate(words):
if word.startswith("@"):
start = max(i - width, 0)
stop = min(i + width + 1, len(words))
sent = words[start:stop]
sents.append(sent)
return sents
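# Worked example: with width=2, a window of word neighbors is extracted around each
# "@"-prefixed entity token:
#   para2sents("@entity1 went to the store today", 2)
#   ->  [['@entity1', 'went', 'to']]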
def get_word2vec(args, word_counter):
glove_path = os.path.join(args.glove_dir, "glove.{}.{}d.txt".format(args.glove_corpus, args.glove_vec_size))
sizes = {'6B': int(4e5), '42B': int(1.9e6), '840B': int(2.2e6), '2B': int(1.2e6)}
total = sizes[args.glove_corpus]
word2vec_dict = {}
with open(glove_path, 'r', encoding='utf-8') as fh:
for line in tqdm(fh, total=total):
array = line.lstrip().rstrip().split(" ")
word = array[0]
vector = list(map(float, array[1:]))
if word in word_counter:
word2vec_dict[word] = vector
elif word.capitalize() in word_counter:
word2vec_dict[word.capitalize()] = vector
elif word.lower() in word_counter:
word2vec_dict[word.lower()] = vector
elif word.upper() in word_counter:
word2vec_dict[word.upper()] = vector
print("{}/{} of word vocab have corresponding vectors in {}".format(len(word2vec_dict), len(word_counter), glove_path))
return word2vec_dict
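# Note: the GloVe lookup falls back across casings. If GloVe contains "paris" but the
# corpus counter only saw "Paris", the vector is stored under the key "Paris".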
def prepro_each(args, mode):
source_dir = os.path.join(args.source_dir, mode)
word_counter = Counter()
lower_word_counter = Counter()
ent_counter = Counter()
char_counter = Counter()
max_sent_size = 0
max_word_size = 0
max_ques_size = 0
max_num_sents = 0
file_names = list(os.listdir(source_dir))
if args.debug:
file_names = file_names[:1000]
lens = []
out_file_names = []
for file_name in tqdm(file_names, total=len(file_names)):
if file_name.endswith(".question"):
with open(os.path.join(source_dir, file_name), 'r') as fh:
url = fh.readline().strip()
_ = fh.readline()
para = fh.readline().strip()
_ = fh.readline()
ques = fh.readline().strip()
_ = fh.readline()
answer = fh.readline().strip()
_ = fh.readline()
cands = list(line.strip() for line in fh)
cand_ents = list(cand.split(":")[0] for cand in cands)
sents = para2sents(para, args.width)
ques_words = ques.split(" ")
# Filtering
if len(sents) > args.num_sents_th or len(ques_words) > args.ques_size_th:
continue
max_sent_size = max(max(map(len, sents)), max_sent_size)
max_ques_size = max(len(ques_words), max_ques_size)
max_word_size = max(max(len(word) for sent in sents for word in sent), max_word_size)
max_num_sents = max(len(sents), max_num_sents)
for word in ques_words:
if word.startswith("@"):
ent_counter[word] += 1
word_counter[word] += 1
else:
word_counter[word] += 1
lower_word_counter[word.lower()] += 1
for c in word:
char_counter[c] += 1
for sent in sents:
for word in sent:
if word.startswith("@"):
ent_counter[word] += 1
word_counter[word] += 1
else:
word_counter[word] += 1
lower_word_counter[word.lower()] += 1
for c in word:
char_counter[c] += 1
out_file_names.append(file_name)
lens.append(len(sents))
num_examples = len(out_file_names)
assert len(out_file_names) == len(lens)
sorted_file_names, lens = zip(*sorted(zip(out_file_names, lens), key=lambda each: each[1]))
assert lens[-1] == max_num_sents
word2vec_dict = get_word2vec(args, word_counter)
    lower_word2vec_dict = get_word2vec(args, lower_word_counter)
shared = {'word_counter': word_counter, 'ent_counter': ent_counter, 'char_counter': char_counter,
'lower_word_counter': lower_word_counter,
'max_num_sents': max_num_sents, 'max_sent_size': max_sent_size, 'max_word_size': max_word_size,
'max_ques_size': max_ques_size,
              'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict, 'sorted': sorted_file_names,
'num_examples': num_examples}
print("max num sents: {}".format(max_num_sents))
print("max ques size: {}".format(max_ques_size))
if not os.path.exists(args.target_dir):
os.makedirs(args.target_dir)
shared_path = os.path.join(args.target_dir, "shared_{}.json".format(mode))
with open(shared_path, 'w') as fh:
json.dump(shared, fh)
if __name__ == "__main__":
main()
| bi-att-flow-master | cnn_dm/prepro.py |
import json
import os
import sys
root_dir = sys.argv[1]
answer_path = sys.argv[2]
file_names = os.listdir(root_dir)
num_correct = 0
num_wrong = 0
with open(answer_path, 'r') as fh:
id2answer_dict = json.load(fh)
for file_name in file_names:
if not file_name.endswith(".question"):
continue
with open(os.path.join(root_dir, file_name), 'r') as fh:
url = fh.readline().strip()
_ = fh.readline()
para = fh.readline().strip()
_ = fh.readline()
ques = fh.readline().strip()
_ = fh.readline()
answer = fh.readline().strip()
_ = fh.readline()
if file_name in id2answer_dict:
pred = id2answer_dict[file_name]
if pred == answer:
num_correct += 1
else:
num_wrong += 1
else:
num_wrong += 1
total = num_correct + num_wrong
acc = float(num_correct) / total
print("{} = {} / {}".format(acc, num_correct, total)) | bi-att-flow-master | cnn_dm/evaluate.py |
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
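# Worked example:
#   normalize_answer("The Blue-Whale!")  ->  "bluewhale"
# (lowercased, punctuation stripped, the article "the" removed, whitespace squashed)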
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
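# Worked example: prediction "the cat sat" vs. ground truth "cat sat down". After
# normalization the shared tokens are {cat, sat}, so precision = 2/2, recall = 2/3,
# and f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.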
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
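# A prediction is scored against every annotated answer and the best score is kept,
# so matching any one of several gold answers earns full credit for that metric.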
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
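# Example invocation (file names are placeholders):
#   python squad/evaluate-v1.1.py data/squad/dev-v1.1.json predictions.json
# prints a JSON object of the form {"exact_match": ..., "f1": ...}.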
| bi-att-flow-master | squad/evaluate-v1.1.py |
import argparse
import json
import os
# data: q, cq, (dq), (pq), y, *x, *cx
# shared: x, cx, (dx), (px), word_counter, char_counter, word2vec
# no metadata
from collections import Counter
import nltk
from tqdm import tqdm
from my.nltk_utils import load_compressed_tree
def bool_(arg):
if arg == 'True':
return True
elif arg == 'False':
return False
raise Exception()
def main():
args = get_args()
prepro(args)
def get_args():
parser = argparse.ArgumentParser()
home = os.path.expanduser("~")
source_dir = os.path.join(home, "data", "squad")
target_dir = "data/squad"
glove_dir = os.path.join(home, "data", "glove")
parser.add_argument("--source_dir", default=source_dir)
parser.add_argument("--target_dir", default=target_dir)
parser.add_argument("--debug", default=False, type=bool_)
parser.add_argument("--train_ratio", default=0.9, type=int)
parser.add_argument("--glove_corpus", default="6B")
parser.add_argument("--glove_dir", default=glove_dir)
parser.add_argument("--glove_vec_size", default=100, type=int)
parser.add_argument("--full_train", default=False, type=bool_)
# TODO : put more args here
return parser.parse_args()
def prepro(args):
if not os.path.exists(args.target_dir):
os.makedirs(args.target_dir)
if args.full_train:
data_train, shared_train = prepro_each(args, 'train')
data_dev, shared_dev = prepro_each(args, 'dev')
else:
data_train, shared_train = prepro_each(args, 'train', 0.0, args.train_ratio)
data_dev, shared_dev = prepro_each(args, 'train', args.train_ratio, 1.0)
data_test, shared_test = prepro_each(args, 'dev')
print("saving ...")
save(args, data_train, shared_train, 'train')
save(args, data_dev, shared_dev, 'dev')
save(args, data_test, shared_test, 'test')
def save(args, data, shared, data_type):
data_path = os.path.join(args.target_dir, "data_{}.json".format(data_type))
shared_path = os.path.join(args.target_dir, "shared_{}.json".format(data_type))
json.dump(data, open(data_path, 'w'))
json.dump(shared, open(shared_path, 'w'))
def get_word2vec(args, word_counter):
glove_path = os.path.join(args.glove_dir, "glove.{}.{}d.txt".format(args.glove_corpus, args.glove_vec_size))
sizes = {'6B': int(4e5), '42B': int(1.9e6), '840B': int(2.2e6), '2B': int(1.2e6)}
total = sizes[args.glove_corpus]
word2vec_dict = {}
with open(glove_path, 'r') as fh:
for line in tqdm(fh, total=total):
array = line.lstrip().rstrip().split(" ")
word = array[0]
vector = list(map(float, array[1:]))
if word in word_counter:
word2vec_dict[word] = vector
elif word.capitalize() in word_counter:
word2vec_dict[word.capitalize()] = vector
elif word.lower() in word_counter:
word2vec_dict[word.lower()] = vector
elif word.upper() in word_counter:
word2vec_dict[word.upper()] = vector
print("{}/{} of word vocab have corresponding vectors in {}".format(len(word2vec_dict), len(word_counter), glove_path))
return word2vec_dict
def prepro_each(args, data_type, start_ratio=0.0, stop_ratio=1.0):
source_path = os.path.join(args.source_dir, "{}-v1.0-aug.json".format(data_type))
source_data = json.load(open(source_path, 'r'))
q, cq, y, rx, rcx, ids, idxs = [], [], [], [], [], [], []
x, cx, tx, stx = [], [], [], []
answerss = []
word_counter, char_counter, lower_word_counter = Counter(), Counter(), Counter()
pos_counter = Counter()
start_ai = int(round(len(source_data['data']) * start_ratio))
stop_ai = int(round(len(source_data['data']) * stop_ratio))
for ai, article in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
xp, cxp, txp, stxp = [], [], [], []
x.append(xp)
cx.append(cxp)
tx.append(txp)
stx.append(stxp)
for pi, para in enumerate(article['paragraphs']):
xi = []
for dep in para['deps']:
if dep is None:
xi.append([])
else:
xi.append([node[0] for node in dep[0]])
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
txp.append(para['consts'])
stxp.append([str(load_compressed_tree(s)) for s in para['consts']])
trees = map(nltk.tree.Tree.fromstring, para['consts'])
for tree in trees:
for subtree in tree.subtrees():
pos_counter[subtree.label()] += 1
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
dep = qa['dep']
qi = [] if dep is None else [node[0] for node in dep[0]]
cqi = [list(qij) for qij in qi]
yi = []
answers = []
for answer in qa['answers']:
answers.append(answer['text'])
yi0 = answer['answer_word_start'] or [0, 0]
yi1 = answer['answer_word_stop'] or [0, 1]
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
yi.append([yi0, yi1])
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, '*tx': rx, '*stx': rx,
'idxs': idxs, 'ids': ids, 'answerss': answerss}
shared = {'x': x, 'cx': cx, 'tx': tx, 'stx': stx,
'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter,
'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict, 'pos_counter': pos_counter}
return data, shared
if __name__ == "__main__":
    main()
| bi-att-flow-master | squad/prepro_aug.py |
| bi-att-flow-master | squad/__init__.py |
import argparse
import json
import os
# data: q, cq, (dq), (pq), y, *x, *cx
# shared: x, cx, (dx), (px), word_counter, char_counter, word2vec
# no metadata
from collections import Counter
from tqdm import tqdm
from squad.utils import get_word_span, get_word_idx, process_tokens
def main():
args = get_args()
prepro(args)
def get_args():
parser = argparse.ArgumentParser()
home = os.path.expanduser("~")
source_dir = os.path.join(home, "data", "squad")
target_dir = "data/squad"
glove_dir = os.path.join(home, "data", "glove")
parser.add_argument('-s', "--source_dir", default=source_dir)
parser.add_argument('-t', "--target_dir", default=target_dir)
parser.add_argument('-d', "--debug", action='store_true')
parser.add_argument("--train_ratio", default=0.9, type=int)
parser.add_argument("--glove_corpus", default="6B")
parser.add_argument("--glove_dir", default=glove_dir)
parser.add_argument("--glove_vec_size", default=100, type=int)
parser.add_argument("--mode", default="full", type=str)
parser.add_argument("--single_path", default="", type=str)
parser.add_argument("--tokenizer", default="PTB", type=str)
parser.add_argument("--url", default="vision-server2.corp.ai2", type=str)
parser.add_argument("--port", default=8000, type=int)
parser.add_argument("--split", action='store_true')
# TODO : put more args here
return parser.parse_args()
def create_all(args):
out_path = os.path.join(args.source_dir, "all-v1.1.json")
if os.path.exists(out_path):
return
train_path = os.path.join(args.source_dir, "train-v1.1.json")
train_data = json.load(open(train_path, 'r'))
dev_path = os.path.join(args.source_dir, "dev-v1.1.json")
dev_data = json.load(open(dev_path, 'r'))
train_data['data'].extend(dev_data['data'])
print("dumping all data ...")
json.dump(train_data, open(out_path, 'w'))
def prepro(args):
if not os.path.exists(args.target_dir):
os.makedirs(args.target_dir)
if args.mode == 'full':
prepro_each(args, 'train', out_name='train')
prepro_each(args, 'dev', out_name='dev')
prepro_each(args, 'dev', out_name='test')
elif args.mode == 'all':
create_all(args)
prepro_each(args, 'dev', 0.0, 0.0, out_name='dev')
prepro_each(args, 'dev', 0.0, 0.0, out_name='test')
prepro_each(args, 'all', out_name='train')
elif args.mode == 'single':
assert len(args.single_path) > 0
prepro_each(args, "NULL", out_name="single", in_path=args.single_path)
else:
prepro_each(args, 'train', 0.0, args.train_ratio, out_name='train')
prepro_each(args, 'train', args.train_ratio, 1.0, out_name='dev')
prepro_each(args, 'dev', out_name='test')
def save(args, data, shared, data_type):
data_path = os.path.join(args.target_dir, "data_{}.json".format(data_type))
shared_path = os.path.join(args.target_dir, "shared_{}.json".format(data_type))
json.dump(data, open(data_path, 'w'))
json.dump(shared, open(shared_path, 'w'))
def get_word2vec(args, word_counter):
glove_path = os.path.join(args.glove_dir, "glove.{}.{}d.txt".format(args.glove_corpus, args.glove_vec_size))
sizes = {'6B': int(4e5), '42B': int(1.9e6), '840B': int(2.2e6), '2B': int(1.2e6)}
total = sizes[args.glove_corpus]
word2vec_dict = {}
with open(glove_path, 'r', encoding='utf-8') as fh:
for line in tqdm(fh, total=total):
array = line.lstrip().rstrip().split(" ")
word = array[0]
vector = list(map(float, array[1:]))
if word in word_counter:
word2vec_dict[word] = vector
elif word.capitalize() in word_counter:
word2vec_dict[word.capitalize()] = vector
elif word.lower() in word_counter:
word2vec_dict[word.lower()] = vector
elif word.upper() in word_counter:
word2vec_dict[word.upper()] = vector
print("{}/{} of word vocab have corresponding vectors in {}".format(len(word2vec_dict), len(word_counter), glove_path))
return word2vec_dict
def prepro_each(args, data_type, start_ratio=0.0, stop_ratio=1.0, out_name="default", in_path=None):
if args.tokenizer == "PTB":
import nltk
sent_tokenize = nltk.sent_tokenize
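        # nltk's PTB tokenizer rewrites double quotes as `` and ''; map them
        # back to '"' so tokens can be located in the raw context later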
def word_tokenize(tokens):
return [token.replace("''", '"').replace("``", '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
        raise ValueError("unknown tokenizer: {}".format(args.tokenizer))
if not args.split:
sent_tokenize = lambda para: [para]
source_path = in_path or os.path.join(args.source_dir, "{}-v1.1.json".format(data_type))
source_data = json.load(open(source_path, 'r'))
q, cq, y, rx, rcx, ids, idxs = [], [], [], [], [], [], []
cy = []
x, cx = [], []
answerss = []
p = []
word_counter, char_counter, lower_word_counter = Counter(), Counter(), Counter()
start_ai = int(round(len(source_data['data']) * start_ratio))
stop_ai = int(round(len(source_data['data']) * stop_ratio))
for ai, article in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
xp, cxp = [], []
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for pi, para in enumerate(article['paragraphs']):
# wordss
context = para['context']
context = context.replace("''", '" ')
context = context.replace("``", '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi] # process tokens
# given xi, add chars
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
# get words
qi = word_tokenize(qa['question'])
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
# TODO : put some function that gives word_start, word_stop here
yi0, yi1 = get_word_span(context, xi, answer_start, answer_stop)
# yi0 = answer['answer_word_start'] or [0, 0]
# yi1 = answer['answer_word_stop'] or [0, 1]
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
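                    # cyi0/cyi1 are character offsets of the answer's first and
                    # last characters within the first (w0) and last (w1) words
                    # of the span, so answers that start or end mid-token are
                    # recoverable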
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1]-1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1]-1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
# print(answer_text, w0[cyi0:], w1[:cyi1+1])
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
# add context here
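    # keys prefixed with '*' hold [article_idx, paragraph_idx] references into
    # the corresponding 'shared' lists, so paragraph data is stored once and
    # each per-question row stays small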
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy,
'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx}
shared = {'x': x, 'cx': cx, 'p': p,
'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter,
'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print("saving ...")
save(args, data, shared, out_name)
if __name__ == "__main__":
    main()
| bi-att-flow-master | squad/prepro.py |
import re
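# get_2d_spans recovers (start, stop) character offsets for every token by
# scanning the original text left to right with str.find; it assumes tokens
# appear in order and contain the same characters as the raw text.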
def get_2d_spans(text, tokenss):
spanss = []
cur_idx = 0
for tokens in tokenss:
spans = []
        for token in tokens:
            found_idx = text.find(token, cur_idx)
            if found_idx < 0:
                raise Exception("token {!r} (of {}) not found after offset {} in: {}".format(
                    token, tokens, cur_idx, text))
            cur_idx = found_idx
            spans.append((cur_idx, cur_idx + len(token)))
            cur_idx += len(token)
spanss.append(spans)
return spanss
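# get_word_span maps a character-level answer span [start, stop) to word-level
# indices (sent_idx, word_idx), with an exclusive stop. A small hypothetical
# example:
#   context = "Model was fast"; wordss = [["Model", "was", "fast"]]
#   get_word_span(context, wordss, 6, 9) -> ((0, 1), (0, 2))   # the word "was"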
def get_word_span(context, wordss, start, stop):
spanss = get_2d_spans(context, wordss)
idxs = []
for sent_idx, spans in enumerate(spanss):
for word_idx, span in enumerate(spans):
if not (stop <= span[0] or start >= span[1]):
idxs.append((sent_idx, word_idx))
assert len(idxs) > 0, "{} {} {} {}".format(context, spanss, start, stop)
return idxs[0], (idxs[-1][0], idxs[-1][1] + 1)
def get_phrase(context, wordss, span):
"""
Obtain phrase as substring of context given start and stop indices in word level
:param context:
:param wordss:
:param start: [sent_idx, word_idx]
:param stop: [sent_idx, word_idx]
:return:
"""
start, stop = span
flat_start = get_flat_idx(wordss, start)
flat_stop = get_flat_idx(wordss, stop)
words = sum(wordss, [])
char_idx = 0
char_start, char_stop = None, None
for word_idx, word in enumerate(words):
char_idx = context.find(word, char_idx)
assert char_idx >= 0
if word_idx == flat_start:
char_start = char_idx
char_idx += len(word)
if word_idx == flat_stop - 1:
char_stop = char_idx
assert char_start is not None
assert char_stop is not None
return context[char_start:char_stop]
def get_flat_idx(wordss, idx):
return sum(len(words) for words in wordss[:idx[0]]) + idx[1]
def get_word_idx(context, wordss, idx):
spanss = get_2d_spans(context, wordss)
return spanss[idx[0]][idx[1]][0]
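# process_tokens further splits tokens on dashes, slashes, and typographic
# quotes, since SQuAD answers often begin or end inside such tokens; re.split
# with a capturing group keeps each delimiter as its own token.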
def process_tokens(temp_tokens):
tokens = []
for token in temp_tokens:
        l = ("-", "\u2212", "\u2014", "\u2013", "/", "~", '"', "'", "\u201C", "\u2019", "\u201D", "\u2018", "\u00B0")
        # \u2013 is an en-dash, used in number-to-number ranges
tokens.extend(re.split("([{}])".format("".join(l)), token))
return tokens
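# get_best_span picks, per sentence, the span (j1, j2), j1 <= j2, maximizing
# p_start[j1] * p_end[j2] in one pass: it tracks the running argmax over start
# probabilities while scanning end positions, O(n) instead of O(n^2) per sentence.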
def get_best_span(ypi, yp2i):
max_val = 0
best_word_span = (0, 1)
best_sent_idx = 0
for f, (ypif, yp2if) in enumerate(zip(ypi, yp2i)):
argmax_j1 = 0
for j in range(len(ypif)):
val1 = ypif[argmax_j1]
if val1 < ypif[j]:
val1 = ypif[j]
argmax_j1 = j
val2 = yp2if[j]
if val1 * val2 > max_val:
best_word_span = (argmax_j1, j)
best_sent_idx = f
max_val = val1 * val2
return ((best_sent_idx, best_word_span[0]), (best_sent_idx, best_word_span[1] + 1)), float(max_val)
def get_span_score_pairs(ypi, yp2i):
span_score_pairs = []
for f, (ypif, yp2if) in enumerate(zip(ypi, yp2i)):
for j in range(len(ypif)):
for k in range(j, len(yp2if)):
span = ((f, j), (f, k+1))
score = ypif[j] * yp2if[k]
span_score_pairs.append((span, score))
return span_score_pairs
| bi-att-flow-master | squad/utils.py |
""" Official evaluation script for v1.1 of the SQuAD dataset. [Changed name for external importing]"""
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
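# Token-level F1 over bags of normalized tokens: Counter intersection (&)
# takes per-token minimum counts, so a repeated token is only credited as
# many times as it occurs in both prediction and ground truth.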
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
| bi-att-flow-master | squad/evaluate.py |
import json
import sys
from tqdm import tqdm
from my.corenlp_interface import CoreNLPInterface
in_path = sys.argv[1]
out_path = sys.argv[2]
url = sys.argv[3]
port = int(sys.argv[4])
data = json.load(open(in_path, 'r'))
h = CoreNLPInterface(url, port)
def find_all(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1: return
yield start
start += len(sub) # use start += 1 to find overlapping matches
def to_hex(s):
return " ".join(map(hex, map(ord, s)))
def handle_nobreak(cand, text):
if cand == text:
return cand
if cand.replace(u'\u00A0', ' ') == text:
return cand
elif cand == text.replace(u'\u00A0', ' '):
return text
raise Exception("{} '{}' {} '{}'".format(cand, to_hex(cand), text, to_hex(text)))
# resolving unicode complication
wrong_loc_count = 0
loc_diffs = []
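# After stripping no-break spaces, each gold answer is re-anchored in its
# context: if the stored answer_start no longer lines up with the text, the
# nearest occurrence of the answer string is used and the shift is recorded.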
for article in data['data']:
for para in article['paragraphs']:
para['context'] = para['context'].replace(u'\u000A', '')
para['context'] = para['context'].replace(u'\u00A0', ' ')
context = para['context']
for qa in para['qas']:
for answer in qa['answers']:
answer['text'] = answer['text'].replace(u'\u00A0', ' ')
text = answer['text']
answer_start = answer['answer_start']
if context[answer_start:answer_start + len(text)] == text:
if text.lstrip() == text:
pass
else:
answer_start += len(text) - len(text.lstrip())
answer['answer_start'] = answer_start
text = text.lstrip()
answer['text'] = text
else:
wrong_loc_count += 1
text = text.lstrip()
answer['text'] = text
starts = list(find_all(context, text))
if len(starts) == 1:
answer_start = starts[0]
elif len(starts) > 1:
new_answer_start = min(starts, key=lambda s: abs(s - answer_start))
loc_diffs.append(abs(new_answer_start - answer_start))
answer_start = new_answer_start
else:
                        raise Exception("answer text {!r} not found in context".format(text))
answer['answer_start'] = answer_start
answer_stop = answer_start + len(text)
answer['answer_stop'] = answer_stop
assert para['context'][answer_start:answer_stop] == answer['text'], "{} {}".format(
para['context'][answer_start:answer_stop], answer['text'])
print(wrong_loc_count, loc_diffs)
mismatch_count = 0
dep_fail_count = 0
no_answer_count = 0
size = sum(len(article['paragraphs']) for article in data['data'])
pbar = tqdm(range(size))
for ai, article in enumerate(data['data']):
for pi, para in enumerate(article['paragraphs']):
context = para['context']
sents = h.split_doc(context)
words = h.split_sent(context)
sent_starts = []
ref_idx = 0
for sent in sents:
new_idx = context.find(sent, ref_idx)
sent_starts.append(new_idx)
ref_idx = new_idx + len(sent)
para['sents'] = sents
para['words'] = words
para['sent_starts'] = sent_starts
consts = list(map(h.get_const, sents))
para['consts'] = consts
deps = list(map(h.get_dep, sents))
para['deps'] = deps
for qa in para['qas']:
question = qa['question']
question_const = h.get_const(question)
qa['const'] = question_const
question_dep = h.get_dep(question)
qa['dep'] = question_dep
qa['words'] = h.split_sent(question)
for answer in qa['answers']:
answer_start = answer['answer_start']
text = answer['text']
answer_stop = answer_start + len(text)
# answer_words = h.split_sent(text)
word_idxs = []
answer_words = []
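                # map the character-level answer span onto dependency-parse
                # tokens: a token is kept if its character span overlaps
                # [answer_start, answer_stop)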
for sent_idx, (sent, sent_start, dep) in enumerate(zip(sents, sent_starts, deps)):
if dep is None:
print("dep parse failed at {} {} {}".format(ai, pi, sent_idx))
dep_fail_count += 1
continue
nodes, edges = dep
for word_idx, (word, _, _, start, _) in enumerate(nodes):
global_start = sent_start + start
global_stop = global_start + len(word)
if answer_start <= global_start < answer_stop or answer_start < global_stop <= answer_stop:
word_idxs.append((sent_idx, word_idx))
answer_words.append(word)
if len(word_idxs) > 0:
answer['answer_word_start'] = word_idxs[0]
answer['answer_word_stop'] = word_idxs[-1][0], word_idxs[-1][1] + 1
if not text.startswith(answer_words[0]):
print("'{}' '{}'".format(text, ' '.join(answer_words)))
mismatch_count += 1
else:
answer['answer_word_start'] = None
answer['answer_word_stop'] = None
no_answer_count += 1
pbar.update(1)
pbar.close()
print(mismatch_count, dep_fail_count, no_answer_count)
print("saving...")
json.dump(data, open(out_path, 'w'))
| bi-att-flow-master | squad/aug_squad.py |
import requests
import nltk
import json
import networkx as nx
import time
class CoreNLPInterface(object):
def __init__(self, url, port):
self._url = url
self._port = port
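    # POST the input text to the CoreNLP wrapper server and retry on failure;
    # the server is expected to reply with the literal string 'error' when a
    # parse fails, which is mapped to None here.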
def get(self, type_, in_, num_max_requests=100):
in_ = in_.encode("utf-8")
url = "http://{}:{}/{}".format(self._url, self._port, type_)
out = None
for _ in range(num_max_requests):
try:
r = requests.post(url, data=in_)
out = r.content.decode('utf-8')
if out == 'error':
out = None
break
            except Exception:
                # network hiccup or server not yet available; wait and retry
                time.sleep(1)
return out
def split_doc(self, doc):
out = self.get("doc", doc)
return out if out is None else json.loads(out)
def split_sent(self, sent):
out = self.get("sent", sent)
return out if out is None else json.loads(out)
def get_dep(self, sent):
out = self.get("dep", sent)
return out if out is None else json.loads(out)
def get_const(self, sent):
out = self.get("const", sent)
return out
def get_const_tree(self, sent):
out = self.get_const(sent)
return out if out is None else nltk.tree.Tree.fromstring(out)
@staticmethod
def dep2tree(dep):
tree = nx.DiGraph()
        # each edge is (dependent, dep_idx, governor, gov_idx, label)
        for dependent, i, gov, j, label in dep:
            tree.add_edge(gov, dependent, label=label)
return tree
| bi-att-flow-master | my/corenlp_interface.py |
| bi-att-flow-master | my/__init__.py |
import json
from collections import deque
import numpy as np
from tqdm import tqdm
def mytqdm(list_, desc="", show=True):
if show:
pbar = tqdm(list_)
pbar.set_description(desc)
return pbar
return list_
def json_pretty_dump(obj, fh):
return json.dump(obj, fh, sort_keys=True, indent=2, separators=(',', ': '))
def index(l, i):
return index(l[i[0]], i[1:]) if len(i) > 1 else l[i[0]]
def fill(l, shape, dtype=None):
out = np.zeros(shape, dtype=dtype)
stack = deque()
stack.appendleft(((), l))
while len(stack) > 0:
indices, cur = stack.pop()
        if len(indices) < len(shape):
for i, sub in enumerate(cur):
stack.appendleft([indices + (i,), sub])
else:
out[indices] = cur
return out
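# A small hypothetical example: pad a ragged 2-D list into a fixed-shape array.
#   fill([[1, 2], [3]], (2, 3), dtype='int32')
#   -> array([[1, 2, 0],
#             [3, 0, 0]], dtype=int32)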
def short_floats(o, precision):
class ShortFloat(float):
def __repr__(self):
return '%.{}g'.format(precision) % self
def _short_floats(obj):
if isinstance(obj, float):
return ShortFloat(obj)
elif isinstance(obj, dict):
return dict((k, _short_floats(v)) for k, v in obj.items())
elif isinstance(obj, (list, tuple)):
return tuple(map(_short_floats, obj))
return obj
return _short_floats(o)
def argmax(x):
return np.unravel_index(x.argmax(), x.shape)
| bi-att-flow-master | my/utils.py |
import argparse
import os
import shutil
from zipfile import ZipFile
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('paths', nargs='+')
parser.add_argument('-o', '--out', default='save.zip')
args = parser.parse_args()
return args
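# Hypothetical usage: `python my/zip_save.py out/basic/30/save/basic-18000 -o save.zip`
# bundles each checkpoint with its model's shared.json under save/<model_name>/
# and zips the result.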
def zip_save(args):
temp_dir = "."
save_dir = os.path.join(temp_dir, "save")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for save_source_path in tqdm(args.paths):
# path = "out/basic/30/save/basic-18000"
# target_path = "save_dir/30/save"
        # also output full path name to "save_dir/30/readme.txt"
# need to also extract "out/basic/30/shared.json"
temp, _ = os.path.split(save_source_path) # "out/basic/30/save", _
        model_dir, _ = os.path.split(temp)  # "out/basic/30", _
_, model_name = os.path.split(model_dir)
cur_dir = os.path.join(save_dir, model_name)
if not os.path.exists(cur_dir):
os.makedirs(cur_dir)
save_target_path = os.path.join(cur_dir, "save")
shared_target_path = os.path.join(cur_dir, "shared.json")
readme_path = os.path.join(cur_dir, "readme.txt")
shared_source_path = os.path.join(model_dir, "shared.json")
shutil.copy(save_source_path, save_target_path)
shutil.copy(shared_source_path, shared_target_path)
with open(readme_path, 'w') as fh:
fh.write(save_source_path)
os.system("zip {} -r {}".format(args.out, save_dir))
def main():
args = get_args()
zip_save(args)
if __name__ == "__main__":
main()
| bi-att-flow-master | my/zip_save.py |