python_code | repo_name | file_path
---|---|---|
| dnw-master | data/__init__.py |
import os
import torch
import torchvision
from torchvision import transforms
from genutil.config import FLAGS
class CIFAR10:
def __init__(self):
super(CIFAR10, self).__init__()
data_root = os.path.join(FLAGS.data_dir, "cifar10")
use_cuda = torch.cuda.is_available()
# Data loading code
kwargs = {"num_workers": FLAGS.workers, "pin_memory": True} if use_cuda else {}
normalize = transforms.Normalize(
mean=[0.491, 0.482, 0.447], std=[0.247, 0.243, 0.262]
)
train_dataset = torchvision.datasets.CIFAR10(
root=data_root,
train=True,
download=True,
transform=transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
),
)
self.train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=FLAGS.batch_size, shuffle=True, **kwargs
)
test_dataset = torchvision.datasets.CIFAR10(
root=data_root,
train=False,
download=True,
transform=transforms.Compose([transforms.ToTensor(), normalize]),
)
self.val_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=FLAGS.test_batch_size, shuffle=False, **kwargs
)
| dnw-master | data/cifar10.py |
import os
import json
def load_aokvqa(aokvqa_dir, split, version='v1p0'):
assert split in ['train', 'val', 'test', 'test_w_ans']
dataset = json.load(open(
os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json")
))
return dataset
def get_coco_path(split, image_id, coco_dir):
return os.path.join(coco_dir, f"{split}2017", f"{image_id:012}.jpg")
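# Illustrative usage of the two helpers above (paths are placeholders, not from this repo):
#   dataset = load_aokvqa('/data/aokvqa', 'val')
#   img_path = get_coco_path('val', dataset[0]['image_id'], '/data/coco')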
| aokvqa-main | load_aokvqa.py |
import os
import json
import argparse
import pathlib
from collections import Counter
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--mc', action='store_true', dest='multiple_choice')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
train_set = load_aokvqa(args.aokvqa_dir, 'train')
train_freq = dict(Counter(
[d['choices'][d['correct_choice_idx']] for d in train_set]
))
most_common_answer = max(train_freq.keys(), key=train_freq.get)
##
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
predictions = {}
for d in eval_set:
q = d['question_id']
predictions[q] = most_common_answer
if args.multiple_choice:
choices = [c for c in d['choices'] if c in train_freq.keys()]
if len(choices) > 0:
predictions[q] = max(choices, key=train_freq.get)
json.dump(predictions, args.output_file)
| aokvqa-main | heuristics/most_common_answer.py |
import os
import json
from random import seed, sample
import argparse
import pathlib
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--mc', action='store_true', dest='multiple_choice')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
seed(0)
train_set = load_aokvqa(args.aokvqa_dir, 'train')
if args.multiple_choice is False:
choices = list(set(
[d['choices'][d['correct_choice_idx']] for d in train_set]
))
##
predictions = {}
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
for d in eval_set:
q = d['question_id']
if args.multiple_choice:
choices = d['choices']
predictions[q] = sample(choices, 1)[0]
json.dump(predictions, args.output_file)
| aokvqa-main | heuristics/random_unweighted.py |
import os
import json
import numpy as np
import argparse
import pathlib
from collections import Counter
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--mc', action='store_true', dest='multiple_choice')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
np.random.seed(0)
train_set = load_aokvqa(args.aokvqa_dir, 'train')
train_freq = dict(Counter(
[d['choices'][d['correct_choice_idx']] for d in train_set]
))
if args.multiple_choice is False:
choices = list(train_freq.keys())
probs = [f / len(train_set) for f in train_freq.values()]
##
predictions = {}
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
for d in eval_set:
if args.multiple_choice:
choices = d['choices']
probs = [train_freq.get(c, 0) for c in choices]
if sum(probs) == 0:
probs = [1] * len(choices)
probs = [p / sum(probs) for p in probs]
q = d['question_id']
predictions[q] = np.random.choice(choices, size=1, p=probs)[0]
json.dump(predictions, args.output_file)
| aokvqa-main | heuristics/random_weighted.py |
import os
import argparse
from collections import Counter
import pathlib
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
# Build vocab from the train set: all correct choices, plus any choice or direct answer that appears >= 3 times
train_set = load_aokvqa(args.aokvqa_dir, 'train')
vocab = []
all_choices = Counter()
direct_answers = Counter()
for i in train_set:
vocab.append( i['choices'][i['correct_choice_idx']] )
all_choices.update(i['choices'])
direct_answers.update(set(i['direct_answers']))
vocab += [k for k,v in all_choices.items() if v >= 3]
vocab += [k for k,v in direct_answers.items() if v >= 3]
vocab = sorted(set(vocab))
print(f"Vocab size: {len(vocab)}")
# Save vocabulary to the output file
with open(args.output_file, 'w') as f:
for v in vocab:
print(v, file=f)
## Check validation set coverage
val_set = load_aokvqa(args.aokvqa_dir, 'val')
val_acc = [v['choices'][v['correct_choice_idx']] in vocab for v in val_set]
val_acc = sum(val_acc) / len(val_acc) * 100
print(f"Val set coverage: {val_acc:.2f}" )
| aokvqa-main | data_scripts/build_vocab.py |
import os
import argparse
import pathlib
from tqdm import tqdm
from PIL import Image
import torch
import torch.nn as nn
from torchvision import models
from torchvision import transforms as T
from load_aokvqa import load_aokvqa, get_coco_path
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
assert args.output_file.suffix == '.pt'
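# Example invocation (paths are illustrative only):
#   python data_scripts/extract_resnet_features.py --aokvqa-dir /data/aokvqa \
#     --coco-dir /data/coco --split val --out features/resnet_val.pt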
## Load dataset
dataset = load_aokvqa(args.aokvqa_dir, args.split)
## Load model
resnet_preprocess = T.Compose([
T.Resize(size=224, interpolation=T.InterpolationMode.BICUBIC),
T.CenterCrop(size=(224, 224)),
T.ToTensor(),
T.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
device = "cuda" if torch.cuda.is_available() else "cpu"
resnet_model = models.resnet50(pretrained=True)
resnet_model = torch.nn.Sequential(
*list(resnet_model.children())[:-1],
nn.Flatten()
) # strip classification layer
resnet_model = resnet_model.to(device)
resnet_model.eval()  # use inference-mode batch-norm statistics when extracting features
## Encoding loop
with torch.no_grad():
embeddings = {}
for d in tqdm(dataset):
img = Image.open(get_coco_path(args.split, d['image_id'], args.coco_dir)).convert('RGB')
resnet_input = resnet_preprocess(img).unsqueeze(0).to(device)
resnet_features = resnet_model(resnet_input)
embeddings[d['question_id']] = {
'image' : resnet_features[0].cpu()
}
torch.save(embeddings, args.output_file)
| aokvqa-main | data_scripts/extract_resnet_features.py |
import os
import argparse
import pathlib
from tqdm import tqdm
import torch
from transformers import AutoTokenizer, AutoModel
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
assert args.output_file.suffix == '.pt'
## Load dataset
dataset = load_aokvqa(args.aokvqa_dir, args.split)
## Load model
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
model = AutoModel.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
model.eval()
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
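# mean_pooling averages the token embeddings over non-padding positions (per the attention
# mask) to produce a single sentence embedding, following the sentence-transformers recipe.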
## Encoding loop
with torch.no_grad():
embeddings = {}
for d in tqdm(dataset):
encoded_input = tokenizer([d['question']], padding=True, return_tensors='pt')
encoded_input = {k:v.to(device) for k,v in encoded_input.items()}
e = mean_pooling(model(**encoded_input), encoded_input['attention_mask'])
embeddings[d['question_id']] = {
'question' : e[0].cpu()
}
torch.save(embeddings, args.output_file)
| aokvqa-main | data_scripts/extract_bert_features.py |
import json
from tqdm import tqdm
import argparse
import pathlib
import torch
import clip
parser = argparse.ArgumentParser()
parser.add_argument('--vocab', type=pathlib.Path, required=True, dest='vocab_file')
parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], required=True, dest='model_type')
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
assert args.output_file.suffix == '.pt'
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load(args.model_type, device=device)
with torch.no_grad():
a = open(args.vocab_file).read().splitlines()
mc_text = clip.tokenize(a).to(device)
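# Each vocab entry is encoded separately to keep GPU memory bounded; stacking the
# per-entry (1, dim) outputs along dim=1 and indexing [0] yields an (N, dim) feature matrix.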
mc_text_features = torch.stack([model.encode_text(mct.unsqueeze(0)).cpu() for mct in tqdm(mc_text)], dim=1)[0]
mc_text_features = mc_text_features.float()
model_name = args.model_type.replace('/', '-').replace('@', '-')
torch.save(mc_text_features, args.output_file)
| aokvqa-main | data_scripts/encode_vocab_clip.py |
import os
from PIL import Image
from tqdm import tqdm
import argparse
import pathlib
import torch
import clip
from load_aokvqa import load_aokvqa, get_coco_path
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], required=True, dest='model_type')
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
assert args.output_file.suffix == '.pt'
## Load dataset
dataset = load_aokvqa(args.aokvqa_dir, args.split)
## Load model
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load(args.model_type, device=device)
## Encoding loop
with torch.no_grad():
embeddings = {}
for d in tqdm(dataset):
q = d["question"]
q_text = clip.tokenize(q).to(device)
q_text_features = model.encode_text(q_text)
img = Image.open(get_coco_path(args.split, d['image_id'], args.coco_dir))
img = preprocess(img).unsqueeze(0).to(device)
image_features = model.encode_image(img)
embeddings[d['question_id']] = {
'question' : q_text_features[0].float().cpu(),
'image' : image_features[0].float().cpu(),
}
torch.save(embeddings, args.output_file)
| aokvqa-main | data_scripts/extract_clip_features.py |
import argparse
import pathlib
import json
from load_aokvqa import load_aokvqa
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--mc', type=argparse.FileType('r'), dest='mc_pred_file')
parser.add_argument('--da', type=argparse.FileType('r'), dest='da_pred_file')
parser.add_argument('--out', type=argparse.FileType('w'), dest='output_file')
args = parser.parse_args()
assert args.mc_pred_file or args.da_pred_file
dataset = load_aokvqa(args.aokvqa_dir, args.split)
mc_preds = json.load(args.mc_pred_file) if args.mc_pred_file else None
da_preds = json.load(args.da_pred_file) if args.da_pred_file else None
predictions = {}
for d in dataset:
q = d['question_id']
predictions[q] = {}
if mc_preds and q in mc_preds.keys():
predictions[q]['multiple_choice'] = mc_preds[q]
if da_preds and q in da_preds.keys():
predictions[q]['direct_answer'] = da_preds[q]
json.dump(predictions, args.output_file)
| aokvqa-main | evaluation/prepare_predictions.py |
import argparse
import pathlib
import json
import glob
from load_aokvqa import load_aokvqa
def eval_aokvqa(dataset, preds, multiple_choice=False, strict=True):
if isinstance(dataset, list):
dataset = { dataset[i]['question_id'] : dataset[i] for i in range(len(dataset)) }
if multiple_choice is False:
dataset = {k:v for k,v in dataset.items() if v['difficult_direct_answer'] is False}
if strict:
dataset_qids = set(dataset.keys())
preds_qids = set(preds.keys())
assert dataset_qids.issubset(preds_qids)
# dataset = q_id (str) : dataset element (dict)
# preds = q_id (str) : prediction (str)
acc = []
for q in dataset.keys():
if q not in preds.keys():
acc.append(0.0)
continue
pred = preds[q]
choices = dataset[q]['choices']
direct_answers = dataset[q]['direct_answers']
## Multiple Choice setting
if multiple_choice:
if strict:
assert pred in choices, 'Prediction must be a valid choice'
correct_choice_idx = dataset[q]['correct_choice_idx']
acc.append( float(pred == choices[correct_choice_idx]) )
## Direct Answer setting
else:
num_match = sum([pred == da for da in direct_answers])
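# VQA-style accuracy: a prediction counts as fully correct if it matches at least
# 3 of the annotators' direct answers, and receives partial credit otherwise.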
vqa_acc = min(1.0, num_match / 3.0)
acc.append(vqa_acc)
acc = sum(acc) / len(acc) * 100
return acc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test_w_ans'], required=True)
parser.add_argument('--preds', type=str, required=True, dest='prediction_files')
args = parser.parse_args()
dataset = load_aokvqa(args.aokvqa_dir, args.split)
for prediction_file in glob.glob(args.prediction_files):
predictions = json.load(open(prediction_file, 'r'))
# Multiple choice
mc_predictions = {}
for q in predictions.keys():
if 'multiple_choice' in predictions[q].keys():
mc_predictions[q] = predictions[q]['multiple_choice']
if mc_predictions != {}:
mc_acc = eval_aokvqa(
dataset,
mc_predictions,
multiple_choice=True,
strict=False
)
print(prediction_file, 'MC', mc_acc)
# Direct Answer
da_predictions = {}
for q in predictions.keys():
if 'direct_answer' in predictions[q].keys():
da_predictions[q] = predictions[q]['direct_answer']
if da_predictions != {}:
da_acc = eval_aokvqa(
dataset,
da_predictions,
multiple_choice=False,
strict=False
)
print(prediction_file, 'DA', da_acc)
| aokvqa-main | evaluation/eval_predictions.py |
import argparse
import pathlib
import json
from tqdm import tqdm
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim
from load_aokvqa import load_aokvqa
def map_to_choices(dataset, predictions, device='cpu'):
if isinstance(dataset, list):
dataset = { dataset[i]['question_id'] : dataset[i] for i in range(len(dataset)) }
if all([p in dataset[q]['choices'] for q, p in predictions.items()]):
return predictions
model = SentenceTransformer('sentence-transformers/average_word_embeddings_glove.6B.300d')
model.to(device)
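# Free-form predictions that are not among the question's choices get remapped to the
# most similar choice by cosine similarity of averaged GloVe word embeddings.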
for q in tqdm(predictions.keys()):
choices = dataset[q]['choices']
if predictions[q] not in choices:
choice_embeddings = model.encode([predictions[q]] + choices, convert_to_tensor=True)
a_idx = cos_sim(choice_embeddings[0], choice_embeddings[1:]).argmax().item()
predictions[q] = choices[a_idx]
return predictions
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--pred', type=argparse.FileType('r'), required=True, dest='prediction_file')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
dataset = load_aokvqa(args.aokvqa_dir, args.split)
predictions = json.load(args.prediction_file)
predictions = map_to_choices(dataset, predictions)
json.dump(predictions, args.output_file)
| aokvqa-main | evaluation/remap_predictions.py |
import os
import json
import argparse
import pathlib
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir')
parser.add_argument('--split', type=str, choices=['train', 'val'], required=True)
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split)
coco_captions = json.load(open(os.path.join(args.coco_dir, 'annotations', f'captions_{args.split}2017.json')))['annotations']
coco_captions = {c['image_id'] : c['caption'] for c in coco_captions}
captions = { d['question_id'] : coco_captions[d['image_id']] for d in aokvqa_set }
json.dump(captions, args.output_file)
| aokvqa-main | gpt3/caption_inputs.py |
import os
import random
import json
from tqdm import tqdm
import argparse
import pathlib
import openai
openai.organization = os.getenv('OPENAI_ORG')
openai.api_key = os.getenv('OPENAI_API_KEY')
from load_aokvqa import load_aokvqa
random.seed(0)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--n', type=int, default=10, dest='num_examples')
parser.add_argument('--train-context', type=argparse.FileType('r'), dest='train_context_file')
parser.add_argument('--prefix', type=str, default='', dest='prompt_prefix')
parser.add_argument('--include-choices', action='store_true', dest='include_choices')
parser.add_argument('--context', type=argparse.FileType('r'), dest='context_file')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
train_set = load_aokvqa(args.aokvqa_dir, 'train')
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
train_context = {}
context = {}
if args.context_file is not None:
train_context = json.load(args.train_context_file)
context = json.load(args.context_file)
predictions = {}
for d in tqdm(eval_set):
q = d['question_id']
prompt = args.prompt_prefix
for e in random.sample(train_set, args.num_examples):
prompt += prompt_element(e,
context=train_context.get(e['question_id'], None),
include_choices=args.include_choices,
answer=True
)
prompt += '\n\n'
prompt += prompt_element(d,
context=context.get(q, None),
include_choices=args.include_choices,
answer=False
)
response = openai.Completion.create(
engine="text-curie-001",
prompt=prompt,
temperature=0.0,
max_tokens=10,
)
predictions[q] = response.choices[0].text.strip()
json.dump(predictions, args.output_file)
def prompt_element(d, context=None, include_choices=False, answer=False):
return (f"Context: {context}\n" if context is not None else '') + \
f"Q: {d['question']}\n" + \
(f"Choices: {', '.join(d['choices'])}.\n" if include_choices else '') + \
f"A:" + (f" {d['choices'][d['correct_choice_idx']]}" if answer else '')
if __name__ == '__main__':
main()
| aokvqa-main | gpt3/query_gpt3.py |
import json
import argparse
import pathlib
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test_w_ans'], required=True)
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split)
rationales = {d['question_id'] : d['rationales'][0] for d in aokvqa_set}
json.dump(rationales, args.output_file)
| aokvqa-main | gpt3/rationale_inputs.py |
import sys
import os
import argparse
import pathlib
from tqdm import tqdm
import json
import torch
import torch.nn as nn
# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663
import sentencepiece; import pytorch_lightning as pl; import clip
from transfer_experiments.train import LinearClassifier
from load_aokvqa import load_aokvqa
from evaluation.remap_predictions import map_to_choices
parser = argparse.ArgumentParser()
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--features', type=pathlib.Path, required=True)
parser.add_argument('--out', type=argparse.FileType('w'), dest='output_file')
#
parser_weights = parser.add_mutually_exclusive_group(required=True)
parser_weights.add_argument('--ckpt', type=pathlib.Path, dest='checkpoint_path')
parser_weights.add_argument('--zero-shot', action='store_true', dest='clip_zero_shot')
parser.add_argument('--inputs', nargs='+', type=str, choices=['question', 'image'], required=('--zero-shot' in sys.argv))
#
parser.add_argument('--vocab', type=argparse.FileType('r'))
parser.add_argument('--vocab-features', type=pathlib.Path, dest='vocab_features')
parser.add_argument('--mc', action='store_true', dest='multiple_choice')
parser.add_argument('--clip-model-type', type=str,
choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'],
dest='clip_model_type', required=('--zero-shot' in sys.argv and '--mc' in sys.argv))
#
args = parser.parse_args()
## Load dataset
aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split)
## Load models
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.checkpoint_path is not None:
classifier = LinearClassifier.load_from_checkpoint(args.checkpoint_path)
classifier.to(device)
hp = classifier.hparams
elif args.clip_zero_shot:
classifier = nn.Identity().to(device)
hp = pl.utilities.AttributeDict(backbone='clip', clip_model_type=args.clip_model_type, objective='zero-shot', inputs=args.inputs)
# Load input features
embeddings = torch.load(args.features)
if hp.backbone == 'clip':
for q in embeddings.keys():
embeddings[q]['question'] = embeddings[q]['question'] / embeddings[q]['question'].norm(dim=-1, keepdim=True)
embeddings[q]['image'] = embeddings[q]['image'] / embeddings[q]['image'].norm(dim=-1, keepdim=True)
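# CLIP embeddings are L2-normalized here so that the dot products taken below act as
# cosine similarities against the (also normalized) vocab/choice text features.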
# Load vocab, vocab features, clip
if (hp.objective == 'classifier') or \
(hp.objective in ['contrastive', 'zero-shot'] and args.multiple_choice is False):
vocab = args.vocab.read().splitlines()
if hp.objective in ['contrastive', 'zero-shot']:
if args.multiple_choice is False:
vocab_features = torch.load(args.vocab_features).cpu()
vocab_features /= vocab_features.norm(dim=-1, keepdim=True)
else:
clip_model = clip.load(hp.clip_model_type, device=device)[0]
logit_scale = clip_model.logit_scale.exp().cpu()
## Prediction loop
predictions = {}
with torch.no_grad():
for o in tqdm(aokvqa_set):
q = o['question_id']
# Load input embedding (from question / image)
if hp.objective == 'zero-shot' and ('question' in hp.inputs and 'image' in hp.inputs):
e = embeddings[q]['question'] + embeddings[q]['image']
elif 'question' in hp.inputs and 'image' in hp.inputs:
e = torch.cat((embeddings[q]['question'], embeddings[q]['image']))
elif 'question' in hp.inputs:
e = embeddings[q]['question']
elif 'image' in hp.inputs:
e = embeddings[q]['image']
# Pass inputs through model
e = e.unsqueeze(0).to(device)
x = classifier(e)[0].cpu()
# Predict
if hp.objective in ['contrastive', 'zero-shot']:
if args.multiple_choice:
vocab = o['choices']
# Encode choices
vocab_features = clip.tokenize(vocab).to(device)
vocab_features = torch.stack([
clip_model.encode_text(v.unsqueeze(0)) for v in vocab_features
], dim=1)[0]
vocab_features /= vocab_features.norm(dim=-1, keepdim=True)
vocab_features = vocab_features.float().cpu()
x = logit_scale * x @ vocab_features.t()
x = x.softmax(dim=-1)
predictions[q] = vocab[x.argmax().item()]
## Save and evaluate predictions
# Map prediction to nearest neighbor choice (by word embeddings)
if args.multiple_choice and hp.objective == 'classifier':
predictions = map_to_choices(aokvqa_set, predictions)
json.dump(predictions, args.output_file)
| aokvqa-main | transfer_experiments/predict.py |
import os
import sys
import json
import argparse
import pathlib
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663
import sentencepiece; import pytorch_lightning as pl
import torchmetrics.functional as MF
from load_aokvqa import load_aokvqa
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--vocab', type=argparse.FileType('r'), required=True)
parser.add_argument('--log-dir', type=pathlib.Path, dest='log_dir', required=True)
#
parser.add_argument('--backbone', type=str, choices=['clip', 'resnet', 'bert'], required=True)
parser.add_argument('--clip-model-type', type=str,
choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'],
dest='clip_model_type', required=('clip' in sys.argv))
parser.add_argument('--train-features', type=pathlib.Path, required=True, dest='train_features')
parser.add_argument('--val-features', type=pathlib.Path, required=True, dest='val_features')
parser.add_argument('--vocab-features', type=pathlib.Path, required=('contrastive' in sys.argv), dest='vocab_features')
#
parser.add_argument('--objective', type=str, choices=['classifier', 'contrastive'], required=True)
parser.add_argument('--inputs', nargs='+', type=str, choices=['question', 'image'], required=True)
# Defaults
parser.add_argument('--bs', type=int, default=128, dest='batch_size')
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--gpus', type=int, default=1)
args = parser.parse_args()
pl.seed_everything(1)
vocab = args.vocab.read().splitlines()
## Data loading
dm = AokvqaEmbeddingsDataModule(
args.aokvqa_dir,
args.train_features,
args.val_features,
args.objective,
args.backbone,
args.inputs,
vocab,
args.vocab_features,
batch_size=args.batch_size,
num_workers=16
)
## Model definition
model = LinearClassifier(
args.objective,
args.backbone,
args.clip_model_type,
args.inputs,
len(vocab),
args.lr
)
## Training and testing loops
logger = pl.loggers.TensorBoardLogger(
args.log_dir,
name=f'{args.backbone}-{args.objective}',
version=f"inputs:{'+'.join(args.inputs)}"
)
trainer = pl.Trainer(
logger=logger,
gpus=args.gpus,
max_epochs=args.epochs,
callbacks=[
pl.callbacks.ModelCheckpoint(
monitor="val_acc",
filename="{epoch:02d}-{val_acc:.2f}",
mode="max"
)
],
)
trainer.fit(model, dm)
class AokvqaEmbeddingsDataset(Dataset):
def __init__(self, aokvqa_dir, split, input_features, objective, backbone, inputs, vocab, vocab_features):
aokvqa_set = load_aokvqa(aokvqa_dir, split)
assert ( backbone == 'resnet' and inputs == ['image'] and objective == 'classifier' ) \
or ( backbone == 'bert' and inputs == ['question'] and objective == 'classifier' ) \
or ( backbone == 'clip' )
embeddings = torch.load(input_features)
if backbone == 'clip':
for q in embeddings.keys():
embeddings[q]['question'] /= embeddings[q]['question'].norm(dim=-1, keepdim=True)
embeddings[q]['image'] /= embeddings[q]['image'].norm(dim=-1, keepdim=True)
if objective == 'contrastive':
vocab_embeddings = torch.load(vocab_features)
vocab_embeddings /= vocab_embeddings.norm(dim=-1, keepdim=True)
self.objective = objective
self.vocab_len = len(vocab)
self.embeddings = []
self.answers = []
for o in aokvqa_set:
correct_answers = set([o['choices'][o['correct_choice_idx']]] + o['direct_answers'])
correct_answers = [vocab.index(a) for a in correct_answers if a in vocab]
if self.objective == 'contrastive':
correct_answers = [vocab_embeddings[a] for a in correct_answers]
if len(correct_answers) == 0: continue
self.answers.append(correct_answers)
q = o['question_id']
if 'question' in inputs and 'image' in inputs:
e = torch.cat((embeddings[q]['question'], embeddings[q]['image']))
elif 'question' in inputs and 'image' not in inputs:
e = embeddings[q]['question']
elif 'question' not in inputs and 'image' in inputs:
e = embeddings[q]['image']
self.embeddings.append(e)
def __getitem__(self, index):
e = self.embeddings[index]
a = self.answers[index]
if self.objective == 'classifier':
a = torch.sum(F.one_hot(torch.tensor(a), num_classes=self.vocab_len), dim=0)
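# Summing the one-hot rows gives a multi-hot target over the vocabulary, i.e. every
# correct answer for this question is marked 1 for the BCE classification objective.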
elif self.objective == 'contrastive':
a = random.sample(a, 1)[0]
return e, a
def __len__(self):
return len(self.embeddings)
class AokvqaEmbeddingsDataModule(pl.LightningDataModule):
def __init__(self, aokvqa_dir, train_features, val_features, objective, backbone, inputs, vocab, vocab_features, batch_size=1, num_workers=0):
super().__init__()
self.aokvqa_dir = aokvqa_dir
self.train_features = train_features
self.val_features = val_features
self.objective = objective
self.backbone = backbone
self.inputs = inputs
self.vocab = vocab
self.vocab_features = vocab_features
self.batch_size = batch_size
self.num_workers = num_workers
def setup(self, stage=None):
self.train_dataset = AokvqaEmbeddingsDataset(
self.aokvqa_dir, 'train', self.train_features, self.objective,
self.backbone, self.inputs, self.vocab, self.vocab_features
)
self.val_dataset = AokvqaEmbeddingsDataset(
self.aokvqa_dir, 'val', self.val_features, self.objective,
self.backbone, self.inputs, self.vocab, self.vocab_features
)
def train_dataloader(self):
return DataLoader(
self.train_dataset, batch_size=self.batch_size, shuffle=True,
num_workers=int(0.8 * self.num_workers)
)
def val_dataloader(self):
return DataLoader(
self.val_dataset, batch_size=self.batch_size, shuffle=False,
num_workers=int(0.2 * self.num_workers)
)
class LinearClassifier(pl.LightningModule):
def __init__(self, objective, backbone, clip_model_type, inputs, vocab_len, lr=0.001):
super().__init__()
self.save_hyperparameters(ignore=['lr'])
self.lr = lr
if self.hparams.backbone == 'clip':
clip_dim = {
'RN50' : 1024,
'RN50x4' : 640,
'RN50x16' : 768,
'RN50x64' : 1024,
'RN101' : 512,
'ViT-B/32' : 512,
'ViT-B/16' : 512,
'ViT-L/14' : 768,
'ViT-L/14@336px' : 768,
}[clip_model_type]
emb_dim = clip_dim * len(inputs)
elif self.hparams.backbone == 'resnet':
emb_dim = 2048
elif self.hparams.backbone == 'bert':
emb_dim = 768
if self.hparams.objective == 'classifier':
out_dim = vocab_len
elif self.hparams.objective == 'contrastive':
out_dim = clip_dim
self.linear = nn.Linear(emb_dim, out_dim)
def forward(self, x):
x = self.linear(x)
if self.hparams.objective == 'classifier':
x = torch.sigmoid(x)
return x
def compute_loss(self, batch):
x, y = batch
y_pred = self.forward(x)
if self.hparams.objective == 'classifier':
loss = F.binary_cross_entropy(y_pred, y.float())
elif self.hparams.objective == 'contrastive':
indices = torch.arange(0, x.shape[0], dtype=torch.int64, device=self.device)
sim = (y_pred @ y.T).softmax(dim=-1)
loss = F.cross_entropy(sim, indices)
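# In-batch contrastive objective: rows of the (softmax-normalized) similarity matrix
# y_pred @ y.T are matched against the diagonal, so each projection should be closest
# to its own target answer embedding.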
if self.hparams.objective == 'classifier':
acc = MF.f1_score(y_pred, y)
elif self.hparams.objective == 'contrastive':
acc = torch.mean(sim[indices, indices])
return loss, acc
def training_step(self, batch, batch_idx):
loss, acc = self.compute_loss(batch)
self.log("train_loss", loss)
self.log("train_acc", acc)
return loss
def validation_step(self, batch, batch_idx):
loss, acc = self.compute_loss(batch)
self.log("val_loss", loss)
self.log("val_acc", acc)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return optimizer
if __name__ == '__main__':
main()
| aokvqa-main | transfer_experiments/train.py |
from setuptools import setup, find_packages
def parse_requirements_file(path):
requirements = []
with open(path) as requirements_file:
import re
def fix_url_dependencies(req: str) -> str:
"""Pip and setuptools disagree about how URL dependencies should be handled."""
m = re.match(
r"^(git\+)?(https|ssh)://(git@)?github\.com/([\w-]+)/(?P<name>[\w-]+)\.git",
req,
)
if m is None:
return req
else:
return f"{m.group('name')} @ {req}"
for line in requirements_file:
line = line.strip()
if line.startswith("#") or len(line) <= 0:
continue
req, *comment = line.split("#")
req = fix_url_dependencies(req.strip())
requirements.append(req)
return requirements
setup(
name='bettermap',
version='1.3.1',
description="Parallelized drop-in replacements for Python's map function",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url='https://github.com/allenai/bettermap',
author="Dirk Groeneveld",
author_email="[email protected]",
license="Apache",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests.*",
"tests",
"test_fixtures",
"test_fixtures.*",
],
),
install_requires=parse_requirements_file("requirements.txt"),
extras_require={"dev": parse_requirements_file("dev-requirements.txt")},
python_requires='>=3.6'
)
| bettermap-master | setup.py |
import bettermap
def f(x: float) -> float:
return x * x
_INPUT = list(range(100))
_EXPECTED = list(map(f, _INPUT))
def test_map_per_process():
result = list(bettermap.map_per_process(f, _INPUT))
result.sort()
assert result == _EXPECTED
def test_ordered_map_per_process():
result = list(bettermap.ordered_map_per_process(f, _INPUT))
assert result == _EXPECTED
def test_ordered_map_per_thread():
result = list(bettermap.ordered_map_per_thread(f, _INPUT))
assert result == _EXPECTED
def test_map_in_chunks():
result = list(bettermap.map_in_chunks(f, _INPUT))
result.sort()
assert result == _EXPECTED
def test_ordered_map_in_chunks():
result = list(bettermap.ordered_map_in_chunks(f, _INPUT))
assert result == _EXPECTED
| bettermap-master | tests/test_basic_functionality.py |
| bettermap-master | tests/__init__.py |
from .bettermap import *
| bettermap-master | bettermap/__init__.py |
#!/usr/bin/python3
import io
import sys
from concurrent.futures import ThreadPoolExecutor
import itertools
import multiprocessing as mp
from multiprocessing.connection import Connection
from multiprocessing.context import ForkProcess
from typing import Iterable, List, Optional, Any, Dict, Tuple
import dill
from queue import Queue
from threading import Thread
mpctx = mp.get_context("fork")
def threaded_generator(g, maxsize: int = 16):
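# Drains generator `g` from a background daemon thread into a bounded queue, so the
# consumer can overlap its own work with the producer; the sentinel marks exhaustion.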
q: Queue = Queue(maxsize=maxsize)
sentinel = object()
def fill_queue():
try:
for value in g:
q.put(value)
finally:
q.put(sentinel)
thread = Thread(name=repr(g), target=fill_queue, daemon=True)
thread.start()
yield from iter(q.get, sentinel)
def slices(n: int, i: Iterable) -> Iterable[List]:
i = iter(i)
while True:
s = list(itertools.islice(i, n))
if len(s) > 0:
yield s
else:
break
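# Example: list(slices(2, range(5))) == [[0, 1], [2, 3], [4]]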
def map_per_process(
fn,
input_sequence: Iterable,
*,
serialization_items: Optional[List[Any]] = None,
parallelism: int = mpctx.cpu_count()
) -> Iterable:
if serialization_items is not None and len(serialization_items) > 0:
serialization_ids = [id(o) for o in serialization_items]
class MapPickler(dill.Pickler):
def persistent_id(self, obj):
try:
return serialization_ids.index(id(obj))
except ValueError:
return None
class MapUnpickler(dill.Unpickler):
def persistent_load(self, pid):
return serialization_items[pid]
else:
MapPickler = dill.Pickler # type: ignore
MapUnpickler = dill.Unpickler # type: ignore
def pickle(o: Any) -> bytes:
with io.BytesIO() as buffer:
pickler = MapPickler(buffer)
pickler.dump(o)
return buffer.getvalue()
def unpickle(b: bytes) -> Any:
with io.BytesIO(b) as buffer:
unpickler = MapUnpickler(buffer)
return unpickler.load()
pipeno_to_pipe: Dict[int, Connection] = {}
pipeno_to_process: Dict[int, ForkProcess] = {}
def process_one_item(send_pipe: Connection, item):
try:
processed_item = fn(item)
except Exception as e:
import traceback
send_pipe.send((None, (e, traceback.format_exc())))
else:
send_pipe.send((pickle(processed_item), None))
send_pipe.close()
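# process_one_item runs in a freshly forked child process: it applies fn to a single item
# and sends back either the dill-pickled result or the exception plus its traceback over the pipe.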
def yield_from_pipes(pipes: List[Connection]):
for pipe in pipes:
result, error = pipe.recv()
pipeno = pipe.fileno()
del pipeno_to_pipe[pipeno]
pipe.close()
process = pipeno_to_process[pipeno]
process.join()
del pipeno_to_process[pipeno]
if error is None:
yield unpickle(result)
else:
e, tb = error
sys.stderr.write("".join(tb))
raise e
try:
for item in input_sequence:
receive_pipe, send_pipe = mpctx.Pipe(duplex=False)
process = mpctx.Process(target=process_one_item, args=(send_pipe, item))
pipeno_to_pipe[receive_pipe.fileno()] = receive_pipe
pipeno_to_process[receive_pipe.fileno()] = process
process.start()
# read out the values
timeout = 0 if len(pipeno_to_process) < parallelism else None
# If we have fewer processes going than we have CPUs, we just pick up the values
# that are done. If we are at the process limit, we wait until one of them is done.
ready_pipes = mp.connection.wait(pipeno_to_pipe.values(), timeout=timeout)
yield from yield_from_pipes(ready_pipes) # type: ignore
# yield the rest of the items
while len(pipeno_to_process) > 0:
ready_pipes = mp.connection.wait(pipeno_to_pipe.values(), timeout=None)
yield from yield_from_pipes(ready_pipes) # type: ignore
finally:
for process in pipeno_to_process.values():
if process.is_alive():
process.terminate()
def ordered_map_per_process(
fn,
input_sequence: Iterable,
*,
serialization_items: Optional[List[Any]] = None
) -> Iterable:
def process_item(item):
index, item = item
return index, fn(item)
results_with_index = map_per_process(
process_item,
enumerate(input_sequence),
serialization_items=serialization_items)
expected_index = 0
items_in_wait: List[Tuple[int, Any]] = []
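# Results can arrive out of order; buffer them and flush whenever the next expected
# index becomes available, so the caller sees results in the original input order.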
for item in results_with_index:
index, result = item
if index == expected_index:
yield result
expected_index = index + 1
items_in_wait.sort(reverse=True)
while len(items_in_wait) > 0 and items_in_wait[-1][0] == expected_index:
index, result = items_in_wait.pop()
yield result
expected_index = index + 1
else:
items_in_wait.append(item)
def ordered_map_per_thread(
fn,
input_sequence: Iterable,
*,
parallelism: int = mpctx.cpu_count()
) -> Iterable:
executor = ThreadPoolExecutor(max_workers=parallelism)
input_sequence = (executor.submit(fn, item) for item in input_sequence)
input_sequence = threaded_generator(input_sequence, maxsize=parallelism)
for future in input_sequence:
yield future.result()
executor.shutdown()
def map_in_chunks(
fn,
input_sequence: Iterable,
*,
chunk_size: int = 10,
serialization_items: Optional[List[Any]] = None
) -> Iterable:
def process_chunk(chunk: List) -> List:
return list(map(fn, chunk))
processed_chunks = map_per_process(
process_chunk,
slices(chunk_size, input_sequence),
serialization_items=serialization_items)
for processed_chunk in processed_chunks:
yield from processed_chunk
def ordered_map_in_chunks(
fn,
input_sequence: Iterable,
*,
chunk_size: int = 10,
serialization_items: Optional[List[Any]] = None
) -> Iterable:
def process_chunk(chunk: List) -> List:
return list(map(fn, chunk))
processed_chunks = ordered_map_per_process(
process_chunk,
slices(chunk_size, input_sequence),
serialization_items=serialization_items)
for processed_chunk in processed_chunks:
yield from processed_chunk
| bettermap-master | bettermap/bettermap.py |
from os import mkdir
from os.path import join, dirname, expanduser, exists
DATA_DIR = expanduser("~/data-dbg")
COCO_SOURCE = join(DATA_DIR, "coco")
COCO_ANNOTATIONS = join(COCO_SOURCE, "annotations")
COCO_IMAGES = join(COCO_SOURCE, "images")
VQAE = join(DATA_DIR, "vqa-e")
VISUAL_NEWS = join(DATA_DIR, "visual_news/origin")
SNLI_VE_HOME = join(DATA_DIR, "SNLI_VE")
FLICKER30K = join(DATA_DIR, "SNLI_VE", "Flickr30K", "flickr30k_images")
VQA_ANNOTATIONS = join(DATA_DIR, "vqa")
ADAPTER_SOURCE = join(DATA_DIR, "adapters")
| close-main | close/file_paths.py |
import gzip
import logging
import tarfile
import tempfile
import zipfile
from os import listdir, makedirs
from os.path import dirname, exists, join
import requests
from tqdm import tqdm
from close import file_paths
from close.utils import py_utils
def ensure_dir_exists(filename):
"""Make sure the parent directory of `filename` exists"""
makedirs(dirname(filename), exist_ok=True)
def download_to_file(url, output_file, pbar=False, unzip=False):
"""Download `url` to `output_file`"""
logging.info(f"Downloading file from {url} to {output_file}")
ensure_dir_exists(output_file)
if not pbar:
with requests.get(url) as r:
r.raise_for_status()
content = r.content
if unzip:
content = gzip.decompress(content)
with open(output_file, 'wb') as f:
f.write(content)
else:
if unzip:
raise NotImplementedError()
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(output_file, 'wb') as f:
_write_to_stream(r, f, True)
def download_zip(url, source, progress_bar=True):
"""Download zip file at `url` and extract to `source`"""
# Download to a temp file so we don't hold the whole (possibly large) download in RAM
with tempfile.TemporaryFile() as tmp_f:
with requests.get(url, stream=True) as r:
_write_to_stream(r, tmp_f, progress_bar)
logging.info("Extracting to %s...." % source)
makedirs(source, exist_ok=True)
with zipfile.ZipFile(tmp_f) as f:
f.extractall(source)
def download_tar(url, source, progress_bar=True):
"""Download tar file at `url` and extract to `source`"""
with tempfile.TemporaryFile() as tmp_f:
with requests.get(url, stream=True) as r:
_write_to_stream(r, tmp_f, progress_bar)
logging.info("Extracting to %s...." % source)
makedirs(source, exist_ok=True)
tmp_f.seek(0)
with tarfile.open(fileobj=tmp_f) as f:
f.extractall(source)
DRIVE_URL = "https://docs.google.com/uc?export=download"
def download_from_drive(file_id, output_file, progress_bar=False):
"""Download the public google drive file `file_id` to `output_file`"""
ensure_dir_exists(output_file)
session = requests.Session()
response = session.get(DRIVE_URL, params={'id': file_id}, stream=True)
# Check to see if we need to send a second, confirm, request
# https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
for key, value in response.cookies.items():
if key.startswith('download_warning'):
params = {'id': file_id, 'confirm': value}
response = session.get(DRIVE_URL, params=params, stream=True)
break
with open(output_file, "wb") as f:
_write_to_stream(response, f, progress_bar)
response.close()
def _write_to_stream(response, output_fh, progress_bar=True, chunk_size=32768):
"""Write streaming `response` to `output_fs` in chunks"""
response.raise_for_status()
if progress_bar:
content_len = response.headers.get("Content-Length")
if content_len is not None:
total = int(content_len)
else:
total = None
pbar = tqdm(desc="downloading", total=total, ncols=100, unit="b", unit_scale=True)
else:
pbar = None
cur_total = 0
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
if pbar is not None:
cur_total += len(chunk)
next_value = cur_total
pbar.update(next_value - pbar.n)
output_fh.write(chunk)
if pbar is not None:
if pbar.total is not None:
pbar.update(pbar.total - pbar.n)
pbar.close()
VE_SRC = "https://storage.googleapis.com/allennlp-public-data/snli-ve"
def download_snli_ve():
if not exists(file_paths.SNLI_VE_HOME):
for file in ["snli_ve_train.jsonl", "snli_ve_dev.jsonl", "snli_ve_test.jsonl"]:
download_to_file(f"{VE_SRC}/{file}.gz", join(file_paths.SNLI_VE_HOME, file), unzip=True)
if not exists(file_paths.FLICKER30K):
logging.info(f"Downloading {VE_SRC}/flickr30k_images.tar.gz")
download_tar(
f"{VE_SRC}/flickr30k_images.tar.gz",
dirname(file_paths.FLICKER30K)
)
VQAE_FILES = {
"1CXogPObRixI1iR51T2px-Q75jdnhByCX": "VQA-E_train_set.json",
"12e8Px79J4lOT0NBUe2JVzTjbgfRy06qY": "VQA-E_val_set.json",
}
def download_vqae():
for file_id, filename in VQAE_FILES.items():
filename = join(file_paths.VQAE, filename)
if not exists(filename):
logging.info(f"Downloading {filename}")
download_from_drive(file_id, join(file_paths.VQAE, filename))
VQA_FILES = {
"questions": {
"train": "https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Train_mscoco.zip",
"val": "https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Val_mscoco.zip",
"test": "https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Test_mscoco.zip"
},
"annotations": {
"train": "https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Train_mscoco.zip",
"val": "https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Val_mscoco.zip"
}
}
def download_vqa_annotations():
if exists(file_paths.VQA_ANNOTATIONS):
return
for kind, files in VQA_FILES.items():
for split, file in files.items():
logging.info(f"Downloading {file}")
download_zip(file, file_paths.VQA_ANNOTATIONS, False)
COCO_ANNO = "http://images.cocodataset.org/annotations/annotations_trainval2014.zip"
COCO_IMAGES = {
"val2014": "http://images.cocodataset.org/zips/val2014.zip",
"test2014": "http://images.cocodataset.org/zips/test2014.zip",
"train2014": "http://images.cocodataset.org/zips/train2014.zip",
"test2015": "http://images.cocodataset.org/zips/test2015.zip",
}
def download_coco():
if not exists(join(file_paths.COCO_SOURCE, "annotations")):
logging.info(f"Downloading {COCO_ANNO}")
download_zip(COCO_ANNO, file_paths.COCO_SOURCE, True)
for k, url in COCO_IMAGES.items():
if not exists(join(file_paths.COCO_IMAGES, k)):
logging.info(f"Downloading {url}")
download_zip(url, file_paths.COCO_IMAGES, True)
ADAPTER_HOME = "https://ai2-prior-close.s3.us-west-2.amazonaws.com/adapters/"
adapter_paths = [
"cc3m-cov-diff.pkl",
"cc3m-linear-v1.pkl",
"cc3m-mean-diff.pkl",
"kp-restval-cov-diff.pkl",
"kp-restval-linear-v1.pkl",
"kp-restval-mean-diff.pkl",
]
def download_adapters():
for file_name in adapter_paths:
output = join(file_paths.ADAPTER_SOURCE, file_name)
if not exists(output):
logging.info(f"Downloading {file_name} to {output}")
download_to_file(join(ADAPTER_HOME, file_name), output)
KP_SOURCE = "http://cs.stanford.edu/people/karpathy/deepimagesent/caption_datasets.zip"
def download_kp():
if not exists(join(file_paths.COCO_SOURCE, "dataset_coco.json")):
download_zip(KP_SOURCE, file_paths.COCO_SOURCE)
def main():
py_utils.add_stdout_logger()
download_coco()
download_kp()
download_vqae()
download_snli_ve()
download_vqa_annotations()
download_adapters()
if __name__ == '__main__':
main() | close-main | close/download.py |
import argparse
import logging
import os
from transformers import AutoConfig
from l2v.data.visual_news import VisualNews
from l2v.experiments.utils import get_adapter
from l2v.train.optimizer import AdamWBuilder, DelayedWarmupScheduleBuilder
from l2v.train.trainer import TrainerSimple
from l2v.utils import py_utils
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from l2v.model.clip_t5_model import ClipT5Model, LinearAdapter, CLIP_DIMS
from l2v.model.model import BeamSearchSpec
from l2v.train.evaluator import ResultKey, VisualNewsEvaluator
from l2v.utils.pytorch_utils import get_devices
def main():
parser = argparse.ArgumentParser()
# Model args
parser.add_argument("--clip_model", default="open_clip")
parser.add_argument("--t5_model", default="t5-base")
parser.add_argument("--train_l", default="always") # change to "both" to also train on images
parser.add_argument("--cap_l", default="1to1")
parser.add_argument("--l_adapter", default="none")
parser.add_argument("--noise", type=float, default=0.0)
parser.add_argument("--scale", type=float)
# Optimizer args
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--warmup", type=int, default=None)
parser.add_argument("--decay", default="linear")
# Other training args
parser.add_argument("--batch_size", default=16, type=int)
parser.add_argument("--epochs", default=8, type=int)
# Where to save things
parser.add_argument("--override", action="store_true")
parser.add_argument("--output_dir")
parser.add_argument("--debug", action="store_true",
help="Train with tiny model/dataset for debugging")
args = parser.parse_args()
py_utils.add_stdout_logger()
l_adapter = get_adapter(args)
adapter = LinearAdapter(4)
if args.cap_l is not None and args.train_l is None:
args.train_l = "always"
if args.clip_model == "open_clip":
openai_clip = 'laion400m_e32'
args.clip_model = "ViT-L/14"
else:
openai_clip = None
dbg = args.debug
model = ClipT5Model(args.clip_model, args.t5_model, adapter,
caption_l=args.cap_l, language_shift=l_adapter,
lowercase_target=True, train_on_l=args.train_l,
one_to_many_loss="sum", openai_clip=openai_clip)
if args.warmup or args.decay:
scheduler = DelayedWarmupScheduleBuilder(warmup=args.warmup, decay=args.decay)
else:
scheduler = None
trainer = TrainerSimple(
train_dataset=VisualNews("train", sample=32 if dbg else None),
optimizer=AdamWBuilder(lr=args.lr, weight_decay=0.0, parameter_groups=[]),
epochs=args.epochs,
eval_dataset=VisualNews("val", sample=32 if dbg else None),
batch_size=args.batch_size,
evaluator=VisualNewsEvaluator(meteor=False, rouge=False),
prediction_args=dict(beam_search_spec=BeamSearchSpec(2, 16)),
scheduler=scheduler,
save_each_epoch=[args.epochs],
num_workers=3
)
trainer.train(model, args.output_dir, override=args.override)
if __name__ == '__main__':
main()
| close-main | close/experiments/train_visual_news.py |
import os
from typing import Union
from close.data.coco_captioning import CocoCaptioningKP
from close.data.dataset import Dataset
from close.data.vqa_e import EVQA
from close.data.vqa_v2 import Vqa2, VqaWithCaptions
from close.data.visual_entailment import VisualEntailment
from close.model.language_adapters import *
from close.train.evaluator import CaptionEvaluator, Evaluator, VqaEvaluator, EntailmentEvaluator
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def get_default_seq_len(ds: Dataset) -> int:
if isinstance(ds, (CocoCaptioningKP,)):
return 32
if isinstance(ds, (Vqa2, EVQA, VqaWithCaptions)):
return 16
if isinstance(ds, (VisualEntailment,)):
return 8
else:
raise NotImplementedError(f"No default lengths set for dataset {ds}")
def get_evaluator(ds: Dataset) -> Union[Evaluator, None]:
if isinstance(ds, (CocoCaptioningKP,)):
return CaptionEvaluator()
if isinstance(ds, (Vqa2, EVQA, VqaWithCaptions)):
return VqaEvaluator()
if isinstance(ds, (VisualEntailment,)):
return EntailmentEvaluator()
else:
raise ValueError()
def get_adapter(args):
if args.l_adapter.startswith("shift"):
_, src = args.l_adapter.split("-", maxsplit=1)
return Shift(src, args.scale, args.noise, renorm=False)
elif args.l_adapter == "lin":
return LinearAdapter("kp-linear-v1", args.noise, renorm=True)
elif args.l_adapter == "cc3m-lin":
return LinearAdapter("cc3m-linear-v1", args.noise, renorm=True)
elif args.l_adapter == "cov":
return CovNoise("kp-cov-v1", args.noise)
elif args.l_adapter == "cc3m-cov":
return CovNoise("cc3m-cov-v1", args.noise)
elif args.l_adapter == "noise":
return AddGuassianNoise(args.noise, renormalize=True)
elif args.l_adapter == "vis-news-shift":
l_adapter = CovNoise("kp-restval", args.noise, cov=False)
elif args.l_adapter == "vis-news-cov":
l_adapter = CovNoise("kp-restval", args.noise)
elif args.l_adapter is None:
raise NotImplementedError()
| close-main | close/experiments/utils.py |
import argparse
import logging
import os
import sys
root_folder = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(root_folder)
from close.data.coco_captioning import CocoCaptioningKP
from close.data.visual_entailment import VisualEntailment
from close.data.vqa_e import EVQA
from close.data.vqa_v2 import Vqa2, VqaWithCaptions
from close.experiments.utils import get_adapter, get_evaluator, get_default_seq_len
from close.train.optimizer import AdamWBuilder, DelayedWarmupScheduleBuilder
from close.train.trainer import TrainerSimple
from close.utils import py_utils
from close.model.clip_t5_model import ClipT5Model, EmbeddingTokenizer
from close.model.model import BeamSearchSpec
os.environ["TOKENIZERS_PARALLELISM"] = "false"
DEFAULT_NOISE = {
"vqa": 0.04,
"evqa": 0.04,
"ve": 0.08,
"s-cap": 0.12,
"m-cap": 0.04
}
def main():
parser = argparse.ArgumentParser("Train a CLOSE model")
parser.add_argument("--data", default="vqa-e")
# Model args
parser.add_argument("--clip_model", default="ViT-L/14")
parser.add_argument("--t5_model", default="t5-base")
parser.add_argument("--train_on_images", action="store_true")
parser.add_argument("--l_adapter", default="noise")
parser.add_argument("--noise", type=float, default=None)
parser.add_argument("--scale", type=float)
# Optimizer args
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--warmup", type=int, default=None)
parser.add_argument("--decay", default="linear")
# Other training args
parser.add_argument("--batch_size", default=None, type=int)
parser.add_argument("--epochs", default=8, type=int)
# Where to save things
parser.add_argument("--override", action="store_true")
parser.add_argument("--output_dir")
parser.add_argument("--debug", action="store_true",
help="Train with tiny dataset for debugging")
args = parser.parse_args()
dbg = args.debug
py_utils.add_stdout_logger()
# Figure out the per-dataset settings
default_batch_size = 128
caption_mode = "1to1"
if args.data in {"vqa-e", "vqa", "vqa-trainval"}:
if args.data == "vqa":
tr = VqaWithCaptions("train", 50 if dbg else None)
val = VqaWithCaptions("val", sample=50 if dbg else 5000)
elif args.data == "vqa-trainval":
tr = VqaWithCaptions("trainval", 50 if dbg else None)
val = VqaWithCaptions("val", sample=50 if dbg else 1000)
else:
tr = EVQA("train", 50 if dbg else None)
val = EVQA("val", sample=50 if dbg else 5000)
default_noise = 0.04
elif args.data == "ve":
tr = VisualEntailment("train", sample=8 if dbg else None)
val = VisualEntailment("val", sample=10 if dbg else None)
default_noise = 0.08
elif args.data == "s-cap":
tr = CocoCaptioningKP("train", 50 if dbg else None)
val = CocoCaptioningKP("val", 50 if dbg else None)
default_noise = 0.12
elif args.data == "m-cap":
caption_mode = "other-target"
tr = CocoCaptioningKP("train", 50 if dbg else None)
val = CocoCaptioningKP("val", 50 if dbg else None)
default_noise = 0.04
# Since we will train with about 4 targets per input caption
default_batch_size = 32
else:
raise NotImplementedError(args.data)
if args.noise is None:
args.noise = default_noise
logging.info(f"Defaulting to noise {args.noise}")
if args.batch_size is None:
logging.info(f"Default to batch size {default_batch_size}")
args.batch_size = default_batch_size
# Build the model
l_adapter = get_adapter(args)
adapter = EmbeddingTokenizer(4)
if args.clip_model == "open_clip":
openai_clip = 'laion400m_e32'
args.clip_model = "ViT-L/14"
else:
openai_clip = None
model = ClipT5Model(
args.clip_model, args.t5_model, adapter,
language_shift=l_adapter, lowercase_target=True, train_on_l=not args.train_on_images,
openai_clip=openai_clip, caption_mode=caption_mode,
average_vqa_caption=True
)
if args.warmup or args.decay:
scheduler = DelayedWarmupScheduleBuilder(warmup=args.warmup, decay=args.decay)
else:
scheduler = None
# Build the trainer and train
trainer = TrainerSimple(
train_dataset=tr,
optimizer=AdamWBuilder(lr=args.lr),
epochs=args.epochs,
eval_dataset=val,
batch_size=args.batch_size,
evaluator=get_evaluator(val),
prediction_args=dict(beam_search_spec=BeamSearchSpec(5, get_default_seq_len(val))),
scheduler=scheduler,
save_each_epoch=[args.epochs],
num_workers=3
)
trainer.train(model, args.output_dir, override=args.override)
if __name__ == '__main__':
main() | close-main | close/experiments/train.py |
import json
import nltk
import openai
import random
import numpy as np
from collections import OrderedDict
from tqdm import tqdm
openai.api_key = "YOUR_OPENAI_KEY"
harry_potter_characters = [
"Sirius Black",
"Cho Chang",
"Aberforth Dumbledore",
"Albus Dumbledore",
"Hermione Granger",
"Fenrir Greyback",
"Neville Longbottom",
"Luna Lovegood",
"Xenophilius Lovegood",
"Remus Lupin",
"Draco Malfoy",
"Lucius Malfoy",
"Harry Potter",
"James Potter",
"Lily Potter",
"Lord Voldemort",
"Arthur Weasley",
"Ron Weasley",
"Severus Snape",
"Voldemort",
"Bellatrix Lestrange",
"George Weasley",
"Rubeus Hagrid",
"Newt Scamander",
"Delores Umbridge",
"Minerva McGonagall",
"Alastor Moody",
"Molly Weasley",
"Albus Severus Potter",
"Gilderoy Lockhart",
"Ginny Weasley",
"Gellert Grindelwald",
"Dobby"
]
def generate_coco_style_prompt(prompt):
return """Generate caption from prompt:
person, pastry, bag:
A person putting some pastries into a bag.
statue, woman, bench, sit:
A statue of two women with purses sitting on a bench.
frisbee, catch:
A man in a grassy field about to catch a frisbee.
sculpture, living room, TV:
A living room with TV and entertainment center beside a sculpture of a torso.
girl, toddler, bed, lay:
A girl toddler laying on a day bed.
bicycle, building, door, red:
A red bicycle leaned against a red stucco building with a graffiti covered door.
sheep, grass, forest:
Sheep grazing in the grass, with a forest in the background.
cat, keyboard, yellow:
A cat sitting at a keyboard looking at a yellow post-it note with a scent written on it.
{}:""".format(prompt)
def generate_ego_centric_caption(coco_caption):
return """Generate ego-centric caption from the given caption:
The woman is wearing an umbrella hat and on her phone.
My aunt is wearing an umbrella hat and on her phone.
A group of people sitting around two couches.
We are sitting around two couches.
A woman peaking around a pile of old refrigerators.
My girlfriend peaking around a pile of old refrigerators.
A black and white picture of a busy street, only red is in color, a red double decker bus drives down the road.
We are waiting at a busy street, while a red double decker bus drives down the road.
A clock tower has a white pole on top.
We visited a clock tower with a white pole on top.
A road filled with cars in a desert.
We are driving on a road filled with cars in a desert.
There are horses standing in the rocks near the water.
We saw some horses standing in the rocks near the water.
A fire hydrant with grass grown around it on a curb side.
I walked past a fire hydrant with grass grown around it on a curb side.
{}
""".format(coco_caption)
def generate_harry_potter_prompt(prompt):
return """Generate captions in imaginary-scenes using characters in Harry Potter, from prompt:
Harry Potter:
Harry finds an old textbook with annotations by the Half-Blood Prince, due to which he achieves success in Potions class.
Sirius Black:
Harry's life shatters before his eyes as he is bitten by Remus Lupin while Sirius Black escapes on the back of Buckbeak.
Hermione Granger:
Hermione shut the book with a snap.
Ronald Weasley:
There before him stood Ron, fully dressed but drenched to the skin, his hair plastered to his face, the sword of Gryffindor in one hand and the Horcrux dangling from its broken chain in the other.
Luna Lovegood:
Luna looked around at them, her head tilted to one side. “Yes,” she said finally.
Draco Malfoy:
Draco Malfoy smiled mirthlessly. “And you think you can stop me, Potter? You’re just a boy.”
Remus Lupin:
Remus Lupin watched as the life seeped slowly out of Ted Tonks, knowing that he could do nothing to save him.
{}:""".format(prompt)
with open('../data/coco_style_captions_train.json') as f:
coco_data = json.load(f)
train_size = round(len(coco_data) * 0.7)
train_coco, val_coco = coco_data[:train_size], coco_data[train_size:]
print(f"train size = {len(train_coco)}")
print(f"val size = {len(val_coco)}")
with open('../data/ego_centric_captions_train.json', 'r+', encoding='utf-8') as f:
data = json.load(f)
for i, cap in tqdm(enumerate(train_coco), leave=False):
response = openai.Completion.create(
model="text-curie-001",
prompt=generate_ego_centric_caption(cap),
temperature=1,
max_tokens=64,
top_p=1,
best_of=3,
frequency_penalty=2,
presence_penalty=1
)
for choice in response.choices:
text = choice.text
while text.startswith('\n') or text.startswith(' ') or text.startswith('\t'):
text = text[1:]
if text and text != cap:
data.append([cap , text])
# To prevent occasional OPEN AI API errors, save the data frequently.
if i % 100 == 0:
f.seek(0)
json.dump(data, f, ensure_ascii=False)
f.seek(0)
json.dump(data, f, ensure_ascii=False)
with open('../data/ego_centric_captions_val.json', 'w', encoding='utf-8') as f:
  data = []
for i, cap in tqdm(enumerate(val_coco), leave=False):
response = openai.Completion.create(
model="text-curie-001",
prompt=generate_ego_centric_caption(cap),
temperature=1,
max_tokens=64,
top_p=1,
best_of=3,
frequency_penalty=2,
presence_penalty=1
)
for choice in response.choices:
text = choice.text
while text.startswith('\n') or text.startswith(' ') or text.startswith('\t'):
text = text[1:]
if text and text != cap:
data.append([cap , text])
# To prevent occasional OPEN AI API errors, save the data frequently.
if i % 100 == 0:
f.seek(0)
json.dump(data, f, ensure_ascii=False)
f.seek(0)
json.dump(data, f, ensure_ascii=False)
with open('../data/ego_centric_captions_train.json') as f:
data = json.load(f)
print(len(data))
# First pass: collect the 4- to 9-grams of every caption and build a frequency
# distribution over them.
all_ngrams = []
caption_ngrams = []
for cap in data:
  tokens = nltk.word_tokenize(cap)
  ngrams = list(nltk.everygrams(tokens, 4, 9))
  caption_ngrams.append(ngrams)
  all_ngrams.extend(ngrams)
fdist = nltk.FreqDist(all_ngrams)

# Second pass: probabilistically drop captions that contain very frequent n-grams,
# so the filtered data is less repetitive.
filtered_data = []
for cap, ngrams in zip(data, caption_ngrams):
  filter_p = 0
  for gram in ngrams:
    if fdist[gram] >= 50:
      filter_p += fdist[gram] / 800
  if random.uniform(0, 1) > filter_p:
    filtered_data.append(cap)
fdist_descending = OrderedDict(
sorted(fdist.items(), key=lambda kv: kv[1], reverse=True))
with open('../data/filtered_ego_centric_captions_train.json', 'w', encoding='utf-8') as f:
json.dump(filtered_data, f, ensure_ascii=False)
| close-main | close/experiments/generate_stylistic_captioning.py |
import argparse
import json
import logging
import os
from typing import Union
import numpy as np
import sys
root_folder = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(root_folder)
from close.data.coco_captioning import CocoCaptioningKP
from close.data.dataset import Dataset
from close.data.visual_entailment import VisualEntailment
from close.data.vqa_e import EVQA
from close.data.vqa_v2 import Vqa2, VqaWithCaptions
from close.eval.evaluation import save_predictions, save_evaluation
from close.experiments.utils import get_default_seq_len, get_evaluator
from close.model.model import BeamSearchSpec
from close.train.evaluator import CaptionEvaluator, Evaluator, VqaEvaluator
from close.train.runner import prediction_args_to_json, run
from close.utils import py_utils, pytorch_utils
from close.utils.to_params import to_params
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from datetime import datetime
from os.path import join, exists, dirname
from shutil import rmtree
def eval_on(args, run_dir, dataset, evaluator, prediction_args, devices, skip_existing=True):
if args.output_dir:
output_dir = args.output_dir
elif args.output_name is not None:
if args.output_name == "":
name = f"{dataset.get_name()}"
else:
name = f"{dataset.get_name()}--{args.output_name}"
eval_dir = join(run_dir, "eval")
if not exists(eval_dir):
os.mkdir(eval_dir)
output_dir = join(eval_dir, name)
else:
output_dir = None
if output_dir is not None:
if exists(output_dir):
if len(os.listdir(output_dir)) > 0:
if skip_existing:
logging.info(f"{output_dir} already exists, skipping")
return
if args.override or py_utils.get_yes_no(f"{output_dir} exists, delete (y/n)?"):
logging.info(f"Deleting {output_dir}")
rmtree(output_dir)
else:
logging.info("No override, not stopping")
return
elif not exists(dirname(output_dir)):
raise ValueError(f"Parent folder {dirname(output_dir)} does not exist")
else:
logging.info(f"Will save to {output_dir}")
else:
logging.info(f"Not saving the output")
if output_dir:
if not exists(output_dir):
os.mkdir(output_dir)
logging.info(f"Saving output to {output_dir}")
logging.info("Setting up...")
examples = dataset.load()
if args.dry_run:
logging.info("Skipping running the model since this is a dry run")
return
beams_to_keep = vars(args).get("beams_to_keep")
batch_size = args.batch_size
output = run(
run_dir, examples, devices, batch_size, args.num_workers,
prediction_args, beams_to_keep=beams_to_keep)
if output_dir is not None:
logging.info(f"Saving output to {output_dir}")
save_predictions(output, output_dir)
config = dict(
batch_size=batch_size,
num_workers=args.num_workers,
predictions_args=prediction_args_to_json(prediction_args),
dataset=to_params(dataset, Dataset),
beams_to_keep=beams_to_keep,
date=datetime.now().strftime("%m%d-%H%M%S"),
)
with open(output_dir + "/config.json", "w") as f:
json.dump(config, f, indent=2)
logging.info("Evaluating...")
if isinstance(evaluator, Evaluator):
results = evaluator.evaluate(examples, output)
if output_dir is not None:
results["n"] = len(output)
logging.info(f"Caching evaluation to {output_dir}")
save_evaluation(output_dir, evaluator, results)
results = {str(k): v for k, v in results.items()}
print(json.dumps(results, indent=2))
elif evaluator is None:
logging.info(f"No evaluator for this data")
elif output_dir is not None:
submission_file = join(output_dir, "submission.json")
logging.info(f"Building submission file {submission_file}")
evaluator.build(dataset, output, submission_file)
def eval_generative_model(args, run_dir, dataset, devices, skip_existing=True):
prediction_args = {}
arg_dict = vars(args)
if arg_dict.get("max_seq_len"):
max_seq_len = arg_dict["max_seq_len"]
else:
max_seq_len = get_default_seq_len(dataset)
logging.info(f"Defaulting to max_seq_len {max_seq_len} for dataset {dataset.get_name()}")
if max_seq_len is not None:
bs = BeamSearchSpec(beam_size=args.beam_size, max_seq_len=max_seq_len)
else:
bs = None
prediction_args["beam_search_spec"] = bs
evaluator = get_evaluator(dataset)
eval_on(args, run_dir, dataset, evaluator, prediction_args, devices, skip_existing)
def main():
parser = argparse.ArgumentParser(description="Compute predictions for a GPV model")
parser.add_argument("model")
parser.add_argument("data", nargs="+")
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--sample", type=int, default=None)
parser.add_argument("--beams_to_keep", type=int, default=5, help="Number of predictions to save")
parser.add_argument("--max_seq_len", type=int, default=None)
parser.add_argument("--beam_size", type=int, default=5)
parser.add_argument("--override", action="store_true", help="Delete output dir if it exists")
parser.add_argument("--output_dir", help="Save to this directory")
parser.add_argument("--output_name",
help="Save results in model/run/eval/{dataset_name}--{output_name}")
parser.add_argument("--dry_run", action="store_true")
args = parser.parse_args()
py_utils.add_stdout_logger()
if args.output_dir and args.output_name:
raise ValueError("Cannot specify output_name and output_dir")
models = py_utils.find_models(args.model)
if len(models) == 0:
logging.info("No models selected")
return
datasets = []
for ds in args.data:
if ds == "cap":
datasets.append(CocoCaptioningKP("val", sample=args.sample))
elif ds == "cap-test":
datasets.append(CocoCaptioningKP("test", sample=args.sample))
elif ds == "evqa":
datasets.append(EVQA("val", sample=args.sample))
elif ds == "vqa":
datasets.append(Vqa2("val", sample=args.sample))
elif ds == "vqa-test":
datasets.append(Vqa2("test", sample=args.sample))
elif ds == "ve":
datasets.append(VisualEntailment("val", sample=args.sample))
elif ds == "ve-test":
datasets.append(VisualEntailment("test", sample=args.sample))
else:
raise NotImplementedError(ds)
devices = pytorch_utils.get_device()
if args.output_dir:
models = py_utils.flatten_list(x[1] for x in models.values())
if len(models) > 1:
raise ValueError("Cannot use one output dir if more than one model selected!")
model = models[0]
if len(datasets) > 1:
raise ValueError("Cannot use one output dir if more than one dataset is selected!")
if len(datasets) == 0:
raise ValueError("No datasets is selected!")
eval_generative_model(args, model, datasets[0], devices, skip_existing=False)
else:
targets = []
for model_name, (model_dir, runs) in models.items():
for ds in datasets:
for run_dir in runs:
targets.append((run_dir, ds))
if len(targets) == 0:
raise ValueError("No datasets to evaluate on found!")
for i, (run_dir, dataset) in enumerate(targets):
if len(targets) > 1:
logging.info(f"Evaluating on {run_dir} {dataset.get_name()} ({i+1}/{len(targets)})")
else:
logging.info(f"Evaluating on {run_dir} {dataset.get_name()}")
eval_generative_model(args, run_dir, dataset, devices, skip_existing=len(targets) > 1)
if __name__ == '__main__':
main()
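# Example invocation (the model path and options are assumptions):
#   python close/experiments/eval.py models/clip-t5/r0 vqa --sample 1000 --output_name val-sample
# which writes predictions and evaluation results under the run's eval/ directory.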
| close-main | close/experiments/eval.py |
from os import listdir
from os.path import join
from close import file_paths
_IMAGE_ID_TO_SIZE_MAP = {}
IMAGE_SOURCE_MAP = {
"coco": file_paths.COCO_IMAGES,
"flicker30k": file_paths.FLICKER30K,
"visual_news": file_paths.VISUAL_NEWS,
}
def get_image_file(image_id) -> str:
"""Returns the filepath of an image corresponding to an input image id
To support multiple datasets, we prefix image_ids with "source/"
We use this extra level of indirection instead of the using filepaths directly to allow
file-system independent image_ids
"""
source, key = image_id.split("/", 1)
if source in IMAGE_SOURCE_MAP:
return join(IMAGE_SOURCE_MAP[source], key)
raise ValueError(f"Unknown image id {image_id}")
def get_coco_image_id(subset, image_id):
"""Turns COCO image_id into a COCO filepath"""
return f'coco/{subset}/COCO_{subset}_{str(image_id).zfill(12)}.jpg'
_IMAGE_TO_SUBSETS = None
def get_coco_subset(image_id: int) -> str:
"""Get the COCO subset an image belongs"""
global _IMAGE_TO_SUBSETS
if _IMAGE_TO_SUBSETS is None:
_IMAGE_TO_SUBSETS = {}
for subset in ["train2014", "val2014"]:
for image_file in listdir(join(file_paths.COCO_IMAGES, subset)):
image_id = int(image_file.split("_")[-1].split(".")[0])
_IMAGE_TO_SUBSETS[image_id] = subset
return _IMAGE_TO_SUBSETS[image_id]
def get_coco_id_from_int_id(image_id: int) -> str:
"""Get a COCO image id from an int image"""
return get_coco_image_id(get_coco_subset(image_id), image_id)
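# Illustrative round trip (the id and its subset are assumptions based on the
# COCO 2014 naming convention):
#   get_coco_id_from_int_id(42) -> "coco/train2014/COCO_train2014_000000000042.jpg"
#   get_image_file("coco/train2014/COCO_train2014_000000000042.jpg")
#   # -> join(file_paths.COCO_IMAGES, "train2014/COCO_train2014_000000000042.jpg")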
| close-main | close/utils/image_utils.py |
import logging
from typing import Union
import torch
from torch import nn
def get_device(device_name: Union[None, str, int]=None):
if device_name is None:
if torch.cuda.is_available():
logging.info("cuda found, defaulting to cuda device")
return torch.device('cuda')
else:
logging.info("cuda not found, using cpu device")
return torch.device('cpu')
else:
try:
device_name = int(device_name)
except ValueError:
pass
return torch.device(device_name)
def to_device(batch, device):
if batch is None:
return None
if isinstance(batch, (float, int, str)):
return batch
if isinstance(batch, dict):
return {sub_k: to_device(sub_v, device) for sub_k, sub_v in batch.items()}
if isinstance(batch, (tuple, list)):
return [to_device(x, device) for x in batch]
else:
return batch.to(device)
def get_model_device(module: torch.nn.Module):
return next(module.parameters()).device
def replace_parameters(model: nn.Module, persistent):
"""Replace's the model parameters with buffers"""
for child in model.modules():
for name, param in list(child.named_parameters(recurse=False)):
child.__delattr__(name)
child.register_buffer(name, param.data, persistent)
def segment_mean(x, segments):
counts = torch.unique_consecutive(segments.cpu(), return_counts=True)[1]
start = 0
means = []
for c in counts:
means.append(x[start:start+c].mean(0))
start += c
return torch.stack(means, 0)
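# Minimal sketch of segment_mean (values are illustrative): each contiguous run of
# identical segment ids is averaged into a single row.
#   x = torch.arange(6.).reshape(6, 1); segments = torch.tensor([0, 0, 1, 1, 1, 2])
#   segment_mean(x, segments) -> tensor([[0.5], [3.0], [5.0]])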
def concat_masked_sequences(
seq1, seq2, mask2=None
):
batch = seq1.size(0)
if mask2 is None:
return torch.cat([seq1, seq2], 1), None
else:
out = torch.cat([seq1, seq2], 1)
mask = torch.cat([
torch.ones(batch, seq1.size(1), device=seq1.device, dtype=mask2.dtype),
mask2
], 1)
return out, mask
| close-main | close/utils/pytorch_utils.py |
"""Code from GPV-2 for saving FromParams objects to disk, used for model/trainer saving
AllenNLP recently added their own to_params approach, but their default implementation
does not work for some of our models so we stick with the GPV-2 version.
"""
import enum
import typing
from collections import OrderedDict
from inspect import signature, Parameter
from typing import Dict, Any, Type, Union
from allennlp.common import FromParams, Registrable
from allennlp.common.from_params import remove_optional
import numpy as np
def get_type_name(cls: Type[FromParams], super_cls: Type[Registrable]):
for name in super_cls.list_available():
if cls == super_cls.resolve_class_name(name)[0]:
return name
raise ValueError(f"Unable to find type name for {cls} from class {super_cls}, "
f"check if it was registered correctly")
def _has_args(anno):
return (
hasattr(anno, "__args__") and
anno.__args__ is not None and
len(anno.__args__) > 0
)
def _is_fromparams(anno):
if type(anno) not in {type, enum.EnumMeta}:
# typing.* annotations do not work with issubclass, so fail them here since `FromParam`
# objects will be annotated with actual types
return False
return issubclass(anno, FromParams)
def to_params_any(obj, annotation):
"""Convert and object with a type annotation to parameter or parameter dictionary"""
if obj is None:
# None is allowed for any annotation, so no type-checking required
return obj
obj_type = type(obj)
annotation = remove_optional(annotation)
origin = getattr(annotation, "__origin__", None)
if origin is Union:
if len(annotation.__args__) == 1:
# This can happen after `remove_optional(Optional[str])` as well as
# from user type annotations
annotation = annotation.__args__[0]
else:
# Attempt to figure out what annotation is applicable, this is imperfect
# since doing this in general is non-trivial, but we make a best-effort
# attempt that works for simple cases.
# We can fail if the union has multiple types that are only distinguished
# by the generic type e.g., Union[List[int], List[str]]
# Also if the union has superclasses of its own elements, e.g., Union[Number, float]
# TODO there are a few ways to make this more general
candidates = []
for anno in annotation.__args__:
if hasattr(anno, "__args__") and hasattr(anno, "__origin__") and anno.__args__ is not None:
          # Anno is a container with its own sub-types, just check the top-level types
if isinstance(obj, anno.__origin__):
candidates.append(anno)
else:
if isinstance(obj, anno):
candidates.append(anno)
if len(candidates) == 0:
raise ValueError(f"Object {obj} does not match any type annotation in Union {annotation}")
if len(candidates) > 1:
raise ValueError(f"Ambiguous Union {annotation} for object {obj}")
annotation = candidates[0]
# `FromParams` object, in which case we need an accurate annotation
if isinstance(obj, FromParams):
if annotation is typing.Any:
raise ValueError(f"FromParams object {obj} needs type annotations")
elif not _is_fromparams(annotation):
raise ValueError(f"FromParams object {obj} has non-FromParams annotation {annotation}")
return to_params(obj, annotation)
elif _is_fromparams(annotation):
raise ValueError(f"FromParams annotation {annotation} has non-FromParams object {obj}")
# Base cases, no need to worry about annotations
# note we allow incorrect typing here because I don't think it matters when loading the class
  if obj_type in {str, int, float, bool, np.integer, np.floating, np.ndarray, np.bool_}:
return obj
# Collections, if there are type annotations, try to preserve them, since we will need
# them if the collection contains `FromParams` classes
# For the most part we trust clients to have correctly-typed containers
elif obj_type in (list, set, frozenset): # Single arg containers
if not _has_args(annotation):
anno = typing.Any
else:
assert len(annotation.__args__) == 1, "Incorrect annotation"
anno = annotation.__args__[0]
return obj.__class__(to_params_any(x, anno) for x in obj)
elif obj_type == tuple:
if not _has_args(annotation):
return obj.__class__(to_params_any(x, typing.Any) for x in obj)
elif origin in (list, typing.List): # Allow tuples for list objects
assert len(annotation.__args__) == 1, "Incorrect annotation"
anno = annotation.__args__[0]
return obj.__class__(to_params_any(x, anno) for x in obj)
else:
if len(annotation.__args__) != len(obj):
# TODO handle variable length tuple annotation
raise ValueError()
return obj.__class__(to_params_any(x, anno) for x, anno in zip(obj, annotation.__args__))
elif obj_type in (dict, OrderedDict, Dict): # Two arg containers
if not _has_args(annotation):
k_anno, v_anno = typing.Any, typing.Any
else:
assert len(annotation.__args__) == 2, "Incorrect annotation"
k_anno, v_anno = annotation.__args__
output = obj.__class__()
for k, v in obj.items():
output[to_params_any(k, k_anno)] = to_params_any(v, v_anno)
return output
else:
# Not a collection, base type, or FromParams, we can't convert it
raise ValueError(f"Unable to convert {obj.__class__} to parameters")
def to_params(obj: FromParams, source_cls=None) -> Dict[str, Any]:
"""Tries to convert a `FromParams` object to its parameter dictionary.
This requires `obj` to store the parameters to __init__ as attributes with the
  corresponding parameter name, or to provide a `to_params` method. These attributes
  should themselves be basic python types, or other FromParams classes.
  Any `FromParams` instances found need to have accurate type annotations
"""
cls = obj.__class__
if (
hasattr(obj, "to_params") and
cls._to_params != Registrable._to_params and
cls._to_params != FromParams._to_params
):
# If the object has overridden the default to_params method, use that
args = obj.to_params().as_dict(quiet=True)
else:
init = cls.__init__
if init is object.__init__:
args = {} # No init args
else:
init_signature = signature(init)
for param in init_signature.parameters.values():
if param.kind != Parameter.POSITIONAL_OR_KEYWORD:
raise NotImplementedError(cls.__name__ + " has **kwargs or *args in __init__")
param_names = [p for p in init_signature.parameters.keys() if p != "self"]
args = {}
for name in param_names:
if not hasattr(obj, name):
raise ValueError(cls.__name__ + " did not store parameter " + name)
val = getattr(obj, name)
annotation = init_signature.parameters[name].annotation
args[name] = to_params_any(val, annotation)
if source_cls is not None and source_cls != cls:
args["type"] = get_type_name(cls, source_cls)
return args
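# Minimal sketch of the convention `to_params` relies on (the class below is hypothetical):
#   class Foo(FromParams):
#     def __init__(self, dim: int):
#       self.dim = dim          # __init__ args stored under their parameter names
#   to_params(Foo(8)) == {"dim": 8}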
| close-main | close/utils/to_params.py |
import json
import logging
import pickle
import sys
from collections import defaultdict
from json import JSONEncoder
from os import listdir, remove, walk, makedirs
from os.path import exists, join, isdir, basename, dirname, split, relpath
from shutil import rmtree
from typing import TypeVar, List, Iterable, Dict, Any, Tuple
import numpy as np
from allennlp.common.util import import_module_and_submodules
def get_yes_no(msg):
while True:
txt = input(msg).strip().lower()
if txt in {"y", "yes"}:
return True
if txt in {"n", "no"}:
return False
def load_pickle_object(file_name):
with open(file_name, "rb") as f:
return pickle.load(f)
def dump_pickle_object(obj, file_name):
with open(file_name, "wb") as f:
pickle.dump(obj, f)
def load_json_object(file_name):
with open(file_name, "r") as f:
return json.load(f)
def dump_json_object(dump_object, file_name, indent=2):
with open(file_name, "w") as f:
json.dump(dump_object, f, indent=indent)
class DisableLogging:
"""Context manager the temporarily disables logging"""
def __init__(self, to_level=logging.INFO):
self.to_level = to_level
def __enter__(self):
self.prev_level = logging.root.manager.disable
if self.prev_level < self.to_level:
logging.disable(self.to_level)
def __exit__(self, exc_type, exc_val, exc_tb):
logging.disable(self.prev_level)
def add_stdout_logger():
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d %H:%M:%S', )
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
root = logging.getLogger()
  # fiftyone adds a stdout logger for some reason, we detect it here by looking for its
  # completely plain formatting (seriously why is it here?) and delete it
for i, h in enumerate(list(root.handlers)):
if h.formatter._fmt == '%(message)s':
root.removeHandler(h)
root.setLevel(logging.INFO)
root.addHandler(handler)
  # Re-direct warnings to logging
logging.captureWarnings(True)
def clear_if_nonempty(output_dir, override=False):
if output_dir:
if exists(output_dir) and listdir(output_dir):
if override or get_yes_no("%s is non-empty, override (y/n)?" % output_dir):
for x in listdir(output_dir):
if isdir(join(output_dir, x)):
rmtree(join(output_dir, x))
else:
remove(join(output_dir, x))
else:
raise ValueError(f"Output directory ({output_dir}) already exists and is not empty.")
def select_run_dir(run_dir):
"""If `run_dir` is top-level model dir with a single run, returns that run"""
if exists(join(run_dir, "model.json")):
candidates = []
for filename in listdir(run_dir):
filepath = join(run_dir, filename)
if isdir(filepath) and filename.startswith("r"):
candidates.append(filepath)
if len(candidates) > 1:
raise ValueError(f"Multiple runs in {run_dir}, please select one")
elif len(candidates) == 0:
raise ValueError(f"No runs found in {run_dir}")
else:
logging.info(f"Selecting run {basename(candidates[0])} for {run_dir}")
run_dir = candidates[0]
return run_dir
K = TypeVar('K')
T = TypeVar('T')
def transpose_list_of_dicts(lst: List[Dict[K, T]]) -> Dict[K, List[T]]:
out = defaultdict(list)
for r in lst:
for k, v in r.items():
out[k].append(v)
return {k: v for k, v in out.items()}
def transpose_lists(lsts: Iterable[Iterable[T]]) -> List[List[T]]:
"""Transpose a list of lists."""
return [list(i) for i in zip(*lsts)]
def duration_to_str(seconds):
sign_string = '-' if seconds < 0 else ''
seconds = abs(int(seconds))
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return '%s%dd%dh%dm%ds' % (sign_string, days, hours, minutes, seconds)
elif hours > 0:
return '%s%dh%dm%ds' % (sign_string, hours, minutes, seconds)
elif minutes > 0:
return '%s%dm%ds' % (sign_string, minutes, seconds)
else:
return '%s%ds' % (sign_string, seconds)
def flatten_list(iterable_of_lists: Iterable[Iterable[T]]) -> List[T]:
"""Unpack lists into a single list."""
return [x for sublist in iterable_of_lists for x in sublist]
def val_to_str(val: float, fmt):
if val is None:
return "-"
if isinstance(val, str):
return val
return fmt % (100*val)
def table_string(table: List[List[str]]) -> str:
"""Table as list-of=lists to string."""
  # pad each column to the max column length
if len(table) == 0:
return ""
col_lens = [0] * len(table[0])
for row in table:
for i, cell in enumerate(row):
col_lens[i] = max(len(cell), col_lens[i])
formats = ["{0:<%d}" % x for x in col_lens]
out = []
for row in table:
out.append(" ".join(formats[i].format(row[i]) for i in range(len(row))))
return "\n".join(out)
def dict_of_dicts_as_table_str(data: Dict[str, Dict[str, Any]], val_format, all_keys=None,
top_right="_", table_format="even-spaced") -> str:
"""Table of row->col->value to string"""
if all_keys is None:
all_keys = {}
for name, result in data.items():
for key in result:
if key not in all_keys:
all_keys[key] = 0
all_keys = list(all_keys)
header = [top_right] + all_keys
table = [header]
for name, result in data.items():
row = [name] + [val_to_str(result.get(x), val_format) for x in all_keys]
table.append(row)
if table_format == "even-spaced":
return table_string(table)
elif table_format == "none":
return table
elif table_format == "csv":
return "\n".join(",".join(row) for row in table)
elif table_format == "tsv":
return "\n".join(",".join(row) for row in table)
elif table_format == "latex":
return "\n".join(" & ".join(row) + "\\\\" for row in table)
else:
raise ValueError()
def list_of_dicts_as_table_str(data: List[Dict[str, Any]], val_format,
all_keys=None, table_format="even-spaced") -> str:
"""Table of row->col->value to string"""
if all_keys is None:
all_keys = {}
for result in data:
for key in result:
if key not in all_keys:
all_keys[key] = 0
all_keys = list(all_keys)
header = all_keys
table = [header]
for result in data:
row = [val_to_str(result.get(x), val_format) for x in all_keys]
table.append(row)
if table_format == "even-spaced":
return table_string(table)
elif table_format == "csv":
return "\n".join(",".join(row) for row in table)
else:
raise ValueError()
def int_to_str(k: int) -> str:
if isinstance(k, int) and k % 1000 == 0:
return str(k//1000) + "k"
else:
return str(k)
def nested_struct_to_flat(tensors, prefix=(), cur_dict=None) -> Dict[Tuple, Any]:
"""Converts a nested structure of dict/lists/tuples to a flat dict with tuple keys"""
if cur_dict is None:
cur_dict = {}
nested_struct_to_flat(tensors, (), cur_dict)
return cur_dict
if isinstance(tensors, dict):
if len(tensors) == 0:
raise ValueError("Cannot convert empty dict")
for k, v in tensors.items():
if isinstance(k, int):
# We currently use int keys to signal a list, so this would result in errors
raise NotImplementedError("Integer keys")
nested_struct_to_flat(v, prefix + (k, ), cur_dict)
elif isinstance(tensors, (tuple, list)):
if len(tensors) == 0:
raise ValueError("Cannot convert empty tuples/lists")
for ix, v in enumerate(tensors):
nested_struct_to_flat(v, prefix + (ix, ), cur_dict)
else:
cur_dict[prefix] = tensors
def flat_to_nested_struct(nested: Dict):
"""Undos the effect of `nested_struct_to_flat`"""
if len(nested) == 0:
return None
if isinstance(next(iter(nested.keys()))[0], str):
out = {}
else:
out = []
for prefix, value in nested.items():
parent = out
for i, key in enumerate(prefix[:-1]):
next_parent = {} if isinstance(prefix[i+1], str) else []
if isinstance(key, str):
if key not in parent:
parent[key] = next_parent
parent = parent[key]
elif isinstance(key, int):
if len(parent) < key + 1:
parent += [None] * (key + 1 - len(parent))
if parent[key] is None:
parent[key] = next_parent
parent = parent[key]
else:
raise NotImplementedError()
key = prefix[-1]
if isinstance(key, int):
if len(parent) < key + 1:
parent += [None] * (key + 1 - len(parent))
parent[prefix[-1]] = value
return out
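# Sketch of the round trip between the two helpers (values are illustrative):
#   nested_struct_to_flat({"a": [1, 2], "b": {"c": 3}})
#   # -> {("a", 0): 1, ("a", 1): 2, ("b", "c"): 3}
#   flat_to_nested_struct({("a", 0): 1, ("a", 1): 2, ("b", "c"): 3})
#   # -> {"a": [1, 2], "b": {"c": 3}}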
def import_all():
import_module_and_submodules(f"close.model")
def is_model_dir(x):
return exists(join(x, "model.json"))
def is_run_dir(x):
return isdir(x) and any(z.endswith(".pth") for z in listdir(x))
def extract_runs(model_dir):
candidates = [join(model_dir, x) for x in listdir(model_dir)]
return [x for x in candidates if is_run_dir(x)]
def find_models(roots, require_runs=True) -> Dict[str, Tuple[str, List[str]]]:
"""Find all trained models in a directory, or list of directories
#:return A dictionary of name -> (model_dir, runs) of models found in `roots`. The name
  is derived from the location of model_dir relative to the input root.
"""
if isinstance(roots, str) and is_run_dir(roots):
return {split(roots)[1]: (dirname(roots), [roots])}
if isinstance(roots, str):
roots = [(None, roots)]
elif isinstance(roots, dict):
roots = list(roots.items())
elif len(roots) == 1:
roots = [(None, roots[0])]
else:
names = [x.rstrip("/").split("/")[-2] for x in roots]
roots = list(zip(names, roots))
models = {}
for root_name, root in roots:
if is_model_dir(root):
runs = []
for run_dir in listdir(root):
run_dir = join(root, run_dir)
if is_run_dir(run_dir):
runs.append(run_dir)
model_name = basename(root)
if root_name:
model_name = join(root_name, model_name)
models[model_name] = (root, runs)
continue
for dirpath, dirnames, filenames in walk(root):
for model_dir in dirnames:
model_dir = join(dirpath, model_dir)
if not is_model_dir(model_dir):
continue
model_name = relpath(model_dir, root)
if root_name:
model_name = join(root_name, model_name)
runs = extract_runs(model_dir)
if not require_runs or len(runs) > 0:
models[model_name] = (model_dir, runs)
return models
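# Illustrative call (the directory layout is an assumption): if models/clip-t5
# contains a model.json and a single run folder r0 with saved .pth states, then
#   find_models("models/clip-t5") -> {"clip-t5": ("models/clip-t5", ["models/clip-t5/r0"])}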
def ensure_dir_exists(filename):
"""Make sure the parent directory of `filename` exists"""
makedirs(dirname(filename), exist_ok=True)
class NumpyArrayEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return JSONEncoder.default(self, obj)
def subsample(out, num_examples, seed, sort_key=None):
if num_examples is None:
return out
if sort_key is not None:
out.sort(key=sort_key)
if isinstance(num_examples, float):
sample = int(round(len(out)*num_examples))
else:
sample = num_examples
np.random.RandomState(seed).shuffle(out)
return out[:sample]
| close-main | close/utils/py_utils.py |
#!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
# Modified to silence the stderr output
import os
import sys
import subprocess
import tempfile
import itertools
# path to the stanford corenlp jar
from typing import List, Dict
from pycocoevalcap.tokenizer import ptbtokenizer
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class QuitePTBTokenizer:
"""Python wrapper of Stanford PTBTokenizer"""
def tokenize(self, captions_for_image: Dict[str, List[str]]):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c.replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname=os.path.dirname(os.path.abspath(ptbtokenizer.__file__))
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences.encode())
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
token_lines = token_lines.decode()
lines = token_lines.split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
if not k in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
| close-main | close/utils/quiet_ptbtokenizer.py |
import logging
from os import listdir
from os.path import dirname, join, exists
import torch
from allennlp.common import Params
from close.model.model import Model, BEST_STATE_NAME
from close.utils import py_utils
from close.utils.py_utils import load_json_object, import_all, select_run_dir
def load_model(run_dir, use_best_weights=True, device=None, epoch=None):
import_all()
if run_dir.endswith("/"):
run_dir = run_dir[:-1]
run_dir = select_run_dir(run_dir)
model_spec = join(dirname(run_dir), "model.json")
params = Params(load_json_object(model_spec))
with py_utils.DisableLogging():
model: Model = Model.from_params(params)
model.initialize(load_params=False)
if epoch:
src = join(run_dir, f"state-ep{epoch}.pth")
if not exists(src):
raise ValueError(f"Requested epoch {epoch} not found in {run_dir}")
state_dict = torch.load(src, map_location="cpu")
else:
state_dict = None
if use_best_weights:
src = join(run_dir, BEST_STATE_NAME)
if exists(src):
state_dict = torch.load(src, map_location="cpu")
else:
logging.info(f"No best-path found for {run_dir}, using last saved state")
if state_dict is None:
epochs = [x for x in listdir(run_dir) if x.startswith("state-ep")]
epochs.sort(key=lambda x: int(x.split(".pth")[0][len("state-ep"):]), reverse=True)
    if len(epochs) == 0:
      raise ValueError(f"No saved states found in {run_dir}")
else:
logging.info(f"Using last saved state, {epochs[0]}")
state_dict = torch.load(join(run_dir, epochs[0]), map_location="cpu")
model.load_state_dict(state_dict)
if device is not None:
model.to(device)
model.eval()
return model
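# e.g. (the run directory is an assumption):
#   model = load_model("models/clip-t5/r0", device=torch.device("cuda"))
# reads model.json from the parent directory, restores the best (or latest) saved
# state dict, and returns the model in eval mode.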
| close-main | close/model/load_model.py |
import logging
from collections import Counter
from dataclasses import dataclass, field, replace
from typing import Any, Callable, List, Dict, Tuple, Union, Optional
import numpy as np
import clip
import torch
from PIL import Image
from allennlp.common import Registrable, Params
from torch import nn
from transformers import T5ForConditionalGeneration, AutoTokenizer, AutoConfig, T5Config, \
T5Tokenizer
from transformers.modeling_outputs import Seq2SeqLMOutput
from close.data.coco_captioning import CaptioningExample
from close.data.visual_entailment import VisualEntailmentExample
from close.data.visual_news import VisualNewsExample
from close.data.vqa_v2 import VqaExample
from close.model.layers import Layer
from close.model.model import Model, ExampleOutput, BeamSearchSpec
from close.train.allennlp_beamsearch import t5_initialize_decoding
from close.utils import image_utils, pytorch_utils
from close.utils.pytorch_utils import replace_parameters
CLIP_DIMS = {
"ViT-B/32": 512,
"ViT-L/14": 768,
"RN101": 512,
"RN50": 1024,
"RN50x4": 640,
"RN50x16": 768,
"RN50x64": 1024
}
@Layer.register("linear")
class EmbeddingTokenizer(Layer):
def __init__(self, n_tokens: int=4, n_constant: int=0):
super().__init__()
self.n_tokens = n_tokens
self.n_constant = n_constant
def init(self, t5_dim, clip_dim):
self.t5_dim = t5_dim
self.lin = nn.Linear(clip_dim, t5_dim*self.n_tokens)
if self.n_constant:
self.constant_tokens = nn.Parameter(torch.zeros(self.n_constant, t5_dim))
def forward(self, clip_features):
seq = self.lin(clip_features).reshape(-1, self.n_tokens, self.t5_dim)
if self.n_constant:
seq = torch.cat([self.constant_tokens.unsqueeze(0).tile(seq.size(0), 1, 1), seq], 1)
return seq
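# Shape sketch (sizes are illustrative): with n_tokens=4, n_constant=0 and a ViT-L/14
# backbone, a (batch, 768) CLIP feature is mapped to a (batch, 4, t5_dim) sequence of
# prompt embeddings that is prepended to the T5 encoder input.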
@dataclass
class TrainingExample:
image_id: Optional[str] = None
target_text: Union[List[str], None] = None
input_text: Optional[str] = None
image_text: Union[str, List[str], None] = None
example_id: Optional[str] = None
def get_example_id(self):
return self.example_id
@dataclass
class Collate:
tokenizer: Any
pre: Any
encode_image: bool
def __call__(self, batch: List[TrainingExample]):
out = {}
    # Encode the target text. To support examples with multiple target texts,
    # track a text->example_num mapping. This lets us handle such examples efficiently
    # by avoiding re-encoding the context for each target.
if batch[0].target_text is not None:
texts = []
mapping = []
for batch_ix, x in enumerate(batch):
texts += x.target_text
mapping += [batch_ix]*len(x.target_text)
out["target_mapping"] = torch.as_tensor(mapping, dtype=torch.long)
labels = self.tokenizer(
texts, return_tensors='pt', padding=True, truncation=True)
out["target_ids"] = labels["input_ids"]
else:
out["target_ids"] = None # For testing
# Encode any additional text context (e.g, question, hypothesis)
if batch[0].input_text is not None:
texts = [x.input_text for x in batch]
labels = self.tokenizer(
texts, return_tensors='pt', padding=True, truncation=True)
out["input_ids"] = labels["input_ids"]
out["input_attention_mask"] = labels["attention_mask"]
# Encode the image or text input
if self.encode_image:
images = []
for ex in batch:
with Image.open(image_utils.get_image_file(ex.image_id)) as f:
images.append(self.pre(f))
out["clip_images"] = torch.stack(images, 0)
elif isinstance(batch[0].image_text, str):
out["clip_text"] = clip.tokenize([x.image_text for x in batch], truncate=True)
out["clip_images"] = None
else:
# Multiple image target captions that will be averaged to get an input vector
texts = []
mapping = []
for batch_ix, x in enumerate(batch):
if isinstance(x.image_text, str):
texts.append(x.image_text)
mapping.append(batch_ix)
else:
texts += x.image_text
mapping += [batch_ix]*len(x.image_text)
out["clip_text"] = clip.tokenize(texts)
out["clip_images"] = None
out["input_average_mapping"] = torch.as_tensor(mapping, dtype=torch.long)
return out
@Model.register("clip-t5")
class ClipT5Model(Model):
@classmethod
def from_params(
cls, params: Params, constructor_to_call=None,
constructor_to_inspect=None, **extras
):
return super().from_params(params, constructor_to_call, constructor_to_inspect, **extras)
def __init__(
self, clip_model: str, t5_model_name: str, adapter: Layer,
language_shift: Layer=None, openai_clip=None, train_on_l: bool=True,
lowercase_target=False, caption_mode="other-target", average_vqa_caption=True):
super().__init__()
self.openai_clip = openai_clip
self.language_shift = language_shift
self.lowercase_target = lowercase_target
self.clip_model = clip_model
self.t5_model_name = t5_model_name
self.adapter = adapter
self.train_on_l = train_on_l
self.caption_mode = caption_mode
self.average_vqa_caption = average_vqa_caption
# Set during init
self._clip_model = None
self._clip_pre = None
self._t5_model = None
self.tokenizer = None
self.image_id_to_ix = None
# Prediction args
self.beam_search_spec = None
def initialize(self, load_params=True):
"""Initialize the model by constructing the need pytorch sub-modules
if `load_params` is true the pre-trained weights will be loaded, if false
the parameters will random (usually because a state_dict will be loaded)
"""
if self.openai_clip:
import open_clip
logging.info(f"Loading clip {self.clip_model}/{self.openai_clip}...")
model, _, preprocess = open_clip.create_model_and_transforms(
self.clip_model, pretrained=self.openai_clip)
else:
logging.info(f"Loading clip {self.clip_model}...")
model, preprocess = clip.load(self.clip_model)
# Store the CLIP parameters as non-persistent buffers so it
# doesn't take up space in the state_dict
replace_parameters(model, False)
clip_dim = CLIP_DIMS[self.clip_model]
self._clip_pre = preprocess
self._clip_model = model
for param in self._clip_model.parameters():
param.requires_grad = False
logging.info(f"Loading T5 {self.t5_model_name}...")
if load_params:
self._t5_model = T5ForConditionalGeneration.from_pretrained(self.t5_model_name)
else:
self._t5_model = T5ForConditionalGeneration(AutoConfig.from_pretrained(self.t5_model_name))
t5_dim = self._t5_model.encoder.config.d_model
print("DEBUG")
self.tokenizer = T5Tokenizer.from_pretrained(self.t5_model_name, local_files_only=True)
self.adapter.init(t5_dim, clip_dim)
def get_collate(self, is_train=False) -> Callable[[List], Dict[str, Any]]:
"""Get the collate function that should be used with this model"""
if is_train:
encode_image = not self.train_on_l
else:
encode_image = True
return Collate(self.tokenizer, self._clip_pre, encode_image)
def preprocess_example_train(self, ex) -> List[TrainingExample]:
"""Transform an input example into a general-purpose format we can collate"""
if isinstance(ex, CaptioningExample):
if not self.train_on_l:
out = [TrainingExample(ex.image_id, ex.captions, image_text=None)]
elif self.caption_mode == "1to1":
out = [TrainingExample(ex.image_id, [x], image_text=x) for x in ex.captions]
elif self.caption_mode == "other-target":
targets = ex.captions
out = []
for i, text in enumerate(targets):
out.append(TrainingExample(ex.image_id, targets[:i] + targets[i+1:], image_text=text))
else:
        raise NotImplementedError(self.caption_mode)
elif isinstance(ex, VisualEntailmentExample):
out = [TrainingExample(
ex.image_id, [ex.label], ex.hypothesis, image_text=ex.premise, example_id=ex.example_id)]
elif isinstance(ex, VqaExample):
target_text = []
if isinstance(ex.answers, Counter):
# Train on all answers that either occur more than 3 times, or are as common as the
# most common answer
on = None
for w, c in ex.answers.most_common():
if on is None:
target_text.append(w)
on = c
elif c == on or c >= 3:
target_text.append(w)
else:
break
else:
assert isinstance(ex.answers, str)
target_text = [ex.answers]
if isinstance(ex.image_text, str) or self.average_vqa_caption:
out = [TrainingExample(ex.image_id, target_text,
ex.question, ex.image_text, example_id=ex.example_id)]
else:
out = [TrainingExample(ex.image_id, target_text, ex.question, x, example_id=ex.example_id)
for x in ex.image_text]
elif isinstance(ex, VisualNewsExample):
extract_i = (self.train_on_l in {"never", "both"} or
(self.train_on_l in {"optional", "skip-lang"} and ex.image_id is not None))
out = [
TrainingExample(
example_id=ex.example_id,
image_id=ex.image_id if extract_i else None,
input_text=ex.article,
image_text=ex.caption,
target_text=[ex.caption])
]
else:
raise NotImplementedError()
if self.lowercase_target:
for ex in out:
ex.target_text = [x.lower() for x in ex.target_text]
return out
def preprocess_example(self, example) -> TrainingExample:
"""Preprocess a train example"""
if isinstance(example, CaptioningExample):
if example.captions:
# In case we are testing with text input
cap = example.captions[np.random.randint(0, len(example.captions))]
else:
cap = None
return TrainingExample(example_id=example.example_id, image_id=example.image_id,
target_text=None, input_text=None, image_text=cap)
elif isinstance(example, VqaExample):
return TrainingExample(example_id=example.example_id, image_id=example.image_id,
target_text=None, input_text=example.question,
image_text=example.image_text)
elif isinstance(example, VisualEntailmentExample):
return TrainingExample(example_id=example.example_id, image_id=example.image_id,
input_text=example.hypothesis,
target_text=None, image_text=example.premise)
elif isinstance(example, VisualNewsExample):
return TrainingExample(example_id=example.example_id,
image_id=example.image_id,
input_text=example.article,
image_text=example.caption,
target_text=None)
else:
raise NotImplementedError()
def _encode(self, clip_images, clip_text, input_ids, input_attention_mask,
input_average_mapping=None):
if clip_images is not None:
assert clip_text is None
with torch.no_grad():
image_fe = self._clip_model.encode_image(clip_images)
image_fe = image_fe.float()
image_fe = image_fe / image_fe.norm(dim=-1, keepdim=True)
clip_features = image_fe
else:
assert clip_images is None
with torch.no_grad():
text_fe = self._clip_model.encode_text(clip_text)
text_fe = text_fe.float()
if input_average_mapping is not None:
text_fe = pytorch_utils.segment_mean(text_fe, input_average_mapping)
text_fe = text_fe / text_fe.norm(dim=-1, keepdim=True)
text_fe = self.language_shift(text_fe)
clip_features = text_fe
clip_tokens = self.adapter(clip_features)
if input_ids is not None:
input_embed = self._t5_model.shared(input_ids)
input_embed, input_mask = pytorch_utils.concat_masked_sequences(
clip_tokens, input_embed, input_attention_mask)
else:
input_embed = clip_tokens
input_mask = None
encoding = self._t5_model.encoder(
inputs_embeds=input_embed,
return_dict=True
).last_hidden_state
return encoding, input_mask
def forward(
self, clip_images, clip_text, target_ids, target_mapping=None,
input_average_mapping=None, input_ids=None, input_attention_mask=None
) -> Tuple[torch.Tensor, Dict[str, float]]:
target_ids = target_ids.masked_fill(
target_ids == self.tokenizer.pad_token_id, -100)
encoder_out, input_mask = self._encode(clip_images, clip_text, input_ids,
input_attention_mask, input_average_mapping)
if target_mapping is not None:
encoder_out = encoder_out[target_mapping]
if input_mask is not None:
input_mask = input_mask[target_mapping]
out: Seq2SeqLMOutput = self._t5_model(
encoder_outputs=(encoder_out, ),
attention_mask=input_mask,
labels=target_ids,
return_dict=True
)
return out.loss, {}
def set_prediction_args(self, beam_search_spec: BeamSearchSpec):
self.beam_search_spec = beam_search_spec
def predict(self, clip_images=None, clip_text=None, target_ids=None, target_mapping=None,
input_ids=None, input_attention_mask=None):
enc, input_mask = self._encode(clip_images, clip_text, input_ids, input_attention_mask)
bs = self.beam_search_spec.build(self.tokenizer.eos_token_id)
decode_init = t5_initialize_decoding(
self.tokenizer, self._t5_model, enc, input_mask)
input_ids, logprobs = bs.search(*decode_init)
logprobs = logprobs.cpu().numpy()
input_ids = input_ids.cpu().numpy()
out_text = []
for batch in range(len(input_ids)):
text = [self.tokenizer.decode(x, skip_special_tokens=True) for x in input_ids[batch]]
out_text.append(text)
return [ExampleOutput(txt, p.tolist()) for txt, p in zip(out_text, logprobs)]
| close-main | close/model/clip_t5_model.py |
import pickle
from os.path import join, dirname
import torch
from allennlp.common import Params
from torch.distributions import multivariate_normal
from close import file_paths
from close.model.layers import Layer
from close.utils.py_utils import load_json_object
import numpy as np
from close.utils.pytorch_utils import replace_parameters
@Layer.register("normalize-layer")
class Normalize(Layer):
def forward(self, x):
return x / x.norm(dim=-1, keepdim=True)
@Layer.register("add-guassian-noise")
class AddGuassianNoise(Layer):
def __init__(self, scale: float, renormalize=True):
super().__init__()
self.scale = scale
self.renormalize = renormalize
def forward(self, x):
if self.training:
x = x + torch.randn_like(x)*self.scale
if self.renormalize:
x = x / x.norm(dim=-1, keepdim=True)
return x
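# Sketch of the behaviour: in training mode a unit-norm CLIP text embedding v becomes
# normalize(v + eps) with eps ~ N(0, scale^2 I) (when renormalize=True); in eval mode
# the input is returned unchanged.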
@Layer.register("mean-shift")
class Shift(Layer):
def __init__(self, src, scale, noise, renorm=False):
super().__init__()
self.renorm = renorm
self.src = src
self.noise = noise
self.scale = scale
src = join(file_paths.DATA_DIR, "clip-stats")
with open(join(src, f"{self.src}.pkl"), "rb") as f:
shift = pickle.load(f)
self.register_buffer("_shift", torch.as_tensor(shift*scale, dtype=torch.float), persistent=False)
def forward(self, x):
x = x + self._shift
if self.renorm:
x = x / x.norm(dim=-1, keepdim=True)
if self.training:
x = x + torch.randn_like(x)*self.noise
x = x / x.norm(dim=-1, keepdim=True)
return x
@Layer.register("random-shift")
@Layer.register("fixed-random-shift")
class FixedRandomShift(Layer):
def __init__(self, scale, noise, dim, seed=None, renorm=False):
super().__init__()
self.renorm = renorm
self.noise = noise
self.seed = seed
self.scale = scale
self.dim = dim
shift = np.random.randn(dim)
shift = scale * shift / np.linalg.norm(shift)
self.register_buffer("shift", torch.as_tensor(shift, dtype=torch.float, device=torch.device("cuda")), persistent=False)
def forward(self, x):
x = x + self.shift
if self.renorm:
x = x / x.norm(dim=-1, keepdim=True)
if self.training and self.noise:
x = x + torch.randn_like(x)*self.noise
x = x / x.norm(dim=-1, keepdim=True)
return x
@Layer.register("cov-noise")
class CovNoise(Layer):
def __init__(self, src, scale, shift=True, cov=True, version=2):
super().__init__()
self.version = version
self.src = src
self.cov = cov
self.shift = shift
self.scale = scale
src = join(file_paths.DATA_DIR, "clip-stats")
if self.shift:
with open(join(src, f"{self.src}-mean-diff.pkl"), "rb") as f:
shift = pickle.load(f)
self.register_buffer("_shift", torch.as_tensor(shift, dtype=torch.float), persistent=False)
if self.cov:
with open(join(src, f"{self.src}-cov-diff.pkl"), "rb") as f:
cov = pickle.load(f)
self.register_buffer("_cov", torch.as_tensor(cov, dtype=torch.float), persistent=False)
def forward(self, x):
if self.training:
device = x.device
if self.cov:
if self.shift:
x = x + torch.unsqueeze(self._shift, 0)
dist = multivariate_normal.MultivariateNormal(
loc=torch.zeros(x.shape[1], device=device), covariance_matrix=self._cov)
x = x + dist.sample(x.shape[:1])*3
else:
x = x + self._shift
x = x + torch.randn_like(x)*self.scale
x = x / x.norm(dim=-1, keepdim=True)
return x
@Layer.register("linear-adapter")
class LinearAdapter(Layer):
def __init__(self, src, noise, renorm=False):
super().__init__()
self.renorm = renorm
self.src = src
self.noise = noise
src = join(file_paths.DATA_DIR, "adapters")
with open(join(src, f"{self.src}.pkl"), "rb") as f:
coef, bias = pickle.load(f)
self.register_buffer("coef", torch.as_tensor(coef, dtype=torch.float), persistent=False)
self.register_buffer("bias", torch.as_tensor(bias, dtype=torch.float), persistent=False)
def forward(self, x):
x = torch.matmul(x, self.coef.T) + torch.unsqueeze(self.bias, 0)
if self.renorm:
x = x / x.norm(dim=-1, keepdim=True)
if self.training:
x = x + torch.randn_like(x)*self.noise
x = x / x.norm(dim=-1, keepdim=True)
return x
@Layer.register("coco-cap-mean-diff")
class CocoCapMeanDiff(Layer):
def __init__(self):
super().__init__()
with open(file_paths.COCO_CAP_MEAN_DIFF, "rb") as f:
data = pickle.load(f)
self.register_buffer("shift", torch.as_tensor(data), persistent=False)
def forward(self, x):
return x - self.shift
@Layer.register("trained-adapter")
class TrainedAdapter(Layer):
def __init__(self, src, noise=None):
super().__init__()
self.src = src
self.noise = noise
self.adapter = load_adapter(src)
replace_parameters(self.adapter, False)
def forward(self, x):
out = self.adapter(x)
if self.noise:
      out = out + torch.randn_like(out)*self.noise
return out
def load_adapter(src):
state = torch.load(join(src, "best-state.pth"))
model = load_json_object(join(dirname(src), "model.json"))
model = Layer.from_params(Params(model["aligner"]))
state = {k.split(".", 1)[1]: v for k, v in state.items() if k.startswith("aligner.")}
model.load_state_dict(state)
return model
| close-main | close/model/language_adapters.py |
from typing import Union, Optional, List, Callable, Any, Dict, Tuple
import torch
from allennlp.common import Registrable, FromParams
from allennlp.nn.beam_search import BeamSearch
from dataclasses import dataclass
from torch import nn
BEST_STATE_NAME = "best-state.pth"
@dataclass
class ExampleOutput:
text: List[str]
text_logprobs: List[float]
def set_beams_to_keep(self, n):
if self.text is None:
return self
return ExampleOutput(self.text[:n], self.text_logprobs[:n])
class BeamSearchSpec(FromParams):
"""Specifies how to do beam search"""
def __init__(self, beam_size, max_seq_len, per_node_beam_size=None, sampler=None):
self.beam_size = beam_size
self.per_node_beam_size = per_node_beam_size
self.sampler = sampler
self.max_seq_len = max_seq_len
def build(self, end_index) -> BeamSearch:
return BeamSearch(
end_index, self.max_seq_len, self.beam_size,
self.per_node_beam_size, self.sampler,
)
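# e.g. BeamSearchSpec(beam_size=5, max_seq_len=32).build(eos_id) returns an AllenNLP
# BeamSearch that decodes at most 32 steps while keeping 5 hypotheses per example
# (eos_id here is whatever end-of-sequence token id the model uses).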
class Model(nn.Module, Registrable):
"""Generic model API inherited from GPV 2, basically a pytorch module with additional
pre-processing and prediction APIs"""
def initialize(self, load_params=True):
"""Initialize the model by constructing all parameters and buffers needed
if `load_params` is false, the model should still set up all its parameters and buffers,
but does not need to fill them with initialized values (e.g., because it will load
those parameters from state dict).
"""
raise NotImplementedError()
def preprocess_example_train(self, example) -> List:
"""Convert a training example for a task into a pre-processed format
We support a one-to-many mapping for train examples
"""
# By default, use the general method
return [self.preprocess_example(example)]
def preprocess_example(self, example) -> Any:
"""Convert an eval example for a task into a pre-processed format"""
raise NotImplementedError()
def get_collate(self, is_train=False) -> Callable[[List], Dict[str, Any]]:
"""Function that maps pre-processed examples to tensors suitable for `forward`"""
raise NotImplementedError()
def forward(self, *args, **kwargs) -> Tuple[torch.Tensor, Dict[str, float]]:
"""Computes the loss and any scalars to log using the outputs of `self.get_collate()(batch)`
This is used during training.
"""
raise NotImplementedError()
def predict(self, *args, **kwargs) -> List:
"""Computes the test-time example outputs for a batch of examples"""
raise NotImplementedError()
def set_prediction_args(
self, *args: Union[str, int, float, BeamSearchSpec],
**kwargs: Union[str, int, float, BeamSearchSpec]
):
"""Sets parameters to be used during prediction"""
raise NotImplementedError()
| close-main | close/model/model.py |
from os.path import join, dirname
from typing import List, Dict, Any
import torch
from allennlp.common import Registrable, FromParams
from torch import nn
from close.utils import pytorch_utils
from close.utils.py_utils import load_json_object
from close.utils.to_params import to_params
class Layer(nn.Module, Registrable):
pass
@Layer.register("seq")
class Sequential(Layer, nn.Sequential):
def __init__(self, args: List[Layer]):
super(Sequential, self).__init__(*args)
def _to_params(self) -> Dict[str, Any]:
return dict(args=[to_params(x, Layer) for x in self])
@Layer.register("normalize")
class Normalize(Layer):
def forward(self, x):
return x / x.norm(dim=-1, keepdim=True)
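# Minimal usage sketch (illustrative): composing the registered layers with `Sequential`
# and `Normalize` projects feature vectors onto the unit sphere.
if __name__ == "__main__":
  demo = Sequential([Normalize()])
  vecs = torch.randn(2, 4)
  print(demo(vecs).norm(dim=-1))  # both norms should be ~1.0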
| close-main | close/model/layers.py |
import json
from collections.abc import Callable
from dataclasses import dataclass
from typing import Dict, Union
import torch
from allennlp.common import FromParams, Params
from torch.utils.data import DataLoader
from tqdm import tqdm
from close.model.load_model import load_model
from close.model.model import ExampleOutput, Model, BeamSearchSpec
from close.utils import pytorch_utils, py_utils
from close.utils.to_params import to_params_any
def prediction_args_to_json(prediction_args):
prediction_args_dict = {}
for k, v in prediction_args.items():
if isinstance(v, FromParams):
v = to_params_any(v, Union[BeamSearchSpec, float, int, str])
prediction_args_dict[k] = v
return prediction_args_dict
def save_example_output(output: Dict[str, ExampleOutput], output_dir):
predictions = {}
for key, out in output.items():
predictions[key] = dict(
answer=out.text,
probs=None if out.text_logprobs is None else out.text_logprobs.tolist()
)
with open(output_dir + "/predictions.json", "w") as f:
json.dump(predictions, f)
@dataclass
class CollateWithBatch(Callable):
collate: Callable
def __call__(self, batch):
return batch, self.collate(batch)
def build_per_example_output(examples, output, beams_to_keep=1):
out = {}
for ex, ex_out in zip(examples, output):
out[ex.example_id] = ex_out.set_beams_to_keep(beams_to_keep)
return out
def run(model, examples, device,
batch_size, num_workers, prediction_args, beams_to_keep=1,
desc="eval", nopbar=False):
if len(set(ex.example_id for ex in examples)) != len(examples):
raise ValueError("Repeated ids in examples")
if isinstance(model, str):
model = load_model(model, device=device)
model.set_prediction_args(**prediction_args)
loader = DataLoader(
[model.preprocess_example(x) for x in examples],
batch_size=batch_size,
collate_fn=CollateWithBatch(model.get_collate()),
num_workers=num_workers,
shuffle=False,
pin_memory=True
)
return run_model(model, loader, beams_to_keep, desc, nopbar,
prediction_args=prediction_args)
def run_model(
model, data_loader, beams_to_keep=1,
desc="eval", nopbar=False, model_device=None,
prediction_args=None
) -> Dict[str, ExampleOutput]:
if prediction_args is None:
prediction_args = {}
model.eval()
if model_device is None:
model_device = pytorch_utils.get_model_device(model)
model.set_prediction_args(**prediction_args)
if desc is None:
desc = "eval"
out = {}
if nopbar:
it = data_loader
else:
it = tqdm(data_loader, desc=desc, ncols=100)
for examples, batch in it:
batch = pytorch_utils.to_device(batch, model_device)
with torch.no_grad():
output = model.predict(**batch)
out.update(build_per_example_output(examples, output, beams_to_keep))
return out
| close-main | close/train/runner.py |
import torch
from typing import Dict, Tuple, List, Optional, Any, Union
from allennlp.common import Registrable
from dataclasses import dataclass
from torch.optim import AdamW, SGD, Optimizer
class OptimizerBuilder(Registrable):
"""Builds an Optimizer
  We use this class rather than using an Optimizer directly since it can be
serialized with FromParams, and can dynamically determine how to handle groups
of parameters depending on the model
"""
def build(self, model, epoch_size, n_epochs) -> Optimizer:
raise NotImplementedError()
class TrainingScheduleBuilder(Registrable):
"""Builds an learning rate schedule"""
def build(self, optimizer, num_steps, last_epoch):
raise NotImplementedError()
def _per_or_int_to_int(x, total):
if isinstance(x, int):
return x
return round(x*total)
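# e.g. _per_or_int_to_int(0.1, 200) -> 20 (a fraction of `total`), _per_or_int_to_int(5, 200) -> 5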
class DelayedWarmupSchedule:
def __init__(
self,
optimizer,
warmup: Union[int, float],
total: int,
decay="linear",
):
self.warmup = 0 if warmup is None else warmup
self.total = total
self.optimizer = optimizer
self.decay = decay
self._step = 0
for group in optimizer.param_groups:
group["initial_lr"] = group["lr"]
def state_dict(self):
return dict(step=self._step)
def load_state_dict(self, state):
self._step = state["step"]
def step(self):
self._step += 1
for group in self.optimizer.param_groups:
wu = group.get("warmup", self.warmup)
wu = _per_or_int_to_int(wu, self.total)
if self._step < wu:
factor = self._step / wu
else:
decay = group.get("decay", self.decay)
if decay == "linear":
factor = (self.total - self._step) / (self.total - wu)
elif decay is None or decay == "none":
factor = 1.0
else:
raise NotImplementedError()
group["lr"] = group["initial_lr"] * factor
@dataclass
@TrainingScheduleBuilder.register("delayed-warmup-linear")
class DelayedWarmupScheduleBuilder(TrainingScheduleBuilder):
warmup: Union[int, float, None] = 0
decay: Optional[str] = "linear"
def build(self, optimizer, num_steps, last_epoch):
return DelayedWarmupSchedule(optimizer, self.warmup, num_steps, self.decay)
@OptimizerBuilder.register("adam-w")
@dataclass
class AdamWBuilder(OptimizerBuilder):
lr: float
weight_decay: float = 0.0
betas: Tuple[float, float] = (0.9, 0.999)
def build(self, model: torch.nn.Module, epoch_size, n_epochs):
return AdamW(model.parameters(), lr=self.lr, weight_decay=self.weight_decay, betas=self.betas)
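# Minimal usage sketch (illustrative): wire an AdamW optimizer to the linear warmup/decay
# schedule above and watch the learning rate ramp up over the first warmup steps.
if __name__ == "__main__":
  tiny_model = torch.nn.Linear(4, 4)
  opt = AdamWBuilder(lr=1e-3).build(tiny_model, epoch_size=None, n_epochs=None)
  schedule = DelayedWarmupScheduleBuilder(warmup=10).build(opt, num_steps=100, last_epoch=0)
  for _ in range(5):
    schedule.step()
    print(opt.param_groups[0]["lr"])  # 1e-4, 2e-4, 3e-4, ...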
| close-main | close/train/optimizer.py |
import json
import logging
import os
import socket
from datetime import datetime
from os import makedirs
from os.path import join, exists
from time import perf_counter
from typing import List, Optional, Dict, Union
import numpy as np
import torch
from allennlp.common import FromParams, Params
from dataclasses import dataclass
from random import shuffle
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from tqdm import tqdm
from close.data.dataset import Dataset
from close.model.model import Model, BEST_STATE_NAME, BeamSearchSpec
from close.train.evaluator import Evaluator, ResultKey
from close.train.optimizer import OptimizerBuilder, TrainingScheduleBuilder
from close.train.runner import CollateWithBatch
from close.utils import py_utils, pytorch_utils
from close.utils.py_utils import dump_json_object
from close.utils.to_params import to_params
def select_subdir(output_dir, target=None):
prefix = "" if target is None else target + "-"
i = 0
while True:
candidate = join(output_dir, prefix + "r" + str(i))
if not exists(candidate):
try:
os.mkdir(candidate)
return candidate
except FileExistsError:
pass
i += 1
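# `select_subdir` example: successive runs of the same experiment are written to
# <output_dir>/r0, <output_dir>/r1, ... (or <output_dir>/<target>-r0, ... when `target` is given)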
@dataclass
class TrainerSimple(FromParams):
"""Class to run the training loop for our models"""
train_dataset: Dataset
"""Datast to train on"""
optimizer: OptimizerBuilder
"""Optimizer to use"""
epochs: int
"""Number of epochs to train on"""
batch_size: int
"""Batch size to train with"""
# Evaluation parameter
eval_dataset: Dataset = None
"""Evaluation dataset to evaluate on every epoch"""
evaluator: Evaluator = None
"""Evaluator to use for evaluations"""
prediction_args: Dict[str, Union[int, float, str, BeamSearchSpec]]=None
"""Test-time args (e.g., beam size) to use during evaluation"""
# Dataloader parameters
num_workers: int = 4
pin_memory: bool = True
# Additional optimization settings
scheduler: TrainingScheduleBuilder = None
"""Supports a learning rate shedule to adjust the learning rate each step"""
clip_grad_norm: Optional[float] = None
"""Do gradient norm clipping"""
# Saving the results
save_evaluation_results: bool = True
"""Should we save the evaluation results in json files"""
save_each_epoch: Union[int, List[int]] = True
"""Should we save the model each epoch"""
best_model_key: ResultKey = None
"""Keep track of the best model weights using this metrics from `self.evaluator`"""
# Cosmetic/Logging
tb_log: bool = True
"""Should we log to tensorboard"""
tb_log_intervals: int = 20
"""How often to log per-train-step metrics to tensorboard"""
log_lr: bool = True
"""Should the learning rate be logged to tensorboard"""
loss_logging_ema: float = 0.99
"""Decay factor for exponential moving average of the loss"""
monitor_ema: float = 0.99
"""Decay factor for exponential moving average of other metrics"""
eval_pbar: bool = True
"""Show a progress bar when evaluating"""
epoch_pbar: bool = True
"""Show a progress bar when training"""
def train(self, model: Model, output_dir: Optional[str],
device: Optional[int]=None, override=False):
"""Train a model
:param model: Model to train
    :param output_dir: directory to save results
:param device: GPU device to train on
:param override: Override `output_dir` if it exists
"""
if output_dir is not None:
logging.info(f"Initializing model dir {output_dir}")
py_utils.clear_if_nonempty(output_dir, override)
makedirs(output_dir, exist_ok=True)
Params(to_params(self)).to_file(join(output_dir, "trainer.json"))
Params(to_params(model, Model)).to_file(join(output_dir, "model.json"))
else:
logging.info(f"No output dir, model will not be saved")
return self._train(model, output_dir, device)
@staticmethod
def train_another_model(output_dir: str, device: Optional[int]=None, save=True):
"""Train another run of the model stored in `output_dir`
:param output_dir:
:param device: Devices to train on
:param save: Save the new run in `output_dir`, otherwise do not save anything
"""
logging.info(f"Starting another run for {output_dir}")
logging.info("Getting trainer/model")
with py_utils.DisableLogging():
trainer = TrainerSimple.from_params(Params.from_file(join(output_dir, "trainer.json")))
model = Model.from_params(Params.from_file(join(output_dir, "model.json")))
if not save:
logging.info("Save is false, so no results will be recorded")
output_dir = None
return trainer._train(model, output_dir, device)
def _train(self, model: Union[str, Model], output_dir, device):
"""Train with the output dir already initialized, and possibly a checkpoint file"""
if device is None:
device = pytorch_utils.get_device()
if output_dir is not None:
# Build the dir to save our configuration, log the metrics, and save the model state
logging.info("Initializing run dir")
run_dir = select_subdir(output_dir)
with open(join(run_dir, "runtime.json"), "w") as f:
json.dump(dict(
hostname=socket.gethostname(),
date=datetime.now().strftime("%m%d-%H%M%S"),
device=str(device)
), f, indent=2)
dump_json_object(dict(done=False), join(run_dir, "status.json"))
log_file = join(run_dir, "out.log")
record_log_handle = logging.FileHandler(log_file)
logging.getLogger().addHandler(record_log_handle)
else:
# Not saving anything to disk
run_dir = None
MAX_TRAIN_EXAMPLES = 200_000
MAX_EVAL_EXAMPLES = 20_000
logging.info(f"Initializing model on {device}")
model.initialize()
model.to(device)
if self.prediction_args is not None:
model.set_prediction_args(**self.prediction_args)
logging.info("Loading training data")
training_examples = self.train_dataset.load()
training_examples = py_utils.flatten_list(model.preprocess_example_train(x) for x in training_examples)
if self.eval_dataset is not None:
assert self.evaluator is not None
logging.info("Loading eval data")
eval_examples = self.eval_dataset.load()
logging.info("Preparing optimizers")
optimizer = self.optimizer.build(model, min(len(training_examples), MAX_TRAIN_EXAMPLES), self.epochs)
if self.scheduler is not None:
schedule = self.scheduler.build(optimizer, min(len(training_examples), MAX_TRAIN_EXAMPLES)*self.epochs, 0)
else:
schedule = None
# Other stuff we need to track during training for logging metrics
if run_dir and self.tb_log:
summary_writer = SummaryWriter(join(run_dir, "log"))
else:
summary_writer = None
best_saved_score = None
monitor_ema = {}
loss_ema = 0
global_step = 0
n_train = sum(p.requires_grad for p in model.parameters())
n_freeze = sum(not p.requires_grad for p in model.parameters())
logging.info(f"Have {n_train} params and {n_freeze} frozen parameters")
logging.info(f"Start training")
for epoch in range(0, self.epochs):
ep_start = perf_counter()
model.train()
# tqdm to get a progress bar for the DataLoader
shuffle(training_examples)
train_loader = DataLoader(
training_examples[:MAX_TRAIN_EXAMPLES], self.batch_size,
shuffle=True, num_workers=self.num_workers,
collate_fn=model.get_collate(True),
pin_memory=self.pin_memory,
)
pbar = tqdm(train_loader, disable=not self.epoch_pbar, ncols=100,
desc="loss=", total=len(train_loader))
for batch in pbar:
batch = pytorch_utils.to_device(batch, device)
loss, monitor = model(**batch)
loss.backward()
loss = loss.item()
if self.clip_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), self.clip_grad_norm)
optimizer.step()
if not np.isfinite(loss):
raise ValueError(f"non-finite foss {loss}")
        # Manually remove gradients, slightly faster than `optimizer.zero_grad`
for group in optimizer.param_groups:
for p in group['params']:
p.grad = None
global_step += 1
if schedule is not None:
# Have to call step manually
schedule.step()
for metric_name, metric_value in monitor.items():
# Compute the exponential moving average for the metrics
if metric_name not in monitor_ema:
monitor_ema[metric_name] = metric_value
to_show = metric_value
else:
cur = monitor_ema[metric_name]
ema = cur * self.monitor_ema + metric_value * (1 - self.monitor_ema)
monitor_ema[metric_name] = ema
to_show = (ema / (1 - self.monitor_ema ** global_step))
# Write to tensorboard
if summary_writer is not None and global_step % self.tb_log_intervals == 0:
summary_writer.add_scalar(f"train/{metric_name}", to_show, global_step)
# Compute the exponential moving average of the loss
loss_ema = loss_ema * self.loss_logging_ema + loss * (1 - self.loss_logging_ema)
corrected_loss_ema = (loss_ema / (1 - self.loss_logging_ema ** global_step))
# Set it to the pbar description
pbar.set_description("loss=%.4f" % corrected_loss_ema, refresh=False)
if summary_writer is not None and global_step % self.tb_log_intervals == 0:
# Write the loss to tensorboard
summary_writer.add_scalar("train/loss-smoothed", corrected_loss_ema, global_step)
summary_writer.add_scalar("train/loss", loss, global_step)
if self.log_lr:
            # Write the learning rate to tensorboard, useful for checking what the learning schedule is doing
for j, group in enumerate(optimizer.param_groups):
name = group.get("name", f"group_{j}")
summary_writer.add_scalar(f'lr/{name}', group["lr"], global_step)
ep_end = perf_counter()
logging.info(f"Epoch {epoch + 1} took {py_utils.duration_to_str(ep_end - ep_start)}, starting evaluation")
eval_start = perf_counter()
model.eval()
predictions = {}
shuffle(eval_examples)
eval_loader = DataLoader(
[model.preprocess_example(x) for x in eval_examples[:MAX_EVAL_EXAMPLES]],
batch_size=self.batch_size,
collate_fn=CollateWithBatch(model.get_collate()),
num_workers=self.num_workers,
shuffle=False,
pin_memory=self.pin_memory
)
it = tqdm(eval_loader, desc="eval", ncols=100, disable=not self.eval_pbar)
for examples, batch in it:
batch = pytorch_utils.to_device(batch, device)
with torch.no_grad():
output = model.predict(**batch)
for ex, out in zip(examples, output):
predictions[ex.get_example_id()] = out
results = self.evaluator.evaluate(eval_examples[:MAX_EVAL_EXAMPLES], predictions)
eval_end = perf_counter()
logging.info(f"Evaluation {epoch + 1} took {py_utils.duration_to_str(eval_end - eval_start)}")
for k, v in results.items():
if isinstance(v, float):
v = "%0.4f" % v
logging.info(f"{k}={v}")
if summary_writer:
        # Log evaluation results to tensorboard
summary_writer.add_scalar("time/train", ep_end-ep_start, epoch+1)
summary_writer.add_scalar("time/eval", eval_end - eval_start, epoch + 1)
for key, val in results.items():
summary_writer.add_scalar(str(key), val, global_step)
if self.best_model_key:
        # Check if this is the best model so far according to `self.best_model_key`,
# if so save as our best set of weights
score = results[self.best_model_key]
if best_saved_score is None or best_saved_score < score:
prefix = "Saving as" if run_dir else "Found"
if best_saved_score is None:
logging.info(f"{prefix} best model ({score:.5f}) ep={epoch+1}")
else:
logging.info(f"{prefix} best model ({score:.5f} > {best_saved_score:.5f}) ep={epoch+1}")
best_saved_score = score
if run_dir:
best_model_file = join(run_dir, BEST_STATE_NAME)
torch.save(model.state_dict(), best_model_file)
if run_dir is not None:
if self.save_each_epoch and (
(isinstance(self.save_each_epoch, list) and (epoch+1) in self.save_each_epoch) or
(isinstance(self.save_each_epoch, int) and (epoch+1) % self.save_each_epoch == 0)
):
state_file = join(run_dir, f"state-ep{epoch+1}.pth")
logging.info(f"Saving state as {state_file}")
torch.save(model.state_dict(), state_file)
| close-main | close/train/trainer.py |
import re
from collections import defaultdict, Counter
from numbers import Number
from typing import Optional, List, Dict, Any
import numpy as np
from allennlp.common import FromParams, Registrable, Params
from dataclasses import dataclass, replace
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.rouge.rouge import Rouge
from close.data.coco_captioning import CaptioningExample
from close.data.visual_news import VisualNewsExample
from close.data.vqa_v2 import VqaExample
from close.eval.vqa_eval import vqa_preprocess
from close.model.model import ExampleOutput
from close.utils import py_utils
from close.utils.quiet_ptbtokenizer import QuitePTBTokenizer
class SubmissionFileBuilder(Registrable):
def build(self, dataset, predictions, output_file):
raise NotImplementedError()
@dataclass(frozen=True)
class ResultKey(FromParams):
"""Key for a result from a model"""
metric_name: str
subset_name: Optional[str] = None
dataset_name: Optional[str] = None
def __str__(self):
out = [self.dataset_name, self.subset_name, self.metric_name]
return "/".join(x for x in out if x is not None)
def __repr__(self):
return str(self)
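# ResultKey example (illustrative): ResultKey("cider", dataset_name="coco-cap-val") renders as
# "coco-cap-val/cider"; fields left as None are simply omitted from the string.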
class Evaluator(Registrable):
"""Computes evaluations metrics"""
def evaluate(
self, examples: List, predictions: Dict[str, Any],
allow_partial=False, subset_mapping=None
) -> Dict[ResultKey, Number]:
"""Computes corpus wide metrics
:param examples: List of source examples
:param predictions: example key -> model output
    :param allow_partial: Allow the predictions to only cover a subset of `examples`,
                          in which case only those predictions are evaluated
:param subset_mapping: Function that maps example -> list of strings, names of the subsets that
example is part of
"""
raise NotImplementedError()
class PerExampleEvaluator(Evaluator):
"""Computes per-examples evaluations metrics"""
def evaluate_examples(self, examples: List, predictions: Dict[str, Any])-> List[Dict[str, Number]]:
raise NotImplementedError()
def evaluate(
self,
examples: List,
predictions: Dict[str, Any],
allow_partial=False,
mean=True,
subset_mapping=None
) -> Dict[ResultKey, Number]:
examples_with_predictions = [x for x in examples if x.get_example_id() in predictions]
if not allow_partial and (len(examples) != len(examples_with_predictions)):
raise ValueError(f"Only {len(examples_with_predictions)}/{len(examples)} "
f"of examples have predictions")
examples = examples_with_predictions
per_example_scores = self.evaluate_examples(examples, predictions)
per_metric_scores = py_utils.transpose_list_of_dicts(per_example_scores)
subsets = defaultdict(list)
all_ids = [x.get_example_id() for x in examples]
id_to_ix = {k: i for i, k in enumerate(all_ids)}
subsets[None] = list(range(len(all_ids)))
if subset_mapping is not None:
for example in examples:
example_id = id_to_ix[example.get_example_id()]
for subset in subset_mapping(example):
subsets[subset].append(example_id)
out = {}
for metric_name, score in per_metric_scores.items():
score = np.array(score)
for subset_name, ixs in subsets.items():
if mean:
out[ResultKey(metric_name, subset_name)] = float(np.mean(score[ixs]))
else:
out[ResultKey(metric_name, subset_name)] = (float(np.sum(score[ixs])), len(ixs))
return out
@Evaluator.register("cap-evaluator")
class CaptionEvaluator(Evaluator):
@classmethod
def from_params(
cls, params: Params, constructor_to_call=None,
constructor_to_inspect=None, **extras
):
if "per_caption" in params:
del params["per_caption"]
return super().from_params(params, constructor_to_call, constructor_to_inspect, **extras)
def __init__(self, cider=True, bleu=4):
self.cider = cider
self.bleu = bleu
scorers = {}
if cider:
# from exp.ours.eval.fast_cider import FastCider
scorers["cider"] = Cider()
if bleu:
scorers["bleu"] = Bleu(bleu)
self.scorers = scorers
self.tokenizer = QuitePTBTokenizer()
def evaluate(
self,
examples: List,
predictions: Dict[str, Any],
allow_partial=False,
subset_mapping=None,
):
examples_with_predictions = [x for x in examples if x.get_example_id() in predictions]
if not allow_partial and (len(examples) != len(examples_with_predictions)):
raise ValueError(f"Only {len(examples_with_predictions)}/{len(examples)} "
f"of examples have predictions")
examples = examples_with_predictions
subsets = defaultdict(list)
subsets[None] = examples
if subset_mapping is not None:
for example in examples:
example_subsets = subset_mapping(example)
for subset in example_subsets:
subsets[subset].append(example)
out = {}
for subset_name, examples in subsets.items():
all_scores = self._get_scores(examples, predictions)
results = {}
for name, scorer in self.scorers.items():
corpus_scores, _ = all_scores[name]
if isinstance(scorer, Cider):
results["cider"] = corpus_scores
elif isinstance(scorer, Bleu):
scores, _ = all_scores[name]
for i, score in enumerate(corpus_scores):
results[f"bleu{i+1}"] = score
if subset_name is not None:
results["n"] = len(examples)
out.update({ResultKey(metric_name=k, subset_name=subset_name): v for k, v in results.items()})
return out
def evaluate_examples(self, examples: List[CaptioningExample], predictions: Dict[str, Any]):
all_scores = self._get_scores(examples, predictions)
per_examples_scores = [{} for _ in examples]
for name, scorer in self.scorers.items():
score, scores = all_scores[name]
if isinstance(scorer, Cider):
for score, ex_scores in zip(scores, per_examples_scores):
ex_scores["cider"] = score
elif isinstance(scorer, Bleu):
scores = py_utils.transpose_lists(scores)
for score, ex_scores in zip(scores, per_examples_scores):
for i, s in enumerate(score):
ex_scores[f"bleu{i+1}"] = s
return per_examples_scores
def _get_scores(self, examples: List[CaptioningExample], predictions: Dict[str, Any]):
gts = {}
res = {}
for ix, instance in enumerate(examples):
key = instance.get_example_id()
assert key not in res
res[key] = [predictions[instance.get_example_id()].text[0]]
gts[key] = [x.lower() for x in instance.captions]
res = self.tokenizer.tokenize(res)
gts = self.tokenizer.tokenize(gts)
scores = {}
for name, scorer in self.scorers.items():
if isinstance(scorer, Bleu):
scores[name] = scorer.compute_score(gts, res, verbose=0)
else:
scores[name] = scorer.compute_score(gts, res)
return scores
def vqa_score(answer, ground_truth_answer_counts):
  normalized_answers = Counter()
  for k, v in ground_truth_answer_counts.items():
    normalized_answers[vqa_preprocess(k)] = v
  return min(normalized_answers.get(vqa_preprocess(answer), 0) / 3, 1)
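# `vqa_score` example (illustrative): with ground-truth counts {"2": 4, "blue": 6},
# vqa_score("two", ...) normalizes "two" -> "2" and returns min(4 / 3, 1) = 1.0, while
# vqa_score("red", ...) returns 0.0, mirroring the official VQA accuracy of
# min(#matching annotators / 3, 1).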
@Evaluator.register("vqa-evaluator")
class VqaEvaluator(PerExampleEvaluator):
def evaluate_examples(self, examples: List[VqaExample],
predictions: Dict[str, ExampleOutput], add_scores=False):
out = []
for example in examples:
answer = predictions[example.example_id].text[0]
score = vqa_score(answer, example.answers)
out.append(dict(score=score))
return out
@Evaluator.register("vis-news-evaluator")
class VisualNewsEvaluator(Evaluator):
@classmethod
def from_params(
cls, params: Params, constructor_to_call=None,
constructor_to_inspect=None, **extras
):
return super().from_params(params, constructor_to_call, constructor_to_inspect, **extras)
def __init__(self, cider=True, meteor=True, rouge=True, bleu=4):
self.cider = cider
self.meteor = meteor
self.rouge = rouge
self.bleu = bleu
scorers = {}
if cider:
scorers["cider"] = Cider()
if meteor:
scorers["meteor"] = Meteor()
if rouge:
scorers["rouge"] = Rouge()
if bleu:
scorers["bleu"] = Bleu(bleu)
self.scorers = scorers
self.tokenizer = QuitePTBTokenizer()
def evaluate(
self,
examples: List,
predictions: Dict[str, Any],
allow_partial=False,
subset_mapping=None,
):
examples_with_predictions = [x for x in examples if x.get_example_id() in predictions]
if not allow_partial and (len(examples) != len(examples_with_predictions)):
raise ValueError(f"Only {len(examples_with_predictions)}/{len(examples)} "
f"of examples have predictions")
examples = examples_with_predictions
subsets = defaultdict(list)
subsets[None] = examples
if subset_mapping is not None:
for example in examples:
example_subsets = subset_mapping(example)
for subset in example_subsets:
subsets[subset].append(example)
out = {}
for subset_name, examples in subsets.items():
all_scores = self._get_scores(examples, predictions)
results = {}
for name, scorer in self.scorers.items():
corpus_scores, _ = all_scores[name]
if isinstance(scorer, Cider):
results["cider"] = corpus_scores
elif isinstance(scorer, Meteor):
results["meteor"] = corpus_scores
elif isinstance(scorer, Rouge):
results["rouge"] = corpus_scores
elif isinstance(scorer, Bleu):
scores, _ = all_scores[name]
for i, score in enumerate(corpus_scores):
results[f"bleu{i+1}"] = score
if subset_name is not None:
results["n"] = len(examples)
out.update({ResultKey(metric_name=k, subset_name=subset_name): v for k, v in results.items()})
return out
def evaluate_examples(self, examples: List[VisualNewsExample], predictions: Dict[str, Any]):
all_scores = self._get_scores(examples, predictions)
per_examples_scores = [{} for _ in examples]
for name, scorer in self.scorers.items():
score, scores = all_scores[name]
if isinstance(scorer, Cider):
for score, ex_scores in zip(scores, per_examples_scores):
ex_scores["cider"] = score
elif isinstance(scorer, Bleu):
scores = py_utils.transpose_lists(scores)
for score, ex_scores in zip(scores, per_examples_scores):
for i, s in enumerate(score):
ex_scores[f"bleu{i+1}"] = s
return per_examples_scores
def _get_scores(self, examples: List[VisualNewsExample], predictions: Dict[str, Any]):
MAX_LOG_EXAMPLES = 0 # adjust this to list more examples
MAX_ARTICLE_LEN = 1_800
gts = {}
res = {}
for ix, instance in enumerate(examples):
key = instance.get_example_id()
assert key not in res
res[key] = [predictions[instance.get_example_id()].text[0]]
gts[key] = [instance.caption.lower()]
if ix < MAX_LOG_EXAMPLES:
print(f'example id: {instance.example_id}')
print(f'image id: {instance.image_id}')
print(f'news article: {instance.article[:MAX_ARTICLE_LEN]}\n')
print(f'target caption: {gts[key][0]}')
print(f'predicted caption: {res[key][0]}\n')
res = self.tokenizer.tokenize(res)
gts = self.tokenizer.tokenize(gts)
scores = {}
for name, scorer in self.scorers.items():
if isinstance(scorer, Bleu):
scores[name] = scorer.compute_score(gts, res, verbose=0)
else:
scores[name] = scorer.compute_score(gts, res)
return scores
@Evaluator.register("entailment-evaluator")
class EntailmentEvaluator(Evaluator):
def evaluate(
self,
examples: List,
predictions: Dict[str, Any],
allow_partial=False,
subset_mapping=None,
):
pass | close-main | close/train/evaluator.py |
"""Provides functions to use T5 in allennlp's BeamSearch
We use this instead of the HuggingFace transformers beam search mostly for legacy reasons,
since that is what the GPV-2 models used
"""
import torch
from torch.nn import functional as F
from close.utils import py_utils
def t5_initialize_decoding(tokenizer, model, encoder_out, encoder_mask, post_process=None):
batch_size = encoder_out.size(0)
device = encoder_out.device
initial_state = dict(
encoder_mask=encoder_mask,
encoder_outputs=encoder_out
)
def _decode_step(predictions, prev_state, time_step):
return _t5_decoding_step(model, predictions, prev_state, post_process, time_step)
initial_out = torch.full(
(batch_size,), tokenizer.pad_token_id, dtype=torch.long, device=device)
return initial_out, initial_state, _decode_step
def _t5_decoding_step(model, predictions, state, post_process, time_step):
past = py_utils.flat_to_nested_struct({k: v.contiguous() for k, v in state.items()
if isinstance(k, tuple)})
model_inputs = model.prepare_inputs_for_generation(
predictions.unsqueeze(1),
past=past, attention_mask=state["encoder_mask"],
encoder_outputs=(state["encoder_outputs"],),
use_cache=True)
out = model(**model_inputs, return_dict=True)
logits = out.logits
logits = logits.squeeze(1)
logits = F.log_softmax(logits, -1)
if post_process is not None:
logits = post_process(logits, model_inputs, time_step)
next_state = dict(
encoder_mask=state["encoder_mask"],
encoder_outputs=state["encoder_outputs"],
)
py_utils.nested_struct_to_flat(out.past_key_values, cur_dict=next_state)
return logits, next_state
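# Illustrative wiring sketch (assumes an allennlp `BeamSearch` built with the tokenizer's EOS id):
#   initial_out, initial_state, step_fn = t5_initialize_decoding(tokenizer, model, enc_out, enc_mask)
#   top_ids, log_probs = beam_search.search(initial_out, initial_state, step_fn)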
| close-main | close/train/allennlp_beamsearch.py |
import json
from datetime import datetime
from os.path import isdir
from typing import Dict, Any, Union
from close.model.model import ExampleOutput
from close.train.evaluator import Evaluator
from close.utils import py_utils
from close.utils.py_utils import load_json_object, dump_json_object
from close.utils.to_params import to_params
def save_evaluation(prefix_or_dir: str, evaluator: Evaluator, stats: Dict[str, Any]):
"""Save the results in `prefix_or_dir`
:param prefix_or_dir: Where to save the results
:param evaluator: Evaluator used, save for book-keeping purposes
  :param stats: The stats to save
"""
if isdir(prefix_or_dir) and not prefix_or_dir.endswith("/"):
prefix_or_dir += "/"
cache_file = prefix_or_dir + "eval.json"
to_save = {("all" if k.subset_name is None else k.subset_name) + "/" + k.metric_name: v
for k, v in stats.items()}
to_save = dict(
stats=to_save,
evaluator=to_params(evaluator, Evaluator),
date=datetime.now().strftime("%m%d-%H%M%S"),
version=6,
)
dump_json_object(to_save, cache_file)
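# Illustrative layout of the eval.json produced by `save_evaluation` (values are made up):
#   {"stats": {"all/cider": 0.93, "all/n": 5000}, "evaluator": {...}, "date": "0101-120000", "version": 6}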
def save_predictions(predictions: Dict[str, Union[Dict, ExampleOutput]], output_dir):
pred_dict = {}
for key, pred in predictions.items():
if isinstance(pred, ExampleOutput):
pred_dict[key] = dict(
text=pred.text,
text_logprobs=pred.text_logprobs
)
else:
pred_dict[key] = pred
with open(output_dir + "/predictions.json", "w") as f:
json.dump(pred_dict, f, cls=py_utils.NumpyArrayEncoder)
def load_predictions(file: str) -> Dict[str, ExampleOutput]:
pred = load_json_object(file)
return {k: ExampleOutput(v["text"], v["text_logprobs"]) for k, v in pred.items()}
| close-main | close/eval/evaluation.py |
import argparse
import json
import logging
import os
from typing import Union
import numpy as np
from l2v.data.coco_captioning import CocoCaptioning, CocoSCE
from l2v.data.dataset import Dataset
from l2v.data.visual_news import VisualNews
from l2v.data.vqa_e import EVQA
from l2v.data.vqa_v2 import Vqa2
from l2v.eval.evaluation import save_predictions, save_evaluation
from l2v.model.model import BeamSearchSpec
from l2v.train.evaluator import CaptionEvaluator, Evaluator, ResultKey, VqaEvaluator, VisualNewsEvaluator
from l2v.train.runner import prediction_args_to_json, run
from l2v.utils import py_utils, pytorch_utils
from l2v.utils.to_params import to_params
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from datetime import datetime
from os.path import join, exists, dirname
from shutil import rmtree
def get_default_seq_len(ds: Dataset) -> int:
if isinstance(ds, (CocoCaptioning, CocoSCE)):
return 30
if isinstance(ds, (Vqa2, EVQA)):
return 24
if isinstance(ds, (VisualNews)):
return 30
else:
raise NotImplementedError(f"No default lengths set for dataset {ds}")
def get_evaluator(ds: Dataset) -> Union[Evaluator, None]:
if isinstance(ds, (CocoCaptioning, CocoSCE)):
return CaptionEvaluator()
if isinstance(ds, (Vqa2, EVQA)):
return VqaEvaluator()
if isinstance(ds, (VisualNews)):
return VisualNewsEvaluator()
else:
raise ValueError()
def eval_on(args, run_dir, dataset, evaluator, prediction_args, devices, skip_existing=True):
if args.output_dir:
output_dir = args.output_dir
elif args.output_name is not None:
if args.output_name == "":
name = f"{dataset.get_name()}"
else:
name = f"{dataset.get_name()}--{args.output_name}"
eval_dir = join(run_dir, "eval")
if not exists(eval_dir):
os.mkdir(eval_dir)
output_dir = join(eval_dir, name)
else:
output_dir = None
if output_dir is not None:
if exists(output_dir):
if len(os.listdir(output_dir)) > 0:
if skip_existing:
logging.info(f"{output_dir} already exists, skipping")
return
if args.override or py_utils.get_yes_no(f"{output_dir} exists, delete (y/n)?"):
logging.info(f"Deleting {output_dir}")
rmtree(output_dir)
else:
logging.info("No override, not stopping")
return
elif not exists(dirname(output_dir)):
raise ValueError(f"Parent folder {dirname(output_dir)} does not exist")
else:
logging.info(f"Will save to {output_dir}")
else:
logging.info(f"Not saving the output")
if output_dir:
if not exists(output_dir):
os.mkdir(output_dir)
logging.info(f"Saving output to {output_dir}")
logging.info("Setting up...")
examples = dataset.load()
if args.dry_run:
logging.info("Skipping running the model since this is a dry run")
return
beams_to_keep = vars(args).get("beams_to_keep")
batch_size = args.batch_size
output = run(
run_dir, examples, devices, batch_size, args.num_workers,
prediction_args, beams_to_keep=beams_to_keep)
if output_dir is not None:
logging.info(f"Saving output to {output_dir}")
save_predictions(output, output_dir)
config = dict(
batch_size=batch_size,
num_workers=args.num_workers,
predictions_args=prediction_args_to_json(prediction_args),
dataset=to_params(dataset, Dataset),
beams_to_keep=beams_to_keep,
date=datetime.now().strftime("%m%d-%H%M%S"),
)
with open(output_dir + "/config.json", "w") as f:
json.dump(config, f, indent=2)
logging.info("Evaluating...")
if isinstance(evaluator, Evaluator):
results = evaluator.evaluate(examples, output, allow_partial=True, subset_mapping=None)
k = [k for k in results if k.metric_name == "n"]
if len(k) == 1:
del results[k[0]]
if output_dir is not None:
results[ResultKey("n", None)] = len(output)
logging.info(f"Caching evaluation to {output_dir}")
save_evaluation(output_dir, evaluator, results)
results = {str(k): v for k, v in results.items()}
print(json.dumps(results, indent=2))
elif evaluator is None:
logging.info(f"No evaluator for this data")
elif output_dir is not None:
submission_file = join(output_dir, "submission.json")
logging.info(f"Building submission file {submission_file}")
evaluator.build(dataset, output, submission_file)
def eval_generative_model(args, run_dir, dataset, devices, skip_existing=True):
prediction_args = {}
arg_dict = vars(args)
prediction_args["test_on_l"] = arg_dict.get("test_on_l", False)
if "image_cache" in arg_dict:
prediction_args["image_cache"] = arg_dict["image_cache"]
if arg_dict.get("max_seq_len"):
max_seq_len = arg_dict["max_seq_len"]
else:
max_seq_len = get_default_seq_len(dataset)
logging.info(f"Defaulting to max_seq_len {max_seq_len} for dataset {dataset.get_name()}")
if max_seq_len is not None:
bs = BeamSearchSpec(beam_size=args.beam_size, max_seq_len=max_seq_len)
else:
bs = None
prediction_args["beam_search_spec"] = bs
evaluator = get_evaluator(dataset)
eval_on(args, run_dir, dataset, evaluator, prediction_args, devices, skip_existing)
def main():
parser = argparse.ArgumentParser(description="Compute predictions for a GPV model")
parser.add_argument("model")
parser.add_argument("--data", default=["coco"], nargs="+")
parser.add_argument("--image_cache", default=None)
parser.add_argument("--device", nargs="+", default=[None], help="GPU devices to use")
parser.add_argument("--batch_size", type=int, default=30)
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--sample", type=int, default=None)
parser.add_argument("--beams_to_keep", type=int, default=5, help="Number of predictions to save")
parser.add_argument("--max_seq_len", type=int, default=None)
parser.add_argument("--beam_size", type=int, default=5)
parser.add_argument("--test_on_l", action="store_true")
parser.add_argument("--noeval", action="store_true", help="Evaluate the results")
parser.add_argument("--override", action="store_true", help="Delete output dir if it exists")
parser.add_argument("--output_dir", help="Save to this directory")
parser.add_argument("--output_name",
help="Save results in model/run/eval/{dataset_name}--{output_name}")
parser.add_argument("--dry_run", action="store_true")
args = parser.parse_args()
py_utils.add_stdout_logger()
if args.output_dir and args.output_name:
raise ValueError("Cannot specify output_name and output_dir")
models = py_utils.find_models(args.model, require_done=False)
if len(models) == 0:
logging.info("No models selected")
return
datasets = []
for ds in args.data:
if ds == "coco":
datasets.append(CocoCaptioning("val", sample=args.sample))
elif ds == "evqa":
datasets.append(EVQA("val", sample=args.sample))
elif ds == "vis-news":
datasets.append(VisualNews("test", sample=args.sample))
else:
raise RuntimeError(ds)
devices = pytorch_utils.get_devices(args.device)
if args.output_dir:
models = py_utils.flatten_list(x[1] for x in models.values())
if len(models) > 1:
raise ValueError("Cannot use one output dir if more than one model selected!")
model = models[0]
if len(datasets) > 1:
raise ValueError("Cannot use one output dir if more than one dataset is selected!")
if len(datasets) == 0:
raise ValueError("No datasets is selected!")
eval_generative_model(args, model, datasets[0], devices, skip_existing=False)
else:
targets = []
for model_name, (model_dir, runs) in models.items():
for ds in datasets:
for run_dir in runs:
targets.append((run_dir, ds))
if len(targets) == 0:
raise ValueError("No datasets to evaluate on found!")
for i, (run_dir, dataset) in enumerate(targets):
if len(targets) > 1:
logging.info(f"Evaluating on {run_dir} {dataset.get_name()} ({i+1}/{len(targets)})")
else:
logging.info(f"Evaluating on {run_dir} {dataset.get_name()}")
eval_generative_model(args, run_dir, dataset, devices, skip_existing=len(targets) > 1)
if __name__ == '__main__':
main()
| close-main | close/eval/compute_predictions.py |
"""VQA evaluation copied from the official VQA 2.0 eval script"""
import re
contractions = {
"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", \
"couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't", \
"hadnt've": "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": "haven't", "hed": "he'd", "hed've": "he'd've", \
"he'dve": "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", \
"Im": "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's", \
"maam": "ma'am", "mightnt": "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've", \
"mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't", \
"ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": "she'd've", \
"she's": "she's", "shouldve": "should've", "shouldnt": "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've", \
"somebody'd": "somebodyd", "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": "somebody'll", \
"somebodys": "somebody's", "someoned": "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've", \
"someonell": "someone'll", "someones": "someone's", "somethingd": "something'd", "somethingd've": "something'd've", \
"something'dve": "something'd've", "somethingll": "something'll", "thats": "that's", "thered": "there'd", "thered've": "there'd've", \
"there'dve": "there'd've", "therere": "there're", "theres": "there's", "theyd": "they'd", "theyd've": "they'd've", \
"they'dve": "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": "they've", "twas": "'twas", "wasnt": "wasn't", \
"wed've": "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": "weren't", "whatll": "what'll", "whatre": "what're", \
"whats": "what's", "whatve": "what've", "whens": "when's", "whered": "where'd", "wheres": "where's", "whereve": "where've", \
"whod": "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll", \
"whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've", \
"wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", \
"y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": "you'd", "youd've": "you'd've", "you'dve": "you'd've", \
"youll": "you'll", "youre": "you're", "youve": "you've"}
manualMap = {
'none': '0',
'zero': '0',
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '5',
'six': '6',
'seven': '7',
'eight': '8',
'nine': '9',
'ten': '10'}
articles = ['a','an','the']
punct = [
';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
periodStrip = re.compile(r"(?!<=\d)(\.)(?!\d)")
commaStrip = re.compile(r"(\d)(\,)(\d)")
def processPunctuation(inText):
outText = inText
for p in punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(commaStrip, inText) != None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = periodStrip.sub("",outText,re.UNICODE)
return outText
def processDigitArticle(inText):
outText = []
tempText = inText.lower().split()
for word in tempText:
word = manualMap.setdefault(word, word)
if word not in articles:
outText.append(word)
else:
pass
for wordId, word in enumerate(outText):
if word in contractions:
outText[wordId] = contractions[word]
outText = ' '.join(outText)
return outText
def vqa_preprocess(ans, cache={}):
if ans in cache:
return cache[ans]
original = ans
ans = ans.replace('\n', ' ')
ans = ans.replace('\t',' ')
ans = ans.lower().strip()
ans = processDigitArticle(processPunctuation(ans))
cache[original] = ans
return ans
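# Minimal sketch of the normalization (illustrative): punctuation is stripped, number words
# become digits, and articles are dropped, so e.g. "Two dogs!" and "2 dogs" compare equal.
if __name__ == "__main__":
  print(vqa_preprocess("Two dogs!"))    # -> "2 dogs"
  print(vqa_preprocess("A blue car."))  # -> "blue car"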
| close-main | close/eval/vqa_eval.py |
from dataclasses import dataclass, replace
from os.path import join
from typing import List, Optional, Union
import numpy as np
from collections import Counter
from close import file_paths
from close.data.coco_captioning import CocoCaptioning2014
from close.data.dataset import Dataset
from close.utils import image_utils, py_utils
from close.utils.py_utils import int_to_str, load_json_object
ANNOTATION_FILE_MAP = {
"train": [
"v2_OpenEnded_mscoco_train2014_questions.json",
"v2_mscoco_train2014_annotations.json"
],
"val": [
"v2_OpenEnded_mscoco_val2014_questions.json",
"v2_mscoco_val2014_annotations.json"
],
"test": [
"v2_OpenEnded_mscoco_test2015_questions.json",
None
]
}
@dataclass
class VqaExample:
example_id: str
question: str
image_id: str
question_type: str
answers: Counter
image_text: Union[None, str, List[str]] = None
multiple_choice_answer: str = None
answer_type: Optional[str] = None
def get_example_id(self):
return self.example_id
def _load(q_file, a_file, sample, subset) -> List[VqaExample]:
q_data = load_json_object(q_file)["questions"]
if sample:
q_data = sorted(q_data, key=lambda x: x["question_id"])
q_data = np.random.RandomState(613423).choice(q_data, sample, replace=False)
if a_file:
a_data = load_json_object(a_file)
anno_map = {}
for anno in a_data["annotations"]:
anno_map[anno["question_id"]] = anno
else:
anno_map = None
out = []
for q in q_data:
anno = None if anno_map is None else anno_map[q["question_id"]]
image_id = image_utils.get_coco_image_id(subset, q["image_id"])
out.append(VqaExample(
q["question_id"], q["question"], image_id,
question_type=None if anno is None else anno["question_type"],
answers=None if anno is None else Counter(x["answer"] for x in anno["answers"]),
image_text=None
))
return out
@Dataset.register("vqa-v2")
class Vqa2(Dataset):
def __init__(self, split, sample=None):
if split not in ANNOTATION_FILE_MAP:
raise ValueError()
self.split = split
self.sample = sample
def get_name(self) -> str:
name = f"vqa-{self.split}"
if self.sample is not None:
name += f"-s{int_to_str(self.sample)}"
return name
def load(self) -> List[VqaExample]:
q_file, a_file = ANNOTATION_FILE_MAP[self.split]
q_file = join(file_paths.VQA_ANNOTATIONS, q_file)
a_file = None if a_file is None else join(file_paths.VQA_ANNOTATIONS, a_file)
if self.split == "test":
subset = "test2015"
else:
subset = self.split + "2014"
return _load(q_file, a_file, self.sample, subset)
@Dataset.register("paired-vqa")
class VqaWithCaptions(Dataset):
def __init__(self, split, sample=None):
self.split = split
self.sample = sample
def get_name(self) -> str:
return "vqa2-cap"
def load(self) -> List:
if self.split == "trainval":
splits = ["train", "val"]
else:
splits = [self.split]
out = []
for split in splits:
vqa = Vqa2(split).load()
cap = CocoCaptioning2014(split).load()
image_id_to_cap = {x.image_id: x for x in cap}
for ex in vqa:
cap = image_id_to_cap[ex.image_id]
out.append(replace(ex, image_text=cap.captions))
return py_utils.subsample(out, self.sample, 281741)
| close-main | close/data/vqa_v2.py |
from dataclasses import dataclass
from os.path import isfile, join
from typing import List
import numpy as np
from close import file_paths
from close.data.dataset import Dataset
from close.utils.py_utils import int_to_str, load_json_object
@dataclass
class VisualNewsExample:
example_id: str
caption: str
image_id: str
article: str
def get_example_id(self):
return self.example_id
def _load(file, sample):
data = load_json_object(file)
if sample:
data = sorted(data, key=lambda x: x["id"])
if sample < 1:
sample = int(len(data) * sample)
data = np.random.RandomState(613423).choice(data, sample, replace=False)
out = []
for d in data:
a_path = join(file_paths.VISUAL_NEWS, d["article_path"][2:])
i_path = join(file_paths.VISUAL_NEWS, d["image_path"][2:])
if not isfile(a_path) or not isfile(i_path):
continue
with open(a_path, 'r') as f:
article = f.readlines()[0]
out.append(VisualNewsExample(
example_id=d["id"],
caption=d["caption"],
# NB: The split depends on the image file path. Mine looks like
# `/home/sophiag/data/visual_news/origin/bbc/images/0013/600.jpg`,
# and `split("/", 6)[-1]` gives me `bbc/images/0013/600.jpg`.
image_id=f'visual_news/{i_path.split("/", 6)[-1]}',
article=article
))
return out
@Dataset.register("visual-news")
class VisualNews(Dataset):
def __init__(self, split, sample=None):
if split not in {'train', 'val', 'test'}:
raise ValueError()
self.split = split
self.sample = sample
def get_name(self) -> str:
name = f"visual-news-{self.split}"
if self.sample is not None:
name += f"-s{int_to_str(self.sample)}"
return name
def load(self) -> List[VisualNewsExample]:
file = join(file_paths.VISUAL_NEWS, f'{self.split}_data.json')
return _load(file, self.sample)
if __name__ == '__main__':
print(len(VisualNews("val").load()))
| close-main | close/data/visual_news.py |
from typing import List
from allennlp.common import Registrable
class Dataset(Registrable):
"""Dataset we can train/evaluate on"""
def get_name(self) -> str:
"""Get the name of the dataset that uniquely identifies it"""
raise NotImplementedError()
def load(self) -> List:
"""Loads the examples"""
raise NotImplementedError()
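# Minimal sketch (illustrative) of how concrete datasets plug in via allennlp's `Registrable`:
#
#   @Dataset.register("my-toy-dataset")
#   class ToyDataset(Dataset):
#     def get_name(self) -> str:
#       return "toy"
#     def load(self) -> List:
#       return []
#
# Registered names (e.g. "coco-cap", "vqa-v2") can then be built from configuration files
# through `Dataset.from_params`.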
| close-main | close/data/dataset.py |
import json
import logging
from collections import defaultdict
from dataclasses import dataclass
from os.path import join
from typing import Optional, Dict, Any, List
from close import file_paths
from close.data.dataset import Dataset
from close.utils import image_utils, py_utils
from close.utils.py_utils import int_to_str
@dataclass(frozen=True)
class CaptioningExample:
example_id: str
image_id: Optional[str]
captions: List[str]
meta: Optional[Dict[str, Any]] = None
@Dataset.register("coco-kp-cap")
class CocoCaptioningKP(Dataset):
def __init__(self, split, sample=None):
self.split = split
self.sample = sample
def get_name(self) -> str:
name = f"coco-cap-kp-{self.split}"
if self.sample is not None:
name += f"-s{int_to_str(self.sample)}"
return name
def load(self) -> List:
with open(join(file_paths.COCO_SOURCE, "dataset_coco.json")) as f:
data = json.load(f)["images"]
examples = [x for x in data if x["split"] == self.split]
out = []
for ex in examples:
image_id = join("coco", ex["filepath"], ex["filename"])
out.append(CaptioningExample(
f"coco-cap-{ex['imgid']}'", image_id,
[x["raw"] for x in ex["sentences"]],
))
return py_utils.subsample(out, self.sample, 96810)
ANNOTATION_FILE_MAP = {
"train": "captions_train2014.json",
"val": "captions_val2014.json",
"test": "captions_test2014.json"
}
def _load(caption_file, sample=None):
logging.info(f"Loading captioning data from {caption_file}")
with open(caption_file) as f:
data = json.load(f)
subset = caption_file.split("_")[1].split(".")[0]
assert subset in {"train2014", "val2014"}
image_id_to_cap = defaultdict(list)
for anno in data["annotations"]:
image_id_to_cap[anno["image_id"]].append(anno)
image_ids = image_id_to_cap
if sample:
image_ids = py_utils.subsample(image_ids, sample, 613423, lambda x: x)
out = []
for image_id in image_ids:
caps = image_id_to_cap[image_id]
cap_objects = []
for cap in caps:
cap_objects.append(cap['caption'])
out.append(CaptioningExample(
f"coco-cap-{image_id}",
image_utils.get_coco_image_id(subset, image_id),
cap_objects
))
return out
@Dataset.register("coco-cap")
class CocoCaptioning2014(Dataset):
def __init__(self, split, sample=None):
if split not in ANNOTATION_FILE_MAP:
raise ValueError()
self.split = split
self.sample = sample
def get_name(self) -> str:
name = f"coco-cap-{self.split}"
if self.sample is not None:
name += f"-s{int_to_str(self.sample)}"
return name
def load(self) -> List[CaptioningExample]:
return _load(join(file_paths.COCO_ANNOTATIONS, ANNOTATION_FILE_MAP[self.split]), self.sample)
| close-main | close/data/coco_captioning.py |
import json
from dataclasses import dataclass
from os.path import join
from typing import Dict, List, Any, Optional
from close import file_paths
from close.data.dataset import Dataset
from close.utils import py_utils
from close.utils.py_utils import int_to_str
@dataclass
class VisualEntailmentExample:
example_id: str
image_id: Optional[str]
label: str
hypothesis: str
premise: str
@Dataset.register("snli-ve")
class VisualEntailment(Dataset):
def __init__(self, split, sample=None, use_images=True):
self.split = split
self.sample = sample
self.use_images = use_images
def get_name(self) -> str:
if self.use_images:
text = "snli"
else:
text = "snli-ve"
text += f"-{self.split}"
if self.sample is not None:
text += f"-s{int_to_str(self.sample)}"
return text
def load(self):
out = []
split = self.split
if split == "val":
split = "dev"
src = join(file_paths.SNLI_VE_HOME, f"snli_ve_{split}.jsonl")
with open(src) as f:
lines = f.readlines()
lines = py_utils.subsample(lines, self.sample, 132124)
for line in lines:
example = json.loads(line)
image_id = "flicker30k/" + example["Flickr30K_ID"] + ".jpg"
out.append(VisualEntailmentExample(
example_id="snli-ve/" + example["pairID"],
image_id=image_id if self.use_images else None,
label=example["gold_label"],
hypothesis=example["sentence2"],
premise=example["sentence1"]
))
return out
| close-main | close/data/visual_entailment.py |
import json
import logging
from collections import Counter
from os.path import join
from typing import List
from close import file_paths
from close.data.dataset import Dataset
from close.data.vqa_v2 import VqaExample
from close.utils import py_utils
from close.utils.image_utils import get_coco_image_id
from close.utils.py_utils import int_to_str
@Dataset.register("evqa")
class EVQA(Dataset):
def __init__(self, split, sample=None, load_answer_types=True):
self.sample = sample
self.split = split
self.load_answer_types = load_answer_types
def get_name(self) -> str:
name = f"evqa-{self.split}"
if self.sample is not None:
name += f"-s{int_to_str(self.sample)}"
return name
def load(self) -> List:
e_file = join(file_paths.VQAE, f"VQA-E_{self.split}_set.json")
logging.info(f"Loading {e_file}")
with open(e_file) as f:
data = json.load(f)
answer_types = {}
if self.load_answer_types:
with open(join(file_paths.VQA_ANNOTATIONS, f"v2_mscoco_{self.split}2014_annotations.json")) as f:
annotations = json.load(f)
for anno in annotations["annotations"]:
key = tuple(sorted(x["answer"] for x in anno["answers"]))
assert key not in annotations
answer_types[key] = anno["answer_type"]
out = []
for ix, ex in enumerate(data):
out.append(VqaExample(
example_id=f'image{ex["img_id"]}-id{ix}',
question=ex["question"],
answers=Counter(ex["answers"]),
multiple_choice_answer=ex["multiple_choice_answer"],
question_type=ex["question_type"],
answer_type=answer_types[tuple(sorted(ex["answers"]))],
image_text=ex["explanation"][0].strip(),
image_id=get_coco_image_id(self.split + "2014", ex["img_id"])
))
return py_utils.subsample(out, self.sample, 16914)
| close-main | close/data/vqa_e.py |
import json
import re
def main():
# todo: @@SEP@@ to ; , @@#@@ to #
predictions_file = "old_data_dev_low_level_preds.json"
    target_file = predictions_file.replace('.json', '.csv')
with open(predictions_file, "r") as fd:
preds = [json.loads(line) for line in fd.readlines()]
preds = [re.sub(r'@@(\d+)@@', '#\g<1>', re.sub('@@SEP@@',';', ' '.join(p['predicted_tokens'][0]))) for p in preds]
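    # e.g. predicted tokens ["return", "@@1@@", "@@SEP@@", "filter", "@@2@@"]
    # become the string "return #1 ; filter #2"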
preds.insert(0,'prediction')
preds = [f'"{p}"\n' for p in preds]
with open(traget_file, "wt") as fd:
fd.writelines(preds)
if __name__ == '__main__':
main() | break-evaluator-master | allennlp_preds_format.py |
import networkx as nx
from queue import Queue, deque
def has_cycle(graph: nx.DiGraph):
try:
nx.find_cycle(graph, orientation='original')
return True
    except nx.NetworkXNoCycle:
        return False
def get_graph_levels(graph: nx.DiGraph):
"""
Find graph level for each node
level[node] := 0 if the node has no successors
level[node] := max[over successors s](level[s])+1
:param graph: directed graph with no cycles
:return: (nodes_level, levels) tuple where:
nodes_level: dictionary of <node_id>:<level:int>
levels: dictionary of <level:int>:[<node_id>]
"""
updated_nodes = Queue()
# first layer
leafs = [n_id for n_id in graph.nodes if not any(graph.successors(n_id))]
nodes_levels = {n_id: 0 for n_id in leafs}
updated_nodes.queue = deque(leafs)
# update predecessors
while not updated_nodes.empty():
n_id = updated_nodes.get()
low_bound = nodes_levels[n_id] + 1
if low_bound > graph.number_of_nodes():
raise ValueError("Cyclic graphs are not allowed")
for s_id in graph.predecessors(n_id):
if nodes_levels.get(s_id, -1) < low_bound:
nodes_levels[s_id] = low_bound
updated_nodes.put(s_id)
levels = {}
for n_id, l in nodes_levels.items():
levels[l] = levels.get(l, []) + [n_id]
return nodes_levels, levels
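# Minimal usage sketch (illustrative): for the chain a -> b -> c, the sink `c` gets level 0,
# `b` level 1 and `a` level 2, and the graph contains no cycle.
if __name__ == "__main__":
    g = nx.DiGraph()
    g.add_edges_from([("a", "b"), ("b", "c")])
    print(has_cycle(g))         # False
    print(get_graph_levels(g))  # ({'c': 0, 'b': 1, 'a': 2}, {0: ['c'], 1: ['b'], 2: ['a']})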
| break-evaluator-master | utils/graph.py |
from __future__ import print_function
import sys
import threading
try:
import thread
except ImportError:
import _thread as thread
def quit_function(fn_name):
print('{0} took too long'.format(fn_name), file=sys.stderr)
sys.stderr.flush()
# raises KeyboardInterrupt
thread.interrupt_main()
def exit_after(s):
"""
use as decorator to exit process if
function takes longer than s seconds
"""
def outer(fn):
def inner(*args, **kwargs):
timer = threading.Timer(s, quit_function, args=[fn.__name__])
timer.start()
try:
result = fn(*args, **kwargs)
finally:
timer.cancel()
return result
return inner
return outer
| break-evaluator-master | utils/timeout.py |
from time import sleep
from utils.timeout import exit_after
@exit_after(5)
def countdown(n):
print('countdown started', flush=True)
for i in range(n, -1, -1):
print(i, end=', ', flush=True)
sleep(1)
print('countdown finished')
if __name__ == "__main__":
try:
countdown(10)
except KeyboardInterrupt:
print('timeout!')
| break-evaluator-master | utils/timeout_test.py |
from typing import Dict, Tuple
import numbers
from itertools import zip_longest
import argparse
import os
import random
import re
import numpy as np
import pandas as pd
import json
from evaluation.decomposition import Decomposition
from evaluation.graph_matcher import GraphMatchScorer, get_ged_plus_scores
from evaluation.sari_hook import get_sari
from evaluation.sequence_matcher import SequenceMatchScorer
from evaluation.normal_form.normalized_graph_matcher import NormalizedGraphMatchScorer
import evaluation.normal_form.normalization_rules as norm_rules
pd.set_option('display.max_colwidth', None)
def evaluate(ids, questions, decompositions, golds, metadata,
output_path_base,
metrics=None):
decompositions_str = [d.to_string() for d in decompositions]
golds_str = [g.to_string() for g in golds]
# calculating exact match scores
exact_match = get_exact_match(decompositions_str, golds_str) \
if (metrics is None) or 'exact_match' in metrics else None
# evaluate using SARI
sari = get_sari_score(decompositions_str, golds_str, questions) \
if (metrics is None) or 'sari' in metrics else None
# evaluate using sequence matcher
match_ratio = get_match_ratio(decompositions_str, golds_str) \
if (metrics is None) or 'match' in metrics else None
structural_match_ratio = get_structural_match_ratio(decompositions_str, golds_str) \
if (metrics is None) or 'structural_match' in metrics else None
# evaluate using graph distances
graph_scorer = GraphMatchScorer()
decomposition_graphs = [d.to_graph() for d in decompositions]
gold_graphs = [g.to_graph() for g in golds]
ged_scores = graph_scorer.get_edit_distance_match_scores(decomposition_graphs, gold_graphs)
# structural_ged_scores = graph_scorer.get_edit_distance_match_scores(decomposition_graphs, gold_graphs,
# structure_only=True)
# ged_plus_scores = get_ged_plus_scores(decomposition_graphs, gold_graphs,
# exclude_thr=5, num_processes=num_processes)
# calculate normalized match scores
normalize_scorer = NormalizedGraphMatchScorer()
def try_invoke(func, graph, default=None):
try:
return func(graph)
except Exception as ex:
return default
decomposition_norm_graphs = [try_invoke(normalize_scorer.normalize_graph, g, default=g) for g in
decomposition_graphs]
decomposition_norm_str = [try_invoke(lambda x: Decomposition.from_graph(x).to_string(), g) for g in
decomposition_norm_graphs]
gold_norm_graphs = [try_invoke(normalize_scorer.normalize_graph, g, default=g) for g in gold_graphs]
gold_norm_str = [try_invoke(lambda x: Decomposition.from_graph(x).to_string(), g) for g in gold_norm_graphs]
normalized_exact_match = skip_none(get_exact_match, decomposition_norm_str, gold_norm_str) \
if (metrics is None) or 'normalized_exact_match' in metrics else None
normalized_sari = skip_none(get_sari_score, decomposition_norm_str, gold_norm_str, questions) \
if (metrics is None) or 'normalized_sari' in metrics else None
normalized_match_ratio = skip_none(get_match_ratio, decomposition_norm_str, gold_norm_str) \
if (metrics is None) or 'normalized_match' in metrics else None
normalized_structural_match_ratio = skip_none(get_structural_match_ratio, decomposition_norm_str, gold_norm_str) \
if (metrics is None) or 'normalized_structural_match' in metrics else None
evaluation_dict = {
"id": ids,
"question": questions,
"gold": golds_str,
"prediction": decompositions_str,
"exact_match": exact_match,
"match": match_ratio,
"structural_match": structural_match_ratio,
"sari": sari,
"ged": ged_scores,
# "structural_ged": structural_ged_scores,
# "ged_plus": ged_plus_scores,
"normalized_exact_match": normalized_exact_match,
"normalized_match": normalized_match_ratio,
"normalized_structural_match": normalized_structural_match_ratio,
"normalized_sari": normalized_sari,
}
evaluation_dict = {k: v for k, v in evaluation_dict.items() if v is not None}
num_examples = len(questions)
print_first_example_scores(evaluation_dict, min(5, num_examples))
mean_scores = print_score_stats(evaluation_dict)
if output_path_base:
write_evaluation_output(output_path_base, num_examples, **evaluation_dict)
        ### Additionally, write the mean scores to a json file
write_evaluation_results(mean_scores)
if metadata is not None:
#metadata = metadata[metadata["question_text"].isin(evaluation_dict["question"])]
metadata = metadata[metadata['question_id'].isin(evaluation_dict['id'])]
metadata["dataset"] = metadata["question_id"].apply(lambda x: x.split("_")[0])
metadata["num_steps"] = metadata["decomposition"].apply(lambda x: len(x.split(";")))
score_keys = [key for key in evaluation_dict if key not in ["id", "question", "gold", "prediction"]]
for key in score_keys:
metadata[key] = evaluation_dict[key]
for agg_field in ["dataset", "num_steps"]:
df = metadata[[agg_field] + score_keys].groupby(agg_field).agg("mean")
print(df.round(decimals=3))
return mean_scores
def skip_none(func, *args, **kwargs):
zipped = list(zip_longest(*args))
none_ids = [i for i, x in enumerate(zipped) if None in x]
args_ = tuple([x for i,x in enumerate(a) if i not in none_ids] for a in args)
res = func(*args_, **kwargs)
combined = []
none_i = 0
res_i = 0
for i in range(len(zipped)):
if none_i < len(none_ids) and (i == none_ids[none_i]):
combined.append(None)
none_i += 1
else:
combined.append(res[res_i])
res_i += 1
return combined
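# Note (added for clarity): skip_none applies `func` only at positions where no
# argument is None and re-inserts None at the skipped positions, e.g. (made-up values)
#   skip_none(get_exact_match, ["a", None, "b"], ["a", "x", "b"])
# evaluates get_exact_match(["a", "b"], ["a", "b"]) and returns [True, None, True].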
def get_exact_match(decompositions_str:[str], golds_str:[str]):
return [d.lower() == g.lower() for d, g in zip(decompositions_str, golds_str)]
def get_sari_score(decompositions_str: [str], golds_str: [str], questions: [str]):
sources = [q.split(" ") for q in questions]
predictions = [d.split(" ") for d in decompositions_str]
targets = [[g.split(" ")] for g in golds_str]
sari, keep, add, deletion = get_sari(sources, predictions, targets)
return sari
def get_match_ratio(decompositions_str: [str], golds_str: [str]):
sequence_scorer = SequenceMatchScorer(remove_stop_words=False)
return sequence_scorer.get_match_scores(decompositions_str, golds_str,
processing="base")
def get_structural_match_ratio(decompositions_str: [str], golds_str: [str]):
sequence_scorer = SequenceMatchScorer(remove_stop_words=False)
return sequence_scorer.get_match_scores(decompositions_str, golds_str,
processing="structural")
def print_first_example_scores(evaluation_dict, num_examples):
for i in range(num_examples):
print("evaluating example #{}".format(i))
for k,v in evaluation_dict.items():
if isinstance(v[i], numbers.Number):
print("\t{}: {}".format(k, round(v[i], 3)))
else:
print("\t{}: {}".format(k, v[i]))
def print_score_stats(evaluation_dict):
    skipped_samples = {}
mean_scores = {}
print("\noverall scores:")
for key in evaluation_dict:
# ignore keys that do not store scores
if key in ["id", "question", "gold", "prediction"]:
continue
score_name, scores = key, evaluation_dict[key]
# ignore examples without a score
if None in scores:
scores_ = [score for score in scores if score is not None]
            skipped_samples[key] = len(scores) - len(scores_)
else:
scores_ = scores
mean_score, max_score, min_score = np.mean(scores_), np.max(scores_), np.min(scores_)
print("{} score:\tmean {:.3f}\tmax {:.3f}\tmin {:.3f}".format(
score_name, mean_score, max_score, min_score))
mean_scores[score_name] = mean_score
    for score, skipped in skipped_samples.items():
        print(f"skipped {skipped} examples when computing {score}.")
return mean_scores
def write_evaluation_output(output_path_base, num_examples, **kwargs):
# write evaluation summary
with open(output_path_base + '_summary.tsv', 'w') as fd:
fd.write('\t'.join([key for key in sorted(kwargs.keys())]) + '\n')
for i in range(num_examples):
fd.write('\t'.join([str(kwargs[key][i]) for key in sorted(kwargs.keys())]) + '\n')
# write evaluation scores per example
df = pd.DataFrame.from_dict(kwargs, orient="columns")
df.to_csv(output_path_base + '_full.tsv', sep='\t', index=False)
def write_evaluation_results(mean_scores):
# write mean evaluation scores
# leaderboard results must be in results/metrics.json
with open('/results/metrics.json', 'w') as json_file:
json.dump(mean_scores, json_file)
def format_qdmr(input:str):
# replace multiple whitespaces with a single whitespace.
input = ' '.join(input.split())
# replace semi-colons with @@SEP@@ token, remove 'return' statements.
parts = input.split(';')
parts = [re.sub(r'return', '', part.strip().strip('\r')) for part in parts]
# replacing references with special tokens, for example replacing #2 with @@2@@.
    parts = [re.sub(r'#(\d+)', r'@@\g<1>@@', part) for part in parts]
return Decomposition(parts)
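# A minimal illustrative helper (added; not part of the original script). The QDMR
# string below is made up; it shows how 'return' prefixes are stripped and '#k'
# references are rewritten as '@@k@@'. The helper is never called.
def _format_qdmr_example():
    return format_qdmr("return flights ;return #1 from boston").to_string()
    # -> 'flights @@SEP@@ @@1@@ from boston'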
def main(args):
# load data
try:
metadata = pd.read_csv(args.dataset_file)
ids = metadata["question_id"].to_list()
questions = metadata["question_text"].to_list()
golds = [format_qdmr(decomp) for decomp in metadata["decomposition"].to_list()]
except Exception as ex:
raise ValueError(f"Could not load dataset file {args.dataset_file}", ex)
# load predictions
try:
preds_file = pd.read_csv(args.preds_file)
predictions = [format_qdmr(pred) for pred in preds_file['decomposition'].to_list()]
except Exception as ex:
raise ValueError(f"Could not load predictions file {args.preds_file}", ex)
assert len(golds) == len(predictions), "mismatch number of gold questions and predictions"
if args.random_n and len(golds) > args.random_n:
indices = random.sample(range(len(ids)), args.random_n)
ids = [ids[i] for i in indices]
questions = [questions[i] for i in indices]
golds = [golds[i] for i in indices]
predictions = [predictions[i] for i in indices]
if not args.no_cache:
norm_rules.load_cache(args.dataset_file.replace(".csv", "__cache"))
res = evaluate(ids=ids,
questions=questions,
golds=golds,
decompositions=predictions,
metadata=metadata,
output_path_base=args.output_file_base,
metrics=args.metrics)
if not args.no_cache:
norm_rules.save_cache(args.dataset_file.replace(".csv", "__cache"))
return res
def validate_args(args):
# input question(s) for decomposition are provided.
assert args.preds_file and args.dataset_file
# input files exist.
if args.dataset_file:
assert os.path.exists(args.dataset_file)
if args.preds_file:
assert os.path.exists(args.preds_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="evaluate QDMR predictions")
parser.add_argument('--dataset_file', type=str, help='path to dataset file')
parser.add_argument('--preds_file', type=str, help='path to a csv predictions file, with "prediction" column')
parser.add_argument('--random_n', type=int, default=0,
help='choose n random examples from input file')
parser.add_argument('--no_cache', action='store_true',
help="don't cache dependency parsing on normalized metrics")
parser.add_argument('--output_file_base', type=str, default=None, help='path to output file')
    parser.add_argument('--metrics', nargs='+', default=['exact_match', 'sari', 'ged', 'normalized_exact_match'], help='list of metrics to compute')
args = parser.parse_args()
validate_args(args)
res = main(args)
# rename for AllenAI leader board
map = {'exact_match': 'EM', 'normalized_exact_match': 'norm_EM', 'sari': 'SARI', 'ged': 'GED'}
res = {map.get(k, k): v for k,v in res.items()}
print(res) | break-evaluator-master | scripts/evaluate_predictions.py |
from pathlib import Path
import os
import argparse
import traceback
import pandas as pd
import re
from enum import Enum
DELIMITER = ';'
REF = '#'
pd.set_option('display.max_colwidth', -1)
def parse_decomposition(qdmr):
"""Parses the decomposition into an ordered list of steps
Parameters
----------
qdmr : str
String representation of the QDMR
Returns
-------
list
returns ordered list of qdmr steps
"""
crude_steps = qdmr.split(DELIMITER)
steps = []
for i in range(len(crude_steps)):
step = crude_steps[i]
tokens = step.split()
step = ""
# remove 'return' prefix
for tok in tokens[1:]:
step += tok.strip() + " "
step = step.strip()
steps += [step]
return steps
def extract_position_relations(qdmr_step):
"""Extract a relation regarding entity positions
in a QDMR step. Relevant for VQA data
Parameters
----------
qdmr_step : str
        string of the QDMR step containing relative position knowledge.
        Either a FILTER or BOOLEAN step.
Returns
-------
str
string of the positional relation.
"""
if ' left ' in qdmr_step:
return 'POS_LEFT_OF'
elif ' right ' in qdmr_step:
return 'POS_RIGHT_OF'
elif (' between ' in qdmr_step) or (' middle of ' in qdmr_step):
return 'POS_BETWEEN'
elif (' behind ' in qdmr_step) or (' rear of ' in qdmr_step):
return 'POS_BEHIND_OF'
elif (' in ' in qdmr_step and ' front ' in qdmr_step) or \
(' infront ' in qdmr_step):
return 'POS_IN_FRONT_OF'
elif ' touch' in qdmr_step:
return 'POS_TOUCHING'
elif ' reflect' in qdmr_step:
return 'POS_REFLECTING'
elif (' cover' in qdmr_step) or (' obscur' in qdmr_step) or \
(' blocking' in qdmr_step) or (' blocked' in qdmr_step) or \
(' hidden' in qdmr_step) or (' obstruct' in qdmr_step):
return 'POS_COVERS'
elif (' near' in qdmr_step) or (' close ' in qdmr_step) or \
(' closer ' in qdmr_step) or (' closest ' in qdmr_step) or \
(' next to ' in qdmr_step) or (' adjacent ' in qdmr_step):
return 'POS_NEAR'
else:
return None
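# Illustrative examples (added; the step strings are made up):
#   extract_position_relations("#1 that is left of #2")  -> 'POS_LEFT_OF'
#   extract_position_relations("#1 from boston")         -> None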
### Code for QDMR step identifier:
class QDMROperation(Enum):
FIND, SELECT, FILTER, PROJECT, AGGREGATE, GROUP, SUPERLATIVE, COMPARATIVE, UNION, \
INTERSECTION, DISCARD, SORT, BOOLEAN, ARITHMETIC, COMPARISON, NONE = range(16)
def op_name(qdmr_op):
return {
QDMROperation.FIND: 'FIND',
QDMROperation.SELECT: 'SELECT',
QDMROperation.FILTER: 'FILTER',
QDMROperation.PROJECT: 'PROJECT',
QDMROperation.AGGREGATE: 'AGGREGATE',
QDMROperation.GROUP: 'GROUP',
QDMROperation.SUPERLATIVE: 'SUPERLATIVE',
QDMROperation.COMPARATIVE: 'COMPARATIVE',
QDMROperation.UNION: 'UNION',
QDMROperation.INTERSECTION: 'INTERSECTION',
QDMROperation.DISCARD: 'DISCARD',
QDMROperation.SORT: 'SORT',
QDMROperation.BOOLEAN: 'BOOLEAN',
QDMROperation.ARITHMETIC: 'ARITHMETIC',
QDMROperation.COMPARISON: 'COMPARISON',
QDMROperation.NONE: 'NONE'
    }.get(qdmr_op, 'NONE')
def step_type(step, is_high_level):
"""
Maps a single QDMR step into relevant its operator type
Parameters
----------
step : str
String representation a single QDMR step
is_high_level : bool
Flag whether or not we include the high level FIND steps,
associated with RC datasets
Returns
-------
QDMROperation
returns the type of QDMR operation of the step
"""
step = step.lower()
references = extract_references(step)
if len(references) == 0:
# SELECT step - no references to previous steps
return QDMROperation.SELECT
# Discrete QDMR step types:
if len(references) == 1:
# AGGREGATION step - aggregation applied to one reference
aggregators = ['number of', 'highest', 'largest', 'lowest', 'smallest', 'maximum', 'minimum', \
'max', 'min', 'sum', 'total', 'average', 'avg', 'mean ']
for aggr in aggregators:
aggr_ref = aggr + ' #'
aggr_of_ref = aggr + ' of #'
if (aggr_ref in step) or (aggr_of_ref in step):
return QDMROperation.AGGREGATE
if 'for each' in step:
# GROUP step - contains term 'for each'
return QDMROperation.GROUP
if len(references) >= 2 and len(references) <= 3 and ('where' in step):
# COMPARATIVE step - '#1 where #2 is at most three'
comparatives = ['same as', 'higher than', 'larger than', 'smaller than', 'lower than', \
'more', 'less', 'at least', 'at most', 'equal', 'is', 'are', 'was', 'contain', \
'include', 'has', 'have', 'end with', 'start with', 'ends with', \
'starts with', 'begin']
for comp in comparatives:
if comp in step:
return QDMROperation.COMPARATIVE
if step.startswith('#') and ('where' in step) and len(references) == 2:
# SUPERLATIVE step - '#1 where #2 is highest/lowest'
superlatives = ['highest', 'largest', 'most', 'smallest', 'lowest', 'smallest', 'least', \
'longest', 'shortest', 'biggest']
for s in superlatives:
if s in step:
return QDMROperation.SUPERLATIVE
if len(references) > 1:
# UNION step - '#1, #2, #3, #4' / '#1 or #2' / '#1 and #2'
        is_union = re.search(r"^[#\s]+[and0-9#or,\s]+$", step) or \
                   re.search(r"^both[#\s]+[and0-9#,\s]+$", step)
if is_union:
return QDMROperation.UNION
if len(references) > 1 and ('both' in step) and ('and' in step):
# INTERSECTION step - 'both #1 and #2'
return QDMROperation.INTERSECTION
if (len(references) >= 1) and (len(references) <= 2) and \
            (re.search(r"^[#]+[0-9]+[\s]+", step) or re.search(r"[#]+[0-9]+$", step)) and \
('besides' in step or 'not in' in step):
# DISCARD step - '#2 besides X'
return QDMROperation.DISCARD
if ('sorted by' in step) or ('order by' in step) or ('ordered by' in step):
# SORT step - '#1 ordered/sorted by #2'
return QDMROperation.SORT
if step.lower().startswith('if ') or step.lower().startswith('is ') or \
step.lower().startswith('are '):
# BOOLEAN step - starts with either 'if', 'is' or 'are'
return QDMROperation.BOOLEAN
if step.lower().startswith('which') and len(references) > 1:
# COMPARISON step - 'which is better A or B or C'
return QDMROperation.COMPARISON
if len(references) >= 1 and ('and' in step or ',' in step):
# ARITHMETIC step - starts with arithmetic operation
arithmetics = ['sum', 'difference', 'multiplication', 'division']
for a in arithmetics:
if step.startswith(a) or step.startswith('the ' + a):
return QDMROperation.ARITHMETIC
# Non-discrete QDMR step types:
    if len(references) == 1 and re.search(r"[\s]+[#]+[0-9\s]+", step):
# PROJECT step - 'property of #2'
return QDMROperation.PROJECT
if len(references) == 1 and step.startswith("#"):
# FILTER step - '#2 [condition]'
return QDMROperation.FILTER
if len(references) > 1 and step.startswith("#"):
# FILTER step - '#2 [relation] #3'
if extract_position_relations(step) != None:
# check if relation is a valid positional relation
return QDMROperation.FILTER
if is_high_level:
return QDMROperation.FIND
return QDMROperation.NONE
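# Illustrative examples (added; step strings are made up and already have the
# 'return' prefix removed, as produced by parse_decomposition):
#   step_type("flights from boston", False)  -> QDMROperation.SELECT
#   step_type("number of #1", False)         -> QDMROperation.AGGREGATE
#   step_type("#1 from boston", False)       -> QDMROperation.FILTER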
def extract_references(step):
"""Extracts list of references to previous steps
Parameters
----------
step : str
String representation of a QDMR step
Returns
-------
list
returns list of ints of referenced steps
"""
# make sure decomposition does not contain a mere '# ' rather than reference.
step = step.replace("# ", "hashtag ")
# replace ',' with ' or'
step = step.replace(", ", " or ")
references = []
l = step.split(REF)
for chunk in l[1:]:
if len(chunk) > 1:
ref = chunk.split()[0]
ref = int(ref)
references += [ref]
if len(chunk) == 1:
ref = int(chunk)
references += [ref]
return references
# %%
#
# qdmr = "return professionals ;return cities of #1 ;return #1 where #2 contain substring 'West ;return roles of #3 ;return streets of #3 ;return cities of #3 ;return states of #3 ;return #4 , #5 , #6 , #7"
# steps = parse_decomposition(qdmr)
# print(steps)
# for step in steps:
# print(step)
# op_num = step_type(step, True)
# print(op_name(op_num))
# %%
class ExecQDMR:
"""
Class representing executable QDMR operation.
"""
def __init__(self, op_type, op_string, prev_refs_code):
"""Creaing a new executable QDMR operation
Parameters
----------
op_type : QDMROperation
relevant operation type
op_string : str
string of the QDMR operation
prev_refs_code : dict
            dictionary where each key has the code of the referenced steps
of the current QDMR operation
"""
self.op_type = op_type
self.op_string = op_string
self.prev_refs_code = prev_refs_code
self.arguments = self.get_op_arguments()
def __str__(self):
op_code = ''
args = self.arguments
op_type = self.op_type
op_code += op_name(op_type) + '('
# add the operator arguments
for arg in args:
op_code += arg + ','
# remove final comma
op_code = op_code[:-1]
op_code += ')'
return op_code
def get_op_arguments(self):
"""Extract the operator arguments from the op string and
the previous references
Returns
-------
list
returns list of operator arguments
"""
args = []
op_string = self.op_string.lower()
op_type = self.op_type
prev_refs_code = self.prev_refs_code
if op_type == QDMROperation.SELECT:
# extract the entities to select
args += [op_string]
elif op_type == QDMROperation.FILTER:
if extract_position_relations(op_string) != None:
# positional relation FILTER: "#1 [relation] #2"
relation = extract_position_relations(op_string)
args += [relation]
refs = extract_references(op_string)
for ref_num in refs:
prev_ref_code = prev_refs_code[ref_num]
args += [prev_ref_code]
else:
# extract the reference code and the filter condition
ref_num = extract_references(op_string)[0]
prev_ref_code = prev_refs_code[ref_num]
condition = op_string.split('#' + str(ref_num))[1]
args += [prev_ref_code, condition]
elif op_type == QDMROperation.PROJECT:
# extract the projection attribute and the reference code
ref_num = extract_references(op_string)[0]
prev_ref_code = prev_refs_code[ref_num]
projection = op_string.split('#' + str(ref_num))[0].strip()
# remove 'of' from the projection
if projection.endswith('of'):
projection = projection[:-2].strip()
args += [projection, prev_ref_code]
elif op_type == QDMROperation.AGGREGATE:
# extract the aggregate and the reference code
ref_num = extract_references(op_string)[0]
prev_ref_code = prev_refs_code[ref_num]
aggr = extract_aggregator(op_string)
args += [aggr, prev_ref_code]
elif op_type == QDMROperation.GROUP:
# extract the group operator, its values and the grouping key
aggr = extract_aggregator(op_string)
# split the step to the aggregated data (prefix) and key (suffix)
data, key = op_string.split('for each')
data_ref = extract_references(data)
key_ref = extract_references(key)
# check if both parts actually contained references
if data_ref == []:
data_code = data.split()[-1]
else:
data_code = prev_refs_code[data_ref[0]]
if key_ref == []:
key_code = key.split()[-1]
else:
key_code = prev_refs_code[key_ref[0]]
args += [aggr, data_code, key_code]
elif op_type == QDMROperation.SUPERLATIVE:
            # extract whether it is a min or max superlative
aggr = extract_aggregator(op_string)
entity, attribute = extract_references(op_string)
entity_code = prev_refs_code[entity]
attribute_code = prev_refs_code[attribute]
args += [aggr, entity_code, attribute_code]
elif op_type == QDMROperation.COMPARATIVE:
# extract comparator, numeric, entity and attribute
comparator = extract_comparator_expr(op_string)
if comparator != 'FILTER':
# numeric appears as the suffix of the comparative step
comp_expr = op_string.split()[-1]
else:
# if FILTER comparator, return entire suffix condition
refs = extract_references(op_string)
condition_ref = '#' + str(refs[1])
comp_expr = op_string.split(condition_ref)[1].strip()
if extract_references(comp_expr) != []:
# numeric expression is itself a reference (e.g., average of)
refs = extract_references(comp_expr)
assert (len(refs) == 1)
ref = refs[0]
comp_expr = prev_refs_code[ref]
# extract the references
# if contains 'where':
if 'where #' in op_string:
prefix, suffix = op_string.split('where')
entity = extract_references(prefix)[0]
attribute = extract_references(suffix)[0]
            # step does not follow the standard 'where' format
else:
entity, attribute = extract_references(op_string)
entity_code = prev_refs_code[entity]
attribute_code = prev_refs_code[attribute]
args += [entity_code, attribute_code, comparator, comp_expr]
elif op_type == QDMROperation.UNION:
# add all previous references as the union arguments
refs = extract_references(op_string)
for ref in refs:
args += [prev_refs_code[ref]]
elif op_type == QDMROperation.INTERSECTION:
            intersect_expr = None
            expressions = ['of both', 'in both', 'by both']
            for expr in expressions:
                if expr in op_string:
                    intersect_expr = expr
            projection, intersection = op_string.split(intersect_expr)
# add the projection of the intersection, e.g.,
# "return x in both #1 and #2"
args += [projection]
# add all previous references as the intersection arguments
refs = extract_references(intersection)
for ref in refs:
args += [prev_refs_code[ref]]
elif op_type == QDMROperation.DISCARD:
# DISCARD either has the form '#x besides #y' / '#x besides something' / 'something besides #x'
refs = extract_references(op_string)
if len(refs) == 2:
# '#x besides #y'
# return the two references the source set, and the discarded set
src, discard = extract_references(op_string)
src_code = prev_refs_code[src]
discard_code = prev_refs_code[discard]
if len(refs) == 1:
# '#x besides something' / 'something besides #x'
prefix, suffix = op_string.split('besides')
pref_refs = extract_references(prefix)
if len(pref_refs) > 0:
# '#x besides something'
src = pref_refs[0]
src_code = prev_refs_code[src]
discard_code = suffix
else:
# 'something besides #x'
src_code = prefix
discard = extract_references(suffix)[0]
discard_code = prev_refs_code[discard]
args += [src_code, discard_code]
elif op_type == QDMROperation.SORT:
sorted_data_code = None
# check type of sort expression
sort_expr = None
for expr in ['ordered by', 'order by', 'sorted by']:
if expr in op_string:
sort_expr = expr
assert (sort_expr != None)
# split the sort step
prefix, suffix = op_string.split(sort_expr)
# extract the data to sort
data_refs = extract_references(prefix)
# union of data to sort
if len(data_refs) > 1:
refs_list = ['#' + str(ref) for ref in data_refs]
union_string = ' or '.join(refs_list)
sorted_data_union_code = ExecQDMR(QDMROperation.UNION, union_string, prev_refs_code)
sorted_data_code = sorted_data_union_code
else:
data_ref = data_refs[0]
sorted_data_code = prev_refs_code[data_ref]
args += [sorted_data_code]
# extract the sorting attribute
sort_refs = extract_references(suffix)
for ref in sort_refs:
args += [prev_refs_code[ref]]
sort_order = None
if len(sort_refs) == 0:
# no order reference only text 'alphabetical order'
sort_order = suffix
else:
# extract the order text by removing the references
final_ref = str(sort_refs[-1])
sort_order = suffix.split('#' + final_ref)[1].strip()
# add the sorting order
args += [sort_order]
elif op_type == QDMROperation.BOOLEAN:
# no boolean steps in Spider dataset
###################!!!!!!!!!!!!!!!!!
# "if/is/are [condition]"
# extract the boolean condition
condition = op_string.split()[1:]
condition = ' '.join(condition).strip()
# extract the condition comparator type
refs = extract_references(condition)
is_positional = extract_position_relations(condition)
# check if boolean condition is numeric
comparator_type = extract_comparator_expr(condition)
if is_positional != None and len(refs) > 1:
# condition regards positional relations
condition_type = extract_position_relations(condition)
args += [condition_type]
for ref_num in refs:
arg_code = prev_refs_code[ref_num]
args += [arg_code]
elif comparator_type != 'FILTER' and len(refs) <= 2:
# numeric comparator
first_arg = prev_refs_code[refs[0]]
# numeric appears as the suffix of the comparative step
comp_expr = op_string.split()[-1]
if extract_references(comp_expr) != []:
# numeric expression is itself a reference (e.g., average of)
refs = extract_references(comp_expr)
assert (len(refs) == 1)
ref = refs[0]
comp_expr = prev_refs_code[ref]
second_arg = comp_expr
args += [comparator_type, first_arg, second_arg]
elif ('there ' in condition) and ('any ' in condition) and \
(len(refs) == 1):
# existential boolean condition - "are there any #2"
ref = extract_references(condition)[0]
assert (condition.endswith('#' + str(ref)))
items = prev_refs_code[ref]
args += ['EXIST', items]
elif condition.endswith(' the same') and (len(refs) == 1) and \
(condition.startswith('all ') or condition.startswith('#') or \
condition.startswith('both ')):
# distinction boolean - "are all #1 the same"
ref = refs[0]
items = prev_refs_code[ref]
args += ['SAME', items]
else:
# FILTER condition
return False
elif op_type == QDMROperation.ARITHMETIC:
arithmetics = ['sum', 'difference', 'multiplication', 'division']
arithmetic = None
for op in arithmetics:
if op in op_string:
arithmetic = op
assert (arithmetic != None)
args += [arithmetic]
# extract the arguments of the artithmetic op
refs = extract_references(op_string)
for ref in refs:
args += [prev_refs_code[ref]]
# arithmetic with constant number
# "difference of 100 and #1"
if len(refs) == 1:
prefix, suffix = op_string.split('and')
numeric_expr = prefix.split()[-1] if (extract_references(prefix) == []) else suffix.split()[0]
elif op_type == QDMROperation.COMPARISON:
# which is lowest of #1, #2
comparison = extract_aggregator(op_string, True)
assert (comparison in ['MIN', 'MAX'])
args += [comparison]
# extract entities to be compared
refs = extract_references(op_string)
for ref in refs:
args += [prev_refs_code[ref]]
else:
return False
self.arguments = args
return self.arguments
def to_sql(self):
return "foo"
def extract_comparator_expr(comparative_step):
"""Extract comparator and numeric expression
of a comparative QDMR step
Parameters
----------
comparative_step : str
string of the QDMR comparative step
Returns
-------
str
returns string representation of the comparator expression
"""
comparator = None
if 'at least' in comparative_step:
comparator = '>='
elif 'at most' in comparative_step:
comparator = '=<'
elif ('more' in comparative_step) or \
('higher' in comparative_step) or ('larger' in comparative_step):
comparator = '>'
elif ('less' in comparative_step) or \
('smaller' in comparative_step) or ('lower' in comparative_step):
comparator = '<'
elif ('not ' in comparative_step) and (('same as' in comparative_step) or \
('equal' in comparative_step) or ('is' in comparative_step) or \
('was' in comparative_step) or ('are' in comparative_step)):
comparator = '!='
elif ('not ' not in comparative_step) and (('same as' in comparative_step) or \
('equal' in comparative_step) or ('is' in comparative_step) or \
('was' in comparative_step) or ('are' in comparative_step)) and \
('any' not in comparative_step):
comparator = '='
elif ('contain' in comparative_step):
comparator = 'CONTAINS'
else:
comparator = 'FILTER'
return comparator
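# Illustrative examples (added; the step strings are made up):
#   extract_comparator_expr("#1 where #2 is higher than 10")  -> '>'
#   extract_comparator_expr("#1 where #2 contain oak")        -> 'CONTAINS'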
def extract_aggregator(aggregate_step, include_boolean=False):
"""Extract aggregator type from QDMR aggregate step string
Parameters
----------
aggregate_step : str
string of the QDMR aggregate step.
include_boolean : bool
flag whether to include true/false as operators.
used in COMPARISON operators.
Returns
-------
str
string of the aggregate operation (sum/max/min/average/count).
"""
if 'number of' in aggregate_step:
return 'COUNT'
elif ('max' in aggregate_step) or ('highest' in aggregate_step) or \
('largest' in aggregate_step) or ('most' in aggregate_step) or \
('longest' in aggregate_step) or ('biggest' in aggregate_step) or \
('more' in aggregate_step) or ('last' in aggregate_step) or \
('longer' in aggregate_step) or ('higher' in aggregate_step) or \
('larger' in aggregate_step):
return 'MAX'
elif ('min' in aggregate_step) or ('lowest' in aggregate_step) or \
('smallest' in aggregate_step) or ('least' in aggregate_step) or \
('shortest' in aggregate_step) or ('less' in aggregate_step) or \
('first' in aggregate_step) or ('shorter' in aggregate_step) or \
('lower' in aggregate_step) or ('fewer' in aggregate_step) or \
('smaller' in aggregate_step):
return 'MIN'
elif ('sum' in aggregate_step) or ('total' in aggregate_step):
return 'SUM'
elif ('average' in aggregate_step) or ('avg' in aggregate_step) or \
('mean ' in aggregate_step):
return 'AVG'
if include_boolean:
if 'true ' in aggregate_step:
return 'TRUE'
elif 'false ' in aggregate_step:
return 'FALSE'
else:
return None
else:
return None
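# Illustrative examples (added; the step strings are made up):
#   extract_aggregator("number of #1")   -> 'COUNT'
#   extract_aggregator("highest #1")     -> 'MAX'
#   extract_aggregator("#1 from boston") -> None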
# %%
def eqdmr_program(decomposition):
"""Returns an executable QDMR program representation
Parameters
----------
decomposition : str
string representation of a QDMR
Returns
-------
str
return string representation of the executable QDMR
"""
count = 1
steps = parse_decomposition(decomposition)
prev_refs_code = {}
for step in steps:
op_type = step_type(step, False)
new_op = ExecQDMR(op_type, step, prev_refs_code)
# print(count)#############
# print(op_name(op_type))############
# print(step)############
# print(new_op)############
prev_refs_code[count] = str(new_op)
count += 1
return prev_refs_code[count - 1]
def pretty_eqdmr(eqdmr):
    """Returns a pretty-printed, indented string of an executable QDMR program
    Parameters
    ----------
    eqdmr : str
        string representation of the executable QDMR
    Returns
    -------
    str
        indented, human-readable representation of the executable QDMR
    """
    tab_count = 0
    pretty_representation = ''
    for i in range(len(eqdmr)):
        if eqdmr[i] == '(':
            tab_count += 1
            pretty_representation += '(\n'
            pretty_representation += '\t' * tab_count
        elif eqdmr[i] == ',':
            pretty_representation += ',\n'
            pretty_representation += '\t' * tab_count
        elif eqdmr[i] == ')':
            tab_count -= 1
            pretty_representation += '\n'
            pretty_representation += '\t' * tab_count
            pretty_representation += ')'
        else:
            pretty_representation += eqdmr[i]
    return pretty_representation
#
# print(eqdmr_program("return customers ;return products #1 bought ;return #1 where #2 is food ;return names of #3"))
# pretty_print_eqdmr(
# eqdmr_program("return customers ;return products #1 bought ;return #1 where #2 is food ;return names of #3"))
# %%
def dataset_to_programs(dataset_path:str):
"""
Converts dataset file to programs
    :param dataset_path: a dataset file path without the .csv extension, e.g. 'data/data_old_version/break_low_level'
:return: creates files on the same directory with the converted programs
"""
# ERROR ANALYSIS OF SQL QDMR-TO-PROGRAM
df = pd.read_csv('{}.csv'.format(dataset_path))
# df = pd.read_csv('decompositions_qdmr_all.csv')
##valid_df = df[(df['correct']!=0)]
decompositions = df['decomposition']
dec_col = []
qid_col = []
qtext_col = []
eqdmr_col = []
count = 1
for i in range(len(decompositions)):
###print(str(count) + '. ' + '*'*100)
###count += 1
###print(dec)
question_id = df.loc[i, 'question_id']
question_text = df.loc[i, 'question_text']
dec = df.loc[i, 'decomposition']
try:
program = eqdmr_program(dec)
# print(question_id)
###print(question_text)
###print(dec)
###print(program)
####pretty_print_eqdmr(program)
except:
dec_col += [dec]
qid_col += [question_id]
qtext_col += [question_text]
eqdmr_col += ['ERROR']
else:
dec_col += [dec]
qid_col += [question_id]
qtext_col += [question_text]
eqdmr_col += [program]
###print('*'*100)
d = {'question_id': qid_col, 'question_text': qtext_col, 'decomposition': dec_col, 'program': eqdmr_col}
programs_df = pd.DataFrame(data=d)
programs_df.to_csv('{}__error_analysis.csv'.format(dataset_path), encoding='utf-8')
programs_df.to_html('{}__error_analysis.html'.format(dataset_path))
print('done...')
# %%
def samples_to_programs(smpl_dir:str):
"""
Add program column to predictions samples files.
    The files are assumed to be pandas.DataFrame .json/.html files, in a **/*_samples directory, with "gold" and "prediction"
columns (just like 'eval_find_interesting_samples.py' generates)
:param smpl_dir: root directory of samples
:return: creates a file enriched by program column for gold and prediction for each samples file in the same location
"""
paths = [p for p in Path(smpl_dir).glob("**/*_samples/**/*.*")
if not re.match(r".*__programs\..*$", p.name)]
for p in paths:
try:
dest_dir = p.parent
name, extension = os.path.splitext(p.name)
if extension == ".json":
with open(str(p), 'rt') as f:
df = pd.read_json(f)
elif extension == ".html":
df = pd.read_html(str(p))[0]
else:
raise Exception("Unsupported file extension {}".format(extension))
predictions_to_programs(df, "gold")
predictions_to_programs(df, "prediction")
dest_path = os.path.join(dest_dir, p.name.replace(extension, "__programs{}".format(extension)))
if extension == ".json":
df.to_json(dest_path)
elif extension == ".html":
df.to_html(dest_path)
else:
raise Exception("Unsupported file extension {}".format(extension))
except Exception as ex:
print("Error on '{}'. {}".format(p, ex))
traceback.print_exc()
def predictions_to_programs(df, qdmr_col):
"""
    Fixes the qdmr (prediction) column to a proper qdmr representation, and adds a parsed program column for it
:param df: pandas dataframe
:param qdmr_col: qdmr prediction column to convert
:return:
"""
prog_col = "{}_program".format(qdmr_col)
#df[qdmr_col].replace({delimiter: DELIMITER, ref:r'{}\g<1>'.format(REF)}, regex=True, inplace=True)
df[prog_col] = df[qdmr_col]
for index, row in df.iterrows():
dec = row[prog_col]
try:
fix = prediction_to_qdmr(dec)
df.loc[index, qdmr_col] = fix
program = eqdmr_program(fix)
except:
df.loc[index, prog_col] = "ERROR"
else:
df.loc[index, prog_col] = program
def prediction_to_qdmr(prediction:str, add_return:bool=True):
delimiter = "@@SEP@@"
ref = r'@@([0-9]+)@@'
fix = re.sub(ref, r'{}\g<1>'.format(REF), prediction)
return DELIMITER.join(["return {}".format(d) if (not d.startswith('return ') and add_return) else d for d in fix.split(delimiter)])
def qdmr_to_prediction(qdmr:str, remove_return:bool=True):
    if not isinstance(qdmr, str):
        raise Exception("expected string, got {}".format(str(qdmr)))
delimiter = "@@SEP@@"
ref = r'#([0-9]+)'
fix = re.sub(ref, r'@@\g<1>@@', qdmr)
return delimiter.join([re.sub(r'^\s*return\s+', '', step) if remove_return else step for step in fix.split(DELIMITER)])
def prediction_to_program(prediction: str):
    return pretty_eqdmr(eqdmr_program(prediction_to_qdmr(prediction)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="parse QDMR to programs in 3 available modes")
parser.add_argument("--smpl_dir", type=str, help="root directory of samples (generated by 'eval_find_interesting_samples.py')")
parser.add_argument("--qdmr", type=str, help="a single qdmr to parse")
parser.add_argument("--dataset", type=str, help="parse dataset file (e.g 'data /data_old_version/break_low_level') and plots statistics")
args = parser.parse_args()
assert not (args.smpl_dir and args.qdmr)
if args.qdmr:
print(prediction_to_program(args.qdmr))
elif args.dataset:
dataset_to_programs(args.dataset)
elif args.smpl_dir:
samples_to_programs(args.smpl_dir)
| break-evaluator-master | scripts/qdmr_to_program.py |
from evaluation.decomposition import Decomposition, draw_decomposition_graph
from evaluation.graph_matcher import AStarSearcher
examples = [
# 0
(Decomposition(["representatives from New York state or Indiana state",
"the life spans of @@1@@"]),
Decomposition(["representatives from new york state",
"representatives from indiana state",
"@@1@@ or @@2@@",
"life spans of @@3@@"])),
# 1
(Decomposition(["the team owned by Jerry Jones",
"the 1996 coach of @@1@@"]),
Decomposition(["the team owned by Jerry Jones",
"the 1996 coach of @@1@@"])),
# 2
(Decomposition(["the team with Baltimore Fight Song",
"year did @@1@@ win the Superbowl"]),
Decomposition(["the team with Baltimore Fight Song",
"what year did @@1@@ win the Superbowl"])),
# 3
(Decomposition(["a us air flight",
"@@1@@ from toronto to san diego",
"@@2@@ with a stopover in denver"]),
Decomposition(["us air flights",
"@@1@@ from toronto",
"@@2@@ to san diego",
"@@3@@ with a stopover in denver"])),
# 4
(Decomposition(["flights",
"@@1@@ from baltimore",
"@@2@@ to atlanta",
"@@3@@ that arrive before noon and i'd like to see flights",
"@@4@@ from denver",
"@@5@@ to atlanta",
"@@6@@ that arrive before noon"]),
Decomposition(["flights from baltimore",
"@@1@@ to atlanta",
"@@2@@ that arrive before noon",
"flights from denver",
"@@4@@ to atlanta",
"@@5@@ that arrive before noon",
"@@3@@, @@6@@"])),
# 5
(Decomposition(["the club \"Bootup Baltimore\"",
"all the female members of @@1@@",
"the first name and last name for @@2@@"]),
Decomposition(["all female members of the club bootcup baltimore",
"the first name and last name of @@1@@"])),
# 6
(Decomposition(["conferences,",
"the number of @@1@@",
"@@2@@ which have more than 60 papers ",
"@@3@@ containing keyword \" Relational Database \""]),
Decomposition(["papers containing keyword \"relational databases\"",
"conferences which have more than 60 @@1@@",
"the number of @@2@@"])),
# 7
(Decomposition(["the movie released in the year 2000 or earlier",
"the title and director of @@1@@",
"worldwide gross",
"@@2@@ that had the highest @@3@@"]),
Decomposition(["movies released in the year 2000 or earlier",
"@@1@@ that had the highest worldwide gross",
"the title and director of @@2@@"])),
# 8
(Decomposition(["team Tim Howard playing for it",
"@@1@@ owned by Malcolm Glazer"]),
Decomposition(["the teams owned by Malcolm Glazer",
"@@1@@ that has Tim Howard playing for it"])),
# 9
(Decomposition(["the parties",
"@@1@@ that have both representatives in New York state"
" and representatives in Pennsylvania state"]),
Decomposition(["representatives in new york state",
"representative in pennsylvania state",
"the parties of both @@1@@ and @@2@@"]))
]
searcher = AStarSearcher()
for i in range(len(examples)):
if i < 9:
continue
dec1, dec2 = examples[i]
graph1 = dec1.to_graph()
graph2 = dec2.to_graph()
# draw_decomposition_graph(graph1, title="prediction")
# draw_decomposition_graph(graph2, title="gold")
searcher.set_graphs(graph1, graph2)
# res = searcher.a_star_search(debug=True)
res12 = searcher.a_star_search(debug=False)
searcher.set_graphs(graph2, graph1)
res21 = searcher.a_star_search(debug=False)
print("\nexample {}".format(i))
for (res, desc) in [(res12, "1--2"), (res21, "2--1")]:
print("edit path {}: ".format(desc), res[0])
print("edit path string {}: ".format(desc), res[1])
print("cost {}: ".format(desc), res[2])
# print("normalized cost {}: ".format(desc), res[3])
| break-evaluator-master | evaluation/graph_matcher_tests.py |
import heapq
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from itertools import chain, combinations, permutations
from multiprocessing import Pool
from progressbar import ProgressBar, SimpleProgress
from tqdm import tqdm
from evaluation.sequence_matcher import SequenceMatchScorer
from utils.timeout import exit_after
class GraphMatchScorer(object):
def __init__(self):
self.sequence_matcher = SequenceMatchScorer(remove_stop_words=False)
def node_subst_cost_lexical(self, node1, node2):
return 1 - self.sequence_matcher.get_match_score(node1['label'], node2['label'])
###@exit_after(600)
@exit_after(180) ### quit after 3 minutes
def normalized_graph_edit_distance(self, graph1, graph2, structure_only):
"""Returns graph edit distance normalized between [0,1].
Parameters
----------
graph1 : graph
graph2 : graph
structure_only : whether to use node substitution cost 0 (e.g. all nodes are identical).
Returns
-------
float
The normalized graph edit distance of G1,G2.
Node substitution cost is normalized string edit distance of labels.
Insertions cost 1, deletion costs 1.
"""
if structure_only:
node_subst_cost = lambda x, y: 0
else:
node_subst_cost = self.node_subst_cost_lexical
approximated_distances = nx.optimize_graph_edit_distance(graph1, graph2,
node_subst_cost=node_subst_cost)
total_cost_graph1 = len(graph1.nodes) + len(graph1.edges)
total_cost_graph2 = len(graph2.nodes) + len(graph2.edges)
normalization_factor = max(total_cost_graph1, total_cost_graph2)
dist = None
for v in approximated_distances:
dist = v
return float(dist)/normalization_factor
def get_edit_distance_match_scores(self, predictions, targets, structure_only=False):
distances = []
num_examples = len(predictions)
for i in tqdm(range(num_examples)):
try:
dist = self.normalized_graph_edit_distance(predictions[i], targets[i],
structure_only)
except KeyboardInterrupt:
print(f"skipping example: {i}")
dist = None
distances.append(dist)
return distances
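# Illustrative sketch (added; not part of the original scorer). Builds two tiny
# labeled decomposition graphs (the labels are made up) and scores them; the
# helper is defined for demonstration only and is never called.
def _edit_distance_example():
    g1 = nx.DiGraph()
    g1.add_node(1, label="flights from boston")
    g1.add_node(2, label="@@1@@ before noon")
    g1.add_edge(2, 1)
    g2 = nx.DiGraph()
    g2.add_node(1, label="flights from boston")
    g2.add_node(2, label="@@1@@ that leave before noon")
    g2.add_edge(2, 1)
    # returns a one-element list with a normalized edit distance in [0, 1]
    return GraphMatchScorer().get_edit_distance_match_scores([g1], [g2])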
class PriorityQueue:
def __init__(self):
self.elements = []
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)
def count(self):
return len(self.elements)
class AStarSearcher(object):
def __init__(self):
self.sequence_matcher = SequenceMatchScorer(remove_stop_words=False)
self.graph1 = None
self.graph2 = None
def _get_label(self, graph_number, node):
assert graph_number in [1, 2]
if graph_number == 1:
return self.graph1.nodes[node]['label']
else:
return self.graph2.nodes[node]['label']
def _get_edit_ops(self, edit_path):
edit_ops = set()
graph1_matched_nodes, graph2_matched_nodes = self._get_matched_nodes(edit_path)
graph1_unmatched_nodes = [node for node in self.graph1.nodes if node not in graph1_matched_nodes]
graph2_unmatched_nodes = [node for node in self.graph2.nodes if node not in graph2_matched_nodes]
if not edit_path:
i = 1
else:
i = min(graph1_unmatched_nodes)
subsets1 = self._get_all_subsets(graph1_unmatched_nodes)
subsets2 = self._get_all_subsets(graph2_unmatched_nodes)
# for i in graph1_unmatched_nodes:
# add {v_i -> u_j}, {v_i -> u_j+u_j+1}, ...
for subset in subsets2:
edit_ops.add(((i,), subset))
# add {v_i -> del}
edit_ops.add(((i,), (-1,)))
# add {v_i+v_i+1 -> u_j}, ...
for subset in subsets1:
if i in subset:
for j in graph2_unmatched_nodes:
edit_ops.add((subset, (j,)))
return list(edit_ops)
@staticmethod
def _get_edge_crossing_cost(edit_path):
segments = []
for edit_op in edit_path:
for source in edit_op[0]:
for target in edit_op[1]:
segments.append((source, target))
cost = 0.0
for i in range(len(segments)):
for j in range(i+1, len(segments)):
# all segments have the same orientation (going down from source to target).
if (segments[i][0] < segments[j][0] and segments[i][1] > segments[j][1]) or \
(segments[i][0] > segments[j][0] and segments[i][1] < segments[j][1]):
cost += 1.0
return cost
def _get_merge_op_cost(self, merge_nodes, target):
graph2_label = self._get_label(2, target)
min_merge_cost = 1
for permutation in permutations(merge_nodes):
graph1_labels = [self._get_label(1, node) for node in permutation]
graph1_label = ' '.join(graph1_labels)
permutation_merge_cost = 1 - self.sequence_matcher.get_match_score(graph1_label, graph2_label)
if permutation_merge_cost < min_merge_cost:
min_merge_cost = permutation_merge_cost
return min_merge_cost
def _get_split_op_cost(self, source, split_nodes):
graph1_label = self._get_label(1, source)
min_split_cost = 1
for permutation in permutations(split_nodes):
graph2_labels = [self._get_label(2, node) for node in permutation]
graph2_label = ' '.join(graph2_labels)
permutation_split_cost = 1 - self.sequence_matcher.get_match_score(graph1_label, graph2_label)
if permutation_split_cost < min_split_cost:
min_split_cost = permutation_split_cost
return min_split_cost
def _get_edit_path_cost(self, edit_path):
cost = 0
for edit_op in edit_path:
# node insertion
if edit_op[0] == (-1,):
cost += 1
# node deletion
elif edit_op[1] == (-1,):
cost += 1
# node substitution
elif len(edit_op[0]) == len(edit_op[1]):
graph1_label = self._get_label(1, edit_op[0][0])
graph2_label = self._get_label(2, edit_op[1][0])
substitution_cost = 1 - self.sequence_matcher.get_match_score(graph1_label, graph2_label)
cost += substitution_cost
# node merging
elif len(edit_op[0]) > 1:
min_merge_cost = self._get_merge_op_cost(edit_op[0], edit_op[1][0])
cost += min_merge_cost * len(edit_op[0])
# node splitting
elif len(edit_op[1]) > 1:
min_split_cost = self._get_split_op_cost(edit_op[0][0], edit_op[1])
cost += min_split_cost * len(edit_op[1])
else:
raise RuntimeError(
"get_edit_op_cost: edit op does not match any edit type: {}".format(edit_op)
)
edge_crossing_cost = self._get_edge_crossing_cost(edit_path)
return cost + edge_crossing_cost
def _get_heuristic_cost(self, edit_path):
# graph1_curr_nodes, graph2_curr_nodes = self._get_matched_nodes(edit_path)
# heuristic_cost = num_graph1_unmatched_nodes + num_graph2_unmatched_nodes
# num_graph1_unmatched_nodes = graph1.number_of_nodes() - len(graph1_curr_nodes)
# num_graph2_unmatched_nodes = graph2.number_of_nodes() - len(graph2_curr_nodes)
return 0
def _get_curr_edit_path_string(self, edit_path):
result = []
for edit_op in edit_path:
source = ','.join([self._get_label(1, node) if node != -1 else '-' for node in edit_op[0]])
target = ','.join([self._get_label(2, node) if node != -1 else '-' for node in edit_op[1]])
result.append('[{}]->[{}]'.format(source, target))
return ', '.join(result)
def _is_isomorphic_graphs(self):
nm = iso.categorical_node_match('label', '')
em = iso.numerical_edge_match('weight', 1)
return nx.is_isomorphic(self.graph1, self.graph2,
node_match=nm, edge_match=em)
@staticmethod
def get_edit_op_count(edit_path):
edit_op_counts = {
"insertion": 0,
"deletion": 0,
"substitution": 0,
"merging": 0,
"splitting": 0
}
for edit_op in edit_path:
if edit_op[0] == (-1,):
edit_op_counts["insertion"] += 1
elif edit_op[1] == (-1,):
edit_op_counts["deletion"] += 1
elif len(edit_op[1]) > 1:
edit_op_counts["splitting"] += 1
elif len(edit_op[0]) == len(edit_op[1]) == 1:
edit_op_counts["substitution"] += 1
elif len(edit_op[0]) > 1:
edit_op_counts["merging"] += 1
else:
raise RuntimeError("_get_edit_op_type: edit op type was not identified: {}".format(edit_op))
return edit_op_counts
@staticmethod
def _get_all_subsets(ss):
subsets = chain(*map(lambda x: combinations(ss, x), range(0, len(ss) + 1)))
subsets = [subset for subset in subsets if len(subset) > 0]
return subsets
@staticmethod
def _get_matched_nodes(edit_path):
graph1_matched_nodes, graph2_matched_nodes = [], []
for (graph1_nodes, graph2_nodes) in edit_path:
graph1_matched_nodes.extend([node for node in graph1_nodes if node != -1])
graph2_matched_nodes.extend([node for node in graph2_nodes if node != -1])
return graph1_matched_nodes, graph2_matched_nodes
def set_graphs(self, graph1, graph2):
self.graph1 = graph1
self.graph2 = graph2
def a_star_search(self, debug=False):
assert self.graph1 and self.graph2
if self._is_isomorphic_graphs():
self.graph1, self.graph2 = None, None
return [], "", 0, 0, 0, 0
found_best_path = False
queue = PriorityQueue()
edit_ops = self._get_edit_ops([])
for edit_op in edit_ops:
queue.put([edit_op],
self._get_edit_path_cost([edit_op]) +
self._get_heuristic_cost([edit_op]))
num_ops = len(edit_ops)
while True:
if queue.empty():
raise RuntimeError("a_star_search: could not find a complete edit path.")
curr_cost, curr_edit_path = queue.get()
graph1_curr_nodes, graph2_curr_nodes = self._get_matched_nodes(curr_edit_path)
if len(graph1_curr_nodes) < self.graph1.number_of_nodes():
edit_ops = self._get_edit_ops(curr_edit_path)
for edit_op in edit_ops:
curr_edit_path_extended = curr_edit_path + [edit_op]
queue.put(curr_edit_path_extended,
self._get_edit_path_cost(curr_edit_path_extended) +
self._get_heuristic_cost(curr_edit_path_extended))
num_ops += len(edit_ops)
elif len(graph2_curr_nodes) < self.graph2.number_of_nodes():
edit_ops = [((-1,), (node,)) for node in self.graph2.nodes
if node not in graph2_curr_nodes]
curr_edit_path_extended = curr_edit_path + edit_ops
queue.put(curr_edit_path_extended,
self._get_edit_path_cost(curr_edit_path_extended) +
self._get_heuristic_cost(curr_edit_path_extended))
num_ops += len(edit_ops)
elif debug:
if not found_best_path:
found_best_path = True
best_edit_path, best_cost = curr_edit_path, curr_cost
num_paths_true = queue.count() + 1
num_ops_true = num_ops
num_ops = 0
elif not queue.empty():
continue
else:
best_edit_path_string = self._get_curr_edit_path_string(best_edit_path)
explored_ops_ratio = (num_ops_true * 100.0) / (num_ops_true + num_ops)
# print("explored {:.2f}% of the ops.".format(explored_ops_ratio))
self.graph1, self.graph2 = None, None
return best_edit_path, best_edit_path_string, best_cost, num_paths_true, num_ops_true,\
explored_ops_ratio
else:
curr_edit_path_string = self._get_curr_edit_path_string(curr_edit_path)
num_paths = queue.count() + 1 # +1 for the current path we just popped
self.graph1, self.graph2 = None, None
return curr_edit_path, curr_edit_path_string, curr_cost, num_paths, num_ops, None
def get_ged_plus_score(idx, graph1, graph2, exclude_thr, debug):
if exclude_thr and \
(graph1.number_of_nodes() > exclude_thr or
graph2.number_of_nodes() > exclude_thr):
print(f"skipping example: {idx}")
return idx, None, None, None, None, None, None
a_start_searcher = AStarSearcher()
a_start_searcher.set_graphs(graph1, graph2)
curr_edit_path, _, curr_cost, curr_num_paths, curr_num_ops, curr_ops_ratio = \
a_start_searcher.a_star_search(debug=debug)
curr_edit_op_counts = a_start_searcher.get_edit_op_count(curr_edit_path)
return idx, curr_edit_path, curr_cost, curr_num_paths, curr_num_ops,\
curr_ops_ratio, curr_edit_op_counts
def get_ged_plus_scores(decomposition_graphs, gold_graphs,
exclude_thr=None, debug=False, num_processes=5):
samples = list(zip(decomposition_graphs, gold_graphs))
pool = Pool(num_processes)
pbar = ProgressBar(widgets=[SimpleProgress()], maxval=len(samples)).start()
results = []
_ = [pool.apply_async(get_ged_plus_score,
args=(i, samples[i][0], samples[i][1], exclude_thr, debug),
callback=results.append)
for i in range(len(samples))]
while len(results) < len(samples):
pbar.update(len(results))
pbar.finish()
pool.close()
pool.join()
edit_op_counts = {
"insertion": 0,
"deletion": 0,
"substitution": 0,
"merging": 0,
"splitting": 0
}
idxs, scores_tmp, num_paths, num_ops, ops_ratio = [], [], [], [], []
for result in results:
idx, curr_edit_path, curr_cost, curr_num_paths, curr_num_ops, curr_ops_ratio, curr_edit_op_counts = result
idxs.append(idx)
scores_tmp.append(curr_cost)
        if curr_cost is None:
continue
num_paths.append(float(curr_num_paths))
num_ops.append(float(curr_num_ops))
if debug:
ops_ratio.append(curr_ops_ratio)
for op in curr_edit_op_counts:
edit_op_counts[op] += curr_edit_op_counts[op]
scores = [score for (idx, score) in sorted(zip(idxs, scores_tmp))]
print("edit op statistics:", edit_op_counts)
print("number of explored paths: mean {:.2}, min {:.2}, max {:.2}".format(
np.mean(num_paths), np.min(num_paths), np.max(num_paths)))
print("number of edit ops: mean {:.2}, min {:.2}, max {:.2}".format(
np.mean(num_ops), np.min(num_ops), np.max(num_ops)))
if debug:
print("explored ops ratio: mean {:.2}, min {:.2}, max {:.2}".format(
np.mean(ops_ratio), np.min(ops_ratio), np.max(ops_ratio)))
return scores
| break-evaluator-master | evaluation/graph_matcher.py |
import matplotlib.pyplot as plt
import networkx as nx
import re
from utils.graph import get_graph_levels
class Decomposition(object):
def __init__(self, decomposition_list):
self.decomposition_list = [str(step) for step in decomposition_list]
def _get_graph_edges(self):
edges = []
for i, step in enumerate(self.decomposition_list):
references = self._get_references_ids(step)
step_edges = [(i+1, ref) for ref in references]
edges.extend(step_edges)
return edges
def to_string(self):
return " @@SEP@@ ".join([x.replace(" ", " ").strip() for x in self.decomposition_list])
@staticmethod
def _get_references_ids(step):
return [int(x) for x in re.findall(r"@@(\d+)@@", step)]
@staticmethod
def from_str(text, sep="@@SEP@@"):
decomposition_list = [re.sub(r"\s+", " ", x.strip()) for x in re.split(sep, text, flags=re.IGNORECASE)]
return Decomposition(decomposition_list)
@staticmethod
def from_tokens(decomposition_tokens, sep="@@SEP@@"):
decomposition_str = ' '.join(decomposition_tokens)
return Decomposition.from_str(decomposition_str, sep)
def to_graph(self, nodes_only=False):
# initiate a directed graph
graph = nx.DiGraph()
# add edges
if nodes_only:
edges = []
else:
edges = self._get_graph_edges()
graph.add_edges_from(edges)
# add nodes
nodes = self.decomposition_list
for i in range(len(nodes)):
graph.add_node(i+1, label=nodes[i])
# handle edge cases where artificial nodes need to be added
for node in graph.nodes:
if 'label' not in graph.nodes[node]:
graph.add_node(node, label='')
return graph
@staticmethod
def from_graph(graph: nx.DiGraph):
decomposition_list = [graph.nodes[i+1]["label"] for i in range(graph.number_of_nodes())]
return Decomposition(decomposition_list)
def draw_decomposition(self):
graph = self.to_graph(False)
draw_decomposition_graph(graph)
def draw_decomposition_graph(graph, title=None, pos=None):
options = {
'node_color': 'lightblue',
'node_size': 400,
'width': 1,
'arrowstyle': '-|>',
'arrowsize': 14,
}
if not pos:
try:
_, levels = get_graph_levels(graph)
pos = {}
max_num_nodes_per_layer = max([len(levels[l]) for l in levels]) + 1
for l in levels.keys():
num_layer_nodes = len(levels[l])
if num_layer_nodes == 0: continue
space_factor = max_num_nodes_per_layer // (num_layer_nodes + 1)
for i, n_id in enumerate(sorted(levels[l])):
pos[n_id] = ((i + 1) * space_factor, l)
except: # cyclic
print("a cyclic graph - change layout (no-layers)")
pos = nx.spring_layout(graph, k=0.5)
nx.draw_networkx(graph, pos=pos, arrows=True, with_labels=True, **options)
for node in graph.nodes:
plt.text(pos[node][0], pos[node][1]-0.1,
s=re.sub(r'@@(\d+)@@', r'#\g<1>', graph.nodes[node]['label']),
bbox=dict(facecolor='red', alpha=0.5),
horizontalalignment='center',
wrap=True, size=6)
if title:
plt.title(title)
plt.axis("off")
plt.show()
plt.clf()
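# Minimal usage sketch (added; not part of the original module). The QDMR string
# below is made up and only demonstrates the string -> graph -> string round trip.
if __name__ == "__main__":
    dec = Decomposition.from_str("flights from boston @@SEP@@ @@1@@ that leave before noon")
    graph = dec.to_graph()
    print(list(graph.edges()))  # [(2, 1)] -- step 2 references step 1
    print(Decomposition.from_graph(graph).to_string())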
| break-evaluator-master | evaluation/decomposition.py |
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SARI score for evaluating paraphrasing and other text generation models.
The score is introduced in the following paper:
Optimizing Statistical Machine Translation for Text Simplification
Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen and Chris Callison-Burch
In Transactions of the Association for Computational Linguistics (TACL) 2015
http://cs.jhu.edu/~napoles/res/tacl2016-optimizing.pdf
This implementation has two differences with the GitHub [1] implementation:
(1) Define 0/0=1 instead of 0 to give higher scores for predictions that match
a target exactly.
(2) Fix an alleged bug [2] in the deletion score computation.
[1] https://github.com/cocoxu/simplification/blob/master/SARI.py
(commit 0210f15)
[2] https://github.com/cocoxu/simplification/issues/6
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
# import tensorflow as tf
# The paper that intoduces the SARI score uses only the precision of the deleted
# tokens (i.e. beta=0). To give more emphasis on recall, you may set, e.g.,
# beta=1.
BETA_FOR_SARI_DELETION_F_MEASURE = 0
def _get_ngram_counter(ids, n):
"""Get a Counter with the ngrams of the given ID list.
Args:
ids: np.array or a list corresponding to a single sentence
n: n-gram size
Returns:
collections.Counter with ID tuples as keys and 1s as values.
"""
# Remove zero IDs used to pad the sequence.
ids = [token_id for token_id in ids if token_id != 0]
ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)]
ngrams = set(ngram_list)
counts = collections.Counter()
for ngram in ngrams:
counts[ngram] = 1
return counts
def _get_fbeta_score(true_positives, selected, relevant, beta=1):
"""Compute Fbeta score.
Args:
true_positives: Number of true positive ngrams.
selected: Number of selected ngrams.
relevant: Number of relevant ngrams.
beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.
Returns:
Fbeta score.
"""
precision = 1
if selected > 0:
precision = true_positives / selected
if beta == 0:
return precision
recall = 1
if relevant > 0:
recall = true_positives / relevant
if precision > 0 and recall > 0:
beta2 = beta * beta
return (1 + beta2) * precision * recall / (beta2 * precision + recall)
else:
return 0
def get_addition_score(source_counts, prediction_counts, target_counts):
"""Compute the addition score (Equation 4 in the paper)."""
added_to_prediction_counts = prediction_counts - source_counts
true_positives = sum((added_to_prediction_counts & target_counts).values())
selected = sum(added_to_prediction_counts.values())
# Note that in the paper the summation is done over all the ngrams in the
# output rather than the ngrams in the following set difference. Since the
# former does not make as much sense we compute the latter, which is also done
# in the GitHub implementation.
relevant = sum((target_counts - source_counts).values())
return _get_fbeta_score(true_positives, selected, relevant)
def get_keep_score(source_counts, prediction_counts, target_counts):
"""Compute the keep score (Equation 5 in the paper)."""
source_and_prediction_counts = source_counts & prediction_counts
source_and_target_counts = source_counts & target_counts
true_positives = sum((source_and_prediction_counts &
source_and_target_counts).values())
selected = sum(source_and_prediction_counts.values())
relevant = sum(source_and_target_counts.values())
return _get_fbeta_score(true_positives, selected, relevant)
def get_deletion_score(source_counts, prediction_counts, target_counts, beta=0):
"""Compute the deletion score (Equation 6 in the paper)."""
source_not_prediction_counts = source_counts - prediction_counts
source_not_target_counts = source_counts - target_counts
true_positives = sum((source_not_prediction_counts &
source_not_target_counts).values())
selected = sum(source_not_prediction_counts.values())
relevant = sum(source_not_target_counts.values())
return _get_fbeta_score(true_positives, selected, relevant, beta=beta)
def get_sari_score(source_ids, prediction_ids, list_of_targets,
max_gram_size=4, beta_for_deletion=0):
"""Compute the SARI score for a single prediction and one or more targets.
Args:
source_ids: a list / np.array of SentencePiece IDs
prediction_ids: a list / np.array of SentencePiece IDs
list_of_targets: a list of target ID lists / np.arrays
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
beta_for_deletion: beta for deletion F score.
Returns:
the SARI score and its three components: add, keep, and deletion scores
"""
addition_scores = []
keep_scores = []
deletion_scores = []
for n in range(1, max_gram_size + 1):
source_counts = _get_ngram_counter(source_ids, n)
prediction_counts = _get_ngram_counter(prediction_ids, n)
# All ngrams in the targets with count 1.
target_counts = collections.Counter()
# All ngrams in the targets with count r/num_targets, where r is the number
# of targets where the ngram occurs.
weighted_target_counts = collections.Counter()
num_nonempty_targets = 0
for target_ids_i in list_of_targets:
target_counts_i = _get_ngram_counter(target_ids_i, n)
if target_counts_i:
weighted_target_counts += target_counts_i
num_nonempty_targets += 1
for gram in weighted_target_counts.keys():
weighted_target_counts[gram] /= num_nonempty_targets
target_counts[gram] = 1
keep_scores.append(get_keep_score(source_counts, prediction_counts,
weighted_target_counts))
deletion_scores.append(get_deletion_score(source_counts, prediction_counts,
weighted_target_counts,
beta_for_deletion))
addition_scores.append(get_addition_score(source_counts, prediction_counts,
target_counts))
avg_keep_score = sum(keep_scores) / max_gram_size
avg_addition_score = sum(addition_scores) / max_gram_size
avg_deletion_score = sum(deletion_scores) / max_gram_size
sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0
return sari, avg_keep_score, avg_addition_score, avg_deletion_score
def get_sari(source_ids, prediction_ids, target_ids, max_gram_size=4):
"""Computes the SARI scores from the given source, prediction and targets.
Args:
source_ids: A 2D list / np.array of size (batch_size, sequence_length)
prediction_ids: A 2D list / np.array of size (batch_size, sequence_length)
target_ids: A 3D list / np.array of size (batch_size, number_of_targets,
sequence_length)
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
Returns:
A 4-tuple of 1D numpy arrays of size (batch_size) for the SARI score and
the keep, addition and deletion scores.
"""
# def get_sari_numpy(source_ids, prediction_ids, target_ids):
"""Iterate over elements in the batch and call the SARI function."""
sari_scores = []
keep_scores = []
add_scores = []
deletion_scores = []
# Iterate over elements in the batch.
for source_ids_i, prediction_ids_i, target_ids_i in zip(
source_ids, prediction_ids, target_ids):
sari, keep, add, deletion = get_sari_score(
source_ids_i, prediction_ids_i, target_ids_i, max_gram_size,
BETA_FOR_SARI_DELETION_F_MEASURE)
sari_scores.append(sari)
keep_scores.append(keep)
add_scores.append(add)
deletion_scores.append(deletion)
return (np.asarray(sari_scores), np.asarray(keep_scores),
np.asarray(add_scores), np.asarray(deletion_scores))
# sari, keep, add, deletion = tf.py_func(
# get_sari_numpy,
# [source_ids, prediction_ids, target_ids],
# [tf.float64, tf.float64, tf.float64, tf.float64])
# return sari, keep, add, deletion
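# Illustrative usage sketch: a toy call to get_sari_score on hand-made token-ID
# sequences. The integer IDs below are arbitrary stand-ins for SentencePiece IDs;
# they only need to be hashable for the n-gram Counters.
if __name__ == "__main__":
    source = [1, 2, 3, 4]              # e.g. "the cat sat down"
    prediction = [1, 2, 3]             # kept "the cat sat", deleted "down"
    targets = [[1, 2, 3], [1, 2, 3, 5]]
    sari, keep, add, deletion = get_sari_score(
        source, prediction, targets,
        max_gram_size=4, beta_for_deletion=BETA_FOR_SARI_DELETION_F_MEASURE)
    print(f"SARI={sari:.3f} keep={keep:.3f} add={add:.3f} delete={deletion:.3f}")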
| break-evaluator-master | evaluation/sari_hook.py |
import spacy
from edit_distance import SequenceMatcher
from tqdm import tqdm
class SequenceMatchScorer(object):
def __init__(self, remove_stop_words):
self.parser = spacy.load('en_core_web_sm', disable=['ner'])
self.remove_stop_words = remove_stop_words
# TODO: extend the default stop words list?
def clean_base(self, text):
parsed = self.parser(text)
res = []
for i in range(len(parsed)):
if not self.remove_stop_words or not parsed[i].is_stop:
res.append(parsed[i].lemma_)
return res
@staticmethod
def clean_structural(text):
return [token for token in text.split(' ') if token.startswith('@@')]
def get_match_score(self, prediction, target, processing="base"):
assert processing in ["base", "structural"]
if processing == "structural":
prediction_clean = self.clean_structural(prediction)
target_clean = self.clean_structural(target)
if prediction_clean == [] and target_clean == []:
return 1.0
else:
prediction_clean = self.clean_base(prediction)
target_clean = self.clean_base(target)
sm = SequenceMatcher(a=prediction_clean, b=target_clean)
# editdistance workaround on empty sequences
if not prediction_clean and not target_clean:
return 1
return sm.ratio()
def get_match_scores(self, predictions, targets, processing):
scores = []
num_examples = len(predictions)
for i in tqdm(range(num_examples)):
score = self.get_match_score(predictions[i], targets[i], processing)
scores.append(score)
return scores
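# Illustrative usage sketch: compare a prediction and a gold step with both
# processing modes. "base" matches lemmatized tokens, "structural" matches only
# the @@i@@ reference tokens; assumes the spaCy model 'en_core_web_sm' is installed.
if __name__ == "__main__":
    scorer = SequenceMatchScorer(remove_stop_words=False)
    pred = "return @@1@@ that are blue"
    gold = "return @@1@@ which is blue"
    print("base:", scorer.get_match_score(pred, gold, processing="base"))
    # 1.0 here, since both sides reference the same step (@@1@@) and nothing else.
    print("structural:", scorer.get_match_score(pred, gold, processing="structural"))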
| break-evaluator-master | evaluation/sequence_matcher.py |
from __future__ import annotations
from typing import Callable
from abc import ABC, abstractmethod
import os
import re
import networkx as nx
import spacy
from spacy.tokens.token import Token
import _pickle as pk
import logging
from evaluation.decomposition import Decomposition
_logger = logging.getLogger(__name__)
class ReferenceToken(object):
def __init__(self, text, pos="NOUN", tag="NNS", i_min=None, i_max=None):
self.text = self.lemma_ = text
self.pos_ = pos
self.tag_ = tag
self.i_min = i_min
self.i_max = i_max
def __str__(self):
return self.text
def __repr__(self):
return self.__str__()
def contains_index(self, i:int) -> bool:
return (self.i_min is not None) and (self.i_max is not None) and (self.i_min <= i <= self.i_max)
def get_id(self):
return self.get_reference_id(self.text)
@staticmethod
def is_reference(text):
return re.match(r"^@@\d+@@$",text)
@staticmethod
def get_reference_id(text):
return int(text.replace("@", ""))
@staticmethod
def from_token(text:str, token:Token):
rf = ReferenceToken.from_span(text, span=[token])
return rf
@staticmethod
def from_span(text: str, span: [Token | ReferenceToken], **kwargs):
if not span:
raise ValueError("span must be non empty tokens list")
i_min = span[0].i_min if isinstance(span[0], ReferenceToken) else span[0].i
i_max = span[-1].i_max if isinstance(span[-1], ReferenceToken) else span[-1].i
rf = ReferenceToken(text, i_min=i_min, i_max=i_max, **kwargs)
return rf
# todo: preserved dep_, head_?
cache = {}
def load_cache(path: str) -> {}:
global cache
if not os.path.exists(path):
_logger.warning(f"no available cache:{path}")
return
with open(path, 'rb') as f:
cache = pk.load(f)
def save_cache(path: str):
if os.path.exists(path):
_logger.warning(f"already exists cache:{path}")
return
with open(path, 'wb') as f:
pk.dump(cache, f)
def prepare_node(parser, node, mask_references=True):
def cached_parser(text:str):
if text in cache:
return cache[text]
res = parser(text)
cache[text] = res
return res
clean_label = re.sub(r"\s+", " ", node["label"]).strip().lower()
doc = cached_parser(clean_label)
ref_ids = {i: t.text for i, t in enumerate(doc) if ReferenceToken.is_reference(t.text)}
# replace @@#@@ with "objects" for stable parsing
if mask_references:
re_doc = cached_parser(" ".join([("objects" if i in ref_ids else doc[i].text) for i in range(len(doc))]))
if len(re_doc) == len(doc):
doc = re_doc
node["doc"] = [(t if i not in ref_ids else ReferenceToken.from_token(ref_ids[i], t)) for i, t in enumerate(doc)]
node.pop("label")
class DecomposeRule(ABC):
def __init__(self):
self.preserved_tokens = [] # self preserved tokens
self._preserved_tokens = [] # total preserved tokens on decompose
def __str__(self):
return type(self).__name__
def __repr__(self):
return self.__str__()
def decompose(self, node_id: int, graph: nx.DiGraph, preserved_tokens: [str] = None) -> [int]:
self._preserved_tokens = preserved_tokens and [t for t in preserved_tokens if t not in self.preserved_tokens]
node = graph.nodes[node_id]
if "label" in node:
return False, None
doc = node["doc"]
try:
return self._decompose(node_id=node_id, graph=graph, doc=doc)
except Exception:
_logger.exception(f"Decomposition Error: {self._get_doc_str(doc=doc)}")
return False, None
@abstractmethod
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
# todo: doc might contain 'ReferenceToken' which is not token
raise NotImplementedError
@staticmethod
def _get_doc_str(doc: [Token or ReferenceToken]):
return ' '.join([d.text for d in doc])
@staticmethod
def _is_reference(token: Token) -> bool:
return token.text.startswith("@@")
@staticmethod
def _get_reference(node_id: int, span: [Token|ReferenceToken]) -> ReferenceToken:
return ReferenceToken.from_span(f"@@{node_id}@@", span)
@staticmethod
def _add_neighbor(node_id, graph: nx.DiGraph, is_node_to_new: bool = True, **neighbor):
new_node_id = graph.number_of_nodes()+1
graph.add_node(new_node_id, **neighbor)
if is_node_to_new:
graph.add_edge(node_id, new_node_id)
else:
# update predecessors
for p in graph.predecessors(node_id):
graph.remove_edge(p, node_id)
graph.add_edge(p, new_node_id)
graph.add_edge(new_node_id, node_id)
return new_node_id
@staticmethod
def update_successors(graph, n_id, doc: [Token]):
refs = Decomposition._get_references_ids(" ".join([t.text for t in doc]))
graph.remove_edges_from([(n_id, s_id) for s_id in graph.successors(n_id)])
graph.add_edges_from([(n_id, r) for r in refs])
def extract_spans(self, node_id: int, graph: nx.DiGraph, doc, spans: [(int,int)], is_transactional:bool=False):
"""
Extract spans (inclusive) from doc into new nodes, and reference them from this node.
Extraction is not allowed if:
* the span is already a reference
* the span is the entire doc
* the span violates preserved tokens (see decompose())
:param node_id:
:param graph:
:param doc:
:param spans:
:param is_transactional:
:return: a tuple extracted_refs, new_ids
extracted_refs: a list with one entry per requested span: None if the span was not extracted, or the ReferenceToken that replaced it
new_ids: ids of the newly added nodes
"""
# todo: distinguish between: cannot extract - it is a reference, cannot extract - it is invalid,
# and partial extraction
if not spans:
return [],[]
for start, end in spans:
if start > end:
raise ValueError(f"invalid span [{start},{end}]")
extraction_status = []
for start, end in spans:
if (start, end) == (0, len(doc)-1) or not self._validate_preserved(doc=doc, span=(start, end)):
extraction_status.append([False, None])
if is_transactional:
return [None]*len(spans), []
elif start == end and isinstance(doc[start], ReferenceToken):
extraction_status.append([False, doc[start]])
else:
extraction_status.append([True, None]) # place holder
new_doc = []
new_ids = []
last_i = 0
for cur_span, res in zip(spans,extraction_status):
if not res[0]:
continue
new_doc.extend(doc[last_i:cur_span[0]])
# todo: if span contains reference it should
neighbor_doc = doc[cur_span[0]:cur_span[1] + 1]
id = self._add_neighbor(node_id=node_id, graph=graph, doc=neighbor_doc,
is_node_to_new=True)
self.update_successors(graph, id, neighbor_doc)
new_ids.append(id)
ref = self._get_reference(id, neighbor_doc)
res[1] = ref
new_doc.append(ref)
last_i = cur_span[1] + 1
new_doc.extend(doc[last_i:])
self.update_successors(graph, node_id, new_doc)
graph.nodes[node_id]["doc"] = new_doc
return [ref for _, ref in extraction_status], new_ids
def _validate_preserved(self, doc: [Token|ReferenceToken], span:(int, int)) -> bool:
if not self._preserved_tokens:
return True
start, end = span
span_str = self._get_doc_str(doc[start: end+1])
for preserved in self._preserved_tokens:
preserved_strip = preserved.strip()
if span_str == preserved_strip:
return False
preserved_tokens = preserved_strip.split(' ')
preserved_len = len(preserved_tokens)
prev_offset = 1 if preserved.startswith(' ') and start>0 else 0 # make sure its not: [preserved ...]
post_offset = 1 if preserved.endswith(' ') and end+1<len(doc) else 0 # make sure its not: [... preserved]
"""
preser[ved ...]:
violate starting at [s-(len-1), s-1] (=> ends at [s-(len-1)+(len-1), s-1+(len-1)])
=> not in [s-(len-1), s+(len-1)-1]
"""
prev = self._get_doc_str(doc[max(0,start-(preserved_len-1)):
min(len(doc), start+(preserved_len-1) + prev_offset)])
"""
[... preser]ved:
violate ending at [e+1, e+(len-1)] (=> starts at [e+1-(len-1), e+(len-1)-(len-1)])
=> not in [e+1-(len-1), e+(len-1)]
"""
post = self._get_doc_str(doc[max(0,end+1-(preserved_len-1)-post_offset):
min(len(doc),end+preserved_len)])
if (f" {preserved_strip} " in f" {prev} ") or (f" {preserved_strip} " in f" {post} "):
return False
return True
def _get_test_cases__graphic(self) -> [((int, dict), ([(int, dict)], [(int, int)]))]:
return []
def _get_test_cases__str(self) -> (str, [str]):
return []
def _test(self):
parser = spacy.load('en_core_web_sm', disable=['ner'])
def graphic_test_create_graphs(inp, output: [((int, dict),([(int,dict)], [(int,int)]))]):
node_id, node_att = inp
in_g = nx.DiGraph()
in_g.add_node(node_id, **node_att)
out_nodes, out_edges = output
out_g = nx.DiGraph()
out_g.add_nodes_from(out_nodes)
out_g.add_edges_from(out_edges)
return in_g, out_g
def str_test_create_graphs(inp: str, output:[str]):
in_g = Decomposition([inp]).to_graph()
out_g = Decomposition(output).to_graph()
return in_g, out_g
def run_test(tests, create_graphs):
for i, test_case in enumerate(tests):
inp, output = test_case
in_g, out_g = create_graphs(inp, output)
for node in in_g.nodes.values():
prepare_node(parser=parser, node=node)
norm_g = in_g.copy()
self.decompose(1, norm_g)
def compare_nodes(n1, n2):
if "label" not in n2 and "doc" in n2:
n2["label"] = " ".join([t.text for t in n2["doc"]])
for k in n1:
if k not in n2 or n1[k].lower() != n2[k].lower():
return False
return True
def print_case():
return "\n".join([
f"{str(self)}:",
f"test: {list(in_g.nodes.values())}",
f"expected: {list(out_g.nodes.values())}",
f"norm: {list(norm_g.nodes.values())}"
])
assert nx.algorithms.is_isomorphic(out_g, norm_g, node_match=compare_nodes), print_case()
str_tests, graphic_tests = self._get_test_cases__str(), self._get_test_cases__graphic()
run_test(tests=graphic_tests, create_graphs=graphic_test_create_graphs)
run_test(tests=str_tests, create_graphs=str_test_create_graphs)
class NounsExtractionDecomposeRule(DecomposeRule):
def __init__(self):
super().__init__()
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
noun_components = []
i = 0
while i < len(doc):
while i < len(doc) and not doc[i].tag_.startswith("NN"):
i += 1
first = i
while i < len(doc) and doc[i].tag_.startswith("NN"):
i += 1
last = i-1
if first < last or (first == last and not isinstance(doc[first], ReferenceToken)):
noun_components.append((first, last))
# todo: break spans on preserved words? eg: object *blocking* (both are NN, but since blocking is preserved
# the extraction fails...)
_, new_ids = self.extract_spans(node_id=node_id, graph=graph, doc=doc, spans=noun_components)
return len(new_ids)>0, new_ids
def _get_test_cases__str(self) -> (str, [str]):
return [
("blue cubes", ["blue @@2@@", "cubes"]),
("cubes", ["cubes"]),
("@@1@@", ["@@1@@"]),
("@@1@@ in texas", ["@@1@@ in @@2@@", "texas"]),
("river in @@1@@", ["@@2@@ in @@1@@", "river"]),
("@@1@@ on VLDB conference", ["@@1@@ on @@2@@", "VLDB conference"]),
#("number of papers", ["number of @@2@@", "papers"]),
]
class CompoundNounExtractionDecomposeRule(DecomposeRule):
""" Assumption: Only nouns """
def __init__(self):
super().__init__()
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
# todo: fix func returned value: bool, [int]
# NN -[compound]-> NN -[compound]-> ...
if len(doc) < 2:
return False, None
for token in doc:
if not token.tag_.startswith("NN"):
return False, None
for token in doc[:-1]:
if isinstance(token, ReferenceToken) or token.dep_ not in ["compound","amod"]:
return False, None
cur_node_id = node_id
cur_doc = doc
total_added_ids = []
while len(cur_doc) > 1:
extracted_refs, new_ids = self.extract_spans(node_id=cur_node_id, graph=graph, doc=cur_doc,
spans=[(1,len(cur_doc)-1)], is_transactional=True)
if None in extracted_refs:
_logger.warning(f"could not decompose: {cur_doc}")
break
total_added_ids.extend(new_ids)
cur_node_id = extracted_refs[0].get_id()
cur_doc = cur_doc[1:]
return len(total_added_ids)>0, total_added_ids
def _get_test_cases__str(self) -> (str, [str]):
return [
("gold metal ball", ["gold @@2@@", "metal @@3@@", "ball"]),
("gold metal @@1@@", ["gold @@2@@", "metal @@1@@"]),
]
class RemoveByConditionDecomposeRule(DecomposeRule):
def __init__(self, condition: Callable[[Token], bool]):
super().__init__()
self.condition = condition
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
to_keep = [token for i,token in enumerate(doc) if (isinstance(token, ReferenceToken) or not self.condition(token)
or not self._validate_preserved(doc=doc, span=(i,i)))]
if len(to_keep) < len(doc):
graph.nodes[node_id]["doc"] = to_keep
return True, []
return False, None
class RemoveDETDecomposeRule(RemoveByConditionDecomposeRule):
def __init__(self):
super().__init__(condition=lambda token: token.pos_ == "DET" and token.head.lemma_ != "be")
def _get_test_cases__str(self) -> (str, [str]):
return [
("the population of @@1@@", ["population of @@1@@"]),
("cube that is blue", ["cube that is blue"]),
("@@2@@ that contain the keyword Relational Database", ["@@2@@ contain keyword Relational Database"]),
("is @@1@@ gold or green", ["is @@1@@ gold or green"]),
("is there any @@1@@", ["is there @@1@@"]),
("Is there any @@5@@", ["Is there @@5@@"]),
]
class AdjectiveDecomposeRule(DecomposeRule):
def __init__(self):
super().__init__()
# todo: should run after extract nouns? doesn't catch multi-noun phrase (e.g New York)
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
extract_spans = []
for i,token in enumerate(doc):
if token.pos_ == "ADJ":
if i < len(doc)-1 and isinstance(doc[i+1], ReferenceToken):# doc[i+1].tag_.startswith("NN"):
extract_spans.append((i,i+1))
elif i > 1 and doc[i-1].lemma_ == "be" and isinstance(doc[i-2], ReferenceToken): # doc[i-2].tag_.startswith("NN"):
# NN is ADJ
extract_spans.append((i-2,i))
elif i > 2 and doc[i-1].lemma_ == "be" and doc[i-2].pos_ == "DET" and isinstance(doc[i-3], ReferenceToken): # doc[i-3].tag_.startswith("NN"):
# NN that is ADJ
extract_spans.append((i-3,i))
if not extract_spans:
return False, None
_, new_ids = self.extract_spans(node_id=node_id, graph=graph, doc=doc, spans=extract_spans)
return len(new_ids)>0, new_ids
def _get_test_cases__str(self) -> (str, [str]):
return [
("@@1@@ behind the blue @@2@@", ["@@1@@ behind the @@3@@","", "blue @@2@@"]),
("@@1@@ behind the @@2@@ that is blue", ["@@1@@ behind the @@3@@","", "@@2@@ that is blue"]),
]
class AdjectiveLikeNounDecomposeRule(DecomposeRule):
def __init__(self):
super().__init__()
# todo: should run after extract nouns? doesn't catch multi-noun phrase (e.g New York)
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
extract_spans = []
for i,token in enumerate(doc):
if token.tag_.startswith("NN"):
if i > 1 and doc[i-1].lemma_ == "be" and isinstance(doc[i-2], ReferenceToken): # doc[i-2].tag_.startswith("NN"):
# NN is NN
extract_spans.append((i-2,i))
elif i > 2 and doc[i-1].lemma_ == "be" and doc[i-2].pos_ == "DET" and isinstance(doc[i-3], ReferenceToken): # doc[i-3].tag_.startswith("NN"):
# NN that is NN
extract_spans.append((i-3,i))
if not extract_spans:
return False, None
_, new_ids = self.extract_spans(node_id=node_id, graph=graph, doc=doc, spans=extract_spans)
return len(new_ids)>0, new_ids
def _get_test_cases__str(self) -> (str, [str]):
return [
("@@1@@ behind the @@2@@ that is metal", ["@@1@@ behind the @@3@@","", "@@2@@ that is metal"]),
("if any @@1@@ is gold", ["if any @@2@@", "@@1@@ is gold"]),
("if any @@1@@ is @@2@@", ["if any @@3@@","", "@@1@@ is @@2@@"]),
]
class ADPDecomposeRule(DecomposeRule):
def __init__(self):
super().__init__()
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
if len(doc)>=3 and isinstance(doc[0], ReferenceToken) and doc[1].pos_ in ["ADP", "PART"]:
end = 3
while end < len(doc) and doc[end].tag_.startswith("NN"):
end +=1
_, new_ids = self.extract_spans(node_id=node_id, graph=graph, doc=doc, spans=[(0,end-1)])
return len(new_ids) > 0, new_ids
else:
return False, None
def _get_test_cases__str(self) -> (str, [str]):
return [
("@@1@@ from london to paris", ["@@2@@ to paris", "@@1@@ from london"]),
]
def run_tests(cls_root=DecomposeRule):
import inspect
def all_subclasses(cls):
return sorted(set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in all_subclasses(c)]), key=lambda c: c.__name__)
rules = [c() for c in all_subclasses(cls_root) if issubclass(c, ABC) and
all([(p.default != inspect.Parameter.empty or
p.kind in [inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD])
for p in list(inspect.signature(c.__init__).parameters.values())[1:]])]
for r in rules:
_logger.info(r)
r._test()
_logger.info(f"{len(rules)} rules ended")
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
run_tests()
| break-evaluator-master | evaluation/normal_form/normalization_rules.py |
from overrides import overrides
import networkx as nx
from queue import Queue, deque
import logging
import re
import spacy
from evaluation.decomposition import Decomposition, draw_decomposition_graph
from utils.graph import get_graph_levels
from evaluation.normal_form.normalization_rules import prepare_node
import evaluation.normal_form.normalization_rules as norm_rules
import evaluation.normal_form.operations_normalization_rules as op_norm_rules
from scripts.qdmr_to_program import QDMROperation
_logger = logging.getLogger(__name__)
class NormalizedGraphMatchScorer:
def __init__(self, extract_params=True):
super().__init__()
self.parser = spacy.load('en_core_web_sm', disable=['ner'])
self.rules = [
norm_rules.RemoveDETDecomposeRule(),
##op_norm_rules.AggregateDecomposeRule(),
op_norm_rules.FilterAdjectiveLikeNounDecomposeRule(is_extract_params=extract_params),
norm_rules.NounsExtractionDecomposeRule(),
norm_rules.ADPDecomposeRule(),
norm_rules.CompoundNounExtractionDecomposeRule(),
norm_rules.AdjectiveDecomposeRule(),
norm_rules.AdjectiveLikeNounDecomposeRule(),
op_norm_rules.FilterAdjectiveDecomposeRule(is_extract_params=extract_params),
op_norm_rules.FilterADPDecomposeRule(is_extract_params=extract_params),
op_norm_rules.FilterCompoundNounDecomposeRule(is_extract_params=extract_params),
op_norm_rules.FilterConditionDecomposeRule(is_extract_params=extract_params),
##op_norm_rules.SelectionDecomposeRule(is_extract_params=extract_params),
op_norm_rules.WrapperFixesAggregateDecomposeRule(is_extract_params=extract_params),
op_norm_rules.WrapperFixesBooleanDecomposeRule(is_extract_params=extract_params),
op_norm_rules.WrapperDecomposeRule(is_extract_params=extract_params),
]
self._preserved_tokens = [p for r in self.rules for p in r.preserved_tokens]
def normalized_decomposition(self, decomposition:Decomposition, verbose: bool = False) -> Decomposition:
norm_g = self.normalize_graph(graph=decomposition.to_graph(), verbose=verbose)
return Decomposition.from_graph(graph=norm_g)
def normalize_graph(self, graph: nx.DiGraph, verbose: bool = False) -> nx.DiGraph:
graph = graph.copy()
unvisited_nodes = Queue()
unvisited_nodes.queue = deque(graph.nodes.keys())
# init: dependencies parsing & POS
for node_id in unvisited_nodes.queue:
node = graph.nodes[node_id]
prepare_node(self.parser, node)
# decomposition rules
def run_rules(node_id, is_use_preserved=True):
is_decomposed = False
for rule in self.rules:
preserved_tokens = None
if is_use_preserved:
# todo: preserved words just for no-operational?
preserved_tokens = self._preserved_tokens #if (not isinstance(rule, op_norm_rules.OperationDecomposeRule)) else None
decomposed, added_nodes_ids = rule.decompose(node_id,graph, preserved_tokens=preserved_tokens)
if decomposed:
for id in added_nodes_ids: unvisited_nodes.put(id)
is_decomposed = True
if verbose:
copy = graph.copy()
self._update_labels_from_doc(graph=copy)
_logger.info(f"{rule}{' -reserved' if is_use_preserved else ''} (node: {node_id})\t{Decomposition.from_graph(graph=copy).to_string()}")
return is_decomposed
while not unvisited_nodes.empty():
node_id = unvisited_nodes.get()
run_rules(node_id, is_use_preserved=True)
run_rules(node_id, is_use_preserved=False)
# update "label" from "doc" if needed
self._update_labels_from_doc(graph=graph)
# re-order operation chain
self.reorder_operations(graph)
# re-order graph alphabetically
self.reorder(graph)
# todo: reorder args: intersection, union, ... args order
return graph
@staticmethod
def _update_labels_from_doc(graph):
for node in graph.nodes.values():
if "label" not in node:
node["label"] = " ".join([t.lemma_ for t in node["doc"]])
@staticmethod
def reorder_operations(graph: nx.DiGraph):
op_to_nodes = {op: {} for op in QDMROperation}
for node_id, node in graph.nodes.items():
op = node.get("operation", QDMROperation.NONE)
op_to_nodes[op][node_id] = node
# todo: work on a copy of graph (in case of failure)
NormalizedGraphMatchScorer.reorder_filters_chain(graph, op_to_nodes)
@staticmethod
def unwind_refs(graph, field="label"):
unwind = {}
_, levels = get_graph_levels(graph)
for l in sorted(levels.keys()):
for n_id in levels[l]:
new = re.sub(r'@@(\d+)@@', lambda x: f"{unwind[int(x.group(1))]}", graph.nodes[n_id][field])
unwind[n_id] = new
return unwind
@staticmethod
def reorder_filters_chain(graph, op_to_nodes):
filter_nodes = {k: v for k, v in op_to_nodes[QDMROperation.FILTER].items() if not v.get("meta", [])}
if not filter_nodes:
return
def get_args(node_id):
node = graph.nodes[node_id]
arg0, arg1 = re.match(r".*\((.*)\)", node["label"]).group(1).split(",")
assert norm_rules.ReferenceToken.is_reference(arg0)
return arg0, arg1
def head_criteria(node_id):
pred = list(graph.predecessors(node_id))
if len(pred) == 1 and pred[0] in filter_nodes:
arg0, _ = get_args(pred[0])
return norm_rules.ReferenceToken.get_reference_id(arg0) == node_id
return False
head_candidates = {k: v for k, v in filter_nodes.items() if head_criteria(k)}
visited = {}
for c_id, c in head_candidates.items():
try:
if c_id in visited:
continue
visited[c_id] = None
# find chain head
head_id = c_id
for i in range(len(head_candidates)+1):
if i == len(head_candidates):
raise ValueError("a cycle was detected")
arg0, arg1 = get_args(head_id)
arg0_id = norm_rules.ReferenceToken.get_reference_id(arg0)
if arg0_id == head_id:
raise ValueError("a cycle was detected")
if arg0_id not in head_candidates:
break
head_id = arg0_id
# find chain items
chain_items = []
cur_node_id = head_id
for i in range(len(filter_nodes)+1):
if i == len(filter_nodes) or cur_node_id in chain_items:
raise ValueError("a cycle was detected")
chain_items.append(cur_node_id)
if cur_node_id not in head_candidates:
break
cur_node_id = list(graph.predecessors(cur_node_id))[0]
for i in chain_items:
visited[i] = None
# reorder
unwind = None
def order_by_arg1(node_id):
nonlocal unwind
_, arg1 = get_args(node_id)
if norm_rules.ReferenceToken.is_reference(arg1):
arg1_id = norm_rules.ReferenceToken.get_reference_id(arg1)
unwind = unwind or NormalizedGraphMatchScorer.unwind_refs(graph)
return unwind[arg1_id]
#return graph.nodes[arg1_id]["label"]
return re.sub(r"@@(\d+)@@", "", arg1).replace(" ", " ")
sorted_chain = sorted(chain_items, key=lambda x: order_by_arg1(x))
head_arg0, head_arg1 = get_args(chain_items[0])
chain_successor = norm_rules.ReferenceToken.get_reference_id(head_arg0)
chain_predecessors = list(graph.predecessors(chain_items[-1]))
# remove current edges
graph.remove_edge(chain_items[0], chain_successor)
for i in range(len(chain_items)-1):
graph.remove_edge(chain_items[i+1], chain_items[i])
for p in chain_predecessors:
graph.remove_edge(p, chain_items[-1])
# add new edges
def update_arg0(node_id, value):
node = graph.nodes[node_id]
arg0, _ = get_args(node_id)
node["label"] = node["label"].replace(arg0, f"@@{value}@@")
graph.add_edge(node_id, value)
update_arg0(sorted_chain[0], chain_successor)
for i in range(len(sorted_chain)-1):
update_arg0(sorted_chain[i+1], sorted_chain[i])
for p in chain_predecessors:
graph.add_edge(p, sorted_chain[-1])
p_node = graph.nodes[p]
p_node["label"] = p_node["label"].replace(f"@@{chain_items[-1]}@@", f"@@{sorted_chain[-1]}@@")
except ValueError as ex:
_logger.warning("skip - reorder head in reorder filter chain")
@staticmethod
def reorder(graph: nx.DiGraph):
try:
_, levels = get_graph_levels(graph)
except Exception as ex:
_logger.warning("skip - Failed to invoke reorder()")
return
# order by levels
relabel_map = {}
next_node_id = 1
for level in sorted(levels.keys()):
nodes_by_label = {}
for n_id in levels[level]:
# for consistency
n = graph.nodes[n_id]
n["label"] = re.sub(r"@@(\d+)@@", lambda x: f"@@{relabel_map[int(x.group(1))]}@@", n["label"])
if n["label"] not in nodes_by_label:
nodes_by_label[n["label"]] = n_id
else:
# duplicate label
# safe merge since previous levels are fixed
exists_node_id = nodes_by_label[n["label"]]
# successors - should be the same due to label
predecessors = graph.predecessors(n_id)
for p_id in predecessors:
graph.add_edge(p_id, exists_node_id)
graph.nodes[p_id]["label"] = re.sub(f"@@{n_id}@@", lambda x: f"@@{exists_node_id}@@", graph.nodes[p_id]["label"])
graph.remove_node(n_id)
nodes_order = sorted(nodes_by_label.keys())
for label in nodes_order:
n_id = nodes_by_label[label]
relabel_map[n_id]=next_node_id
next_node_id += 1
# update labels
# double mapping since new and old labels are overlap
nx.relabel.relabel_nodes(graph, {k:str(v) for k,v in relabel_map.items()}, copy=False)
nx.relabel.relabel_nodes(graph, {str(v):v for v in relabel_map.values()}, copy=False)
# note: node "labels" are updated
def test_exact_match(html:str, dest=None, verbose = False):
import pandas as pd
import traceback
from model.model_base import ModelBase
import time
start_time = time.time()
df = pd.read_html(html)[0]
norm_g = NormalizedGraphMatchScorer()
def predictions_to_norm(df, qdmr_col, norm_col):
df[norm_col] = df[qdmr_col]
for index, row in df.iterrows():
dec = row[norm_col]
try:
decomposition = norm_g.normalized_decomposition(Decomposition.from_str(dec))
df.loc[index, norm_col] = decomposition.to_string()
except Exception as ex:
print(f"error in index {index}:{str(ex)}\n{dec}", flush=True)
traceback.print_exc()
df.loc[index, norm_col] = "ERROR"
norm_rules.load_cache(html.replace(".html", "__cache"))
predictions_to_norm(df, "gold", "gold_norm")
predictions_to_norm(df, "prediction", "prediction_norm")
norm_rules.save_cache(html.replace(".html", "__cache"))
df["raw exact match"] = df["gold"].str.lower() == df["prediction"].str.lower()
df["norm exact match"] = (df["gold_norm"] == df["prediction_norm"]) & (df["gold_norm"] != 'ERROR') & (
df["prediction_norm"] != 'ERROR')
print(html, flush=True)
print("--- %s minutes ---" % ((time.time() - start_time)/60))
print(f"rules:{norm_g.rules}")
diff = df[df["raw exact match"]!=df["norm exact match"]]
if verbose:
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(diff[["raw exact match", "norm exact match", "gold","prediction","gold_norm", "prediction_norm"]])
ModelBase.print_score_stats({
"raw exact match": df["raw exact match"].tolist(),
"norm exact match": df["norm exact match"].tolist()
})
if dest is not None:
df.to_html(dest)
diff.to_html(dest.replace(".html", "__diff.html"))
regression = df[(df["raw exact match"] == True) & (df["norm exact match"] == False)]
if len(regression.index) > 0:
message = f"regression: {len(regression.index)}"
print(message, flush=True)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(regression[["raw exact match", "norm exact match", "gold","prediction","gold_norm", "prediction_norm"]])
raise Exception(message)
return df
def compare_performances(df1, df2, suffixes:(str,str)=(' (1)',' (2)')):
import pandas as pd
s1, s2 = suffixes
assert s1 != s2
cols = ["question", "gold", "prediction","gold_norm", "prediction_norm", "raw exact match", "norm exact match"]
df1 = df1[cols].rename(columns={c:c+s1 for c in cols})
df2 = df2[cols[3:]].rename(columns={c:c+s2 for c in cols})
df = pd.concat([df1, df2], axis=1, sort=False)
diff = df[df["norm exact match"+s1]!=df["norm exact match"+s2]]
regression_amount = len(diff[(diff["norm exact match"+s1]==True) & (diff["norm exact match"+s2]==False)].index)
return diff, regression_amount
def compare_to(df, norm_html:str):
import pandas as pd
import os
df2 = pd.read_html(norm_html)[0]
diff, reg = compare_performances(df2, df, suffixes=('', ' (new)'))
diff.to_html(os.path.join(os.path.dirname(norm_html), f"compared_diff__{os.path.basename(norm_html)}"))
_logger.info(f"regression of: {reg}")
def plot_normal_decomposition(decomp: str):
normalizer = NormalizedGraphMatchScorer()
decomposition = Decomposition.from_str(decomp)
print("decomposition:", decomposition.to_string())
norm_decomposition = normalizer.normalized_decomposition(decomposition, verbose=True)
print("normal form:", norm_decomposition.to_string())
print("=========================================================")
draw_decomposition_graph(decomposition.to_graph(), title="decomposition")
draw_decomposition_graph(norm_decomposition.to_graph(), title="normal form")
def test(save: bool = False, verbose: bool = False):
save_path_random = "/media/disk1/matan/questions_decomp/_debug/qdecomp/tmp/seq2seq-copynet--bert_layers2_lr0.001_hd450_dop0.2_low_level__random_norm.html" if save else None
df_random = test_exact_match("/media/disk1/matan/questions_decomp/qdecomp/tmp/g02/tune/data_old_version/seq2seq-copynet--bert_low_level/seq2seq-copynet--bert_layers2_lr0.001_hd450_dop0.2_low_level/evals/dev_low_level_evals_full_samples/html/seq2seq-copynet--bert_layers2_lr0.001_hd450_dop0.2_low_level__random.html",
save_path_random,
verbose=verbose)
compare_to(df_random, "/media/disk1/matan/questions_decomp/_debug/qdecomp/tmp/seq2seq-copynet--bert_layers2_lr0.001_hd450_dop0.2_low_level__random_norm.html")
save_path_dev = "/media/disk1/matan/questions_decomp/_debug/qdecomp/tmp/seq2seq-copynet--bert_layers2_lr0.001_hd450_dop0.2_low_level__all_norm.html" if save else None
df_dev = test_exact_match("/media/disk1/matan/questions_decomp/qdecomp/tmp/g02/tune/data_old_version/seq2seq-copynet--bert_low_level/seq2seq-copynet--bert_layers2_lr0.001_hd450_dop0.2_low_level/evals/dev_low_level_evals_full_samples/html/seq2seq-copynet--bert_layers2_lr0.001_hd450_dop0.2_low_level__all.html",
save_path_dev
)
compare_to(df_dev, "/media/disk1/matan/questions_decomp/_debug/qdecomp/tmp/seq2seq-copynet--bert_layers2_lr0.001_hd450_dop0.2_low_level__all_norm.html")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(message)s')
test()
#plot_normal_decomposition("") | break-evaluator-master | evaluation/normal_form/normalized_graph_matcher.py |
from abc import ABC
import logging
import networkx as nx
from spacy.tokens.token import Token
from scripts.qdmr_to_program import QDMROperation
import scripts.qdmr_to_program as qdmr
from evaluation.normal_form.normalization_rules import DecomposeRule, ReferenceToken, run_tests
_logger = logging.getLogger(__name__)
class OperationDecomposeRule(DecomposeRule, ABC):
def __init__(self, is_extract_params=False):
super().__init__()
self.operation:QDMROperation = None
self.is_extract_params = is_extract_params
def update_node(self, node_id, graph, params_span:[(int, int)], meta:[str]=[], keep_order: bool=True):
node = graph.nodes[node_id]
for start,end in params_span:
if not self._validate_preserved(doc=node["doc"], span=(start, end)):
return False, None
if self.is_extract_params:
extracted_refs, new_ids = self.extract_spans(node_id=node_id, graph=graph, doc=node["doc"], spans=params_span, is_transactional=True)
if None in extracted_refs:
return False, None
self._update_node(node=node, params=[[x] for x in extracted_refs], meta=meta)
return True, new_ids or []
self._update_node(node=node, params=[node["doc"][s:e+1] for (s,e) in params_span], meta=meta, keep_order=keep_order)
return True, []
def _update_node(self, node, params:[[Token]], meta:[str]=[], keep_order: bool=True):
params = [' '.join([t.lemma_ for t in param]) for param in params]
if not keep_order:
params = sorted(params)
node["label"] = f"{self.operation.name}{'['+','.join(meta)+']' if meta else ''}({','.join(params)})"
node["operation"] = self.operation
node["meta"] = meta
@staticmethod
def _is_contains_index(token: Token or ReferenceToken, index:int):
if isinstance(token, ReferenceToken):
return token.contains_index(index)
return token.i == index
class AggregateDecomposeRule(OperationDecomposeRule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.AGGREGATE
self.metadata_map = {
'COUNT': ['number of'],
'MAX': ['biggest', 'higher', 'highest', 'larger', 'largest', 'last', 'longer', 'longest',
'max', 'maximum', 'more', 'most'],
'MIN': ['fewer', 'least', 'less', 'lower', 'lowest', 'min', 'minimum', 'shortest', 'smaller', 'smallest'],
'SUM': ['sum', 'total'],
'AVG': ['average', 'avg', 'mean'],
}
self._spans_to_metadata = {s:meta for meta,spans in self.metadata_map.items() for s in spans}
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
doc_len = len(doc)
meta = None
rest_start_index= None
if doc_len >= 3 and not isinstance(doc[0], ReferenceToken) and doc[0].dep_ == "ROOT" and doc[1].lemma_ == "of":
# ROOT[number (of), sum, mean] of <...> => AG(@@#@@); <...>
meta = self._spans_to_metadata.get(doc[0].text, None) or self._spans_to_metadata.get(f"{doc[0].text} of", None)
if meta:
rest_start_index = 2
if not meta and doc_len>=2 and (doc[0].text in self._spans_to_metadata) and \
((doc[1].tag_.startswith("NN")) or (doc[1].lemma_ == "of" and doc_len>=3 and doc[2].tag_.startswith("NN"))):
# ADJ[MAX, MIN, total] (of) ROOT = > AG(@@#@@); <...>
meta = self._spans_to_metadata[doc[0].text]
rest_start_index = 1 if doc[1].lemma_ != "of" else 2
if meta and rest_start_index:
return self.update_node(node_id=node_id, graph=graph, params_span=[(rest_start_index, doc_len-1)], meta=[meta])
return False, None
def _get_test_cases__str(self) -> (str, [str]):
return [
("number of @@1@@", ["AGGREGATE[COUNT](@@1@@)"]),
("largest balls", ["AGGREGATE[MAX](ball)"]),
]
class FilterAdjectiveDecomposeRule(OperationDecomposeRule):
""" Assumption: first parameter is ReferenceToken """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.FILTER
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
adj_index, nn_index = -1,-1
if len(doc) == 2 and doc[0].pos_ == "ADJ" and isinstance(doc[1],ReferenceToken):
# ADJ NN
adj_index, nn_index = 0, 1
elif len(doc) == 3 and isinstance(doc[0], ReferenceToken) and doc[1].lemma_ == "be" and doc[2].pos_ == "ADJ":
# NN is ADJ
nn_index, adj_index = 0, 2
elif len(doc) == 4 and isinstance(doc[0], ReferenceToken) and doc[1].pos_ == "DET" and doc[2].lemma_ == "be" and doc[3].pos_ == "ADJ":
# NN that be ADJ
nn_index, adj_index = 0, 3
if adj_index >= 0 and nn_index >= 0:
return self.update_node(node_id=node_id, graph=graph, params_span=[(nn_index,nn_index), (adj_index,adj_index)])
return False, None
def _get_test_cases__str(self) -> (str, [str]):
return [
("blue cubes", ["blue cubes"]),
("blue @@1@@", ["FILTER(@@1@@,blue)"]),
("@@1@@ is blue", ["FILTER(@@1@@,blue)"]),
("@@1@@ that are blue", ["FILTER(@@1@@,blue)"]),
]
class FilterAdjectiveLikeNounDecomposeRule(OperationDecomposeRule):
""" Assumption: first parameter is ReferenceToken """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.FILTER
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
adj_index, nn_index = -1,-1
if len(doc) == 2 and isinstance(doc[0], ReferenceToken) and doc[1].pos_ == "NOUN":
# NN NN
nn_index, adj_index = 0, 1
if len(doc) == 3 and isinstance(doc[0], ReferenceToken) and doc[1].lemma_ == "be" and doc[2].pos_ == "NOUN":
# NN is NN
nn_index, adj_index = 0, 2
elif len(doc) == 4 and isinstance(doc[0], ReferenceToken) and doc[1].pos_ == "DET" and doc[2].lemma_ == "be" and doc[3].pos_ == "NOUN":
# NN that be NN
nn_index, adj_index = 0, 3
if adj_index >= 0 and nn_index >= 0:
return self.update_node(node_id=node_id, graph=graph, params_span=[(nn_index, nn_index), (adj_index, adj_index)])
return False, None
def _get_test_cases__str(self) -> (str, [str]):
return [
("@@1@@ is metal", ["FILTER(@@1@@,metal)"]),
("@@1@@ that are metal", ["FILTER(@@1@@,metal)"]),
("@@1@@ matte", ["FILTER(@@1@@,matte)"]),
]
class FilterADPDecomposeRule(OperationDecomposeRule):
""" Assumption: first parameter is ReferenceToken """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.FILTER
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
if len(doc) == 3 and isinstance(doc[0], ReferenceToken) and doc[1].pos_ in ["ADP", "PART"]:
return self.update_node(node_id=node_id, graph=graph, params_span=[(0, 0), (1, 2)])
return False, None
def _get_test_cases__str(self) -> (str, [str]):
return [
("@@1@@ to london", ["FILTER(@@1@@,to london)"]),
]
class FilterCompoundNounDecomposeRule(OperationDecomposeRule):
""" Assumption: Only nouns """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.FILTER
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
# todo: fix func returned value: bool, [int]
# NN -[compound]-> NN -(ref)
if len(doc) != 2:
return False, None
if doc[0].tag_.startswith("NN") and isinstance(doc[1], ReferenceToken): # todo: and doc[0].dep_ in ["compound","amod"]:
return self.update_node(node_id=node_id, graph=graph, params_span=[(1, 1), (0, 0)])
return False, None
def _get_test_cases__str(self) -> (str, [str]):
return [
("gold @@1@@", ["FILTER(@@1@@,gold)"]),
]
class FilterConditionDecomposeRule(OperationDecomposeRule):
""" Assumption: first parameter is ReferenceToken """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.FILTER
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
# @@#@@@(NN) [that] [is] ... VERB ...
if not doc or not isinstance(doc[0], ReferenceToken):
return False, None
verb_index = None
for i, t in enumerate(doc):
if t.pos_ == "VERB" and t.lemma_ != "be" and doc[0].contains_index(t.head.i):
verb_index = i
break
if not verb_index:
return False, None
end_index = verb_index+1
while end_index < len(doc) and (isinstance(doc[end_index], ReferenceToken) or doc[verb_index].is_ancestor(doc[end_index])):
# todo: check if VERB is ancestor of ReferenceToken
end_index += 1
end_index -= 1
if not end_index == len(doc)-1:
return False, None
condition_start = 1
if condition_start + 1 < len(doc) and doc[condition_start].lemma_ == "that" and doc[condition_start + 1].lemma_ == "be":
condition_start += 2
elif condition_start < len(doc) and (doc[condition_start].lemma_ in ["that", "be"]):
condition_start += 1
return self.update_node(node_id=node_id, graph=graph, params_span=[(0, 0), (condition_start, end_index+1)])
def _get_test_cases__str(self) -> (str, [str]):
return [
("@@1@@ that is partially hidden by a ball", ["FILTER(@@1@@,partially hide by a ball)"]),
("@@1@@ partially hidden by a ball", ["FILTER(@@1@@,partially hide by a ball)"]),
("@@1@@ that contain the keyword Relational Database", ["FILTER(@@1@@,contain the keyword Relational Database)"]),
]
class SelectionDecomposeRule(OperationDecomposeRule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.SELECT
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
node = graph.nodes[node_id]
if len(doc) == 1: # single noun
token = doc[0]
if token.pos_ != "NOUN" or isinstance(token, ReferenceToken):
return False, None
else: # noun phrase
if not all(t.pos_ == "NOUN" for t in doc) or len([t for t in doc if isinstance(t, ReferenceToken) or t.dep_ != "compound"])>1:
return False, None
return self.update_node(node_id=node_id, graph=graph, params_span=[(0, len(doc)-1)])
def _get_test_cases__str(self) -> (str, [str]):
return [
("cubes",["SELECT(cube)"]),
("University of Michigan", ["University of Michigan"]),
("VLDB conference", ["SELECT(vldb conference)"]),
]
def _get_test_cases__graphic(self) -> [((int, dict), [(int, int, dict)])]:
return [
((1, {"label": "cubes"}), ([(1, {"label": "SELECT(cube)"})], [])),
((1, {"label": "@@1@@"}), ([(1, {"label": "@@1@@"})], [])),
]
class WrapperDecomposeRule(OperationDecomposeRule):
_preserved_tokens_map = {
QDMROperation.AGGREGATE:
[
'number of ', 'highest', 'largest', 'lowest', 'smallest', 'maximum', 'minimum', 'max', 'min', 'sum',
'total', 'average ', 'avg ', 'mean ',
'most', 'longest', 'biggest', 'more', 'last', 'longer', 'higher',
'larger', 'smallest', 'least', 'shortest', 'less', 'first', 'shorter', 'lower', 'fewer',
'smaller', 'true ', 'false '
],
QDMROperation.GROUP:
[
'for each '
],
QDMROperation.COMPARATIVE:
[
'same as ', 'higher than ', 'larger than ', 'smaller than ', 'lower than ', 'more than ', 'less than ',
'more', 'less', 'at least', 'at most', 'equal', 'contain ', 'include ', 'has ', 'have ', 'end with ',
'start with ', 'ends with ', 'starts with ', 'begin',
'higher', 'larger', 'smaller', 'lower', 'not ', 'same as', 'the same',
'equal to ', 'where '
],
QDMROperation.SUPERLATIVE:
[
'highest', 'largest', 'most', 'smallest', 'lowest', 'smallest', 'least', 'longest', 'shortest',
'biggest'
],
QDMROperation.UNION:
[
' and ', ' or ', ' , '
],
QDMROperation.INTERSECTION:
[
'both', ' and ', 'of both ', 'in both ', 'by both '
],
QDMROperation.DISCARD:
[
'besides ', 'not in '
],
QDMROperation.SORT:
[
'sorted by ', 'order by ', 'ordered by '
],
QDMROperation.BOOLEAN:
[
'if ', 'is ', 'are ', ' any'
],
QDMROperation.ARITHMETIC:
[
'sum', 'difference', 'multiplication', 'division'
],
QDMROperation.FILTER:
[
# positional
' left ', ' right ', ' between ', ' behind ', ' in front ', ' infront ', ' touch', ' reflect', ' cover',
' obscur', ' blocking', ' blocked', ' hidden', ' obstruct', ' near', ' next', ' next to ', ' close ',
' closer ', ' closest ', ' adjacent '
] + [
# filter
' that is ', ' that are '
]
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = None
self.preserved_tokens = [i for v in self._preserved_tokens_map.values() for i in v]
@staticmethod
def fix_preserved_tokens(preserved_tokens, operation):
if preserved_tokens is not None:
not_allowed = [t for k, v in WrapperDecomposeRule._preserved_tokens_map.items() for t in v if k != operation]
allowed = WrapperDecomposeRule._preserved_tokens_map.get(operation, [])
preserved_tokens = [x for x in (preserved_tokens + not_allowed) if x not in allowed]
return preserved_tokens
class _Dummy(dict):
def __getitem__(self, key):
return f"#{key}"
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
try:
qdmr_step = ' '.join([(f"#{t.get_id()}" if isinstance(t, ReferenceToken) else t.text) for t in doc])
self.operation = qdmr.step_type(qdmr_step, is_high_level=False) # todo: deal with high_level
if self.operation in [QDMROperation.NONE, QDMROperation.SELECT]:
return False, None
prev_refs_code = self._Dummy() # dummy values - keep references the same
exec_qdmr = qdmr.ExecQDMR(self.operation, qdmr_step, prev_refs_code)
if not exec_qdmr.arguments:
return False, None
# align args to doc
arguments_spans = []
meta = []
# todo: # => @@#@@
for arg in exec_qdmr.arguments:
arg = qdmr.qdmr_to_prediction(arg)
arg_tokens = arg.split(' ')
start_index = 0
span = None
while start_index + len(arg_tokens) <= len(doc):
if all([at == dt.text for at,dt in zip(arg_tokens, doc[start_index:start_index+len(arg_tokens)])]):
span = (start_index, start_index+len(arg_tokens)-1)
break
start_index += 1
if span:
arguments_spans.append(span)
else:
meta.append(arg)
if self.operation == QDMROperation.AGGREGATE:
assert len(meta) == 1 and len(arguments_spans) == 1, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.BOOLEAN:
assert len(meta) == 1 and len(arguments_spans) >= 1, f"unexpected args parse {len(meta), len(arguments_spans)}"
# todo: elaborate
elif self.operation == QDMROperation.COMPARISON:
assert len(meta) == 1 and len(arguments_spans) >= 2, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.COMPARATIVE:
assert len(meta) == 1 and len(arguments_spans) == 3, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.DISCARD:
assert len(meta) == 0 and len(arguments_spans) == 2, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.FILTER:
assert len(meta) <= 1 and len(arguments_spans) == 2, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.GROUP:
assert len(meta) == 1 and len(arguments_spans) == 1, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.INTERSECTION:
assert len(meta) == 0 and len(arguments_spans) >= 3, f"unexpected args parse {len(meta), len(arguments_spans)}"
# elif self.operation == QDMROperation.PROJECT:
# # project is buggy right now.
# assert len(meta) == 0 and len(arguments_spans) == 2, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.SORT:
assert len(meta) == 0 and len(arguments_spans) == 2, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.SUPERLATIVE:
assert len(meta) == 1 and len(arguments_spans) == 2, f"unexpected args parse {len(meta), len(arguments_spans)}"
elif self.operation == QDMROperation.UNION:
assert len(meta) == 0 and len(arguments_spans) >= 2, f"unexpected args parse {len(meta), len(arguments_spans)}"
else:
return False, None
# fix preserved
self._preserved_tokens = self.fix_preserved_tokens(preserved_tokens=self._preserved_tokens,
operation=self.operation)
return self.update_node(node_id=node_id, graph=graph, params_span=arguments_spans, meta=meta)
except Exception as ex:
_logger.debug(self._get_doc_str(doc=doc), exc_info=True)
return False, None
def _get_test_cases__str(self) -> (str, [str]):
return [
("the number of @@1@@", ["AGGREGATE[COUNT](@@1@@)"]),
("if @@1@@ be the same as @@2@@", ["BOOLEAN[=](@@1@@,@@2@@)"]),
("which is more @@1@@ or @@2@@", ["COMPARISON[MAX](@@1@@,@@2@@)"]),
("@@2@@ besides @@1@@", ["DISCARD(@@2@@,@@1@@)"]),
("@@1@@ that is partially hidden by @@2@@", ["FILTER[POS_COVERS](@@1@@,@@2@@)"]),
("@@1@@ that is left of @@2@@", ["FILTER[POS_LEFT_OF](@@1@@,@@2@@)"]),
# ("colors of @@1@@", ["PROJECT(color,@@1@@)"]),
("both @@1@@ and @@2@@", ["UNION(@@1@@,@@2@@)"]),
]
class WrapperFixesAggregateDecomposeRule(OperationDecomposeRule):
""" Assume to be run before wrapper """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.AGGREGATE
self.preserved_tokens = WrapperDecomposeRule._preserved_tokens_map.get(self.operation, [])
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
if not doc:
return False, None
arguments_spans = []
meta = []
if len(doc)>=4 and isinstance(doc[0], ReferenceToken) and doc[1].lemma_ == "that" and doc[2].lemma_ == "be" \
and " ".join(x.text for x in doc[3:]) in WrapperDecomposeRule._preserved_tokens_map.get(QDMROperation.AGGREGATE, []):
arguments_spans = [(0, 0)]
elif len(doc)>=2 and isinstance(doc[-1], ReferenceToken) and " ".join(x.text for x in doc[:-1]) in WrapperDecomposeRule._preserved_tokens_map.get(QDMROperation.AGGREGATE, []):
arguments_spans = [(len(doc)-1, len(doc)-1)]
else:
return False, None
agg = qdmr.extract_aggregator(" ".join([x.text for x in doc]))
if not agg:
return False, None
meta = [agg]
return self.update_node(node_id=node_id, graph=graph, params_span=arguments_spans, meta=meta)
def _get_test_cases__str(self) -> (str, [str]):
return [
("largest @@1@@", ["AGGREGATE[MAX](@@1@@)"]),
("@@1@@ that is largest", ["AGGREGATE[MAX](@@1@@)"]),
]
class WrapperFixesBooleanDecomposeRule(OperationDecomposeRule):
""" Assume to be run before wrapper """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operation = QDMROperation.BOOLEAN
self.preserved_tokens = WrapperDecomposeRule._preserved_tokens_map.get(self.operation, [])
def _decompose(self, node_id: int, graph: nx.DiGraph, doc: [Token]) -> [int]:
if not doc:
return False, None
arguments_spans = []
meta = []
if doc[0].lemma_ in ["be", "if"] and all([(x.lemma_ in ["there", "be", "a", "an","any"]) for x in doc[1:-1]]) \
and isinstance(doc[-1],ReferenceToken):
arguments_spans = [(len(doc)-1, len(doc)-1)]
meta = ['EXIST']
else:
return False, None
return self.update_node(node_id=node_id, graph=graph, params_span=arguments_spans, meta=meta)
def _get_test_cases__str(self) -> (str, [str]):
return [
("if any @@1@@", ["BOOLEAN[EXIST](@@1@@)"]),
("if there is a @@1@@", ["BOOLEAN[EXIST](@@1@@)"]),
]
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
run_tests(OperationDecomposeRule) | break-evaluator-master | evaluation/normal_form/operations_normalization_rules.py |
#!/usr/bin/env python
import logging
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
from arc_solvers.commands import main # pylint: disable=wrong-import-position
import arc_solvers.service.predictors
import arc_solvers.data.dataset_readers
import arc_solvers.models
if __name__ == "__main__":
main(prog="python -m skidls.run")
| ARC-Solvers-main | arc_solvers/run.py |
ARC-Solvers-main | arc_solvers/__init__.py |
|
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.nn.util import replace_masked_values
from allennlp.nn.util import get_text_field_mask
import allennlp
from typing import Union, Dict
from allennlp.modules import MatrixAttention, Seq2SeqEncoder
def masked_mean(tensor, dim, mask):
"""
Performs a mean on just the non-masked portions of the ``tensor`` in the
``dim`` dimension of the tensor.
=====================================================================
From Decomposable Graph Entailment Model code replicated from SciTail repo
https://github.com/allenai/scitail
=====================================================================
"""
if mask is None:
return torch.mean(tensor, dim)
if tensor.dim() != mask.dim():
raise ConfigurationError("tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim()))
masked_tensor = replace_masked_values(tensor, mask, 0.0)
# total value
total_tensor = torch.sum(masked_tensor, dim)
# count
count_tensor = torch.sum((mask != 0), dim)
# set zero count to 1 to avoid nans
zero_count_mask = (count_tensor == 0)
count_plus_zeros = (count_tensor + zero_count_mask).float()
# average
mean_tensor = total_tensor / count_plus_zeros
return mean_tensor
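# For example (hypothetical values): averaging over dim=1 while ignoring masked positions.
#   tensor = torch.tensor([[1.0, 2.0, 3.0]])
#   mask = torch.tensor([[1, 1, 0]])
#   masked_mean(tensor, 1, mask)  # -> tensor([1.5]); the masked 3.0 is excluded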
def seq2vec_seq_aggregate(seq_tensor, mask, aggregate, bidirectional, dim=1):
"""
Takes the aggregation of sequence tensor
:param seq_tensor: Batched sequence requires [batch, seq, hs]
:param mask: binary mask with shape batch, seq_len, 1
:param aggregate: max, avg, sum
:param dim: The dimension to take the max. for batch, seq, hs it is 1
:return:
"""
seq_tensor_masked = seq_tensor * mask.unsqueeze(-1)
aggr_func = None
if aggregate == "last":
raise NotImplemented("This is currently not supported with AllenNLP 0.2.")
seq = allennlp.nn.util.get_final_encoder_states(seq_tensor, mask, bidirectional)
elif aggregate == "max":
aggr_func = torch.max
seq, _ = aggr_func(seq_tensor_masked, dim=dim)
elif aggregate == "min":
aggr_func = torch.min
seq, _ = aggr_func(seq_tensor_masked, dim=dim)
elif aggregate == "sum":
aggr_func = torch.sum
seq = aggr_func(seq_tensor_masked, dim=dim)
elif aggregate == "avg":
aggr_func = torch.sum
seq = aggr_func(seq_tensor_masked, dim=dim)
        seq_lens = torch.sum(mask, dim=dim)  # shape: [batch_size]
seq = seq / seq_lens.view([-1, 1])
return seq
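# For example (hypothetical shapes): "max" aggregation over the sequence dimension,
# with the last timestep masked out as padding.
#   seq_tensor = torch.tensor([[[1.0, 4.0], [2.0, 3.0], [9.0, 9.0]]])  # [batch=1, seq=3, hs=2]
#   mask = torch.tensor([[1.0, 1.0, 0.0]])
#   seq2vec_seq_aggregate(seq_tensor, mask, "max", bidirectional=False)
#   # -> tensor([[2.0, 4.0]]); the padded [9.0, 9.0] step is zeroed out before the max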
def embed_encode_and_aggregate_text_field(question: Dict[str, torch.LongTensor],
text_field_embedder,
embeddings_dropout,
encoder,
aggregation_type):
"""
Given a batched token ids (2D) runs embeddings lookup with dropout, context encoding and aggregation
:param question:
:param text_field_embedder: The embedder to be used for embedding lookup
:param embeddings_dropout: Dropout
:param encoder: Context encoder
:param aggregation_type: The type of aggregation - max, sum, avg, last
:return:
"""
embedded_question = text_field_embedder(question)
question_mask = get_text_field_mask(question).float()
embedded_question = embeddings_dropout(embedded_question)
encoded_question = encoder(embedded_question, question_mask)
# aggregate sequences to a single item
encoded_question_aggregated = seq2vec_seq_aggregate(encoded_question, question_mask, aggregation_type,
None, 1) # bs X d
return encoded_question_aggregated
def embed_encode_and_aggregate_list_text_field(texts_list: Dict[str, torch.LongTensor],
text_field_embedder,
embeddings_dropout,
encoder: Seq2SeqEncoder,
aggregation_type,
init_hidden_states=None):
"""
Given a batched list of token ids (3D) runs embeddings lookup with dropout, context encoding and aggregation on
:param texts_list: List of texts
:param text_field_embedder: The embedder to be used for embedding lookup
:param embeddings_dropout: Dropout
:param encoder: Context encoder
:param aggregation_type: The type of aggregation - max, sum, avg, last
:param get_last_states: If it should return the last states.
:param init_hidden_states: Hidden states initialization
:return:
"""
embedded_texts = text_field_embedder(texts_list)
embedded_texts = embeddings_dropout(embedded_texts)
batch_size, choices_cnt, choice_tokens_cnt, d = tuple(embedded_texts.shape)
embedded_texts_flattened = embedded_texts.view([batch_size * choices_cnt, choice_tokens_cnt, -1])
# masks
texts_mask_dim_3 = get_text_field_mask(texts_list).float()
texts_mask_flatened = texts_mask_dim_3.view([-1, choice_tokens_cnt])
# context encoding
multiple_texts_init_states = None
if init_hidden_states is not None:
if init_hidden_states.shape[0] == batch_size and init_hidden_states.shape[1] != choices_cnt:
if init_hidden_states.shape[1] != encoder.get_output_dim():
raise ValueError("The shape of init_hidden_states is {0} but is expected to be {1} or {2}".format(str(init_hidden_states.shape),
str([batch_size, encoder.get_output_dim()]),
str([batch_size, choices_cnt, encoder.get_output_dim()])))
# in this case we passed only 2D tensor which is the default output from question encoder
multiple_texts_init_states = init_hidden_states.unsqueeze(1).expand([batch_size, choices_cnt, encoder.get_output_dim()]).contiguous()
            # reshape this to match the flattened tokens
multiple_texts_init_states = multiple_texts_init_states.view([batch_size * choices_cnt, encoder.get_output_dim()])
else:
multiple_texts_init_states = init_hidden_states.view([batch_size * choices_cnt, encoder.get_output_dim()])
encoded_texts_flattened = encoder(embedded_texts_flattened, texts_mask_flatened, hidden_state=multiple_texts_init_states)
aggregated_choice_flattened = seq2vec_seq_aggregate(encoded_texts_flattened, texts_mask_flatened,
aggregation_type,
encoder,
1) # bs*ch X d
aggregated_choice_flattened_reshaped = aggregated_choice_flattened.view([batch_size, choices_cnt, -1])
return aggregated_choice_flattened_reshaped
| ARC-Solvers-main | arc_solvers/nn/util.py |
ARC-Solvers-main | arc_solvers/nn/__init__.py |
|
"""
Script to compute the QA score from the entailment predictions for each supporting sentence and
answer choice.
USAGE:
python scripts/evaluate_predictions.py predictions_file qa_file output_file
Minimal expected format of files.
1. predictions_file:
{"id": "Mercury_SC_415702",
"question": {
"choice": {"text": "dry palms", "label": "A"},
}
"score": 0.31790056824684143
}
2. qa_file:
{
"id":"Mercury_SC_415702",
"question": {
"stem":"George wants to warm his hands quickly by rubbing them. Which skin surface will
produce the most heat?",
"choices":[
{"text":"dry palms","label":"A"},
{"text":"wet palms","label":"B"},
{"text":"palms covered with oil","label":"C"},
{"text":"palms covered with lotion","label":"D"}
]
},
"answerKey":"A"
}
"""
import os
import sys
import json
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))))
from operator import itemgetter
from typing import List, Dict
from allennlp.common.util import JsonDict
def evaluate_predictions(predictions_file, qa_file, output_file):
print("Writing qa predictions to {} from entailment predictions at {}".format(
output_file, predictions_file))
qid_choice_scores = get_scores_per_qid_and_choice(predictions_file)
score_predictions(qid_choice_scores, qa_file, output_file)
def get_scores_per_qid_and_choice(predictions_file) -> Dict[str, Dict[str, List[JsonDict]]]:
"""
Reads the file with entailment predictions to produce predictions per answer choice per qid.
:return: dictionary from qid -> (dictionary from choice text -> list of entailment predictions)
"""
with open(predictions_file, 'r') as predictions_handle:
qid_choice_predictions = dict()
for line in predictions_handle:
json_line = json.loads(line)
qid = json_line["id"]
if "score" not in json_line:
raise Exception("Missing score in line:" + line)
choice_score = json_line["score"]
choice_support = json_line["question"]["support"]
choice_text = json_line["question"]["choice"]["text"]
choice_prediction = {
"score": choice_score,
"support": choice_support,
}
if qid in qid_choice_predictions:
choice_scores = qid_choice_predictions[qid]
if choice_text not in choice_scores:
choice_scores[choice_text] = []
choice_scores[choice_text].append(choice_prediction)
else:
qid_choice_predictions[qid] = dict()
qid_choice_predictions[qid][choice_text] = [choice_prediction]
return qid_choice_predictions
def score_predictions(qid_choice_predictions: Dict[str, Dict[str, List[JsonDict]]],
qa_file: str, output_file: str) -> None:
"""
Uses the entailment predictions per answer choice per qid to compute the QA score
:param qid_choice_predictions: qid -> (choice text -> predictions)
:param qa_file: Original QA JSONL file
:param output_file: Output file with selected choices ("selected_answers" key) and score (
"question_score" key) per multiple-choice question.
"""
with open(qa_file, 'r') as qa_handle, open(output_file, 'w') as output_handle:
total_score = 0
num_questions = 0
for line in qa_handle:
json_line = json.loads(line)
id = json_line["id"]
answer_choices = json_line["question"]["choices"]
for choice in answer_choices:
choice_text = choice["text"]
                # if we have any entailment predictions for this answer choice, use them to score it
if id in qid_choice_predictions and choice_text in qid_choice_predictions[id]:
update_choice_with_scores(qid_choice_predictions[id][choice_text], choice)
else:
update_choice_with_scores([], choice)
# Get the maximum answer choice score
max_choice_score = max(answer_choices, key=itemgetter("score"))["score"]
# Collect all answer choices with the same score
selected_answers = [choice["label"] for choice in answer_choices
if choice["score"] == max_choice_score]
answer_key = json_line["answerKey"]
if answer_key in selected_answers:
question_score = 1 / len(selected_answers)
else:
question_score = 0
total_score += question_score
json_line["selected_answers"] = ",".join(selected_answers)
json_line["question_score"] = question_score
num_questions += 1
output_handle.write(json.dumps(json_line) + "\n")
def update_choice_with_scores(choice_predictions: List[JsonDict],
input_choice: JsonDict) -> None:
"""
Uses the entailment predictions to compute the solvers score for the answer choice. This
function will update input answer choice json with two new keys "score" and "support"
corresponding to the solver score and best supporting sentence for this choice respectively.
:param choice_predictions: list of predictions for this choice
:param input_choice: input json for this answer choice that will be updated in-place
"""
if len(choice_predictions):
sorted_predictions = sorted(choice_predictions,
key=itemgetter("score"), reverse=True)
score = score_choice_predictions([pred["score"] for pred in sorted_predictions])
support = sorted_predictions[0]["support"]
input_choice["score"] = score
input_choice["support"] = support
else:
input_choice["score"] = 0
input_choice["support"] = ""
# Returns the score for an answer choice given the scores per supporting sentence
def score_choice_predictions(choice_predictions: List[float]) -> float:
# Round to four decimal points
return round(max(choice_predictions), 4)
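# For example (hypothetical scores): each answer choice is scored by its single
# best-supporting sentence, e.g. score_choice_predictions([0.3179, 0.1042, 0.2514]) -> 0.3179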
if __name__ == "__main__":
if len(sys.argv) < 4:
raise ValueError("Provide at least three arguments: "
"predictions_file, original qa file, output file")
evaluate_predictions(sys.argv[1], sys.argv[2], sys.argv[3])
| ARC-Solvers-main | arc_solvers/processing/evaluate_predictions.py |
from typing import Dict, List
from elasticsearch import Elasticsearch
import re
class EsHit:
def __init__(self, score: float, position: int, text: str, type: str):
"""
Basic information about an ElasticSearch Hit
:param score: score returned by the query
:param position: position in the retrieved results (before any filters are applied)
:param text: retrieved sentence
:param type: type of the hit in the index (by default, only documents of type "sentence"
will be retrieved from the index)
"""
self.score = score
self.position = position
self.text = text
self.type = type
class EsSearch:
def __init__(self,
es_client: str = "localhost",
indices: str = "arc_corpus",
max_question_length: int = 1000,
max_hits_retrieved: int = 500,
max_hit_length: int = 300,
max_hits_per_choice: int = 100):
"""
Class to search over the text corpus using ElasticSearch
:param es_client: Location of the ElasticSearch service
:param indices: Comma-separated list of indices to search over
:param max_question_length: Max number of characters used from the question for the
query (for efficiency)
:param max_hits_retrieved: Max number of hits requested from ElasticSearch
:param max_hit_length: Max number of characters for accepted hits
:param max_hits_per_choice: Max number of hits returned per answer choice
"""
self._es = Elasticsearch([es_client], retries=3)
self._indices = indices
self._max_question_length = max_question_length
self._max_hits_retrieved = max_hits_retrieved
self._max_hit_length = max_hit_length
self._max_hits_per_choice = max_hits_per_choice
# Regex for negation words used to ignore Lucene results with negation
self._negation_regexes = [re.compile(r) for r in ["not\\s", "n't\\s", "except\\s"]]
def get_hits_for_question(self, question: str, choices: List[str]) -> Dict[str, List[EsHit]]:
"""
:param question: Question text
:param choices: List of answer choices
:return: Dictionary of hits per answer choice
"""
choice_hits = dict()
for choice in choices:
choice_hits[choice] = self.filter_hits(self.get_hits_for_choice(question, choice))
return choice_hits
# Constructs an ElasticSearch query from the input question and choice
# Uses the last self._max_question_length characters from the question and requires that the
# text matches the answer choice and the hit type is a "sentence"
def construct_qa_query(self, question, choice):
return {"from": 0, "size": self._max_hits_retrieved,
"query": {
"bool": {
"must": [
{"match": {
"text": question[-self._max_question_length:] + " " + choice
}}
],
"filter": [
{"match": {"text": choice}},
{"type": {"value": "sentence"}}
]
}
}}
# Retrieve unfiltered hits for input question and answer choice
def get_hits_for_choice(self, question, choice):
res = self._es.search(index=self._indices, body=self.construct_qa_query(question, choice))
hits = []
for idx, es_hit in enumerate(res['hits']['hits']):
es_hit = EsHit(score=es_hit["_score"],
position=idx,
text=es_hit["_source"]["text"],
type=es_hit["_type"])
hits.append(es_hit)
return hits
# Remove hits that contain negation, are too long, are duplicates, are noisy.
def filter_hits(self, hits: List[EsHit]) -> List[EsHit]:
filtered_hits = []
selected_hit_keys = set()
for hit in hits:
hit_sentence = hit.text
hit_sentence = hit_sentence.strip().replace("\n", " ")
if len(hit_sentence) > self._max_hit_length:
continue
            # skip hits that contain negation words
            if any(negation_regex.search(hit_sentence)
                   for negation_regex in self._negation_regexes):
                continue
if self.get_key(hit_sentence) in selected_hit_keys:
continue
if not self.is_clean_sentence(hit_sentence):
continue
filtered_hits.append(hit)
selected_hit_keys.add(self.get_key(hit_sentence))
return filtered_hits[:self._max_hits_per_choice]
# Check if the sentence is not noisy
def is_clean_sentence(self, s):
# must only contain expected characters, should be single-sentence and only uses hyphens
# for hyphenated words
return (re.match("^[a-zA-Z0-9][a-zA-Z0-9;:,\(\)%\-\&\.'\"\s]+\.?$", s) and
not re.match(".*\D\. \D.*", s) and
not re.match(".*\s\-\s.*", s))
# Create a de-duplication key for a HIT
def get_key(self, hit):
# Ignore characters that do not effect semantics of a sentence and URLs
return re.sub('[^0-9a-zA-Z\.\-^;&%]+', '', re.sub('http[^ ]+', '', hit)).strip().rstrip(".")
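    # E.g. (hypothetical sentences): punctuation and spacing are ignored, so
    # get_key("Water boils at 100 C.") and get_key("Water boils, at 100 C") both
    # return "Waterboilsat100C" and the second hit is treated as a duplicate of the first.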
| ARC-Solvers-main | arc_solvers/processing/es_search.py |
"""
Script to convert the retrieved HITS into an entailment dataset
USAGE:
python scripts/convert_to_entailment.py hits_file output_file
JSONL format of files
1. hits_file:
{
"id": "Mercury_SC_415702",
"question": {
"stem": "George wants to warm his hands quickly by rubbing them. Which skin surface will
produce the most heat?"
"choice": {"text": "dry palms", "label": "A"},
"support": {
"text": "Use hand sanitizers according to directions, which usually involves rubbing for
at least ten seconds, then allowing hands to air dry."
...
}
},
"answerKey":"A"
}
2. output_file:
{
"id": "Mercury_SC_415702",
"question": {
"stem": "George wants to warm his hands quickly by rubbing them. Which skin surface will
produce the most heat?"
"choice": {"text": "dry palms", "label": "A"},
"support": {
"text": "Use hand sanitizers according to directions, which usually involves rubbing for
at least ten seconds, then allowing hands to air dry."
...
}
},
"answerKey":"A",
"premise": "Use hand sanitizers according to directions, which usually involves rubbing for
at least ten seconds, then allowing hands to air dry.",
"hypothesis": "George wants to warm his hands quickly by rubbing them. Dry palms skin
surface will produce the most heat."
}
"""
import json
import re
import sys
from allennlp.common.util import JsonDict
# String used to indicate a blank
BLANK_STR = "___"
def convert_to_entailment(qa_file: str, output_file: str):
with open(output_file, 'w') as output_handle, open(qa_file, 'r') as qa_handle:
print("Writing to {} from {}".format(output_file, qa_file))
for line in qa_handle:
json_line = json.loads(line)
output_dict = convert_qajson_to_entailment(json_line)
output_handle.write(json.dumps(output_dict))
output_handle.write("\n")
# Convert the QA file json to output dictionary containing premise and hypothesis
def convert_qajson_to_entailment(qa_json: JsonDict):
question_text = qa_json["question"]["stem"]
choice = qa_json["question"]["choice"]["text"]
support = qa_json["question"]["support"]["text"]
hypothesis = create_hypothesis(get_fitb_from_question(question_text), choice)
output_dict = create_output_dict(qa_json, support, hypothesis)
return output_dict
# Get a Fill-In-The-Blank (FITB) statement from the question text. E.g. "George wants to warm his
# hands quickly by rubbing them. Which skin surface will produce the most heat?" ->
# "George wants to warm his hands quickly by rubbing them. ___ skin surface will produce the most
# heat?
def get_fitb_from_question(question_text: str) -> str:
fitb = replace_wh_word_with_blank(question_text)
if not re.match(".*_+.*", fitb):
print("Can't create hypothesis from: '{}'. Appending {} !".format(question_text, BLANK_STR))
# Strip space, period and question mark at the end of the question and add a blank
fitb = re.sub("[\.\? ]*$", "", question_text.strip()) + BLANK_STR
return fitb
# Create a hypothesis statement from the the input fill-in-the-blank statement and answer choice.
def create_hypothesis(fitb: str, choice: str) -> str:
if ". " + BLANK_STR in fitb or fitb.startswith(BLANK_STR):
choice = choice[0].upper() + choice[1:]
else:
choice = choice.lower()
# Remove period from the answer choice, if the question doesn't end with the blank
if not fitb.endswith(BLANK_STR):
choice = choice.rstrip(".")
# Some questions already have blanks indicated with 2+ underscores
hypothesis = re.sub("__+", choice, fitb)
return hypothesis
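# For example (using the question from the module docstring):
#   create_hypothesis("George wants to warm his hands quickly by rubbing them. "
#                     "___ skin surface will produce the most heat.", "dry palms")
#   # -> "George wants to warm his hands quickly by rubbing them. Dry palms skin
#   #     surface will produce the most heat."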
# Identify the wh-word in the question and replace with a blank
def replace_wh_word_with_blank(question_str: str):
wh_word_offset_matches = []
wh_words = ["which", "what", "where", "when", "how", "who", "why"]
for wh in wh_words:
# Some Turk-authored SciQ questions end with wh-word
# E.g. The passing of traits from parents to offspring is done through what?
m = re.search(wh + "\?[^\.]*[\. ]*$", question_str.lower())
if m:
wh_word_offset_matches = [(wh, m.start())]
break
else:
# Otherwise, find the wh-word in the last sentence
m = re.search(wh + "[ ,][^\.]*[\. ]*$", question_str.lower())
if m:
wh_word_offset_matches.append((wh, m.start()))
# If a wh-word is found
if len(wh_word_offset_matches):
# Pick the first wh-word as the word to be replaced with BLANK
# E.g. Which is most likely needed when describing the change in position of an object?
wh_word_offset_matches.sort(key=lambda x: x[1])
wh_word_found = wh_word_offset_matches[0][0]
wh_word_start_offset = wh_word_offset_matches[0][1]
# Replace the last question mark with period.
question_str = re.sub("\?$", ".", question_str.strip())
# Introduce the blank in place of the wh-word
fitb_question = (question_str[:wh_word_start_offset] + BLANK_STR +
question_str[wh_word_start_offset + len(wh_word_found):])
# Drop "of the following" as it doesn't make sense in the absence of a multiple-choice
# question. E.g. "Which of the following force ..." -> "___ force ..."
return fitb_question.replace(BLANK_STR + " of the following", BLANK_STR)
elif re.match(".*[^\.\?] *$", question_str):
# If no wh-word is found and the question ends without a period/question, introduce a
# blank at the end. e.g. The gravitational force exerted by an object depends on its
return question_str + " " + BLANK_STR
else:
# If all else fails, assume "this ?" indicates the blank. Used in Turk-authored questions
# e.g. Virtually every task performed by living organisms requires this?
return re.sub(" this[ \?]", " ___ ", question_str)
# Create the output json dictionary from the input json, premise and hypothesis statement
def create_output_dict(input_json: JsonDict, premise: str, hypothesis: str) -> JsonDict:
input_json["premise"] = premise
input_json["hypothesis"] = hypothesis
return input_json
if __name__ == "__main__":
if len(sys.argv) < 3:
raise ValueError("Provide at least two arguments: "
"json file with hits, output file name")
convert_to_entailment(sys.argv[1], sys.argv[2])
| ARC-Solvers-main | arc_solvers/processing/convert_to_entailment.py |
"""
Script to compute the QA score from the scores per choice
USAGE:
python scripts/calculate_scores.py predictions_file
Minimal expected format of predictions_file:
{
"question": {
"stem":"George wants to warm his hands quickly by rubbing them. Which skin surface will
produce the most heat?",
"choices":[
{"text":"dry palms","label":"A", "score": 0.6},
{"text":"wet palms","label":"B", "score": 0.4},
{"text":"palms covered with oil","label":"C", "score": 0.2},
{"text":"palms covered with lotion","label":"D", "score": 0.3}
]
},
"answerKey":"A"
}
"""
import json
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))))
from operator import itemgetter
def calculate_scores(qa_predictions: str) -> None:
"""
Uses the scores per answer choice to compute the QA score
:param qa_predictions: QA predictions with scores per choice
"""
with open(qa_predictions, 'r') as qa_handle:
total_score = 0
num_questions = 0
partially_correct = 0
correct = 0
incorrect = 0
for line in qa_handle:
json_line = json.loads(line)
answer_choices = json_line["question"]["choices"]
max_choice_score = max(answer_choices, key=itemgetter("score"))["score"]
# Collect all answer choices with the same score
selected_answers = [choice["label"] for choice in answer_choices
if choice["score"] == max_choice_score]
answer_key = json_line["answerKey"]
if answer_key in selected_answers:
question_score = 1 / len(selected_answers)
if question_score < 1:
partially_correct += 1
else:
correct += 1
else:
question_score = 0
incorrect += 1
total_score += question_score
num_questions += 1
print("""Metrics:
Total Points={:.2f}
Questions:{}
Exam Score:{:.2f}
Correct: {}
Incorrect: {}
Partial: {}
""".format(total_score, num_questions, (total_score / num_questions)*100,
correct, incorrect, partially_correct))
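# For example (hypothetical question): if choices A and C tie for the highest score and
# the answer key is "A", the question contributes 1/2 = 0.5 points and is counted as
# partially correct.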
if __name__ == "__main__":
if len(sys.argv) < 1:
raise ValueError("Provide at least one argument: "
"predictions_file")
calculate_scores(sys.argv[1])
| ARC-Solvers-main | arc_solvers/processing/calculate_scores.py |
"""
Script to convert the retrieved hits into a paragraph comprehension dataset. Questions with no
hits are mapped to a blank paragraph.
USAGE:
python scripts/convert_to_para_comprehension.py hits_file qa_file output_file
JSONL format of files
1. hits_file:
{
"id": "Mercury_SC_415702",
"question": {
"stem": "George wants to warm his hands quickly by rubbing them. Which skin surface will
produce the most heat?"
"choice": {"text": "dry palms", "label": "A"},
"support": {
"text": "Use hand sanitizers according to directions, which usually involves rubbing for
at least ten seconds, then allowing hands to air dry."
...
}
},
"answerKey":"A"
}
2. output_file:
{
"id": "Mercury_SC_415702",
"question": {
"stem": "George wants to warm his hands quickly by rubbing them. Which skin surface will
produce the most heat?"
"choices":[
{"text":"dry palms","label":"A"},
{"text":"wet palms","label":"B"},
{"text":"palms covered with oil","label":"C"},
{"text":"palms covered with lotion","label":"D"}
]
},
"para": "Use hand sanitizers according to directions, which usually involves rubbing for
at least ten seconds, then allowing hands to air dry. ..."
},
"answerKey":"A"
}
"""
import json
import sys
def convert_to_para_comprehension(hits_file: str, qa_file: str, output_file: str):
qid_choices = dict()
qid_stem = dict()
qid_answer = dict()
qid_sentences = dict()
with open(qa_file, 'r') as qa_handle:
for line in qa_handle:
json_line = json.loads(line)
qid = json_line["id"]
choices = json_line["question"]["choices"]
qid_choices[qid] = choices
qid_sentences[qid] = []
qid_stem[qid] = json_line["question"]["stem"]
qid_answer[qid] = json_line["answerKey"]
with open(hits_file, 'r') as hits_handle:
print("Writing to {} from {}".format(output_file, hits_file))
for line in hits_handle:
json_line = json.loads(line)
qid = json_line["id"]
sentence = json_line["question"]["support"]["text"]
if not sentence.endswith("."):
sentence = sentence + "."
qid_sentences[qid].append(sentence)
with open(output_file, 'w') as output_handle:
for qid, sentences in qid_sentences.items():
if len(sentences):
output_dict = {
"id": qid,
"question": {
"stem": qid_stem[qid],
"choices": qid_choices[qid]
},
"para": " ".join(sentences),
"answerKey": qid_answer[qid]
}
output_handle.write(json.dumps(output_dict))
output_handle.write("\n")
if __name__ == "__main__":
if len(sys.argv) < 4:
raise ValueError("Provide at least three arguments: "
"json file with hits, qa file, output file name")
convert_to_para_comprehension(sys.argv[1], sys.argv[2], sys.argv[3])
| ARC-Solvers-main | arc_solvers/processing/convert_to_para_comprehension.py |
ARC-Solvers-main | arc_solvers/processing/__init__.py |
|
"""
Script to retrieve HITS for each answer choice and question
USAGE:
python scripts/add_retrieved_text.py qa_file output_file
JSONL format of files
1. qa_file:
{
"id":"Mercury_SC_415702",
"question": {
"stem":"George wants to warm his hands quickly by rubbing them. Which skin surface will
produce the most heat?",
"choices":[
{"text":"dry palms","label":"A"},
{"text":"wet palms","label":"B"},
{"text":"palms covered with oil","label":"C"},
{"text":"palms covered with lotion","label":"D"}
]
},
"answerKey":"A"
}
2. output_file:
{
"id": "Mercury_SC_415702",
"question": {
"stem": "..."
"choice": {"text": "dry palms", "label": "A"},
"support": {
"text": "...",
"type": "sentence",
"ir_pos": 0,
"ir_score": 2.2,
}
},
"answerKey":"A"
}
...
{
"id": "Mercury_SC_415702",
"question": {
"stem": "..."
"choice": {"text":"palms covered with lotion","label":"D"}
"support": {
"text": "...",
"type": "sentence",
"ir_pos": 1,
"ir_score": 1.8,
}
"answerKey":"A"
}
"""
import json
import os
import sys
from typing import List, Dict
from allennlp.common.util import JsonDict
from tqdm._tqdm import tqdm
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))))
from arc_solvers.processing.es_search import EsSearch, EsHit
MAX_HITS = 8
es_search = EsSearch(max_hits_per_choice=MAX_HITS, max_hits_retrieved=100)
def add_retrieved_text(qa_file, output_file):
with open(output_file, 'w') as output_handle, open(qa_file, 'r') as qa_handle:
print("Writing to {} from {}".format(output_file, qa_file))
line_tqdm = tqdm(qa_handle, dynamic_ncols=True)
for line in line_tqdm:
json_line = json.loads(line)
num_hits = 0
for output_dict in add_hits_to_qajson(json_line):
output_handle.write(json.dumps(output_dict) + "\n")
num_hits += 1
line_tqdm.set_postfix(hits=num_hits)
def add_hits_to_qajson(qa_json: JsonDict):
question_text = qa_json["question"]["stem"]
choices = [choice["text"] for choice in qa_json["question"]["choices"]]
hits_per_choice = es_search.get_hits_for_question(question_text, choices)
output_dicts_per_question = []
filter_hits_across_choices(hits_per_choice, MAX_HITS)
for choice in qa_json["question"]["choices"]:
choice_text = choice["text"]
hits = hits_per_choice[choice_text]
for hit in hits:
output_dict_per_hit = create_output_dict(qa_json, choice, hit)
output_dicts_per_question.append(output_dict_per_hit)
return output_dicts_per_question
def filter_hits_across_choices(hits_per_choice: Dict[str, List[EsHit]],
top_k: int):
"""
Filter the hits from all answer choices(in-place) to the top_k hits based on the hit score
"""
# collect ir scores
ir_scores = [hit.score for hits in hits_per_choice.values() for hit in hits]
# if more than top_k hits were found
if len(ir_scores) > top_k:
# find the score of the top_kth hit
min_score = sorted(ir_scores, reverse=True)[top_k - 1]
# filter hits below this score
for choice, hits in hits_per_choice.items():
hits[:] = [hit for hit in hits if hit.score >= min_score]
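# For example (hypothetical scores): with top_k=2 and hit scores
# {"dry palms": [2.2, 1.1], "wet palms": [1.8]}, the 2nd-highest score overall is 1.8,
# so each choice keeps only hits scoring >= 1.8 and the 1.1 hit is dropped.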
# Create the output json dictionary from the QA file json, answer choice json and retrieved HIT
def create_output_dict(qa_json: JsonDict, choice_json: JsonDict, hit: EsHit):
output_dict = {
"id": qa_json["id"],
"question": {
"stem": qa_json["question"]["stem"],
"choice": choice_json,
"support": {
"text": hit.text,
"type": hit.type,
"ir_pos": hit.position,
"ir_score": hit.score,
}
},
"answerKey": qa_json["answerKey"]
}
return output_dict
if __name__ == "__main__":
if len(sys.argv) < 3:
raise ValueError("Provide at least two arguments: "
"question-answer json file, output file name")
add_retrieved_text(sys.argv[1], sys.argv[2])
| ARC-Solvers-main | arc_solvers/processing/add_retrieved_text.py |
from arc_solvers.models.entailment.tree_attention import TreeAttention
from arc_solvers.models.qa.multi_choice.qa_multi_choice_max_att import QAMultiChoiceMaxAttention | ARC-Solvers-main | arc_solvers/models/__init__.py |
ARC-Solvers-main | arc_solvers/models/entailment/__init__.py |
|
"""
=====================================================================
Decomposable Graph Entailment Model code replicated from SciTail repo
https://github.com/allenai/scitail
=====================================================================
"""
from typing import Dict, List, Any, Tuple
import numpy
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import TextFieldEmbedder
from allennlp.modules.matrix_attention import MatrixAttention
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.modules.similarity_functions.similarity_function import SimilarityFunction
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, last_dim_softmax, weighted_sum, \
replace_masked_values
from allennlp.training.metrics import CategoricalAccuracy
from numpy.core.arrayprint import array2string, set_printoptions
from torch import FloatTensor
from arc_solvers.modules.single_time_distributed import SingleTimeDistributed
from arc_solvers.nn.util import masked_mean
@Model.register("tree_attention")
class TreeAttention(Model):
"""
This ``Model`` implements the decomposable graph entailment model using graph structure from
the hypothesis and aligning premise words onto this structure.
The basic outline of this model is to get attention over the premise for each node in the
graph and use these attentions to compute the probability of each node being true and each
edge being true.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``premise`` and ``hypothesis`` and nodes
premise_encoder : ``Seq2SeqEncoder``
After embedding the premise, we apply an encoder to get the context-based representation
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
phrase_probability: FeedForward,
edge_probability: FeedForward,
premise_encoder: Seq2SeqEncoder,
edge_embedding: Embedding,
use_encoding_for_node: bool,
ignore_edges: bool,
attention_similarity: SimilarityFunction,
initializer: InitializerApplicator = InitializerApplicator()) -> None:
super(TreeAttention, self).__init__(vocab)
self._text_field_embedder = text_field_embedder
self._premise_encoder = premise_encoder
self._nodes_attention = SingleTimeDistributed(MatrixAttention(attention_similarity), 0)
self._num_labels = vocab.get_vocab_size(namespace="labels")
self._phrase_probability = TimeDistributed(phrase_probability)
self._ignore_edges = ignore_edges
if not self._ignore_edges:
self._num_edges = vocab.get_vocab_size(namespace="edges")
self._edge_probability = TimeDistributed(edge_probability)
self._edge_embedding = edge_embedding
self._use_encoding_for_node = use_encoding_for_node
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(self,
premise: Dict[str, torch.LongTensor],
hypothesis: Dict[str, torch.LongTensor],
nodes: Dict[str, torch.LongTensor],
edge_sources: torch.LongTensor,
edge_targets: torch.LongTensor,
edge_labels: torch.LongTensor,
metadata: List[Dict[str, Any]] = None,
label: torch.IntTensor = None) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
premise: Dict[str, torch.LongTensor]
From a ``TextField`` on the premise
hypothesis: Dict[str, torch.LongTensor]
From a ``TextField`` on the hypothesis
nodes: Dict[str, torch.LongTensor]
From a ``ListField`` of ``TextField`` for the list of node phrases in the hypothesis
edge_sources: torch.LongTensor
From a ``ListField`` of ``IndexField`` for the list of edges in the hypothesis. The
indices correspond to the index of source node in the list of nodes
edge_targets: torch.LongTensor
From a ``ListField`` of ``IndexField`` for the list of edges in the hypothesis. The
indices correspond to the index of target node in the list of nodes
edge_labels: torch.LongTensor
From a ``ListField`` of ``LabelField`` for the list of edge labels in the hypothesis
metadata: List[Dict[str, Any]]
Metadata information
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log
probabilities of the entailment label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the
entailment label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
# batch x premise words x emb. dim
embedded_premise = self._text_field_embedder(premise)
premise_mask = get_text_field_mask(premise).float()
# mask over the nodes. dim: batch x node x node words
nodes_mask = get_text_field_mask(nodes)
if self._use_encoding_for_node or not self._ignore_edges:
encoded_premise = self._premise_encoder(embedded_premise, premise_mask)
# embeddings for each node. dim: batch x nodes x node words x emb. dim
embedded_nodes = self._text_field_embedder(nodes)
set_printoptions(threshold=numpy.inf, linewidth=numpy.inf)
# node model
if self._use_encoding_for_node:
premise_representation = encoded_premise
else:
premise_representation = embedded_premise
mean_node_premise_attention, mean_phrase_distribution = self._get_node_probabilities(
embedded_nodes, premise_representation,
nodes_mask, premise_mask, metadata)
if not self._ignore_edges:
# edge model
mean_edge_distribution = self._get_edge_probabilities(encoded_premise,
mean_node_premise_attention,
edge_sources, edge_targets,
edge_labels, metadata)
label_logits = mean_phrase_distribution + mean_edge_distribution
else:
label_logits = mean_phrase_distribution
        label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {"label_logits": label_logits, "label_probs": label_probs}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label.squeeze(-1))
output_dict["loss"] = loss
return output_dict
def _get_node_probabilities(self, embedded_nodes, embedded_premise, nodes_mask, premise_mask,
metadata) -> Tuple[FloatTensor, FloatTensor]:
"""
Compute the average entailment distribution based on the nodes in the hypothesis.
Returns a tuple of (attention of each node over the premise, average entailment
distribution) with dimensions batch x nodes x premise words and batch x num classes
respectively.
"""
# attention for each node. dim: batch x nodes x node words x premise words
node_premise_attention = self._nodes_attention(embedded_nodes, embedded_premise)
normalized_node_premise_attention = last_dim_softmax(node_premise_attention, premise_mask)
expanded_nodes_mask_premise = nodes_mask.unsqueeze(-1).expand_as(
normalized_node_premise_attention).float()
# aggregate representation. dim: batch x nodes x premise words
mean_node_premise_attention = masked_mean(normalized_node_premise_attention, 2,
expanded_nodes_mask_premise)
# convert batch x nodes and batch x premise to batch x nodes x premise mask
nodes_only_mask = (torch.sum(nodes_mask, -1) > 0).float()
node_premise_mask = nodes_only_mask.unsqueeze(-1).expand_as(mean_node_premise_attention) \
* premise_mask.unsqueeze(1).expand_as(mean_node_premise_attention)
masked_mean_node_premise_attention = replace_masked_values(mean_node_premise_attention,
node_premise_mask, 0)
        # aggregate node representation over premise. dim: batch x nodes x emb. dim
aggregate_node_premise_representation = weighted_sum(embedded_premise,
masked_mean_node_premise_attention)
expanded_nodes_mask_embedding = nodes_mask.unsqueeze(-1).expand_as(
embedded_nodes).float()
# dim: batch x nodes x emb. dim
aggregate_node_representation = masked_mean(embedded_nodes, 2,
expanded_nodes_mask_embedding)
sub_representation = aggregate_node_premise_representation - aggregate_node_representation
dot_representation = aggregate_node_premise_representation * aggregate_node_representation
# dim: batch x nodes x emb. dim * 4
combined_node_representation = torch.cat([aggregate_node_premise_representation,
aggregate_node_representation,
sub_representation,
dot_representation], 2)
# dim: batch x nodes x num_classes
phrase_prob_distribution = self._phrase_probability(combined_node_representation)
# ignore nodes with no text and expand to num of output classes
# dim: batch x node x node words -> batch x node -> batch x node x num_classes
nodes_class_mask = nodes_only_mask.unsqueeze(-1).expand_as(
phrase_prob_distribution).float()
mean_phrase_distribution = masked_mean(phrase_prob_distribution, 1, nodes_class_mask)
return mean_node_premise_attention, mean_phrase_distribution
def _get_edge_probabilities(self, encoded_premise, mean_node_premise_attention, edge_sources,
edge_targets, edge_labels, metadata) -> FloatTensor:
# dim: batch x nodes x emb. dim
aggregate_node_premise_lstm_representation = weighted_sum(encoded_premise,
mean_node_premise_attention)
# dim: batch x edges x 1
edge_mask = (edge_sources != -1).float()
edge_source_lstm_repr = self._select_embeddings_using_index(
aggregate_node_premise_lstm_representation,
replace_masked_values(edge_sources.float(), edge_mask, 0))
edge_target_lstm_repr = self._select_embeddings_using_index(
aggregate_node_premise_lstm_representation,
replace_masked_values(edge_targets.float(), edge_mask, 0))
# edge label embeddings. dim: batch x edges x edge dim
masked_edge_labels = replace_masked_values(edge_labels.float(), edge_mask, 0).squeeze(
2).long()
edge_label_embeddings = self._edge_embedding(masked_edge_labels)
# dim: batch x edges x (2* emb dim + edge dim)
combined_edge_representation = torch.cat([edge_source_lstm_repr, edge_label_embeddings,
edge_target_lstm_repr], 2)
edge_prob_distribution = self._edge_probability(combined_edge_representation)
edges_only_mask = edge_mask.expand_as(edge_prob_distribution).float()
mean_edge_distribution = masked_mean(edge_prob_distribution, 1, edges_only_mask)
return mean_edge_distribution
@staticmethod
def _get_unpadded_matrix_for_example(input_matrix, idx, mask) -> str:
input_matrix_for_example = input_matrix.data.cpu().numpy()[idx]
mask_for_example = mask.data.cpu().numpy()[idx]
if mask_for_example.shape != input_matrix_for_example.shape:
raise ValueError("Different shapes for mask and input: {} vs {}".format(
mask_for_example.shape, input_matrix_for_example.shape))
if mask_for_example.ndim != 2:
raise ValueError("Cannot handle more than two dimensions. Found {}".format(
mask_for_example.shape))
# Find the max rows and columns to print
zero_rows = numpy.argwhere(mask_for_example[:, 0] == 0)
max_rows = numpy.min(zero_rows) if zero_rows.size != 0 else mask_for_example.shape[0]
zero_cols = numpy.argwhere(mask_for_example[0, :] == 0)
max_cols = numpy.min(zero_cols) if zero_cols.size != 0 else mask_for_example.shape[1]
return array2string(input_matrix_for_example[:max_rows, :max_cols],
precision=4, suppress_small=True)
@staticmethod
def _select_embeddings_using_index(embedding_matrix, index_tensor) -> FloatTensor:
"""
Uses the indices in index_tensor to select vectors from embedding_matrix
:param embedding_matrix: Embeddings with dim: batch x N x emb. dim
:param index_tensor: Indices with dim: batch x M x 1
:return: selected embeddings with dim: batch x M x emb. dim
"""
if index_tensor.size()[-1] != 1:
raise ValueError("Expecting last index to be 1. Found {}".format(index_tensor.size()))
expanded_index_size = [x for x in index_tensor.size()[:-1]] + [embedding_matrix.size()[-1]]
# dim: batch x M x emb. dim
expanded_index_tensor = index_tensor.expand(expanded_index_size).long()
return torch.gather(embedding_matrix, 1, expanded_index_tensor)
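    # For example (hypothetical shapes): with embedding_matrix of shape [batch=1, N=3, dim=2]
    # and index_tensor [[[2], [0]]] of shape [1, 2, 1], the result has shape [1, 2, 2] and
    # contains node embeddings 2 and 0, in that order.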
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
'accuracy': self._accuracy.get_metric(reset),
}
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'TreeAttention':
embedder_params = params.pop("text_field_embedder")
text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
premise_encoder_params = params.pop("premise_encoder", None)
premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
attention_similarity = SimilarityFunction.from_params(params.pop('attention_similarity'))
phrase_probability = FeedForward.from_params(params.pop('phrase_probability'))
edge_probability = FeedForward.from_params(params.pop('edge_probability'))
edge_embedding = Embedding.from_params(vocab, params.pop('edge_embedding'))
use_encoding_for_node = params.pop('use_encoding_for_node')
ignore_edges = params.pop('ignore_edges', False)
init_params = params.pop('initializer', None)
initializer = (InitializerApplicator.from_params(init_params)
if init_params is not None
else InitializerApplicator())
return cls(vocab=vocab,
text_field_embedder=text_field_embedder,
phrase_probability=phrase_probability,
edge_probability=edge_probability,
premise_encoder=premise_encoder,
edge_embedding=edge_embedding,
use_encoding_for_node=use_encoding_for_node,
attention_similarity=attention_similarity,
ignore_edges=ignore_edges,
initializer=initializer)
| ARC-Solvers-main | arc_solvers/models/entailment/tree_attention.py |
ARC-Solvers-main | arc_solvers/models/qa/__init__.py |
|
ARC-Solvers-main | arc_solvers/models/qa/multi_choice/__init__.py |
|
from typing import Dict, Optional, List, Any
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, MatrixAttention, SimilarityFunction
from allennlp.modules import TextFieldEmbedder
from allennlp.nn import InitializerApplicator
from allennlp.training.metrics import CategoricalAccuracy
from arc_solvers.nn.util import embed_encode_and_aggregate_list_text_field, embed_encode_and_aggregate_text_field
@Model.register("qa_multi_choice_max_att")
class QAMultiChoiceMaxAttention(Model):
"""
This ``QAMultiChoiceMaxAttention`` implements an attention interaction between question and choices context-encoded representations:
1. Obtain a BiLSTM context representation of the token sequences of the
`question` and each `choice`.
2. Get an aggregated (single vector) representations for `question` and `choice` using element-wise `max` operation.
3. Compute the attention score between `question` and `choice` as `linear_layer([u, v, u - v, u * v])`, where `u` and `v` are the representations from Step 2.
4. Select as answer the `choice` with the highest attention with the `question`.
Pseudo-code looks like:
question_encoded = context_enc(question_words) # context_enc can be any AllenNLP supported or None. Bi-directional LSTM is used
choice_encoded = context_enc(choice_words)
question_aggregate = aggregate_method(question_encoded) # aggregate_method can be max, min, avg. ``max`` is used.
choice_aggregate = aggregate_method(choice_encoded)
inter = concat([question_aggregate, choice_aggregate, choice_aggregate - question_aggregate, question_aggregate
* choice_aggregate)
choice_to_question_att = linear_layer(inter) # the output is a scalar value for each question-to-choice interaction
# The choice_to_question_att of the four choices are normalized using ``softmax``
# and the choice with the highest attention is selected as the answer.
The model is inspired by the BiLSTM Max-Out model from Conneau, A. et al. (2017) ‘Supervised Learning of
Universal Sentence Representations from Natural Language Inference Data’.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``choice`` ``TextFields`` we get as input to the
model.
att_question_to_choice : ``SimilarityFunction``
The similarity (attention) function used to model the relation between the question and choices represenations.
question_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
After embedding the question, we can optionally apply an encoder. If this is ``None``, we
will do nothing.
choice_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
After embedding the choice, we can optionally apply an encoder. If this is ``None``,
we will use the ``question_encoder`` for the encoding (doing nothing if ``question_encoder``
is also ``None``).
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
aggregate_question : ``str``, optional (default=``max``, allowed options [max, avg, sum])
The aggregation method for the encoded question.
aggregate_choice : ``str``, optional (default=``max``, allowed options [max, avg, sum])
The aggregation method for the encoded choice.
embeddings_dropout_value : ``float``, optional (default=0.0)
The dropout rate used after the embeddings layer. If set, it is used only during training.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
att_question_to_choice: SimilarityFunction,
question_encoder: Optional[Seq2SeqEncoder] = None,
choice_encoder: Optional[Seq2SeqEncoder] = None,
initializer: InitializerApplicator = InitializerApplicator(),
aggregate_question: Optional[str] = "max",
aggregate_choice: Optional[str] = "max",
embeddings_dropout_value: Optional[float] = 0.0
) -> None:
super(QAMultiChoiceMaxAttention, self).__init__(vocab)
self._use_cuda = (torch.cuda.is_available() and torch.cuda.current_device() >= 0)
self._text_field_embedder = text_field_embedder
if embeddings_dropout_value > 0.0:
self._embeddings_dropout = torch.nn.Dropout(p=embeddings_dropout_value)
else:
self._embeddings_dropout = lambda x: x
self._question_encoder = question_encoder
# choices encoding
self._choice_encoder = choice_encoder
self._question_aggregate = aggregate_question
self._choice_aggregate = aggregate_choice
self._num_labels = vocab.get_vocab_size(namespace="labels")
question_output_dim = self._text_field_embedder.get_output_dim()
if self._question_encoder is not None:
question_output_dim = self._question_encoder.get_output_dim()
choice_output_dim = self._text_field_embedder.get_output_dim()
if self._choice_encoder is not None:
choice_output_dim = self._choice_encoder.get_output_dim()
if question_output_dim != choice_output_dim:
raise ConfigurationError("Output dimension of the question_encoder (dim: {}) "
"and choice_encoder (dim: {})"
"must match! "
.format(question_output_dim,
choice_output_dim))
# Check input tensor dimensions for the question to choices attention (similarity function)
if hasattr(att_question_to_choice, "tensor_1_dim"):
tensor_1_dim = att_question_to_choice.tensor_1_dim
if tensor_1_dim != question_output_dim:
raise ConfigurationError("Output dimension of the question_encoder (dim: {}) "
"and tensor_1_dim (dim: {}) of att_question_to_choice"
"must match! "
.format(question_output_dim,
tensor_1_dim))
if hasattr(att_question_to_choice, "tensor_2_dim"):
tensor_2_dim = att_question_to_choice.tensor_2_dim
if tensor_2_dim != question_output_dim:
raise ConfigurationError("Output dimension of the choice_encoder (dim: {}) "
"and tensor_2_dim (dim: {}) of att_question_to_choice"
"must match! "
.format(choice_output_dim,
tensor_2_dim))
self._matrix_attention_question_to_choice = MatrixAttention(att_question_to_choice)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(self, # type: ignore
question: Dict[str, torch.LongTensor],
choices_list: Dict[str, torch.LongTensor],
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``
choices_list : Dict[str, torch.LongTensor]
From a ``List[TextField]``
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log
probabilities of each choice being the correct answer.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing
probabilities of each choice being the correct answer.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
encoded_choices_aggregated = embed_encode_and_aggregate_list_text_field(choices_list,
self._text_field_embedder,
self._embeddings_dropout,
self._choice_encoder,
self._choice_aggregate) # bs, choices, hs
encoded_question_aggregated = embed_encode_and_aggregate_text_field(question, self._text_field_embedder,
self._embeddings_dropout,
self._question_encoder,
self._question_aggregate) # bs, hs
q_to_choices_att = self._matrix_attention_question_to_choice(encoded_question_aggregated.unsqueeze(1),
encoded_choices_aggregated).squeeze()
label_logits = q_to_choices_att
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {"label_logits": label_logits, "label_probs": label_probs}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label.squeeze(-1))
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
'accuracy': self._accuracy.get_metric(reset),
}
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'QAMultiChoiceMaxAttention':
embedder_params = params.pop("text_field_embedder")
text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
embeddings_dropout_value = params.pop("embeddings_dropout", 0.0)
# question encoder
question_encoder_params = params.pop("question_encoder", None)
question_enc_aggregate = params.pop("question_encoder_aggregate", "max")
share_encoders = params.pop("share_encoders", False)
if question_encoder_params is not None:
question_encoder = Seq2SeqEncoder.from_params(question_encoder_params)
else:
question_encoder = None
if share_encoders:
choice_encoder = question_encoder
choice_enc_aggregate = question_enc_aggregate
else:
# choice encoder
choice_encoder_params = params.pop("choice_encoder", None)
choice_enc_aggregate = params.pop("choice_encoder_aggregate", "max")
if choice_encoder_params is not None:
choice_encoder = Seq2SeqEncoder.from_params(choice_encoder_params)
else:
choice_encoder = None
# question to choice attention
att_question_to_choice_params = params.get("att_question_to_choice")
att_question_to_choice = SimilarityFunction.from_params(att_question_to_choice_params)
init_params = params.pop('initializer', None)
initializer = (InitializerApplicator.from_params(init_params)
if init_params is not None
else InitializerApplicator())
return cls(vocab=vocab,
text_field_embedder=text_field_embedder,
question_encoder=question_encoder,
choice_encoder=choice_encoder,
initializer=initializer,
aggregate_choice=choice_enc_aggregate,
aggregate_question=question_enc_aggregate,
embeddings_dropout_value=embeddings_dropout_value,
att_question_to_choice=att_question_to_choice)
| ARC-Solvers-main | arc_solvers/models/qa/multi_choice/qa_multi_choice_max_att.py |
ARC-Solvers-main | arc_solvers/training_config/qa/multi_choice/__init__.py |
|
from allennlp.commands import main as main_allennlp
def main(prog: str = None) -> None:
predictor_overrides = {
"decomposable_attention": "decompatt",
"tree_attention": "dgem",
"bidaf": "bidaf_qa"
}
main_allennlp(prog,
predictor_overrides=predictor_overrides)
| ARC-Solvers-main | arc_solvers/commands/__init__.py |
ARC-Solvers-main | arc_solvers/service/__init__.py |
|
import logging
from allennlp.common.util import JsonDict, sanitize
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.models.model import Model
from allennlp.service.predictors.predictor import Predictor
from overrides import overrides
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Predictor.register("dgem")
class DgemPredictor(Predictor):
"""
    Converts the QA JSON into an instance that is expected by the DGEM model.
"""
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super().__init__(model, dataset_reader)
labels = self._model.vocab.get_index_to_token_vocabulary("labels").values()
#
if "entails" in labels:
self._entailment_idx = self._model.vocab.get_token_index("entails", "labels")
elif "entailment" in labels:
self._entailment_idx = self._model.vocab.get_token_index("entailment", "labels")
else:
raise Exception("No label for entailment found in the label space: {}".format(
",".join(labels)))
@overrides
def _json_to_instance(self, # type: ignore
json_dict: JsonDict) -> Instance:
# pylint: disable=arguments-differ
premise_text = json_dict["premise"]
hypothesis_text = json_dict["hypothesis"]
hypothesis_structure = json_dict["hypothesisStructure"]
return self._dataset_reader.text_to_instance(premise_text, hypothesis_text,
hypothesis_structure)
@overrides
def predict_json(self, inputs: JsonDict, cuda_device: int = -1):
instance = self._json_to_instance(inputs)
outputs = self._model.forward_on_instance(instance, cuda_device)
json_output = inputs
json_output["score"] = outputs["label_probs"][self._entailment_idx]
return sanitize(json_output) | ARC-Solvers-main | arc_solvers/service/predictors/dgem_predictor.py |
import logging
from operator import itemgetter
from typing import List
from allennlp.common.util import JsonDict, sanitize
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.models.model import Model
from allennlp.service.predictors.predictor import Predictor
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from overrides import overrides
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Predictor.register("bidaf_qa")
class BidafQaPredictor(Predictor):
"""
    Converts the QA JSON into an instance that is expected by the BiDAF model.
"""
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super().__init__(model, dataset_reader)
self._stemmer = PorterStemmer()
self._stop_words = set(stopwords.words('english'))
@overrides
def _json_to_instance(self, # type: ignore
json_dict: JsonDict) -> Instance:
# pylint: disable=arguments-differ
"""
Expects JSON that looks like ``{"question": { "stem": "..."}, "para": "..."}``.
"""
question_text = json_dict["question"]["stem"]
passage_text = json_dict["para"]
return self._dataset_reader.text_to_instance(question_text, passage_text)
@overrides
def predict_json(self, inputs: JsonDict, cuda_device: int = -1):
instance = self._json_to_instance(inputs)
outputs = self._model.forward_on_instance(instance, cuda_device)
json_output = inputs
span_str = outputs["best_span_str"]
# If the file has an answer key, calculate the score
if "answerKey" in json_output:
answer_choices = json_output["question"]["choices"]
# Score each answer choice based on its overlap with the predicted span.
for choice in answer_choices:
choice_text = choice["text"]
choice_score = self._overlap_score(choice_text, span_str)
choice["score"] = choice_score
# Get the maximum answer choice score
max_choice_score = max(answer_choices, key=itemgetter("score"))["score"]
# Collect all answer choices with the same score
selected_answers = [choice["label"] for choice in answer_choices
if choice["score"] == max_choice_score]
answer_key = json_output["answerKey"]
if answer_key in selected_answers:
question_score = 1 / len(selected_answers)
else:
question_score = 0
json_output["selected_answers"] = ",".join(selected_answers)
json_output["question_score"] = question_score
json_output["best_span_str"] = span_str
return sanitize(json_output)
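    # Illustrative note (not in the original file): if, say, choices "B" and "C" tie for the
    # maximum overlap score and the answerKey is "B", selected_answers is ["B", "C"] and
    # question_score = 1 / 2 = 0.5; with a unique best choice the score is simply 1 or 0.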
def _overlap_score(self, answer: str, predicted_span: str) -> float:
"""
Scores the predicted span against the correct answer by calculating the proportion of the
stopword-filtered stemmed words in the correct answer covered by the predicted span
:param answer: correct answer
:param predicted_span: predicted span
:return:
"""
answer_tokens = self._get_tokens(answer)
# degenerate case: if the answer only has stopwords, we can not score it.
if not len(answer_tokens):
return 0.0
span_tokens = self._get_tokens(predicted_span)
overlap = [tok for tok in answer_tokens if tok in span_tokens]
score = len(overlap) / len(answer_tokens)
return score
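    # Illustrative worked example (not in the original file): for answer "liquid water vapor"
    # and predicted span "the liquid turns into water", the stopword-filtered stemmed answer
    # tokens are ["liquid", "water", "vapor"]; "liquid" and "water" also occur in the span
    # tokens, so the score is 2 / 3.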
def _get_tokens(self, phrase: str) -> List[str]:
# Get the stopword-filtered lowercase stemmed tokens from input phrase
return [self._stemmer.stem(word) for word in word_tokenize(phrase)
if word.lower() not in self._stop_words]
| ARC-Solvers-main | arc_solvers/service/predictors/bidaf_qa_predictor.py |
from arc_solvers.service.predictors.decompatt_qa_predictor import DecompAttPredictor
from arc_solvers.service.predictors.dgem_predictor import DgemPredictor
from arc_solvers.service.predictors.bidaf_qa_predictor import BidafQaPredictor
| ARC-Solvers-main | arc_solvers/service/predictors/__init__.py |
import logging
from allennlp.common.util import JsonDict, sanitize
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.models.model import Model
from allennlp.service.predictors.predictor import Predictor
from overrides import overrides
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Predictor.register("decompatt")
class DecompAttPredictor(Predictor):
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super().__init__(model, dataset_reader)
labels = self._model.vocab.get_index_to_token_vocabulary("labels").values()
#
if "entails" in labels:
self._entailment_idx = self._model.vocab.get_token_index("entails", "labels")
elif "entailment" in labels:
self._entailment_idx = self._model.vocab.get_token_index("entailment", "labels")
else:
raise Exception("No label for entailment found in the label space: {}".format(
",".join(labels)))
@overrides
def _json_to_instance(self, # type: ignore
json_dict: JsonDict) -> Instance:
"""
Converts the QA JSON into an instance that is expected by the Decomposable Attention Model.
"""
premise_text = json_dict["premise"]
hypothesis_text = json_dict["hypothesis"]
return self._dataset_reader.text_to_instance(premise_text, hypothesis_text)
@overrides
def predict_json(self, inputs: JsonDict, cuda_device: int = -1):
instance = self._json_to_instance(inputs)
outputs = self._model.forward_on_instance(instance, cuda_device)
json_output = inputs
json_output["score"] = outputs["label_probs"][self._entailment_idx]
return sanitize(json_output)
| ARC-Solvers-main | arc_solvers/service/predictors/decompatt_qa_predictor.py |
from arc_solvers.modules.single_time_distributed import SingleTimeDistributed
| ARC-Solvers-main | arc_solvers/modules/__init__.py |
"""
=====================================================================
Decomposable Graph Entailment Model code replicated from SciTail repo
https://github.com/allenai/scitail
=====================================================================
"""
import torch
class SingleTimeDistributed(torch.nn.Module):
"""
Given an input shaped like ``(batch_size, time_steps, [rest])`` and a ``Module`` that takes
inputs like ``(batch_size, [rest])``, ``SingleTimeDistributed`` reshapes the tensor in the
distribute_input position of the input arguments to be ``(batch_size * time_steps, [rest])``,
applies the contained ``Module``, then reshapes it back.
The key difference from TimeDistributed is that it will only perform the distribution of
the second dimension on the tensor in the ``distribute_input`` position in the forward call
to produce the tensor of size: ``(batch_size * time_steps, [rest])``. For all the other
tensors shaped like ``(batch_size, [rest])``, it will repeat the tensor to match the
dimension of the tensor in the ``distribute_input`` position, i.e. ``(batch_size * time_steps,
[rest])``.
"""
def __init__(self, distributed_module, distribute_input):
super(SingleTimeDistributed, self).__init__()
self._module = distributed_module
self._distribute_input = distribute_input
def forward(self, *inputs):
reshaped_inputs = []
input_index = 0
# second dimension of the selected input tensor used for distributing the tensors
squashed_steps = inputs[self._distribute_input].size()[1]
for input_tensor in inputs:
if input_index == self._distribute_input:
input_size = input_tensor.size()
if len(input_size) <= 2:
raise RuntimeError("No dimension to distribute: " + str(input_size))
# Squash batch_size and time_steps into a single axis; result has shape
# (batch_size * time_steps, input_size).
squashed_shape = [-1] + [x for x in input_size[2:]]
reshaped_inputs.append(input_tensor.contiguous().view(*squashed_shape))
else:
# For others, repeat the tensor for the squashed time steps to ensure the same
# dimensionality for the new "batch" dimension: batch_size * time_steps
input_size = input_tensor.size()
# first shape into batch x squashed_steps x ...
new_shape = [input_size[0], squashed_steps] + [x for x in input_size[1:]]
expanded_tensor = input_tensor.unsqueeze(1).expand(*new_shape)
# re-shape to the (batch_size * time_steps, input_size)
squashed_shape = [-1] + [x for x in new_shape[2:]]
reshaped_inputs.append(expanded_tensor.contiguous().view(*squashed_shape))
input_index += 1
reshaped_outputs = self._module(*reshaped_inputs)
# Now get the output back into the right shape.
# (batch_size, time_steps, [hidden_size])
new_shape = [-1, squashed_steps] + [x for x in reshaped_outputs.size()[1:]]
outputs = reshaped_outputs.contiguous().view(*new_shape)
return outputs
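if __name__ == "__main__":
    # Minimal sketch (not part of the original SciTail/ARC code) showing the reshaping
    # behaviour described in the class docstring. The _PairScore module and all tensor
    # shapes below are made up for the demo.
    class _PairScore(torch.nn.Module):
        # Takes two tensors of shape (batch, dim) and returns a score of shape (batch, 1).
        def forward(self, first, second):
            return (first * second).sum(dim=-1, keepdim=True)
    batch_size, time_steps, dim = 2, 3, 4
    premise = torch.randn(batch_size, dim)            # repeated across time_steps
    nodes = torch.randn(batch_size, time_steps, dim)  # distributed over dimension 1
    # distribute_input=1 flattens the second argument to (batch_size * time_steps, dim)
    # and repeats the first argument to match before calling the wrapped module.
    wrapper = SingleTimeDistributed(_PairScore(), distribute_input=1)
    scores = wrapper(premise, nodes)
    print(scores.shape)  # expected: torch.Size([2, 3, 1])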
| ARC-Solvers-main | arc_solvers/modules/single_time_distributed.py |
from arc_solvers.data.dataset_readers.arc_multichoice_json_reader import ArcMultiChoiceJsonReader | ARC-Solvers-main | arc_solvers/data/__init__.py |
"""
=====================================================================
Decomposable Graph Entailment Model code replicated from SciTail repo
https://github.com/allenai/scitail
=====================================================================
"""
import logging
from builtins import ValueError
from typing import Dict, List, Set, Tuple
import tqdm
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset import Dataset
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField
from allennlp.data.fields.index_field import IndexField
from allennlp.data.fields.list_field import ListField
from allennlp.data.fields.metadata_field import MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from overrides import overrides
logger = logging.getLogger(__name__)
@DatasetReader.register("entailment_tuple")
class EntailmentTupleReader(DatasetReader):
"""
Reads a file with entailment data with additional tuple structure for the hypothesis. The
input file is in the format "premise\thypothesis\tlabel\ttuple structure" where the tuple
structure is represented using "$$$" to split tuples and "<>" to split fields.
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)
Used to tokenize the premise, hypothesis and nodes in the hypothesis structure
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens":
SingleIdTokenIndexer()}``)
Used to index the tokens extracted by the tokenizer
"""
def __init__(self,
max_tokens: int, max_tuples: int,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None) -> None:
self._max_tokens = max_tokens
self._max_tuples = max_tuples
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
@overrides
def read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
instances = []
with open(file_path, 'r') as entailment_file:
logger.info("Reading entailment instances from TSV dataset at: %s", file_path)
for line in tqdm.tqdm(entailment_file):
fields = line.split("\t")
if len(fields) != 4:
raise ValueError("Expected four fields: "
"premise hypothesis label hypothesis_structure. "
"Found {} fields in {}".format(len(fields), line))
premise, hypothesis, label, hypothesis_structure = fields
instances.append(self.text_to_instance(premise, hypothesis, hypothesis_structure,
label))
if not instances:
raise ConfigurationError("No instances were read from the given filepath {}. "
"Is the path correct?".format(file_path))
return Dataset(instances)
@overrides
def text_to_instance(self,
premise: str,
hypothesis: str,
hypothesis_structure: str,
label: str = None) -> Instance:
fields: Dict[str, Field] = {}
premise_tokens = self._tokenizer.tokenize(premise)[-self._max_tokens:]
hypothesis_tokens = self._tokenizer.tokenize(hypothesis)[-self._max_tokens:]
fields['premise'] = TextField(premise_tokens, self._token_indexers)
fields['hypothesis'] = TextField(hypothesis_tokens, self._token_indexers)
metadata = {
'premise': premise,
'hypothesis': hypothesis,
'premise_tokens': [token.text for token in premise_tokens],
'hypothesis_tokens': [token.text for token in hypothesis_tokens]
}
fields['metadata'] = MetadataField(metadata)
self._add_structure_to_fields(hypothesis_structure, fields)
if label:
fields['label'] = LabelField(label)
return Instance(fields)
def _add_structure_to_fields(self, structure, fields) -> None:
"""
Add structure (nodes and edges) to the instance fields. Specifically, convert
"plants<>produce<>oxygen" into ("produce", subj, "plants"), ("produce", obj, "oxygen"),
("plants", subj-obj, "oxygen"). Each quoted string forms a node represented using a
TextField. Each source and target node in an edge is represented using IndexField into
the list of nodes and the edge label is represented using a LabelField with "edges"
namespace.
"""
# take the last tuples
tuples = structure.split("$$$")[-self._max_tuples:]
node_list, edge_list = self._extract_nodes_and_edges_from_tuples(tuples)
if not len(node_list):
print("No nodes in {} for premise:{} and hypothesis: {}".format(
structure, fields['metadata'].metadata["premise"],
fields['metadata'].metadata["hypothesis"]))
nodes_field = ListField(node_list)
edge_source_list = []
edge_target_list = []
edge_label_list = []
for edge in edge_list:
source_field = IndexField(edge[0], nodes_field)
target_field = IndexField(edge[2], nodes_field)
label_field = LabelField(edge[1], "edges")
edge_source_list.append(source_field)
edge_target_list.append(target_field)
edge_label_list.append(label_field)
fields['nodes'] = nodes_field
# Currently AllenNLP doesn't allow for ListFields containing ListFields,
# so creating separate ListFields for source, target and labels for the edges
fields['edge_sources'] = ListField(edge_source_list)
fields['edge_targets'] = ListField(edge_target_list)
fields['edge_labels'] = ListField(edge_label_list)
def _extract_nodes_and_edges_from_tuples(self, tuples: List[str]) -> Tuple[List[TextField],
List[Tuple]]:
"""
Extract the nodes and edges from the list of tuples. Returns a list of nodes and list of
edges where the nodes are represented as list of ``TextField`` and edges as list of
(source index, edge label, target index). The source and target indices refer to the
index of the node in the list of nodes.
"""
# list of string representation of the nodes used to find the index of the source/target
# node for each edge
node_strings = []
node_text_fields = []
edge_tuples = []
for openie_tuple in tuples:
tuple_fields = openie_tuple.split("<>")
nodes, edges = self._extract_nodes_and_edges_from_fields(tuple_fields)
# first, collect the nodes in the graph
for node in nodes:
if node not in node_strings:
node_tokens = self._tokenizer.tokenize(node)
if not node_tokens:
raise ValueError("Empty phrase from {}".format(node))
node_strings.append(node)
node_text_fields.append(TextField(node_tokens, self._token_indexers))
# convert the edge representation using strings into the edge representation with
# indices into the list of nodes compute above
            for edge in edges:
                # list.index() raises ValueError rather than returning None, so check
                # membership explicitly to surface the intended error message.
                if edge[0] not in node_strings:
                    raise ValueError("'{}' not found in node list: [{}]".format(
                        edge[0], ",".join(node_strings)))
                if edge[2] not in node_strings:
                    raise ValueError("'{}' not found in node list: [{}]".format(
                        edge[2], ",".join(node_strings)))
                source_idx = node_strings.index(edge[0])
                target_idx = node_strings.index(edge[2])
                edge_label = edge[1]
                edge_tuples.append((source_idx, edge_label, target_idx))
return node_text_fields, edge_tuples
    def _extract_nodes_and_edges_from_fields(self, fields) -> Tuple[Set[str], List[List[str]]]:
"""
Extract the nodes and edges from the fields of a tuple. Nodes are represented using their
string and edges as [source node, edge label, target node].
"""
nodes = set()
edges = []
if len(fields) < 2:
print("Less than two fields in ({})".format(",".join(fields)))
return nodes, edges
subj = self._get_tokenized_rep(fields[0])
pred = self._get_tokenized_rep(fields[1])
if subj:
nodes.add(subj)
if pred:
nodes.add(pred)
# create a subj edge between the predicate and subject
if subj and pred:
edges.append([pred, "subj", subj])
if len(fields) > 2:
obj1 = self._get_tokenized_rep(fields[2])
if obj1:
nodes.add(obj1)
# create a subj-obj edge between the subject and object
if subj:
edges.append([subj, "subj-obj", obj1])
for obj in fields[2:]:
last_ent = pred
# identify the object type and split longer objects, if needed
for phrase, ptype in self._split_object_phrase(obj):
clean_phr = self._get_tokenized_rep(phrase)
if not clean_phr:
logger.warning("Unexpected empty phrase from {}".format(obj))
nodes.add(clean_phr)
edges.append([last_ent, ptype, clean_phr])
last_ent = clean_phr
return nodes, edges
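    # Illustrative note (not in the original file): for the docstring example tuple
    # "plants<>produce<>oxygen", fields = ["plants", "produce", "oxygen"] yields
    # nodes = {"plants", "produce", "oxygen"} and
    # edges = [["produce", "subj", "plants"], ["plants", "subj-obj", "oxygen"],
    #          ["produce", "obj", "oxygen"]].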
def _get_tokenized_rep(self, field):
"""
Get a clean representation of the field based on the tokens. This ensures that
strings with the same tokens have the same string representation.
"""
return " ".join([x.text for x in self._tokenizer.tokenize(field.strip())])
def _split_object_phrase(self, field: str) -> List[Tuple[str, str]]:
"""
Break longer object phrases into shorter phrases based on the prepositions. E.g. break
"the process of changing liquid water into water vapor" into {(the process, obj),
(changing liquid water, of), (water vapor, into)}
"""
clean_obj, base_type = self._get_base_object_and_type(field)
tokens = [x.text for x in self._tokenizer.tokenize(clean_obj)]
split_objects = []
object_types = []
current_obj = ""
current_type = base_type
for token in tokens:
if token in self.PREPOSITION_LIST and current_obj != "":
split_objects.append(current_obj)
object_types.append(current_type)
current_obj = ""
current_type = token
else:
current_obj = current_obj + " " + token if current_obj != "" else token
if current_obj != "":
split_objects.append(current_obj)
object_types.append(current_type)
return list(zip(split_objects, object_types))
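    # Illustrative note (not in the original file): per the docstring example,
    # _split_object_phrase("the process of changing liquid water into water vapor")
    # returns [("the process", "obj"), ("changing liquid water", "of"), ("water vapor", "into")].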
def _get_base_object_and_type(self, field: str) -> Tuple[str, str]:
"""Identify the object type for the object in the OpenIE tuple"""
if field.startswith("L:"):
return field[2:], "L"
if field.startswith("T:"):
return field[2:], "T"
for prep in self.PREPOSITION_LIST:
if field.startswith(prep + " "):
return field[len(prep) + 1:], prep
# if no match found, use the generic obj type
return field, "obj"
PREPOSITION_LIST = ["with", "at", "from", "into", "during", "including", "until", "against",
"among", "throughout", "despite", "towards", "upon", "concerning", "of",
"to", "in", "for", "on", "by", "about", "like", "through", "over",
"before", "between", "after", "since", "without", "under", "within",
"along", "following", "across", "behind", "beyond", "plus", "except",
"but", "up", "out", "around", "down", "off", "above", "near"]
@classmethod
def from_params(cls, params: Params) -> 'EntailmentTupleReader':
tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
max_tuples = params.pop('max_tuples', 30)
max_tokens = params.pop('max_tokens', 200)
params.assert_empty(cls.__name__)
return EntailmentTupleReader(max_tokens=max_tokens,
max_tuples=max_tuples,
tokenizer=tokenizer,
token_indexers=token_indexers)
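if __name__ == "__main__":
    # Minimal sketch (not part of the original repo): builds a single instance from a
    # made-up TSV line in the format described in the class docstring. Assumes allennlp
    # and the default tokenizer's dependencies are installed.
    example_line = "\t".join([
        "Plants produce oxygen during photosynthesis.",               # premise
        "Plants produce oxygen.",                                     # hypothesis
        "entails",                                                    # label
        "plants<>produce<>oxygen$$$plants<>undergo<>photosynthesis",  # hypothesis structure
    ])
    premise, hypothesis, label, structure = example_line.split("\t")
    reader = EntailmentTupleReader(max_tokens=200, max_tuples=30)
    instance = reader.text_to_instance(premise, hypothesis, structure, label)
    print(sorted(instance.fields.keys()))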
| ARC-Solvers-main | arc_solvers/data/dataset_readers/entailment_tuple_reader.py |
from typing import Dict, List, Any
import json
import logging
from allennlp.data import Dataset
from overrides import overrides
from allennlp.common import Params
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField, ListField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("arc-multi-choice-json")
class ArcMultiChoiceJsonReader(DatasetReader):
"""
Reading instances in ARC jsonl format. This data is
formatted as jsonl, one json-formatted instance per line. An example of the json in the data is:
{"id":"MCAS_2000_4_6",
"question":{"stem":"Which technology was developed most recently?",
"choices":[
{"text":"cellular telephone","label":"A"},
{"text":"television","label":"B"},
{"text":"refrigerator","label":"C"},
{"text":"airplane","label":"D"}
]},
"answerKey":"A"
}
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)
We use this ``Tokenizer`` for both the premise and the hypothesis. See :class:`Tokenizer`.
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`.
"""
def __init__(self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None) -> None:
super().__init__()
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
@overrides
def read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
instances = []
with open(file_path, 'r') as data_file:
logger.info("Reading instances in ARC jsonl format from dataset at: %s", file_path)
for line in data_file:
item_json = json.loads(line.strip())
item_id = item_json["id"]
question_text = item_json["question"]["stem"]
choice_label_to_id = {}
choice_text_list = []
for choice_id, choice_item in enumerate(item_json["question"]["choices"]):
choice_label = choice_item["label"]
choice_label_to_id[choice_label] = choice_id
choice_text = choice_item["text"]
choice_text_list.append(choice_text)
answer_id = choice_label_to_id[item_json["answerKey"]]
instances.append(self.text_to_instance(item_id, question_text, choice_text_list, answer_id))
return Dataset(instances)
@overrides
def text_to_instance(self, # type: ignore
item_id: Any,
question_text: str,
choice_text_list: List[str],
answer_id: int) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
question_tokens = self._tokenizer.tokenize(question_text)
choices_tokens_list = [self._tokenizer.tokenize(x) for x in choice_text_list]
fields['question'] = TextField(question_tokens, self._token_indexers)
fields['choices_list'] = ListField([TextField(x, self._token_indexers) for x in choices_tokens_list])
fields['label'] = LabelField(answer_id, skip_indexing=True)
metadata = {
"id": item_id,
"question_text": question_text,
"choice_text_list": choice_text_list,
"question_tokens": [x.text for x in question_tokens],
"choice_tokens_list": [[x.text for x in ct] for ct in choices_tokens_list],
}
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
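    # Illustrative note (not in the original file): for the sample question in the class
    # docstring, this method receives item_id="MCAS_2000_4_6", the four choice texts in
    # order, and answer_id=0, since answerKey "A" maps to choice index 0 via
    # choice_label_to_id in read().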
@classmethod
def from_params(cls, params: Params) -> 'ArcMultiChoiceJsonReader':
tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
return ArcMultiChoiceJsonReader(tokenizer=tokenizer,
token_indexers=token_indexers) | ARC-Solvers-main | arc_solvers/data/dataset_readers/arc_multichoice_json_reader.py |
from arc_solvers.data.dataset_readers.entailment_tuple_reader import EntailmentTupleReader
| ARC-Solvers-main | arc_solvers/data/dataset_readers/__init__.py |
#!/usr/bin/python3
# This script uses the Python Elasticsearch API to index a user-specified text corpus in an
# ElasticSearch cluster. The corpus is expected to be a text file with a sentence per line.
# Each sentence is indexed as a separate document, and per the mappings defined here, the
# Snowball Stemmer is used to stem all tokens.
# If an index with the requested name does not exists, creates it, if not simply adds
# documents to existing index.
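# Example invocation (illustrative; the corpus path and index name are placeholders):
#   python index-corpus.py /path/to/my_corpus.txt arc_corpus localhost -p 9200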
import argparse, elasticsearch, json
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
if __name__ == "__main__":
# Arguments
parser = argparse.ArgumentParser(
description='Add lines from a file to a simple text Elasticsearch index.')
parser.add_argument('file', help='Path of file to index, e.g. /path/to/my_corpus.txt')
parser.add_argument('index', help='Name of index to create')
parser.add_argument('host', help='Elasticsearch host.')
parser.add_argument('-p', '--port', default=9200, help='port, default is 9200')
args = parser.parse_args()
# Get Index Name
index_name = args.index
# Document Type constant
TYPE = "sentence"
# Get an ElasticSearch client
    es = Elasticsearch(hosts=[{"host": args.host, "port": args.port}], max_retries=3, timeout=60)
# Mapping used to index all corpora used in Aristo solvers
mapping = '''
{
"mappings": {
"sentence": {
"dynamic": "false",
"properties": {
"docId": {
"type": "keyword"
},
"text": {
"analyzer": "snowball",
"type": "text",
"fields": {
"raw": {
"type": "keyword"
}
}
},
"tags": {
"type": "keyword"
}
}
}
}
}'''
# Function that constructs a json body to add each line of the file to index
def make_documents(f):
doc_id = 0
for l in f:
doc = {
'_op_type': 'create',
'_index': index_name,
'_type': TYPE,
'_id': doc_id,
'_source': {'text': l.strip()}
}
doc_id += 1
yield (doc)
# Create an index, ignore if it exists already
try:
res = es.indices.create(index=index_name, ignore=400, body=mapping)
# Bulk-insert documents into index
with open(args.file, "r") as f:
res = bulk(es, make_documents(f))
doc_count = res[0]
# Test Search.
print("Index {0} is ready. Added {1} documents.".format(index_name, doc_count))
query = input("Enter a test search phrase: ")
result = es.search(index=index_name, doc_type=TYPE,
body={"query": {"match": {"text": query.strip()}}})
if result.get('hits') is not None and result['hits'].get('hits') is not None:
print(result['hits']['hits'])
else:
print({})
except Exception as inst:
print(inst)
| ARC-Solvers-main | scripts/index-corpus.py |