python_code (stringlengths 0-187k) | repo_name (stringlengths 8-46) | file_path (stringlengths 6-135) |
---|---|---|
#!/usr/bin/env python
# Tsung-Yi Lin <[email protected]>
# Ramakrishna Vedantam <[email protected]>
import copy
from collections import defaultdict
import numpy as np
import pdb
import math
def precook(s, n=4, out=False):
"""
Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
:return: term frequency vector for occurring ngrams
"""
words = s.split()
counts = defaultdict(int)
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return counts
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
'''
return [precook(ref, n) for ref in refs]
def cook_test(test, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict)
'''
return precook(test, n, True)
class CiderScorer(object):
"""CIDEr scorer.
"""
def copy(self):
''' copy the refs.'''
new = CiderScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
return new
def __init__(self, test=None, refs=None, n=4, sigma=6.0):
''' singular instance '''
self.n = n
self.sigma = sigma
self.crefs = []
self.ctest = []
self.document_frequency = defaultdict(float)
self.cook_append(test, refs)
self.ref_len = None
def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
self.ctest.append(cook_test(test)) ## N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
def size(self):
assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
return len(self.crefs)
def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new CiderScorer instances
self.cook_append(other[0], other[1])
else:
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
return self
def compute_doc_freq(self):
'''
Compute document frequency for the reference data.
This will be used to compute idf (inverse document frequency) later.
The term frequency is stored in the object
:return: None
'''
for refs in self.crefs:
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for (ngram,count) in ref.items()]):
self.document_frequency[ngram] += 1
# maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
def compute_cider(self):
def counts2vec(cnts):
"""
Maps n-gram counts to a vector of tf-idf weights.
Returns vec, an array of dictionaries that map each n-gram to its tf-idf weight.
The n-th entry of the array corresponds to n-grams of length n+1.
:param cnts:
:return: vec (array of dict), norm (array of float), length (int)
"""
vec = [defaultdict(float) for _ in range(self.n)]
length = 0
norm = [0.0 for _ in range(self.n)]
for (ngram,term_freq) in cnts.items():
# give the ngram a document frequency of 1 if it doesn't appear in the reference corpus
df = np.log(max(1.0, self.document_frequency[ngram]))
# ngram index
n = len(ngram)-1
# tf (term_freq) * idf (precomputed idf) for n-grams
vec[n][ngram] = float(term_freq)*(self.ref_len - df)
# compute norm for the vector. the norm will be used for computing similarity
norm[n] += pow(vec[n][ngram], 2)
if n == 1:
length += term_freq
norm = [np.sqrt(n) for n in norm]
return vec, norm, length
def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
'''
Compute the cosine similarity of two vectors.
:param vec_hyp: array of dictionary for vector corresponding to hypothesis
:param vec_ref: array of dictionary for vector corresponding to reference
:param norm_hyp: array of float for vector corresponding to hypothesis
:param norm_ref: array of float for vector corresponding to reference
:param length_hyp: int containing length of hypothesis
:param length_ref: int containing length of reference
:return: array of scores, one cosine similarity per n-gram order
'''
delta = float(length_hyp - length_ref)
# measure cosine similarity
val = np.array([0.0 for _ in range(self.n)])
for n in range(self.n):
# ngram
for (ngram,count) in vec_hyp[n].items():
# vrama91 : added clipping
val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
val[n] /= (norm_hyp[n]*norm_ref[n])
assert(not math.isnan(val[n]))
# vrama91: added a length based gaussian penalty
val[n] *= np.e**(-(delta**2)/(2*self.sigma**2))
return val
# compute log reference length
self.ref_len = np.log(float(len(self.crefs)))
scores = []
for test, refs in zip(self.ctest, self.crefs):
# compute vector for test captions
vec, norm, length = counts2vec(test)
# compute vector for ref captions
score = np.array([0.0 for _ in range(self.n)])
for ref in refs:
vec_ref, norm_ref, length_ref = counts2vec(ref)
score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
# change by vrama91 - mean of ngram scores, instead of sum
score_avg = np.mean(score)
# divide by number of references
score_avg /= len(refs)
# multiply score by 10
score_avg *= 10.0
# append score of an image to the score list
scores.append(score_avg)
return scores
def compute_score(self, option=None, verbose=0):
# compute idf
self.compute_doc_freq()
# assert to check document frequency
assert(len(self.ctest) >= max(self.document_frequency.values()))
# compute cider score
score = self.compute_cider()
# debug
# print score
return np.mean(np.array(score)), np.array(score)
| comet-atomic-2020-master | system_eval/evaluation/cider/cider_scorer.py |
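Usage sketch for the CiderScorer above (not part of the original file; the captions are illustrative): (hypothesis, references) pairs are accumulated with += and compute_score returns the corpus mean plus per-image CIDEr scores.
scorer = CiderScorer(n=4, sigma=6.0)
# each hypothesis is a single string; the references are a list of strings
scorer += ("a cat sits on a mat", ["a cat is sitting on the mat", "there is a cat on a mat"])
scorer += ("a dog runs in the park", ["a dog is running through the park", "the dog runs in a park"])
mean_score, per_image_scores = scorer.compute_score()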
#!/usr/bin/env python
# Python wrapper for METEOR implementation, by Xinlei Chen
# Acknowledge Michael Denkowski for the generous discussion and help
import os
import sys
import nltk
from nltk.translate.meteor_score import meteor_score
# Assumes meteor-1.5.jar is in the same directory as meteor.py. Change as needed.
#METEOR_JAR = 'meteor-1.5.jar'
# print METEOR_JAR
class Meteor:
def __init__(self):
pass
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
scores = []
for i in imgIds:
assert(len(res[i]) == 1)
score = round(meteor_score(gts[i], res[i][0]), 4)
scores.append(score)
#print('{}\n'.format(eval_line))
#self.meteor_p.stdin.write('{}\n'.format(eval_line))
#print(self.meteor_p.stdout.readline().strip())
#for i in range(0,len(imgIds)):
# scores.append(float(self.meteor_p.stdout.readline().strip()))
#score = float(self.meteor_p.stdout.readline().strip())
#self.lock.release()
return sum(scores)/len(scores), scores
def method(self):
return "METEOR"
| comet-atomic-2020-master | system_eval/evaluation/meteor/meteor_nltk.py |
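Usage sketch for the NLTK-based Meteor wrapper above (illustrative, not part of the original file): gts maps each id to a list of reference strings, and res maps each id to a single-element list holding the hypothesis. Depending on the installed nltk version, meteor_score may require pre-tokenized input and the wordnet corpus (nltk.download('wordnet')).
gts = {"ex1": ["a cat sits on the mat", "there is a cat on a mat"]}
res = {"ex1": ["a cat is on the mat"]}
meteor = Meteor()
avg_score, per_example_scores = meteor.compute_score(gts, res)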
__author__ = 'tylin'
| comet-atomic-2020-master | system_eval/evaluation/meteor/__init__.py |
#!/usr/bin/env python
# Python wrapper for METEOR implementation, by Xinlei Chen
# Acknowledge Michael Denkowski for the generous discussion and help
import os
import sys
import subprocess
import threading
# Assumes meteor-1.5.jar is in the same directory as meteor.py. Change as needed.
METEOR_JAR = 'meteor-1.5.jar'
# print METEOR_JAR
class Meteor:
def __init__(self):
self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, \
'-', '-', '-stdio', '-l', 'en',
'-norm',
# '-t', 'adq'
# '-p', '0.85 0.2 0.6 0.75' # alpha beta gamma delta'',
# '-a', 'data/paraphrase-en.gz', '-m', 'exact stem paraphrase']
]
self.meteor_p = subprocess.Popen(self.meteor_cmd, \
cwd=os.path.dirname(os.path.abspath(__file__)), \
stdin=subprocess.PIPE, \
stdout=subprocess.PIPE, \
stderr=subprocess.PIPE)
# Used to guarantee thread safety
self.lock = threading.Lock()
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
scores = []
eval_line = 'EVAL'
self.lock.acquire()
for i in imgIds:
assert(len(res[i]) == 1)
stat = self._stat(res[i][0], gts[i])
eval_line += ' ||| {}'.format(stat)
print('{}\n'.format(eval_line))
self.meteor_p.stdin.write('{}\n'.format(eval_line))
print(self.meteor_p.stdout.readline().strip())
for i in range(0,len(imgIds)):
scores.append(float(self.meteor_p.stdout.readline().strip()))
score = float(self.meteor_p.stdout.readline().strip())
self.lock.release()
return score, scores
def method(self):
return "METEOR"
def _stat(self, hypothesis_str, reference_list):
# SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
hypothesis_str = hypothesis_str.replace('|||','').replace('  ', ' ')
score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
# print score_line
str_in = '{}\n'.format(score_line)
#self.meteor_p.communicate(str_in.encode('utf-8'))
self.meteor_p.stdin.write(str_in.encode('utf-8'))
return self.meteor_p.stdout.readline().strip()
def _score(self, hypothesis_str, reference_list):
self.lock.acquire()
# SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
hypothesis_str = hypothesis_str.replace('|||','').replace('  ', ' ')
score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
self.meteor_p.stdin.write('{}\n'.format(score_line))
stats = self.meteor_p.stdout.readline().strip()
eval_line = 'EVAL ||| {}'.format(stats)
# EVAL ||| stats
self.meteor_p.stdin.write('{}\n'.format(eval_line))
score = float(self.meteor_p.stdout.readline().strip())
# bug fix: there are two values returned by the jar file, one average, and one all, so do it twice
# thanks to Andrej for pointing this out
score = float(self.meteor_p.stdout.readline().strip())
self.lock.release()
return score
def __del__(self):
self.lock.acquire()
self.meteor_p.stdin.close()
self.meteor_p.kill()
self.meteor_p.wait()
self.lock.release()
| comet-atomic-2020-master | system_eval/evaluation/meteor/meteor.py |
#!/usr/bin/env python
#
# File Name : bleu.py
#
# Description : Wrapper for BLEU scorer.
#
# Creation Date : 06-01-2015
# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
from evaluation.bleu.bleu_scorer import BleuScorer
class Bleu:
def __init__(self, n=4):
# by default, compute BLEU score up to 4-grams
self._n = n
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
bleu_scorer = BleuScorer(n=self._n)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) >= 1)
bleu_scorer += (hypo[0], ref)
#score, scores = bleu_scorer.compute_score(option='shortest')
score, scores = bleu_scorer.compute_score(option='closest', verbose=0)
#score, scores = bleu_scorer.compute_score(option='average', verbose=0)
# return (bleu, bleu_info)
return score, scores
def method(self):
return "Bleu"
| comet-atomic-2020-master | system_eval/evaluation/bleu/bleu.py |
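A minimal usage sketch for the Bleu wrapper above (example sentences are illustrative): compute_score expects gts and res dicts keyed by the same ids, with single-hypothesis lists in res.
gts = {"img1": ["a man rides a horse", "a person is riding a horse"]}
res = {"img1": ["a man is riding a horse"]}
bleu = Bleu(n=4)
bleu_n, per_image = bleu.compute_score(gts, res)  # bleu_n is a list of BLEU-1..BLEU-4 values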
__author__ = 'tylin'
| comet-atomic-2020-master | system_eval/evaluation/bleu/__init__.py |
#!/usr/bin/env python
# bleu_scorer.py
# David Chiang <[email protected]>
# Copyright (c) 2004-2006 University of Maryland. All rights
# reserved. Do not redistribute without permission from the
# author. Not for commercial use.
# Modified by:
# Hao Fang <[email protected]>
# Tsung-Yi Lin <[email protected]>
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
'''
import copy
import sys, math, re
from collections import defaultdict
def precook(s, n=4, out=False):
"""Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well."""
words = s.split()
counts = defaultdict(int)
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return (len(words), counts)
def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
reflen = []
maxcounts = {}
for ref in refs:
rl, counts = precook(ref, n)
reflen.append(rl)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
# Calculate effective reference sentence length.
if eff == "shortest":
reflen = min(reflen)
elif eff == "average":
reflen = float(sum(reflen))/len(reflen)
## lhuang: N.B.: leave reflen computation to the very end!!
## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design)
return (reflen, maxcounts)
def cook_test(test, tup, eff=None, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflen, refmaxcounts) = tup
testlen, counts = precook(test, n, True)
result = {}
# Calculate effective reference sentence length.
if eff == "closest":
result["reflen"] = min((abs(l-testlen), l) for l in reflen)[1]
else: ## i.e., "average" or "shortest" or None
result["reflen"] = reflen
result["testlen"] = testlen
result["guess"] = [max(0,testlen-k+1) for k in range(1,n+1)]
result['correct'] = [0]*n
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result
class BleuScorer(object):
"""Bleu scorer.
"""
__slots__ = "n", "crefs", "ctest", "_score", "_ratio", "_testlen", "_reflen", "special_reflen"
# special_reflen is used in oracle (proportional effective ref len for a node).
def copy(self):
''' copy the refs.'''
new = BleuScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
new._score = None
return new
def __init__(self, test=None, refs=None, n=4, special_reflen=None):
''' singular instance '''
self.n = n
self.crefs = []
self.ctest = []
self.cook_append(test, refs)
self.special_reflen = special_reflen
def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
cooked_test = cook_test(test, self.crefs[-1])
self.ctest.append(cooked_test) ## N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
self._score = None ## need to recompute
def ratio(self, option=None):
self.compute_score(option=option)
return self._ratio
def score_ratio(self, option=None):
'''return (bleu, len_ratio) pair'''
return (self.fscore(option=option), self.ratio(option=option))
def score_ratio_str(self, option=None):
return "%.4f (%.2f)" % self.score_ratio(option)
def reflen(self, option=None):
self.compute_score(option=option)
return self._reflen
def testlen(self, option=None):
self.compute_score(option=option)
return self._testlen
def retest(self, new_test):
if type(new_test) is str:
new_test = [new_test]
assert len(new_test) == len(self.crefs), new_test
self.ctest = []
for t, rs in zip(new_test, self.crefs):
self.ctest.append(cook_test(t, rs))
self._score = None
return self
def rescore(self, new_test):
''' replace test(s) with new test(s), and returns the new score.'''
return self.retest(new_test).compute_score()
def size(self):
assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
return len(self.crefs)
def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new BleuScorer instances
self.cook_append(other[0], other[1])
else:
assert self.compatible(other), "incompatible BLEUs."
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
self._score = None ## need to recompute
return self
def compatible(self, other):
return isinstance(other, BleuScorer) and self.n == other.n
def single_reflen(self, option="average"):
return self._single_reflen(self.crefs[0][0], option)
def _single_reflen(self, reflens, option=None, testlen=None):
if option == "shortest":
reflen = min(reflens)
elif option == "average":
reflen = float(sum(reflens))/len(reflens)
elif option == "closest":
reflen = min((abs(l-testlen), l) for l in reflens)[1]
else:
assert False, "unsupported reflen option %s" % option
return reflen
def recompute_score(self, option=None, verbose=0):
self._score = None
return self.compute_score(option, verbose)
def compute_score(self, option=None, verbose=0):
n = self.n
small = 1e-9
tiny = 1e-15 ## so that if guess is 0 still return 0
bleu_list = [[] for _ in range(n)]
if self._score is not None:
return self._score
if option is None:
option = "average" if len(self.crefs) == 1 else "closest"
self._testlen = 0
self._reflen = 0
totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
# for each sentence
for comps in self.ctest:
testlen = comps['testlen']
self._testlen += testlen
if self.special_reflen is None: ## need computation
reflen = self._single_reflen(comps['reflen'], option, testlen)
else:
reflen = self.special_reflen
self._reflen += reflen
for key in ['guess','correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
# append per image bleu score
bleu = 1.
for k in range(n):
bleu *= (float(comps['correct'][k]) + tiny) \
/(float(comps['guess'][k]) + small)
bleu_list[k].append(bleu ** (1./(k+1)))
ratio = (testlen + tiny) / (reflen + small) ## N.B.: avoid zero division
if ratio < 1:
for k in range(n):
bleu_list[k][-1] *= math.exp(1 - 1/ratio)
if verbose > 1:
print(comps, reflen)
totalcomps['reflen'] = self._reflen
totalcomps['testlen'] = self._testlen
bleus = []
bleu = 1.
for k in range(n):
bleu *= float(totalcomps['correct'][k] + tiny) \
/ (totalcomps['guess'][k] + small)
bleus.append(bleu ** (1./(k+1)))
ratio = (self._testlen + tiny) / (self._reflen + small) ## N.B.: avoid zero division
if ratio < 1:
for k in range(n):
bleus[k] *= math.exp(1 - 1/ratio)
if verbose > 0:
print(totalcomps)
print("ratio:", ratio)
self._score = bleus
return self._score, bleu_list
| comet-atomic-2020-master | system_eval/evaluation/bleu/bleu_scorer.py |
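Usage sketch for the lower-level BleuScorer (illustrative, not part of the original file): (hypothesis, references) tuples are accumulated with +=, mirroring how the Bleu wrapper above uses it.
scorer = BleuScorer(n=4)
scorer += ("a man is riding a horse", ["a man rides a horse", "a person is riding a horse"])
scorer += ("a cat on a mat", ["there is a cat on the mat"])
corpus_bleu, per_sentence_bleu = scorer.compute_score(option="closest")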
from bert_score import score
# Code for BertScore reused from original implementation: https://github.com/Tiiiger/bert_score
class BertScore:
def __init__(self):
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
hyp_input = []
ref_input = []
same_indices = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) >= 1)
hyp_input += [hypo[0]] * len(ref)
ref_input += ref
same_indices.append(len(ref_input))
p, r, f_scores = score(hyp_input, ref_input, model_type="bert-base-uncased")
prev_idx = 0
aggreg_f1_scores = []
for idx in same_indices:
aggreg_f1_scores.append(f_scores[prev_idx: idx].mean().cpu().item())
prev_idx = idx
return sum(aggreg_f1_scores)/len(aggreg_f1_scores), aggreg_f1_scores
def method(self):
return "Bert Score"
| comet-atomic-2020-master | system_eval/evaluation/bert_score/bert_score.py |
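Usage sketch for the BertScore wrapper above (illustrative): it relies on the pip-installed bert_score package and downloads bert-base-uncased on first use; gts/res follow the same convention as the other scorers.
gts = {"ex1": ["a cat sits on the mat", "there is a cat on a mat"]}
res = {"ex1": ["a cat is on the mat"]}
bs = BertScore()
avg_f1, per_example_f1 = bs.compute_score(gts, res)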
| comet-atomic-2020-master | system_eval/evaluation/bert_score/__init__.py |
import torch
from math import log
from itertools import chain
from collections import defaultdict, Counter
from multiprocessing import Pool
from functools import partial
from tqdm.auto import tqdm
__all__ = ['bert_types']
bert_types = [
'bert-base-uncased',
'bert-large-uncased',
'bert-base-cased',
'bert-large-cased',
'bert-base-multilingual-uncased',
'bert-base-multilingual-cased',
'bert-base-chinese',
]
def padding(arr, pad_token, dtype=torch.long):
lens = torch.LongTensor([len(a) for a in arr])
max_len = lens.max().item()
padded = torch.ones(len(arr), max_len, dtype=dtype) * pad_token
mask = torch.zeros(len(arr), max_len, dtype=torch.long)
for i, a in enumerate(arr):
padded[i, :lens[i]] = torch.tensor(a, dtype=dtype)
mask[i, :lens[i]] = 1
return padded, lens, mask
def bert_encode(model, x, attention_mask):
model.eval()
x_seg = torch.zeros_like(x, dtype=torch.long)
with torch.no_grad():
x_encoded_layers, pooled_output = model(x, x_seg, attention_mask=attention_mask, output_all_encoded_layers=False)
return x_encoded_layers
def process(a, tokenizer=None):
if not tokenizer is None:
a = ["[CLS]"]+tokenizer.tokenize(a)+["[SEP]"]
a = tokenizer.convert_tokens_to_ids(a)
return set(a)
def get_idf_dict(arr, tokenizer, nthreads=4):
"""
Returns mapping from word piece index to its inverse document frequency.
Args:
- :param: `arr` (list of str) : sentences to process.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `nthreads` (int) : number of CPU threads to use
"""
idf_count = Counter()
num_docs = len(arr)
process_partial = partial(process, tokenizer=tokenizer)
with Pool(nthreads) as p:
idf_count.update(chain.from_iterable(p.map(process_partial, arr)))
idf_dict = defaultdict(lambda : log((num_docs+1)/(1)))
idf_dict.update({idx:log((num_docs+1)/(c+1)) for (idx, c) in idf_count.items()})
return idf_dict
def collate_idf(arr, tokenize, numericalize, idf_dict,
pad="[PAD]", device='cuda:0'):
"""
Helper function that pads a list of sentences to have the same length and
loads idf score for words in the sentences.
Args:
- :param: `arr` (list of str): sentences to process.
- :param: `tokenize` : a function that takes a string and returns a list
of tokens.
- :param: `numericalize` : a function that takes a list of tokens and
returns a list of token indexes.
- :param: `idf_dict` (dict): mapping a word piece index to its
inverse document frequency
- :param: `pad` (str): the padding token.
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
arr = [["[CLS]"]+tokenize(a)+["[SEP]"] for a in arr]
arr = [numericalize(a) for a in arr]
idf_weights = [[idf_dict[i] for i in a] for a in arr]
pad_token = numericalize([pad])[0]
padded, lens, mask = padding(arr, pad_token, dtype=torch.long)
padded_idf, _, _ = padding(idf_weights, pad_token, dtype=torch.float)
padded = padded.to(device=device)
mask = mask.to(device=device)
lens = lens.to(device=device)
return padded, padded_idf, lens, mask
def get_bert_embedding(all_sens, model, tokenizer, idf_dict,
batch_size=-1, device='cuda:0'):
"""
Compute BERT embedding in batches.
Args:
- :param: `all_sens` (list of str) : sentences to encode.
- :param: `model` : a BERT model from `pytorch_pretrained_bert`.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `idf_dict` (dict) : mapping a word piece index to its
inverse document frequency
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
padded_sens, padded_idf, lens, mask = collate_idf(all_sens,
tokenizer.tokenize, tokenizer.convert_tokens_to_ids,
idf_dict,
device=device)
if batch_size == -1: batch_size = len(all_sens)
embeddings = []
with torch.no_grad():
for i in range(0, len(all_sens), batch_size):
batch_embedding = bert_encode(model, padded_sens[i:i+batch_size],
attention_mask=mask[i:i+batch_size])
# batch_embedding = torch.stack(batch_embedding)
embeddings.append(batch_embedding)
del batch_embedding
total_embedding = torch.cat(embeddings, dim=0)
return total_embedding, lens, mask, padded_idf
def greedy_cos_idf(ref_embedding, ref_lens, ref_masks, ref_idf,
hyp_embedding, hyp_lens, hyp_masks, hyp_idf):
"""
Compute greedy matching based on cosine similarity.
Args:
- :param: `ref_embedding` (torch.Tensor):
embeddings of reference sentences, BxKxd,
B: batch size, K: longest length, d: bert dimension
- :param: `ref_lens` (list of int): list of reference sentence length.
- :param: `ref_masks` (torch.LongTensor): BxKxK, BERT attention mask for
reference sentences.
- :param: `ref_idf` (torch.Tensor): BxK, idf score of each word
piece in the reference sentence
- :param: `hyp_embedding` (torch.Tensor):
embeddings of candidate sentences, BxKxd,
B: batch size, K: longest length, d: bert dimension
- :param: `hyp_lens` (list of int): list of candidate sentence length.
- :param: `hyp_masks` (torch.LongTensor): BxKxK, BERT attention mask for
candidate sentences.
- :param: `hyp_idf` (torch.Tensor): BxK, idf score of each word
piece in the candidate sentence
"""
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
batch_size = ref_embedding.size(0)
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
masks = torch.bmm(hyp_masks.unsqueeze(2).float(), ref_masks.unsqueeze(1).float())
masks = masks.expand(batch_size, masks.size(1), masks.size(2))\
.contiguous().view_as(sim)
masks = masks.float().to(sim.device)
sim = sim * masks
word_precision = sim.max(dim=2)[0]
word_recall = sim.max(dim=1)[0]
hyp_idf.div_(hyp_idf.sum(dim=1, keepdim=True))
ref_idf.div_(ref_idf.sum(dim=1, keepdim=True))
precision_scale = hyp_idf.to(word_precision.device)
recall_scale = ref_idf.to(word_recall.device)
P = (word_precision * precision_scale).sum(dim=1)
R = (word_recall * recall_scale).sum(dim=1)
F = 2 * P * R / (P + R)
return P, R, F
def bert_cos_score_idf(model, refs, hyps, tokenizer, idf_dict,
verbose=False, batch_size=64, device='cuda:0'):
"""
Compute BERTScore.
Args:
- :param: `model` : a BERT model in `pytorch_pretrained_bert`
- :param: `refs` (list of str): reference sentences
- :param: `hyps` (list of str): candidate sentences
- :param: `tokenizer` : a BERT tokenizer corresponding to `model`
- :param: `idf_dict` : a dictionary mapping a word piece index to its
inverse document frequency
- :param: `verbose` (bool): turn on intermediate status update
- :param: `batch_size` (int): bert score processing batch size
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
preds = []
iter_range = range(0, len(refs), batch_size)
if verbose: iter_range = tqdm(iter_range)
for batch_start in iter_range:
batch_refs = refs[batch_start:batch_start+batch_size]
batch_hyps = hyps[batch_start:batch_start+batch_size]
ref_stats = get_bert_embedding(batch_refs, model, tokenizer, idf_dict,
device=device)
hyp_stats = get_bert_embedding(batch_hyps, model, tokenizer, idf_dict,
device=device)
P, R, F1 = greedy_cos_idf(*ref_stats, *hyp_stats)
preds.append(torch.stack((P, R, F1), dim=1).cpu())
preds = torch.cat(preds, dim=0)
return preds
| comet-atomic-2020-master | system_eval/evaluation/bert_score/utils.py |
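A small sketch of the padding helper above (values are illustrative): it right-pads variable-length id sequences and returns the padded tensor, the sequence lengths, and an attention mask.
import torch
token_ids = [[101, 2023, 102], [101, 102]]
padded, lens, mask = padding(token_ids, pad_token=0, dtype=torch.long)
# padded.shape == (2, 3); mask marks real tokens with 1 and padding positions with 0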
import os
import time
import argparse
import torch
from collections import defaultdict
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from .utils import get_idf_dict, bert_cos_score_idf,\
get_bert_embedding, bert_types
__all__ = ['score', 'plot_example']
def score(cands, refs, bert="bert-base-multilingual-cased",
num_layers=8, verbose=False, no_idf=False, batch_size=64):
"""
BERTScore metric.
Args:
- :param: `cands` (list of str): candidate sentences
- :param: `refs` (list of str): reference sentences
- :param: `bert` (str): bert specification
- :param: `num_layers` (int): the layer of representation to use
- :param: `verbose` (bool): turn on intermediate status update
- :param: `no_idf` (bool): do not use idf weighting
- :param: `batch_size` (int): bert score processing batch size
"""
assert len(cands) == len(refs)
assert bert in bert_types
tokenizer = BertTokenizer.from_pretrained(bert)
model = BertModel.from_pretrained(bert)
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
# drop unused layers
model.encoder.layer = torch.nn.ModuleList([layer for layer in model.encoder.layer[:num_layers]])
if no_idf:
idf_dict = defaultdict(lambda: 1.)
# set idf for [SEP] and [CLS] to 0
idf_dict[101] = 0
idf_dict[102] = 0
else:
if verbose:
print('preparing IDF dict...')
start = time.perf_counter()
idf_dict = get_idf_dict(refs, tokenizer)
if verbose:
print('done in {:.2f} seconds'.format(time.perf_counter() - start))
if verbose:
print('calculating scores...')
start = time.perf_counter()
all_preds = bert_cos_score_idf(model, refs, cands, tokenizer, idf_dict,
verbose=verbose, device=device, batch_size=batch_size)
P = all_preds[:, 0].cpu()
R = all_preds[:, 1].cpu()
F1 = all_preds[:, 2].cpu()
if verbose:
print('done in {:.2f} seconds'.format(time.perf_counter() - start))
return P, R, F1
def plot_example(h, r, verbose=False, bert="bert-base-multilingual-cased",
num_layers=8, fname=''):
"""
BERTScore metric.
Args:
- :param: `h` (str): a candidate sentence
- :param: `r` (str): a reference sentence
- :param: `verbose` (bool): turn on intermediate status update
- :param: `bert` (str): bert specification
- :param: `num_layers` (int): the layer of representation to use
"""
assert bert in bert_types
if verbose:
print('loading BERT model...')
tokenizer = BertTokenizer.from_pretrained(bert)
model = BertModel.from_pretrained(bert)
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
h_tokens = ['[CLS]'] + tokenizer.tokenize(h) + ['[SEP]']
r_tokens = ['[CLS]'] + tokenizer.tokenize(r) + ['[SEP]']
model.encoder.layer = torch.nn.ModuleList([layer for layer in model.encoder.layer[:num_layers]])
idf_dict = defaultdict(lambda: 1.)
ref_embedding, ref_lens, ref_masks, padded_idf = get_bert_embedding([r], model, tokenizer, idf_dict,
device=device)
hyp_embedding, ref_lens, ref_masks, padded_idf = get_bert_embedding([h], model, tokenizer, idf_dict,
device=device)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
batch_size = ref_embedding.size(1)
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2)).cpu()
sim = sim.squeeze(0).numpy()
# remove [CLS] and [SEP] tokens
r_tokens = r_tokens[1:-1]
h_tokens = h_tokens[1:-1]
sim = sim[1:-1,1:-1]
fig, ax = plt.subplots(figsize=(len(r_tokens)*0.8, len(h_tokens)*0.8))
im = ax.imshow(sim, cmap='Blues')
# We want to show all ticks...
ax.set_xticks(np.arange(len(r_tokens)))
ax.set_yticks(np.arange(len(h_tokens)))
# ... and label them with the respective list entries
ax.set_xticklabels(r_tokens, fontsize=10)
ax.set_yticklabels(h_tokens, fontsize=10)
plt.xlabel("Refernce", fontsize=10)
plt.ylabel("Candidate", fontsize=10)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(h_tokens)):
for j in range(len(r_tokens)):
text = ax.text(j, i, '{:.3f}'.format(sim[i, j]),
ha="center", va="center", color="k" if sim[i, j] < 0.6 else "w")
# P = sim.max(1).mean()
# R = sim.max(0).mean()
# F1 = 2 * P * R / (P + R)
fig.tight_layout()
# plt.title("BERT-F1: {:.3f}".format(F1), fontsize=10)
if fname != "":
print("Saved figure to file: ", fname+".png")
plt.savefig(fname+'.png', dpi=100)
plt.show()
| comet-atomic-2020-master | system_eval/evaluation/bert_score/score.py |
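Usage sketch for the local score() function above (illustrative): with a single reference per candidate, idf weighting is not meaningful, so no_idf=True is passed; the bert argument must be one of bert_types and the model is downloaded via pytorch_pretrained_bert.
cands = ["a cat is on the mat"]
refs = ["there is a cat on the mat"]
P, R, F1 = score(cands, refs, bert="bert-base-uncased", no_idf=True, verbose=True)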
#!/usr/bin/env python
#
# File Name : rouge.py
#
# Description : Computes ROUGE-L metric as described by Lin and Hovey (2004)
#
# Creation Date : 2015-01-07 06:03
# Author : Ramakrishna Vedantam <[email protected]>
import numpy as np
import pdb
def my_lcs(string, sub):
"""
Calculates longest common subsequence for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
:returns: length (int): length of the longest common subsequence between the two strings
Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
"""
if(len(string)< len(sub)):
sub, string = string, sub
lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]
for j in range(1,len(sub)+1):
for i in range(1,len(string)+1):
if(string[i-1] == sub[j-1]):
lengths[i][j] = lengths[i-1][j-1] + 1
else:
lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])
return lengths[len(string)][len(sub)]
class Rouge():
'''
Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set
'''
def __init__(self):
# vrama91: updated the value below based on discussion with Hovey
self.beta = 1.2
def calc_score(self, candidate, refs):
"""
Compute ROUGE-L score given one candidate and references for an image
:param candidate: list of str : single-element list containing the candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
:returns score: float (ROUGE-L score for the candidate evaluated against references)
"""
assert(len(candidate)==1)
assert(len(refs)>0)
prec = []
rec = []
# split into tokens
token_c = candidate[0].split(" ")
for reference in refs:
# split into tokens
token_r = reference.split(" ")
# compute the longest common subsequence
lcs = my_lcs(token_r, token_c)
prec.append(lcs/float(len(token_c)))
rec.append(lcs/float(len(token_r)))
prec_max = max(prec)
rec_max = max(rec)
if(prec_max!=0 and rec_max !=0):
score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)
else:
score = 0.0
return score
def compute_score(self, gts, res):
"""
Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
:param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
"""
assert(gts.keys() == res.keys())
imgIds = gts.keys()
score = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
score.append(self.calc_score(hypo, ref))
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
average_score = np.mean(np.array(score))
print("len score:", len(score))
return average_score, np.array(score)
def method(self):
return "Rouge"
| comet-atomic-2020-master | system_eval/evaluation/rouge/rouge.py |
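Usage sketch for the Rouge scorer above (illustrative), following the same gts/res convention as the other scorers:
gts = {"img1": ["a man rides a horse", "a person is riding a horse"]}
res = {"img1": ["a man is riding a horse"]}
rouge = Rouge()
avg_rouge_l, per_image = rouge.compute_score(gts, res)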
__author__ = 'vrama91'
| comet-atomic-2020-master | system_eval/evaluation/rouge/__init__.py |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="aries",
version="0.1.0",
author="Mike D'Arcy",
author_email="[email protected]",
description="Code for the ARIES project",
long_description=long_description,
long_description_content_type="text/markdown",
license="Apache 2.0",
url="https://github.com/allenai/aries",
packages=setuptools.find_packages(),
classifiers=[
"Topic :: Scientific/Engineering",
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3",
"Operating System :: POSIX :: Linux",
],
)
| aries-master | setup.py |
from aries.util.edit import levenshtein_distance, basic_token_align, find_overlapping_substrings
def test_basic_token_align():
seq1 = ['this', 'is', 'my', 'sentence']
seq2 = ['this', 'is', 'my', 'sentence']
d, align = basic_token_align(seq1, seq2)
assert d == 0
assert align == [0, 1, 2, 3]
seq2 = ['t', 'h', 'i', 's', 'i', 's', 'm', 'y', 's', 'e', 'n', 't', 'e', 'n', 'c', 'e']
d, align = basic_token_align(seq1, seq2)
assert d == 0
assert align == [0]*4 + [1]*2 + [2]*2 + [3]*8
seq2 = ['thisi', 's', 'mys', 'entence']
d, align = basic_token_align(seq1, seq2)
assert d == 2
assert align == [0, 1, 2, 3]
seq2 = ['this', '_is', '_my', '_sentence']
d, align = basic_token_align(seq1, seq2)
assert d == 3
assert align == [0, 1, 2, 3]
seq2 = ['this', 'is', 'my']
try:
d, align = basic_token_align(seq1, seq2)
assert False, "Expected error since characters didn't match"
except ValueError:
pass
seq2 = ['[this]', 'this', 'is', '[smy]', 'my', 'sentence', '[e]']
d, align = basic_token_align(seq1, seq2, seq2_ignored_ids=[0,3,6])
assert d == 0
assert align == [None, 0, 1, None, 2, 3, None]
def test_levenshtein():
assert levenshtein_distance('', '') == 0
assert levenshtein_distance('', 'text') == 4
assert levenshtein_distance('text', '') == 4
assert levenshtein_distance('text', 'text') == 0
assert levenshtein_distance('text', 'textb') == 1
assert levenshtein_distance('textb', 'text') == 1
assert levenshtein_distance('texta', 'textb') == 1
assert levenshtein_distance('abba', 'acca') == 2
def test_find_overlapping_substrings():
assert find_overlapping_substrings('', '', min_length=1) == []
assert find_overlapping_substrings('', 'text', min_length=1) == []
assert find_overlapping_substrings('text', '', min_length=1) == []
assert find_overlapping_substrings('text', 'text', min_length=1) == [((0, 4), (0, 4))]
assert find_overlapping_substrings('text', 'text', min_length=4) == [((0, 4), (0, 4))]
assert find_overlapping_substrings('text', 'text', min_length=5) == []
assert find_overlapping_substrings('atext', 'text', min_length=2) == [((1, 5), (0, 4))]
assert find_overlapping_substrings('texta', 'text', min_length=2) == [((0, 4), (0, 4))]
assert find_overlapping_substrings('text', 'atext', min_length=2) == [((0, 4), (1, 5))]
assert find_overlapping_substrings('text', 'texta', min_length=2) == [((0, 4), (0, 4))]
assert find_overlapping_substrings('btext', 'atext', min_length=2) == [((1, 5), (1, 5))]
assert sorted(find_overlapping_substrings('the man and the cat', 'the cat and the man', min_length=4)) == [((0, 4), (0, 4)), ((0, 7), (12, 19)), ((7, 16), (7, 16)), ((12, 19), (0, 7))]
| aries-master | tests/test_edit.py |
from . import *
| aries-master | aries/__init__.py |
import datetime
import json
import logging
import os
import sqlite3
import time
import openai
import tiktoken
import tqdm
logger = logging.getLogger(__name__)
class Gpt3CacheClient:
def __init__(self, cache_db_path):
self.cache_db = self._init_cache_db(cache_db_path)
if openai.api_key is None:
if "OPENAI_API_KEY" not in os.environ:
logger.error("Need OpenAI key in OPENAI_API_KEY")
openai.api_key = os.environ["OPENAI_API_KEY"]
self.tokenizer = None
self.tokenizers_by_model = dict()
def estimate_num_tokens(self, text, model="text-davinci-003"):
return len(self._get_tokenizer(model).encode(text))
def _get_tokenizer(self, model):
if model not in self.tokenizers_by_model:
self.tokenizers_by_model[model] = tiktoken.encoding_for_model(model)
return self.tokenizers_by_model[model]
def __enter__(self):
self.cache_db.__enter__()
return self
def __exit__(self, *args, **kwargs):
self.cache_db.__exit__(*args, **kwargs)
def close(self):
self.cache_db.close()
def _init_cache_db(self, cache_db_path):
db = sqlite3.connect(cache_db_path)
try:
cur = db.cursor()
cur.execute(
"""create table if not exists gpt3_cache (
model text not null,
prompt text not null,
temperature real not null,
top_p real not null,
max_tokens integer not null,
total_tokens integer not null,
frequency_penalty real not null,
presence_penalty real not null,
logprobs integer not null,
response_json text not null,
response_timestamp real
)"""
)
cur.execute("create index if not exists prompt_index on gpt3_cache (prompt)")
cur.execute(
"""create table if not exists chat_gpt3_cache (
model text not null,
messages_json text not null,
temperature real not null,
top_p real not null,
max_tokens integer not null,
total_tokens integer not null,
frequency_penalty real not null,
presence_penalty real not null,
response_json text not null,
response_timestamp real
)"""
)
cur.execute("create index if not exists messages_json_index on chat_gpt3_cache (messages_json)")
db.commit()
return db
except Exception as e:
db.close()
raise e
def get_gpt3_result(self, *args, **kwargs):
"""Deprecated. Use prompt_completion() instead."""
return self.prompt_completion(*args, **kwargs)
def prompt_completion(
self,
model,
prompt,
temperature,
max_tokens,
top_p,
frequency_penalty,
presence_penalty,
prompt_token_count=-1,
logprobs=0,
):
"""Works like openai.Completion.create, but adds a caching layer."""
if prompt_token_count < 0:
prompt_token_count = self.estimate_num_tokens(prompt, model)
db_keyvals = {
"model": model,
"prompt": prompt,
"temperature": temperature,
"max_tokens": max_tokens,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
"logprobs": logprobs,
}
cur = self.cache_db.cursor()
cache_json = None
from_cache = False
# Cache only makes sense if temperature==0 (deterministic result)
if temperature == 0.0:
select_keyvals = db_keyvals.copy()
select_keyvals["prompt_token_count"] = prompt_token_count
dbrecs = cur.execute(
"""select response_json from gpt3_cache
where
model = :model and
prompt = :prompt and
temperature = :temperature and
((:prompt_token_count+max_tokens) > total_tokens or max_tokens = :max_tokens) and
total_tokens <= (:prompt_token_count+:max_tokens) and
top_p = :top_p and
frequency_penalty = :frequency_penalty and
presence_penalty = :presence_penalty and
logprobs >= :logprobs""",
select_keyvals,
).fetchall()
if len(dbrecs) == 1:
cache_json = dbrecs[0][0]
elif len(dbrecs) >= 2:
logger.warning("Got {} recs for gpt3 query when only one was expected.".format(len(dbrecs)))
cache_json = dbrecs[0][0]
if cache_json is None:
logger.debug("UNCACHED prompt completion")
resp = openai.Completion.create(**db_keyvals)
insert_keyvals = db_keyvals.copy()
cache_json = json.dumps(resp)
insert_keyvals["response_json"] = cache_json
insert_keyvals["response_timestamp"] = datetime.datetime.timestamp(datetime.datetime.utcnow())
insert_keyvals["total_tokens"] = resp["usage"]["total_tokens"]
cur.execute(
"""INSERT INTO gpt3_cache ( model, prompt, temperature, top_p, max_tokens, frequency_penalty, presence_penalty, logprobs, response_json, response_timestamp, total_tokens)
VALUES (:model, :prompt, :temperature, :top_p, :max_tokens, :frequency_penalty, :presence_penalty, :logprobs, :response_json, :response_timestamp, :total_tokens)""",
insert_keyvals,
)
self.cache_db.commit()
else:
from_cache = True
resp = json.loads(cache_json)
if from_cache:
resp["usage"]["uncached_total_tokens"] = 0
else:
resp["usage"]["uncached_total_tokens"] = resp["usage"]["total_tokens"]
return resp
def chat_completion(
self,
model,
messages,
temperature,
max_tokens,
top_p,
frequency_penalty,
presence_penalty,
messages_token_count=-1,
max_retries=3,
):
"""Works like openai.ChatCompletion.create, but adds a caching layer."""
# Sort keys when serializing to maximize cache hits
messages_json = json.dumps(messages, sort_keys=True)
if messages_token_count < 0:
messages_token_count = sum(self.estimate_num_tokens(x["content"], model) for x in messages)
db_keyvals = {
"model": model,
"messages_json": messages_json,
"temperature": temperature,
"max_tokens": max_tokens,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
}
cur = self.cache_db.cursor()
cache_json = None
from_cache = False
# Cache only makes sense if temperature==0 (deterministic result)
if temperature == 0.0:
select_keyvals = db_keyvals.copy()
select_keyvals["messages_token_count"] = messages_token_count
dbrecs = cur.execute(
"""select response_json from chat_gpt3_cache
where
model = :model and
messages_json = :messages_json and
temperature = :temperature and
((:messages_token_count+max_tokens) > total_tokens or max_tokens = :max_tokens) and
total_tokens <= (:messages_token_count+:max_tokens) and
top_p = :top_p and
frequency_penalty = :frequency_penalty and
presence_penalty = :presence_penalty
""",
select_keyvals,
).fetchall()
if len(dbrecs) == 1:
cache_json = dbrecs[0][0]
elif len(dbrecs) >= 2:
logger.warning("Got {} recs for gpt3 query when only one was expected.".format(len(dbrecs)))
cache_json = dbrecs[0][0]
if cache_json is None:
logger.debug("UNCACHED chat completion")
model_keyvals = db_keyvals.copy()
del model_keyvals["messages_json"]
model_keyvals["messages"] = messages
resp = None
while resp is None and max_retries >= 0:
try:
resp = openai.ChatCompletion.create(**model_keyvals)
except openai.error.RateLimitError:
logger.warning("Rate limit error on openai request, waiting 60 seconds and trying again")
time.sleep(60)
max_retries -= 1
insert_keyvals = db_keyvals.copy()
cache_json = json.dumps(resp)
insert_keyvals["response_json"] = cache_json
insert_keyvals["response_timestamp"] = datetime.datetime.timestamp(datetime.datetime.utcnow())
insert_keyvals["total_tokens"] = resp["usage"]["total_tokens"]
cur.execute(
"""INSERT INTO chat_gpt3_cache ( model, messages_json, temperature, top_p, max_tokens, frequency_penalty, presence_penalty, response_json, response_timestamp, total_tokens)
VALUES (:model, :messages_json, :temperature, :top_p, :max_tokens, :frequency_penalty, :presence_penalty, :response_json, :response_timestamp, :total_tokens)""",
insert_keyvals,
)
self.cache_db.commit()
else:
from_cache = True
resp = json.loads(cache_json)
if from_cache:
resp["usage"]["uncached_total_tokens"] = 0
else:
resp["usage"]["uncached_total_tokens"] = resp["usage"]["total_tokens"]
return resp
| aries-master | aries/util/gpt3.py |
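Usage sketch for Gpt3CacheClient (the cache path, model name, and prompt are illustrative): the client requires OPENAI_API_KEY in the environment and caches deterministic (temperature=0) responses in SQLite.
with Gpt3CacheClient("gpt3_cache.sqlite") as client:
    resp = client.chat_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Summarize this edit in one sentence."}],
        temperature=0.0,
        max_tokens=64,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=0.0,
    )
    text = resp["choices"][0]["message"]["content"]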
import logging
import os
import pprint
from typing import Callable, List, Union
logger = logging.getLogger(__name__)
def init_logging(logfile=None, level=logging.INFO):
handlers = [
logging.StreamHandler(),
]
if logfile:
handlers.append(logging.FileHandler(logfile))
logging.basicConfig(
format="%(asctime)s [%(levelname)s] (%(name)s): %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=level,
handlers=handlers,
)
def pprint_metrics(metrics, print_fn: Union[Callable[[str], None], logging.Logger] = print, val_format="{:0.4f}", int_format="{:d}", name="eval"):
if isinstance(print_fn, logging.Logger):
print_fn = print_fn.info
if name != "":
name += " "
for k, v in metrics.items():
vstr = str(v)
if isinstance(v, float) or isinstance(v, int):
vstr = val_format.format(v)
if isinstance(v, int) and int_format is not None:
vstr = int_format.format(v)
print_fn("{name}{metric_name}: {val}".format(name=name, metric_name=k, val=vstr))
class PrettyFloatPrinter(pprint.PrettyPrinter):
def __init__(self, *args, **kwargs):
if "sort_dicts" not in kwargs:
kwargs["sort_dicts"] = False
super().__init__(*args, **kwargs)
def format(self, obj, ctx, maxlvl, lvl):
if isinstance(obj, float):
return "{:.4f}".format(obj), True, False
# elif isinstance(obj, dict):
# print('gd', obj)
# v = '{' + ',\n'.join(["'{}': {}".format(k, self.format(v, ctx, maxlvl, lvl+1)[0]) for k, v in obj.items()]) + '}', True, False
# print(v[0])
# return v
return pprint.PrettyPrinter.format(self, obj, ctx, maxlvl, lvl + 1)
def table2str(grid, format_fn=str, col_names=None, row_names=None, colsep=" | ", rowend="", header_row_sep="-"):
if col_names is None:
col_names = ["" for _ in range(len(grid[0]))]
col_names = list(map(str, col_names))
if row_names is None:
row_names = ["" for _ in range(len(grid))]
row_names = list(map(str, row_names))
new_grid = [[""] + col_names]
for rowidx, row in enumerate(grid):
new_grid.append([row_names[rowidx]] + [format_fn(cell) for cell in row])
return raw_table2str(new_grid, colsep=colsep, rowend=rowend, header_row_sep=header_row_sep)
def raw_table2str(grid, colsep=" | ", rowend="", header_row_sep="-"):
s = ""
col_widths = [max(len(grid[y][x]) for y in range(len(grid))) for x in range(len(grid[0]))]
for y, row in enumerate(grid):
if all(cell == "" for cell in row[1:]):
continue
# s += ' '
s += colsep.join(["{text:>{width}s}".format(width=col_widths[x], text=cell) if col_widths[x] != 0 else "" for x, cell in enumerate(row)])
s += "{}\n".format(rowend)
if y == 0:
if len(header_row_sep) == 1:
s += header_row_sep * (sum(col_widths) + len(colsep) * (len(col_widths) - 1) + 1) + "\n"
elif len(header_row_sep) == 0:
continue
else:
s += header_row_sep + ("\n" if not header_row_sep.endswith("\n") else "")
return s
| aries-master | aries/util/logging.py |
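A brief sketch of these logging helpers (metric names and values are illustrative):
import logging
init_logging(logfile=None, level=logging.INFO)
pprint_metrics({"accuracy": 0.8312, "f1": 0.7954}, print_fn=logging.getLogger(__name__), name="dev")
print(table2str([[1, 2], [3, 4]], col_names=["precision", "recall"], row_names=["model_a", "model_b"]))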
import re
# Adapted from jupyterlab css
COLOR_TABLE = {
"black": {"hex": "3e424d", "ansi": "30"},
"red": {"hex": "e75c58", "ansi": "31"},
"green": {"hex": "00a050", "ansi": "32"},
"yellow": {"hex": "ddbb33", "ansi": "33"},
"blue": {"hex": "2090ff", "ansi": "34"},
"magenta": {"hex": "d060c0", "ansi": "35"},
"cyan": {"hex": "60c7c7", "ansi": "36"},
"white": {"hex": "c0c0b0", "ansi": "37"},
"strong-black": {"hex": "303030", "ansi": "90"},
"strong-red": {"hex": "b03030", "ansi": "91"},
"strong-green": {"hex": "007030", "ansi": "92"},
"strong-yellow": {"hex": "b08010", "ansi": "93"},
"strong-blue": {"hex": "0070dd", "ansi": "94"},
"strong-magenta": {"hex": "a03090", "ansi": "95"},
"strong-cyan": {"hex": "209090", "ansi": "96"},
"strong-white": {"hex": "a0a0b0", "ansi": "97"},
}
def colorify(s: str, color: str, bold: bool = False, form="html", tag_side="both"):
"""if tag_side is 'left', only the left tag is added. If tag_side irght
'right', only the right tag is added. This is useful if, for example,
a list of tokens needs to be colored without joining the tokens. Raises an
error if this is not possible for the given form."""
if color is None or form == "none":
return s
m = re.match(r"#(?P<hexcode>[0-9a-fA-F]{6})", color)
valid_ansi = False
if not m:
if color in COLOR_TABLE:
valid_ansi = True
hex_color = COLOR_TABLE[color]["hex"]
else:
raise ValueError("Invalid color {}".format(color))
else:
hex_color = m.group("hexcode")
left_tag, right_tag = "", ""
if form == "html":
bold_code = "font-weight: bold;" if bold else ""
left_tag = '<span style="color: #{code};{boldness}">'.format(code=hex_color, boldness=bold_code)
right_tag = "</span>"
elif form == "ansi" and valid_ansi:
bold_code = "1" if bold else "0"
left_tag = "\033[{boldness};{code}m".format(code=COLOR_TABLE[color]["ansi"], boldness=bold_code)
right_tag = "\033[0m"
else:
raise ValueError("Invalid format {}".format(form))
if tag_side == "left":
return left_tag + s
elif tag_side == "right":
return s + right_tag
elif tag_side == "both":
return left_tag + s + right_tag
raise ValueError("Invalid tag_side {}".format(tag_side))
def colorprint(s, color=None, bold=False, form="ansi", *print_args, **print_kwargs):
return print(colorify(s, color, bold=bold, form=form), *print_args, **print_kwargs)
| aries-master | aries/util/color.py |
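A quick sketch of the color helpers above (strings are illustrative):
print(colorify("error:", "red", bold=True, form="ansi") + " something went wrong")
colorprint("all checks passed", color="green")
html_fragment = colorify("highlighted span", "#2090ff", form="html")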
| aries-master | aries/util/__init__.py |
import collections
import difflib
import itertools
from typing import Iterable, List, Tuple, Union
import numpy as np
import tqdm
from cffi import FFI
from .color import colorify, colorprint
def init_levenshtein_c():
ffibuilder = FFI()
ffibuilder.set_source(
"_levenshtein",
r"""
int levenshtein(int *seq1, int seq1_len, int *seq2, int seq2_len, int *v0)
{
// Adapted from https://en.wikipedia.org/wiki/Levenshtein_distance (CC-BY-SA)
// v0 is just a buffer for temporary calculations; easier to
// ask the caller to allocate it than to deal with C mem
// management
int substitutionCost, insertionCost, deletionCost;
int tmpval;
for (int i = 0; i < seq2_len+1; i++) {
v0[i] = i;
}
for (int i = 0; i < seq1_len; i++) {
// calculate v1 (current row distances) from the previous row v0
// first element of v1 is A[i+1][0]
// edit distance is delete (i+1) chars from s to match empty t
tmpval = i + 1;
// use formula to fill in the rest of the row
for(int j = 0; j < seq2_len; j++) {
// calculating costs for A[i+1][j+1]
deletionCost = v0[j + 1] + 1;
insertionCost = tmpval + 1;
substitutionCost = v0[j];
if (seq1[i] != seq2[j]) {
substitutionCost++;
}
v0[j] = tmpval;
tmpval = deletionCost;
if (insertionCost < tmpval) {
tmpval = insertionCost;
}
if (substitutionCost < tmpval) {
tmpval = substitutionCost;
}
}
v0[seq2_len] = tmpval;
}
// after the last swap, the results of v1 are now in v0
return v0[seq2_len];
}
""",
)
ffibuilder.cdef("int levenshtein(int*, int, int*, int, int*);")
# Compile the C module and import it
ffibuilder.compile(verbose=True)
from _levenshtein import ffi, lib
return ffi, lib
levenshtein_ffi, levenshtein_lib = None, None
def levenshtein_distance(seq1, seq2):
# We call a C function for levenshtein via CFFI because it is about 1000x
# faster than the python version (the difference between running in an hour
# vs running in a month)
global levenshtein_ffi, levenshtein_lib
if levenshtein_ffi is None:
levenshtein_ffi, levenshtein_lib = init_levenshtein_c()
if isinstance(seq1, str):
seq1 = [ord(c) for c in seq1]
if isinstance(seq2, str):
seq2 = [ord(c) for c in seq2]
if len(seq1) > len(seq2):
seq1, seq2 = seq2, seq1
# Important: these arrs need to be in their own variables, NOT inlined with
# the levenshtein_ffi.from_buffer, or else the GC will free the memory and
# memory will get corrupted (often manifests as seq2 overwriting seq1, but
# also can segfault)
seq1_arr = np.array(seq1, dtype=np.int32)
seq2_arr = np.array(seq2, dtype=np.int32)
v0_arr = np.zeros(len(seq2) + 1, dtype=np.int32)
seq1_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq1_arr))
seq2_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq2_arr))
v0 = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(v0_arr))
result = levenshtein_lib.levenshtein(seq1_buf, len(seq1), seq2_buf, len(seq2), v0)
return result
def basic_token_align(seq1, seq2, seq2_ignored_ids: Iterable = None):
"""Aligns the tokens of seq1 and seq2 assuming that seq2 contains all the
characters of seq1, but possibly with some extra tokens (e.g., special
whitespace markers from a huggingface transformers tokenizer) and possibly
partitioned differently.
In cases where the boundaries are mismatched, this maps to the token with
largest overlap, and breaks ties in favor of earlier tokens.
if seq2_ignored_ids is given, the specified token indexes in seq2 are
ignored and will not be aligned to anything in seq1.
Returns a tuple (dist, alignment) where dist is the total of mismatches
(number of characters that seq2 token boundaries had to be moved to
complete alignment) and `alignment` is a list of the same length as seq2
containing the indexes of the aligned tokens from seq1 (or None if the
token did not overlap seq1 at all)."""
if seq2_ignored_ids is None:
seq2_ignored_ids = set()
# if seq1[0] == 'numerous':
# breakpoint()
seq1idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq1)]))
seq2idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq2)]))
seq2_seq1_char_align = [None] * len(seq2idxs)
idx1 = 0
last_valid = None
for chridx2, (idx2, c2) in enumerate(seq2idxs):
if idx1 >= len(seq1idxs):
break
if c2 == seq1idxs[idx1][1] and idx2 not in seq2_ignored_ids:
seq2_seq1_char_align[chridx2] = idx1
last_valid = idx1
idx1 += 1
# Ensure that all chars of seq1 were mapped to a char in seq2
# if ''.join(seq1) != ''.join(seq2):
if last_valid != (len(seq1idxs) - 1):
raise ValueError("Cannot align: Sequences didn't contain the same characters")
# Align the sequences
alignment_counts = {idx: collections.Counter() for idx in range(len(seq2))}
# for idx1, idx2 in zip(seq1idxs, seq2idxs):
for chridx1, (idx2, c2) in zip(seq2_seq1_char_align, seq2idxs):
idx1 = seq1idxs[chridx1][0] if chridx1 is not None else None
alignment_counts[idx2][idx1] += 1
alignments = []
n_mismatch_total = 0
for idx2 in range(len(seq2)):
best_idxs = sorted(
alignment_counts[idx2].keys(), reverse=True, key=lambda x: (alignment_counts[idx2][x], -x if x is not None else float("-inf"))
)
best_idx1 = best_idxs[0]
if best_idx1 is None and len(best_idxs) > 1:
best_idx1 = best_idxs[1]
n_mismatch_total += sum(alignment_counts[idx2].values()) - alignment_counts[idx2][best_idx1]
alignments.append(best_idx1)
return (n_mismatch_total, alignments)
def print_word_diff(text1, text2, color_format="ansi", **print_kwargs):
print(make_word_diff(text1, text2, color_format=color_format), **print_kwargs)
def make_word_diff(text1, text2, color_format="ansi"):
if not isinstance(text1, list):
text1 = text1.split(" ") if len(text1) != 0 else []
if not isinstance(text2, list):
text2 = text2.split(" ") if len(text2) != 0 else []
prevtok = " "
parity = 0
def color_for_tok(tok):
if color_format == "none":
return None
if tok == "+":
return "green"
elif tok == "-":
return "red"
elif tok == "?":
return "blue"
return None
s = ""
for idx, x in enumerate(difflib.ndiff(text1, text2)):
if prevtok != x[0] and prevtok in ("+", "-"):
s += colorify(prevtok + "]", color=color_for_tok(prevtok), form=color_format)
if prevtok != x[0] and x[0] in ("+", "-"):
if parity == 0 and idx > 0:
s += " "
s += colorify("[" + x[0], color=color_for_tok(x[0]), form=color_format)
if x[0] == " ":
if idx != 0:
s += " "
s += x[2:]
parity = 0
elif x[0] == "?":
pass
else:
# s = '['+x[0]+x[1:]+x[0]+']'
if prevtok != x[0]:
parity = parity ^ 1
else:
s += " "
s += colorify(x[2:], color=color_for_tok(x[0]), form=color_format)
prevtok = x[0]
if prevtok in ("+", "-"):
s += colorify(prevtok + "]", color=color_for_tok(prevtok), form=color_format)
return s
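# Illustrative sketch (not from the original source), assuming colorify()
# returns its input unchanged when no color is applied (color_format="none"):
#   make_word_diff("the cat sat", "the dog sat", color_format="none")
#   # -> "the [-cat-][+dog+] sat"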
def build_offsets(
toks: Union[str, List[str]],
chunk_length: int,
) -> dict:
offsets = dict()
for idx in range(len(toks) - chunk_length + 1):
chunk = tuple(toks[idx : idx + chunk_length])
if chunk not in offsets:
offsets[chunk] = []
offsets[chunk].append(idx)
return offsets
def update_overlaps(
cur_overlaps: List[Tuple[int, int]],
toks1: Union[str, List[str]],
toks2: Union[str, List[str]],
idx2: int,
min_length: int,
) -> Tuple[List[Tuple[int, int]], List[Tuple[Tuple[int, int], Tuple[int, int]]]]:
overlaps = []
new_overlaps = []
for overlap in cur_overlaps:
overlap_length = idx2 - overlap[1]
end1 = overlap[0] + overlap_length
if end1 < len(toks1) and idx2 < len(toks2) and toks1[end1] == toks2[idx2]:
new_overlaps.append(overlap)
elif overlap_length >= min_length:
overlaps.append(((overlap[0], overlap[0] + overlap_length), (overlap[1], overlap[1] + overlap_length)))
return new_overlaps, overlaps
def find_overlapping_substrings(
toks1: Union[str, List[str]],
toks2: Union[str, List[str]],
min_length: int = 32,
):
"""
Finds overlapping substrings of toks1 and toks2, where toks1 and toks2 are
lists of tokens.
min_length is the minimum number of tokens that a match must span in order
to be returned
Returns a list of pairs of spans, e.g. [((10, 20), (14, 24))]. Each span
pair is a (start_idx, end_idx) tuple representing a half-open interval.
Any long match technically contains many shorter matches. This function
returns only the longest match for each set; for each returned pair of
spans (span1, span2), there will be no other returned pair (span3, span4)
such that span3 contains span1 AND span4 contains span2.
"""
if len(toks1) == 0 or len(toks2) == 0:
return []
# Use chunks to reduce number of hits per token, but don't go too high
# since mem usage is len(toks1)*chunk_length. If character tokenization and
# long chunk_length (e.g., 1000), then we would use 1000x the memory needed
# to store toks1.
chunk_length = min(min_length, 10)
offsets1 = build_offsets(toks1, chunk_length)
overlaps = []
cur_overlaps = []
for idx2, tk2 in enumerate(toks2):
cur_overlaps, new_overlaps = update_overlaps(cur_overlaps, toks1, toks2, idx2, min_length)
overlaps.extend(new_overlaps)
if idx2 <= (len(toks2) - min_length):
chunk = tuple(toks2[idx2 : idx2 + chunk_length])
for idx1 in offsets1.get(chunk, []):
has_overlap = False
for overlap in cur_overlaps:
overlap_length = idx2 - overlap[1]
if idx1 - overlap_length == overlap[0]:
has_overlap = True
break
if not has_overlap:
cur_overlaps.append((idx1, idx2))
    # Flush any overlaps that extend all the way to the end of toks2
    idx2 = len(toks2)
_, new_overlaps = update_overlaps(cur_overlaps, toks1, toks2, idx2, min_length)
overlaps.extend(new_overlaps)
final_overlaps = []
for o1 in overlaps:
is_subset = False
for o2 in overlaps:
if o1 != o2 and o1[0][0] >= o2[0][0] and o1[0][1] <= o2[0][1] and o1[1][0] >= o2[1][0] and o1[1][1] <= o2[1][1]:
is_subset = True
break
if not is_subset:
final_overlaps.append(o1)
return final_overlaps
| aries-master | aries/util/edit.py |
import json
import logging
import os
import sqlite3
import sys
import tqdm
logger = logging.getLogger(__name__)
def fuse_back_matter(s2json):
"""Fuse back matter into body text (mutating the input object) and return
the mutated s2orc object. Often the parser puts whatever is on the last
pdf page into back matter even if it is clearly part of the appendix, so
this function tries to fix that."""
s2json["pdf_parse"]["body_text"] = s2json["pdf_parse"]["body_text"] + s2json["pdf_parse"]["back_matter"]
return s2json
def load_s2orc(pdf_id, fetcher):
s = fetcher.get(pdf_id)
if s is None:
return None
return fuse_back_matter(s)
def iter_s2orc_pairs(base_path, paper_records, error_on_missing=True):
with S2orcFetcherFilesystem(base_path) as fetcher:
for record in tqdm.tqdm(paper_records, desc="loading papers"):
doc_id = record["doc_id"]
if not all(fetcher.has(pdf_id) for pdf_id in [record["source_pdf_id"], record["target_pdf_id"]]):
if error_on_missing:
raise RuntimeError("missing pdf ids for doc {} ({}, {})".format(doc_id, record["source_pdf_id"], record["target_pdf_id"]))
else:
logger.warning("missing pdf ids for doc {} ({}, {})".format(doc_id, record["source_pdf_id"], record["target_pdf_id"]))
continue
s2orc1 = load_s2orc(record["source_pdf_id"], fetcher)
s2orc2 = load_s2orc(record["target_pdf_id"], fetcher)
yield doc_id, s2orc1, s2orc2
def iter_s2orc_docs(config, pdf_ids):
with S2orcFetcherSqlite(
config.get("s2orc_db_path", ":memory:"),
fallback_fetcher=S2orcFetcherFilesystem(config["s2orc_base_path"]) if config.get("s2orc_base_path", None) else None,
update_db=False,
) as fetcher:
for pdf_id in tqdm.tqdm(pdf_ids, desc="loading papers"):
if not fetcher.has(pdf_id):
logger.warning("missing pdf ids for doc {}".format(pdf_id))
continue
s2orc2 = load_s2orc(pdf_id, fetcher)
yield pdf_id, s2orc2
class S2orcFetcher:
def get(self, pdf_id):
raise NotImplementedError()
def has(self, pdf_id):
raise NotImplementedError()
class S2orcFetcherDummy(S2orcFetcher):
def get(self, pdf_id):
return None
def has(self, pdf_id):
return False
class S2orcFetcherSqlite(S2orcFetcher):
def __init__(self, s2orc_db_path, fallback_fetcher=None, update_db=False):
self.s2orc_db_path = s2orc_db_path
self.fallback_fetcher = fallback_fetcher or S2orcFetcherDummy()
self.update_db = update_db
self.db = None
self.cur = None
def __enter__(self):
self.db = sqlite3.connect(self.s2orc_db_path)
self.cur = self.db.cursor()
self.cur.execute("BEGIN")
# We create the table/index regardless of update_db, since otherwise we hit errors later
self.cur.execute("CREATE TABLE IF NOT EXISTS pdf_records (pdf_id TEXT PRIMARY KEY NOT NULL, title TEXT, json TEXT)")
self.cur.execute("CREATE INDEX IF NOT EXISTS pdf_records_by_id ON pdf_records (pdf_id)")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.db.commit()
self.db.close()
def get(self, pdf_id):
rec = self.cur.execute("SELECT json FROM pdf_records WHERE pdf_id=?", (pdf_id,)).fetchone()
if rec is not None:
return json.loads(rec[0])
s2orc_json = self.fallback_fetcher.get(pdf_id)
if self.update_db and s2orc_json is not None:
self.cur.execute("INSERT INTO pdf_records (pdf_id, title, json) VALUES (?, ?, ?)", (pdf_id, s2orc_json["title"], json.dumps(s2orc_json)))
return s2orc_json
def has(self, pdf_id):
rec = self.cur.execute("SELECT 1 FROM pdf_records WHERE pdf_id=?", (pdf_id,)).fetchone()
if rec is not None:
return True
return self.fallback_fetcher.has(pdf_id)
class S2orcFetcherFilesystem(S2orcFetcher):
def __init__(self, s2orc_base_path):
self.s2orc_base_path = s2orc_base_path
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return
def get(self, pdf_id):
if not self.s2orc_base_path:
return None
path = os.path.join(self.s2orc_base_path, "{}.json".format(pdf_id))
try:
with open(path) as f:
return json.load(f)
except FileNotFoundError:
return None
def has(self, pdf_id):
if not self.s2orc_base_path:
return False
path = os.path.join(self.s2orc_base_path, "{}.json".format(pdf_id))
return os.path.exists(path)
| aries-master | aries/util/s2orc.py |
import collections
import json
import logging
import os
import torch
import transformers
from .logging import pprint_metrics
logger = logging.getLogger(__name__)
class TrainLoggerCallback(transformers.TrainerCallback):
def __init__(self, logger):
self.logger = logger
def on_log(self, args, state, control, logs=None, **kwargs):
if not logs:
return
self.logger.info(
"Logs at global step {} (epoch {}, {:0.2f} TFLOs): {}".format(state.global_step, state.epoch, state.total_flos / 1e12, json.dumps(logs))
)
class Seq2SeqEvalCallback(transformers.TrainerCallback):
def __init__(self, config, model, eval_records, model_eval_fn, model_selection_metric_fn=None):
self.config = config
self.model = model
self.eval_records = eval_records
self.model_eval_fn = model_eval_fn
self.eval_precached_dataset = self.model._make_dataset(self.eval_records)
self.model_selection_metric_fn = model_selection_metric_fn
if isinstance(model_selection_metric_fn, str):
self.model_selection_metric_fn = lambda x: x[model_selection_metric_fn]
self._best_metric_val = float("-inf")
self._best_model_state = None
@staticmethod
def _clone_cpu_model_state_dict(model):
return collections.OrderedDict((k, v.clone().cpu().detach()) for k, v in model.state_dict().items())
def on_evaluate(self, args, state, control, **kwargs):
metrics, all_results, _ = self.model_eval_fn(self.model, self.eval_records, eval_precached_dataset=self.eval_precached_dataset)
if self.config.get("write_examples_on_eval", False):
with open(os.path.join(self.config["output_dir"], "{}_inferences.jsonl".format("tmp_mid_eval")), "w") as f:
for res in all_results:
f.write(json.dumps(res) + "\n")
pprint_metrics(metrics, logger, name="dev (mid-train)")
if self.model_selection_metric_fn is not None:
metric_val = self.model_selection_metric_fn(metrics)
if metric_val > self._best_metric_val:
logger.info(
"Got new best model at global step {} (epoch {}, {:0.2f} TFLOs)".format(state.global_step, state.epoch, state.total_flos / 1e12)
)
state.best_metric = metric_val
self._best_metric_val = metric_val
self._best_model_state = Seq2SeqEvalCallback._clone_cpu_model_state_dict(self.model.model)
class SequentialTrainer(transformers.Trainer):
def _get_train_sampler(self):
if self.train_dataset is None:
return None
return torch.utils.data.SequentialSampler(self.train_dataset)
| aries-master | aries/util/training.py |
import glob
import gzip
import itertools
import json
import lzma
import os
import sqlite3
from typing import Any, Callable, Dict, Iterable, Iterator, List, Union
import numpy as np
try:
import zstandard
except ImportError:
zstandard = None
try:
import orjson
except ImportError:
orjson = json
class ReservoirSampler:
def __init__(self, size, rng=None):
self.size = size
self.rng = rng or np.random.default_rng()
self.reservoir = []
self.n_seen = 0
def add(self, x):
self.n_seen += 1
if len(self.reservoir) < self.size:
self.reservoir.append(x)
else:
idx = self.rng.integers(0, self.n_seen)
if idx < self.size:
self.reservoir[idx] = x
def add_many(self, xs):
for x in xs:
self.add(x)
def get_reservoir(self):
return self.reservoir
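# Illustrative usage sketch (not part of the original source): draw a uniform
# random sample of fixed size from a stream without storing the whole stream.
#   sampler = ReservoirSampler(5, rng=np.random.default_rng(0))
#   sampler.add_many(range(10000))
#   sample = sampler.get_reservoir()  # list of 5 items sampled uniformly from the stream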
def openc(fname, mode="rt", *, compression="auto", **kwargs):
"""Opens a file, transparently handling a variety of possible compression schemes."""
if mode == "w":
mode = "wt"
if mode == "x":
mode = "xt"
kwargs["mode"] = mode
if compression == "auto":
# TODO: Maybe use magic number instead of extension
if fname.lower().endswith(".gz"):
compression = "gzip"
elif fname.lower().endswith(".xz"):
compression = "lzma"
elif fname.lower().endswith(".zst"):
compression = "zstd"
else:
compression = "none"
open_fn = open
if compression == "gzip":
open_fn = gzip.open
elif compression == "lzma":
open_fn = lzma.open
elif compression == "zstd":
if zstandard is None:
raise ValueError("zstandard module is not available")
open_fn = zstandard.open
return open_fn(fname, **kwargs)
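# Illustrative usage sketch (not part of the original source); file names are
# hypothetical. The compression scheme is inferred from the extension:
#   with openc("data.jsonl.gz") as f:            # decompressed text mode ("rt")
#       first_line = next(f)
#   with openc("out.jsonl.zst", mode="w") as f:  # requires the zstandard package
#       f.write('{"a": 1}\n')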
def iter_jsonl_files(infiles):
if isinstance(infiles, str):
infiles = [infiles]
for infile in infiles:
with openc(infile) as f:
for obj in map(orjson.loads, f):
yield obj
def zip_strict(*iterables):
# Until python 3.10, seems like there's no builtin way to do this, but
# there's a fairly simple workaround implementation:
# https://stackoverflow.com/a/32954700
canary = object()
for tup in itertools.zip_longest(*iterables, fillvalue=canary):
if canary in tup:
raise ValueError("Iterables have different lengths")
yield tup
def downsample_recs(recs: List[Any], downsample_config: Dict[str, Any]):
if downsample_config is None:
# Return recs, for consistency with old configs before downsampling was added
return recs.copy()
if downsample_config.get("keep_n", -1) != -1 and downsample_config.get("keep_ratio", -1) != -1:
raise ValueError("Need only one of keep_n and keep_ratio (not both)")
keep_n = len(recs)
if "keep_n" in downsample_config:
keep_n = downsample_config["keep_n"]
elif "keep_ratio" in downsample_config:
keep_n = max(1, int(downsample_config["keep_ratio"] * len(recs)))
assert isinstance(keep_n, int) and keep_n > 0
if keep_n > len(recs):
raise ValueError("Can't sample more data points than the dataset has")
rng = np.random.default_rng(downsample_config.get("seed", None))
return [recs[idx] for idx in rng.choice(len(recs), size=keep_n, replace=False)]
def batch_iter(iterable, batch_size):
batch = []
for rec in iterable:
if len(batch) >= batch_size:
yield batch
batch = []
batch.append(rec)
if len(batch) != 0:
yield batch
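# Illustrative sketch (not part of the original source): the final, possibly
# smaller, batch is still yielded.
#   list(batch_iter([1, 2, 3, 4, 5], batch_size=2))  # -> [[1, 2], [3, 4], [5]]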
def index_by(
lst: Union[Iterable, Iterator],
key: Union[str, Callable],
one_to_one=False,
) -> Dict:
key_fn = key
if isinstance(key_fn, str):
key_fn = lambda x: x[key]
index = dict()
if one_to_one:
for rec in lst:
k = key_fn(rec)
if k in index:
raise ValueError("Duplicate key: {}".format(k))
index[k] = rec
else:
for rec in lst:
k = key_fn(rec)
if k not in index:
index[k] = []
index[k].append(rec)
return index
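# Illustrative sketch (not part of the original source):
#   recs = [{"id": "a", "v": 1}, {"id": "a", "v": 2}, {"id": "b", "v": 3}]
#   index_by(recs, "id")                   # -> {"a": [recs[0], recs[1]], "b": [recs[2]]}
#   index_by(recs, "id", one_to_one=True)  # raises ValueError (duplicate key "a")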
def deduplicate_by(
lst: Union[Iterable, Iterator],
key: Union[str, Callable],
) -> List:
key_fn = key
if isinstance(key_fn, str):
key_fn = lambda x: x[key]
new_lst = []
used_keys = set()
for rec in lst:
k = key_fn(rec)
if k not in used_keys:
used_keys.add(k)
new_lst.append(rec)
return new_lst
def counter_jaccard(counter1: Dict, counter2: Dict) -> float:
"""Computes the jaccard overlap of two dict objects."""
if len(counter1) == 0 and len(counter2) == 0:
return float("nan")
if len(counter1) == 0 or len(counter2) == 0:
return 0.0
intersection = sum((counter1 & counter2).values())
if intersection == 0:
return 0.0
return intersection / (sum(counter1.values()) + sum(counter2.values()) - intersection)
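# Illustrative sketch (not part of the original source):
#   from collections import Counter
#   counter_jaccard(Counter("aab"), Counter("abb"))
#   # intersection = {"a": 1, "b": 1} -> 2; union = 3 + 3 - 2 = 4; result = 0.5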
| aries-master | aries/util/data.py |
import itertools
import logging
import gensim
logger = logging.getLogger(__name__)
def stem_tokens(tokens):
return list(map(gensim.parsing.preprocessing.stem, tokens))
class InMemoryTextCorpus(gensim.corpora.textcorpus.TextCorpus):
def __init__(self, texts, dictionary=None, **kwargs):
self.texts = texts
if "token_filters" not in kwargs:
kwargs["token_filters"] = [stem_tokens]
if "character_filters" not in kwargs:
kwargs["character_filters"] = [
gensim.parsing.preprocessing.lower_to_unicode,
gensim.utils.deaccent,
gensim.parsing.preprocessing.strip_multiple_whitespaces,
gensim.parsing.preprocessing.strip_punctuation,
]
super().__init__(dictionary=dictionary, **kwargs)
# self.token_filters = [gensim.parsing.preprocessing.remove_short_tokens, gensim.parsing.preprocessing.remove_stopword_tokens]
def __getitem__(self, item):
return self.dictionary.doc2bow(self.preprocess_text(self.texts[item]))
def init_dictionary(self, dictionary):
self.dictionary = dictionary if dictionary is not None else gensim.corpora.Dictionary()
if dictionary is None:
logger.debug("Initializing dictionary")
metadata_setting = self.metadata
self.metadata = False
self.dictionary.add_documents(self.get_texts())
self.metadata = metadata_setting
else:
logger.debug("Dictionary already initialized")
def get_texts(self):
return list(map(self.preprocess_text, self.texts))
def __len__(self):
return len(self.texts)
| aries-master | aries/util/gensim.py |
import difflib
import itertools
import json
import logging
import os
import sys
import gensim
import numpy as np
import tqdm
import aries.util.data
import aries.util.edit
import aries.util.gensim
from aries.alignment.eval import full_tune_optimal_thresholds
logger = logging.getLogger(__name__)
class BM25Aligner:
def __init__(self, config):
self.config = config
self.fixed_pred_threshold = self.config.get("fixed_pred_threshold", None)
self.pred_threshold = self.config.get("fixed_pred_threshold", 0.5)
self.fixed_rel_pred_threshold = self.config.get("fixed_rel_pred_threshold", None)
self.rel_pred_threshold = self.config.get("fixed_rel_pred_threshold", 0.2)
self.tune_on_dev = self.config.get("tune_on_dev", False)
self.tuning_minimum_recall = self.config.get("tuning_minimum_recall", 0.0)
self.query_input_format = self.config["query_input_format"]
self.edit_input_format = self.config["edit_input_format"]
self.output_dir = self.config.get("output_dir", None)
# Check for conflicts between tune_on_dev and fixed_*_thresholds
if self.tune_on_dev and (self.fixed_pred_threshold is not None or self.fixed_rel_pred_threshold is not None):
logger.warning("tune_on_dev is set to True, but fixed_pred_threshold and/or fixed_rel_pred_threshold are set. Ignoring fixed thresholds.")
self.bm25_model = None
self.bm25_index = None
self.tfidf_model = None
self.dictionary = None
if self.config.get("bm25_dictionary", None) is not None:
logger.info("Loading dictionary from {}".format(self.config["bm25_dictionary"]))
self.dictionary = gensim.corpora.Dictionary.load(self.config["bm25_dictionary"])
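    # Illustrative config sketch (not from the original source; values are
    # hypothetical, keys correspond to the lookups above):
    #   {
    #       "query_input_format": "comment_only",
    #       "edit_input_format": "diff",
    #       "tune_on_dev": True,
    #       "tuning_minimum_recall": 0.2,
    #       "output_dir": "out/bm25",
    #   }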
def _candidate_record_to_input_text(self, rec):
if self.query_input_format == "comment_only":
return rec["review_comment"]
elif self.query_input_format == "comment_with_canonical":
return rec["review_comment"] + "\ncanonicalized: " + rec["canonical"]["canonicalized"]
elif self.query_input_format == "reply_comment_or_extracted_comment":
return rec.get("reply_comment_line", rec["review_comment"])
elif self.query_input_format == "reply_comment_or_extracted_comment_with_canonical":
return rec.get("reply_comment_line", rec["review_comment"]) + "\ncanonicalized: " + rec["canonical"]["canonicalized"]
elif self.query_input_format == "comment_with_context":
comment_str = rec["review_comment"].strip()
if rec.get("context_side", "none") == "left":
comment_str = rec["context"].strip() + " " + comment_str
else:
comment_str = comment_str + " " + rec["context"].strip()
return "review comment: " + comment_str
raise ValueError("Unknown query_input_format {}".format(self.query_input_format))
def _edit_to_input_text(self, edit):
if self.edit_input_format == "added_tokens":
return " ".join(edit.get_added_tokens())
if self.edit_input_format == "source_text":
return edit.get_source_text()
if self.edit_input_format == "target_text":
return edit.get_target_text()
if self.edit_input_format == "target_text_with_context":
context = "context: none"
if len(edit.target_idxs) != 0 and min(edit.target_idxs) != 0:
context = "context: " + edit.texts2[min(edit.target_idxs) - 1]
return edit.get_target_text() + "\n\n" + context
elif self.edit_input_format == "diff":
return aries.util.edit.make_word_diff(
edit.get_source_text(),
edit.get_target_text(),
color_format="none",
)
elif self.edit_input_format == "tokens_union":
text1 = edit.get_source_text()
text2 = edit.get_target_text()
textw = text1.split(" ") if len(text1) != 0 else []
outtextw = text2.split(" ") if len(text2) != 0 else []
tokens = []
for idx, x in enumerate(difflib.ndiff(textw, outtextw)):
tokens.append(x[2:])
return " ".join(tokens)
raise ValueError("Unknown edit_input_format {}".format(self.edit_input_format))
def train(self, train_recs, dev_recs):
logger.info("Getting corpus statistics from training documents...")
# Pull the full doc text from the training set
all_doc_edits = dict()
for rec in train_recs:
# We only need one edit to get the DocEdits for the whole doc
if rec["doc_id"] in all_doc_edits:
continue
edits = rec["positives"] + rec["negatives"]
if len(edits) == 0:
continue
all_doc_edits[rec["doc_id"]] = edits[0].doc_edits
docs = []
for doc_id, doc_edits in all_doc_edits.items():
docs.append("\n\n".join([x["text"] for x in doc_edits.s2orc2["pdf_parse"]["body_text"]]))
corpus = aries.util.gensim.InMemoryTextCorpus(docs, dictionary=self.dictionary)
self.dictionary = corpus.dictionary
# Save dictionary
self.dictionary.save(os.path.join(self.output_dir, "dictionary.pk"))
# Tune the thresholds, if needed
if self.tune_on_dev:
logger.info("Tuning thresholds on dev set...")
self.pred_threshold, self.rel_pred_threshold = self._tune_thresholds(dev_recs)
logger.info("Tuned thresholds: pred_threshold={}, rel_pred_threshold={}".format(self.pred_threshold, self.rel_pred_threshold))
with open(os.path.join(self.output_dir, "thresholds.json"), "w") as f:
json.dump(
{
"pred_threshold": self.pred_threshold,
"rel_pred_threshold": self.rel_pred_threshold,
},
f,
)
def _tune_thresholds(self, dev_recs):
eval_records = []
for rec in dev_recs:
eval_records.append(
{
"doc_id": rec["doc_id"],
"review_comment": rec["review_comment"],
"context": rec["context"],
"context_side": rec.get("context_side", "none"),
"candidates": rec["positives"] + rec["negatives"] + rec.get("unknowns", []),
"candidate_labels": [1] * len(rec["positives"]) + [0] * len(rec["negatives"]) + [None] * len(rec.get("unknowns", [])),
}
)
all_results = self.predict_many(eval_records)
all_candidates = []
for rec in all_results:
for idx, ex in enumerate(rec["predictions"]):
ex["label"] = rec["input_record"]["candidate_labels"][idx]
all_candidates.append(ex)
pred_threshold, rel_pred_threshold, _ = full_tune_optimal_thresholds(
all_candidates,
min_recall=self.tuning_minimum_recall,
num_abs_thresholds=20,
num_rel_thresholds=20,
abs_thresh=self.fixed_pred_threshold,
rel_thresh=self.fixed_rel_pred_threshold,
)
return pred_threshold, rel_pred_threshold
def _init_vector_models(self):
self.bm25_model = gensim.models.OkapiBM25Model(dictionary=self.dictionary)
self.tfidf_model = gensim.models.TfidfModel(dictionary=self.dictionary, normalize=True, smartirs="bnn")
def predict_many(self, *args, **kwargs):
if self.bm25_model is None:
self._init_vector_models()
results = self._predict_many(*args, **kwargs)
return results
def _predict_many(self, test_recs, quiet=False):
out_recs = []
logger.info("Doing inference with pred_threshold={}, rel_pred_threshold={}".format(self.pred_threshold, self.rel_pred_threshold))
for rec in tqdm.tqdm(test_recs, "predicting", disable=quiet):
outrec = {
"input_record": rec,
"predictions": [{"edit": cand, "pred": None, "score": None} for cand in rec["candidates"]],
}
out_recs.append(outrec)
if len(outrec["predictions"]) == 0:
continue
candidate_texts = [self._edit_to_input_text(x["edit"]) for x in outrec["predictions"]]
corpus = aries.util.gensim.InMemoryTextCorpus(candidate_texts, dictionary=self.dictionary)
query_vec = self.tfidf_model[self.dictionary.doc2bow(corpus.preprocess_text(self._candidate_record_to_input_text(rec)))]
candidate_vectors = self.bm25_model[list(corpus)]
bm25_index = gensim.similarities.SparseMatrixSimilarity(None)
bm25_index.normalize = True
bm25_index.index = gensim.matutils.corpus2csc(candidate_vectors, num_docs=len(corpus), num_terms=len(self.dictionary), dtype=float).T
cosine_similarities = bm25_index[query_vec].tolist()
best_candidxs = np.argsort(cosine_similarities).tolist()
best_candidx_score = cosine_similarities[best_candidxs[-1]]
for candidx, predr in enumerate(outrec["predictions"]):
predr["best_group_score"] = best_candidx_score
predr["cosine_score"] = cosine_similarities[candidx]
predr["pred"] = (
1
if cosine_similarities[candidx] > self.pred_threshold
and cosine_similarities[candidx] >= (best_candidx_score - self.rel_pred_threshold)
else 0
)
predr["score"] = cosine_similarities[candidx]
return out_recs
| aries-master | aries/alignment/bm25.py |
import logging
logger = logging.getLogger(__name__)
class MultiStageAligner:
def __init__(self, config, aligners):
self.config = config
self.aligners = aligners
self.prune_candidates = config.get("prune_candidates", False)
def train(self, train_recs, dev_recs):
logger.info("Multi-stage aligner doesn't train; skipping...")
def _update_candidate_scores(self, candidate):
# Fill base_pred, pred, and score based on the stack of aligner predictions
candidate["base_pred"] = None
candidate["pred"] = None
candidate["score"] = None
if len(candidate["predictions"]) == 0:
return
# If any aligner predicts 0, then the candidate's pred is 0. The
# base_pred is 0 if any aligner other than the last one predicts 0 (1 otherwise).
# The score is the final aligner's score.
for pred_idx, pred_rec in enumerate(candidate["predictions"]):
if pred_rec is None:
continue
if pred_rec["pred"] == 0:
if pred_idx < len(candidate["predictions"]) - 1:
candidate["base_pred"] = 0
candidate["pred"] = 0
elif pred_rec["pred"] == 1 and candidate["base_pred"] is None:
if pred_idx < len(candidate["predictions"]) - 1:
candidate["base_pred"] = 1
candidate["pred"] = 1
if candidate["predictions"][-1] is not None:
candidate["score"] = candidate["predictions"][-1]["score"]
def predict_many(self, *args, **kwargs):
results = self._predict_many(*args, **kwargs)
return results
def _predict_many(self, test_recs):
out_recs = []
for rec in test_recs:
out_recs.append(
{
"input_record": rec,
"predictions": [{"edit": x, "predictions": [], "base_pred": None, "pred": None, "score": None} for x in rec["candidates"]],
}
)
backmaps = [list(range(len(x["candidates"]))) for x in test_recs]
# Don't modify the input test_recs if we need to prune
cur_recs = test_recs
if self.prune_candidates:
cur_recs = [x.copy() for x in test_recs]
for rec in cur_recs:
rec["candidates"] = rec["candidates"].copy()
pruned_idxs = [set() for x in test_recs]
for aligner_idx, aligner in enumerate(self.aligners):
logger.info(f"Running aligner {aligner_idx + 1} of {len(self.aligners)} ({aligner.__class__.__name__})")
predictions = aligner.predict_many(cur_recs)
# Update the corresponding prediction lists, keeping track of the
# back-mappings from pruned candidates
for recidx, rec in enumerate(predictions):
for candidx, cand in enumerate(rec["predictions"]):
out_cand = out_recs[recidx]["predictions"][backmaps[recidx][candidx]]
# Hack: need to remove 'edit' to make the cands
# JSON-serializable
assert out_cand["edit"] == cand["edit"]
del cand["edit"]
out_cand["predictions"].append(cand)
self._update_candidate_scores(out_cand)
if out_cand["pred"] is None:
breakpoint()
print(out_cand["pred"])
if self.prune_candidates:
# Append None to predictions for any candidates that were pruned
# by previous aligners
for recidx, rec in enumerate(out_recs):
for candidx in pruned_idxs[recidx]:
rec["predictions"][candidx]["predictions"].append(None)
self._update_candidate_scores(rec["predictions"][candidx])
if aligner_idx < len(self.aligners) - 1:
# Prune anything that was predicted to be 0
candidates_to_prune = []
for recidx, rec in enumerate(predictions):
for candidx, cand in enumerate(rec["predictions"]):
if cand["pred"] == 0:
candidates_to_prune.append((recidx, candidx))
# Reverse sort is important to ensure indices don't shift as we prune them
for recidx, candidx in sorted(candidates_to_prune, key=lambda x: x[1], reverse=True):
backmaps[recidx].pop(candidx)
cur_recs[recidx]["candidates"].pop(candidx)
pruned_idxs[recidx].add(candidx)
return out_recs
| aries-master | aries/alignment/other.py |
import functools
import logging
import os
import sys
import datasets
import numpy as np
import torch
import tqdm
import transformers
from aries.alignment.eval import AlignerEvalCallback
from aries.util.edit import make_word_diff
from aries.util.training import TrainLoggerCallback
logger = logging.getLogger(__name__)
class PairwiseTransformerAligner:
def __init__(self, config, model, tokenizer):
self.config = config
self.model = model
self.tokenizer = tokenizer
self.max_length = self.config["max_seq_length"]
@staticmethod
def preprocess_fn(examples, tokenizer, max_length):
model_inputs = tokenizer(
examples["first_text"],
max_length=max_length,
padding=False,
truncation=True,
)
model_inputs["labels"] = examples["label"]
return model_inputs
def _candidate_record_to_input_text(self, rec):
if self.config["query_input_format"] == "comment_only":
return rec["review_comment"]
elif self.config["query_input_format"] == "comment_with_canonical":
return rec["review_comment"] + "\ncanonicalized: " + rec["canonical"]["canonicalized"]
elif self.config["query_input_format"] == "reply_comment_or_extracted_comment":
return rec.get("reply_comment_line", rec["review_comment"])
elif self.config["query_input_format"] == "reply_comment_or_extracted_comment_with_canonical":
return rec.get("reply_comment_line", rec["review_comment"]) + "\ncanonicalized: " + rec["canonical"]["canonicalized"]
elif self.config["query_input_format"] == "comment_with_context":
comment_str = rec["review_comment"].strip()
if rec.get("context_side", "none") == "left":
comment_str = rec["context"].strip() + " " + comment_str
else:
comment_str = comment_str + " " + rec["context"].strip()
return "review comment: " + comment_str
raise ValueError("Unknown query_input_format {}".format(self.config["query_input_format"]))
def _edit_to_input_text(self, edit):
if self.config["edit_input_format"] == "added_tokens":
return " ".join(edit.get_added_tokens())
if self.config["edit_input_format"] == "source_text":
return edit.get_source_text()
if self.config["edit_input_format"] == "target_text":
return edit.get_target_text()
if self.config["edit_input_format"] == "target_text_with_context":
context = "context: none"
if len(edit.target_idxs) != 0 and min(edit.target_idxs) != 0:
context = "context: " + edit.texts2[min(edit.target_idxs) - 1]
return edit.get_target_text() + "\n\n" + context
elif self.config["edit_input_format"] == "diff":
return make_word_diff(
edit.get_source_text(),
edit.get_target_text(),
color_format="none",
)
raise ValueError("Unknown edit_input_format {}".format(self.config["edit_input_format"]))
def _make_example_for_rec_edit(self, rec, edit, label=None):
query_text = self._candidate_record_to_input_text(rec)
edit_text = self._edit_to_input_text(edit)
return {
"doc_id": rec["doc_id"],
"source_pdf_id": rec["source_pdf_id"],
"target_pdf_id": rec["target_pdf_id"],
"review_comment": rec["review_comment"],
"first_text": "review comment: {}\n\nparagraph: {}".format(query_text, edit_text),
"label": label,
}
def _make_dataset(self, recs, name="dataset", shuffle=False):
if isinstance(recs, dict):
recs = list(recs.values())
exs = []
for rec in recs:
edit_with_labels = []
edit_with_labels.extend([(x, 1) for x in rec["positives"]])
edit_with_labels.extend([(x, 0) for x in rec["negatives"]])
for edit, label in edit_with_labels:
exs.append(self._make_example_for_rec_edit(rec, edit, label=label))
tmp = {k: [] for k in exs[0].keys()}
for ex in exs:
for k, v in ex.items():
tmp[k].append(v)
dset = datasets.Dataset.from_dict(tmp)
if shuffle:
dset = dset.shuffle()
dset = dset.map(
functools.partial(PairwiseTransformerAligner.preprocess_fn, tokenizer=self.tokenizer, max_length=self.max_length),
batched=True,
num_proc=4,
load_from_cache_file=False,
desc="Processing {}".format(name),
)
return dset
def train(self, train_recs, dev_recs):
if len(train_recs) == 0:
raise ValueError("Got empty train_recs")
if len(dev_recs) == 0:
raise ValueError("Got empty dev_recs")
training_args_dict = transformers.TrainingArguments(output_dir=self.config["output_dir"], log_level="passive").to_dict()
training_args_dict.update(self.config.get("training_args", dict()))
training_args = transformers.HfArgumentParser(transformers.TrainingArguments).parse_dict(training_args_dict)[0]
self.rng = np.random.default_rng(self.config["seed"])
for rec in train_recs:
rec["negatives"] = [x for x in rec["negatives"] if x.is_full_addition()]
train_dset = self._make_dataset(train_recs, shuffle=True)
self.rng = np.random.default_rng(self.config["seed"])
dev_dset = self._make_dataset(dev_recs)
logger.info("{} | {}".format(self.tokenizer.decode(train_dset["input_ids"][0]), self.tokenizer.decode(train_dset["labels"][0])))
data_collator = transformers.DataCollatorWithPadding(
self.tokenizer,
pad_to_multiple_of=None,
)
model_selector_callback = AlignerEvalCallback(
self.config,
self,
dev_recs,
model_selection_metric_fn=lambda x: x["optimal_f1"],
)
# TODO: Make training args configurable from model_config
trainer = transformers.Trainer(
model=self.model,
args=training_args,
train_dataset=train_dset,
eval_dataset=dev_dset,
tokenizer=self.tokenizer,
data_collator=data_collator,
callbacks=[model_selector_callback, TrainLoggerCallback(logger)],
compute_metrics=None,
)
_ = trainer.train()
self.model.load_state_dict(model_selector_callback._best_model_state)
self.model.save_pretrained(os.path.join(self.config["output_dir"], "ptmodel"))
self.tokenizer.save_pretrained(os.path.join(self.config["output_dir"], "ptmodel"))
def predict_many(self, test_recs):
was_training = self.model.training
self.model.eval()
out_recs = []
with tqdm.trange(sum(len(x["candidates"]) for x in test_recs), miniters=1, desc="{}.predict_many".format(self.__class__.__name__)) as pbar:
with torch.no_grad():
for rec in test_recs:
outrec = {
"input_record": rec,
"predictions": [{"edit": cand, "pred": None, "score": None} for cand in rec["candidates"]],
}
out_recs.append(outrec)
for pred_rec in outrec["predictions"]:
tensors = self.tokenizer(
self._make_example_for_rec_edit(rec, pred_rec["edit"])["first_text"],
max_length=self.max_length,
padding=False,
truncation=True,
)
out = self.model(
input_ids=torch.tensor(tensors["input_ids"], device=self.model.device, dtype=torch.long).unsqueeze(0),
attention_mask=torch.tensor(tensors["attention_mask"], device=self.model.device, dtype=torch.long).unsqueeze(0),
)
pred_rec["pred"] = torch.argmax(out.logits, dim=-1)[0].item()
pred_rec["score"] = torch.nn.functional.softmax(out.logits, dim=-1)[0].tolist()[1]
pred_rec["logits"] = [out.logits[0][0].item(), out.logits[0][1].item()]
pbar.update(1)
self.model.train(was_training)
return out_recs
| aries-master | aries/alignment/cross_encoder.py |
import collections
import difflib
import html
import io
import itertools
import logging
import re
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import nltk.corpus
import nltk.util
import numpy as np
import tqdm
from nltk.util import ngrams
import aries.util.data
import aries.util.edit
import aries.util.s2orc
from aries.util.color import colorprint
from aries.util.s2orc import load_s2orc
logger = logging.getLogger(__name__)
class ParagraphEdit:
def __init__(self, texts1, texts2, source_idxs, target_idxs, preceding_source_idx=None, similarity=None, doc_edits=None, edit_id=None):
self.texts1 = texts1
self.texts2 = texts2
self.doc_edits = doc_edits
self.edit_id = edit_id
self.preceding_sidx = preceding_source_idx
self.source_idxs = source_idxs if source_idxs is not None else []
self.target_idxs = target_idxs if target_idxs is not None else []
self.similarity = similarity
if self.preceding_sidx is not None and len(self.source_idxs) != 0:
raise ValueError(
"Edit cannot be both an addition and a revision, but got both preceding idx {} and source idxs {}".format(
self.preceding_sidx, str(self.source_idxs)
)
)
self._diff = None
def get_source_text(self):
return "\n".join([self.texts1[idx] for idx in self.source_idxs])
def get_target_text(self):
return "\n".join([self.texts2[idx] for idx in self.target_idxs])
def get_diff(self):
if self._diff is None:
self._diff = list(difflib.ndiff(self.get_source_text().split(), self.get_target_text().split()))
return self._diff
def print_diff(self, color_format="ansi", **kwargs):
t1 = self.get_source_text().split(" ")
t2 = self.get_target_text().split(" ")
if len(t1) == 0 or len(t2) == 0:
return
wdiff = aries.util.edit.make_word_diff(t1, t2, color_format=color_format)
wdiff = re.sub(r"^ ?\[([+-])", r" [\1", wdiff)
print(wdiff, **kwargs)
def get_added_tokens(self):
if len(self.source_idxs) == 0:
return self.get_target_text().split()
return self._get_diff_tokens_by_type("+")
def get_removed_tokens(self):
if len(self.target_idxs) == 0:
return self.get_source_text().split()
return self._get_diff_tokens_by_type("-")
def get_preserved_tokens(self):
return self._get_diff_tokens_by_type(" ")
def is_identical(self):
return self.get_source_text() == self.get_target_text()
def is_full_addition(self):
return len(self.source_idxs) == 0
def is_full_deletion(self):
return len(self.target_idxs) == 0
def _get_diff_tokens_by_type(self, type_char):
tokens = []
for tok in self.get_diff():
if tok[0] == type_char:
tokens.append(tok[2:])
return tokens
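    # Illustrative sketch (not from the original source): a single-paragraph
    # revision that appends one word.
    #   edit = ParagraphEdit(["a b c"], ["a b c d"], [0], [0])
    #   edit.get_added_tokens()   # -> ["d"]
    #   edit.is_full_addition()   # -> False (it revises an existing paragraph)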
class DocEdits:
def __init__(self, s2orc1, s2orc2, paragraph_edits: List[ParagraphEdit] = None):
self.s2orc1 = s2orc1
self.s2orc2 = s2orc2
self.paragraph_edits = paragraph_edits if paragraph_edits is not None else []
self.source_target_map = dict()
self.target_source_map = dict()
self.near_edits_map = dict()
self.edit_ids_map = dict()
self._source_texts = [x["text"] for x in self.s2orc1["pdf_parse"]["body_text"]]
self._target_texts = [x["text"] for x in self.s2orc2["pdf_parse"]["body_text"]]
for ed in self.paragraph_edits:
self.add_edit(ed)
def add_edit(self, ed):
# Check that edit can be safely added
for sidx in ed.source_idxs:
if sidx in self.source_target_map:
raise ValueError("Edit must have unique source indexes but got conflict for {}".format(sidx))
for tidx in ed.target_idxs:
if tidx in self.target_source_map:
raise ValueError("Edit must have unique target indexes but got conflict for {}".format(tidx))
# Add edit
self.paragraph_edits.append(ed)
for sidx in ed.source_idxs:
self.source_target_map[sidx] = ed
for tidx in ed.target_idxs:
self.target_source_map[tidx] = ed
if ed.preceding_sidx is not None:
if ed.preceding_sidx not in self.near_edits_map:
self.near_edits_map[ed.preceding_sidx] = []
self.near_edits_map[ed.preceding_sidx].append(ed)
if ed.edit_id is not None:
if ed.edit_id in self.edit_ids_map:
raise ValueError("Duplicate edit id {}".format(ed.edit_id))
self.edit_ids_map[ed.edit_id] = ed
def make_edit(self, *args, **kwargs):
return ParagraphEdit(self._source_texts, self._target_texts, *args, doc_edits=self, **kwargs)
def iter_source_edits(self):
"""Iterate through paragraph edits in order of lowest source idx"""
# Go through paras in order, but make sure newly-added paras go after the preceding para
estimated_source_idxs = []
edits = sorted(self.paragraph_edits, key=lambda x: min(x.target_idxs) if len(x.target_idxs) != 0 else float("inf"))
for edit in edits:
if len(edit.source_idxs) != 0:
estimated_source_idxs.append((min(edit.source_idxs), 0))
elif edit.preceding_sidx is not None:
estimated_source_idxs.append((edit.preceding_sidx + 0.5, 0))
else:
last_esi = estimated_source_idxs[-1] if len(estimated_source_idxs) != 0 else (-1, 0)
estimated_source_idxs.append((last_esi[0], last_esi[1] + 1))
for est_idx, edit in sorted(zip(estimated_source_idxs, edits), key=lambda x: x[0]):
yield edit
def has_source_edit(self, sidx):
return sidx in self.source_target_map
def source_edit(self, sidx):
return self.source_target_map[sidx]
def has_target_edit(self, tidx):
return tidx in self.target_source_map
def target_edit(self, tidx):
return self.target_source_map[tidx]
def by_id(self, edit_id):
return self.edit_ids_map[edit_id]
def get_unmapped_source_idxs(self):
return [sidx for sidx in range(len(self.s2orc1["pdf_parse"]["body_text"])) if sidx not in self.source_target_map]
def get_unmapped_target_idxs(self):
return [tidx for tidx in range(len(self.s2orc2["pdf_parse"]["body_text"])) if tidx not in self.target_source_map]
def to_json(self):
edits_json = []
if any([x.edit_id is not None for x in self.paragraph_edits]):
edits_json = [
{
"edit_id": edit.edit_id,
"source_idxs": edit.source_idxs,
"target_idxs": edit.target_idxs,
}
for edit in self.iter_source_edits()
]
else:
edits_json = [
{
"edit_id": edit_idx,
"source_idxs": edit.source_idxs,
"target_idxs": edit.target_idxs,
}
for edit_idx, edit in enumerate(self.iter_source_edits())
]
return {"source_pdf_id": self.s2orc1["paper_id"], "target_pdf_id": self.s2orc2["paper_id"], "edits": edits_json}
def make_paper_diff_string(
doc_edits,
print_ids_only=False,
skip_identical=False,
color_format="none",
return_edit_ids=False,
):
buf = io.StringIO()
print_kwargs = {"file": buf}
escape_fn = html.escape if color_format == "html" else lambda x: x
if color_format == "html":
print("<p>", end="", **print_kwargs)
aries.util.edit.print_word_diff(doc_edits.s2orc1["abstract"], doc_edits.s2orc2["abstract"], color_format=color_format, **print_kwargs)
print("[abstract]", **print_kwargs)
print("edit id: 9999", **print_kwargs)
if color_format == "html":
print("</p>", end="", **print_kwargs)
else:
print(**print_kwargs)
edits_by_id = dict()
for edit_idx, edit in enumerate(doc_edits.iter_source_edits()):
if (edit.is_identical() or len(edit.get_added_tokens()) == 0) and skip_identical:
# print("skip", edit_idx)
continue
section_name = ""
if len(edit.target_idxs) != 0:
section_name = doc_edits.s2orc2["pdf_parse"]["body_text"][edit.target_idxs[0]]["section"]
elif len(edit.source_idxs) != 0:
section_name = doc_edits.s2orc1["pdf_parse"]["body_text"][edit.source_idxs[0]]["section"]
if color_format == "html":
print("<p>", end="", **print_kwargs)
if edit.is_full_addition():
colorprint("[+" + escape_fn(edit.get_target_text()) + "+]", color="green", form=color_format, **print_kwargs)
if not print_ids_only:
print(edit.preceding_sidx, "(added)", edit.target_idxs, end="", **print_kwargs)
elif edit.is_full_deletion():
colorprint("[-" + escape_fn(edit.get_source_text()) + "-]", color="red", form=color_format, **print_kwargs)
if not print_ids_only:
print(edit.source_idxs, "(deleted)", end="", **print_kwargs)
else:
edit.print_diff(color_format=color_format, **print_kwargs)
if not print_ids_only:
print(edit.source_idxs, edit.target_idxs, txtcmp(edit.get_source_text(), edit.get_target_text()), end="", **print_kwargs)
if not print_ids_only:
print(**print_kwargs)
print("section: {}".format(section_name or "unknown"), **print_kwargs)
print("edit id: {}".format(edit_idx), **print_kwargs)
edits_by_id[edit_idx] = edit
if color_format == "html":
print("</p>", end="", **print_kwargs)
else:
print(**print_kwargs)
buf.seek(0)
s = buf.read()
if color_format == "html":
s = s.replace("\n", "<br>")
if return_edit_ids:
return s, edits_by_id
return s
@staticmethod
def from_list(s2orc1, s2orc2, edits_list):
edits = DocEdits(s2orc1, s2orc2)
for edit_rec in sorted(edits_list, key=lambda x: x["edit_id"]):
edit = edits.make_edit(edit_rec["source_idxs"], edit_rec["target_idxs"], edit_id=edit_rec["edit_id"])
edits.add_edit(edit)
return edits
def iter_s2orc_pairs(config, doc_ids, docid2allpdfids, docid2pdfid):
with aries.util.s2orc.S2orcFetcherSqlite(
config.get("s2orc_db_path", ":memory:"),
fallback_fetcher=aries.util.s2orc.S2orcFetcherFilesystem(config["s2orc_base_path"]) if config.get("s2orc_base_path", None) else None,
update_db=False,
) as fetcher:
for doc_id in tqdm.tqdm(doc_ids, desc="loading papers"):
pdf_ids = docid2allpdfids[doc_id]
main_pdf_id = docid2pdfid[doc_id][0]
if main_pdf_id not in pdf_ids:
logger.error("main pdf id {} not in pdf ids {}".format(main_pdf_id, pdf_ids))
continue
# id 0 is the newest one
revised_pdf_id = pdf_ids[0]
if revised_pdf_id == main_pdf_id:
continue
if not all([fetcher.has(pdf_id) for pdf_id in [main_pdf_id, revised_pdf_id]]):
logger.warning("missing pdf ids for doc {}".format(doc_id))
continue
s2orc1 = load_s2orc(main_pdf_id, fetcher)
s2orc2 = load_s2orc(revised_pdf_id, fetcher)
yield doc_id, s2orc1, s2orc2
def txtcmp(txt1, txt2, txt1_bigram_counter=None):
if txt1 == txt2:
return 1
ng1 = txt1_bigram_counter
if txt1_bigram_counter is None:
ng1 = collections.Counter(ngrams(txt1.split(), 2))
ng2 = collections.Counter(ngrams(txt2.split(), 2))
if len(ng1) == 0 and len(ng2) == 0:
return aries.util.data.counter_jaccard(collections.Counter(txt1), collections.Counter(txt2))
return aries.util.data.counter_jaccard(ng1, ng2)
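# Illustrative sketch (not part of the original source): similarity is the
# Jaccard overlap of word-bigram counts (falling back to character counts for
# very short texts), so
#   txtcmp("the cat sat on the mat", "the cat sat on a mat")
# shares 3 of the 7 total bigram occurrences and returns 3/7 (~0.43), while
# identical strings return 1.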
def make_aligns(s2orc1, s2orc2, shortcut_threshold=0.4, min_threshold=0.1, window_size=30):
aligns = dict()
cur_offset = 0
prev_cur_offset = 0
for idx1, rec1 in enumerate(s2orc1["pdf_parse"]["body_text"]):
# if rec1['text'] == 'EQUATION':
# continue
best_score = 0
        # Include a window of candidate target indexes around the current offset estimate
        idx_range = set(
            range(idx1 + cur_offset - window_size, idx1 + cur_offset + window_size)
        )
idx_range = sorted((x for x in idx_range if 0 <= x and x < len(s2orc2["pdf_parse"]["body_text"])), key=lambda x: abs(x - (idx1 + cur_offset)))
# First check if there are any exact matches in the window; this is a fast test and guarantees we won't miss perfect alignments
for idx2 in idx_range:
rec2 = s2orc2["pdf_parse"]["body_text"][idx2]
val = 0
if rec1["text"] == rec2["text"]:
val = 2
val -= abs(idx1 + cur_offset - idx2) / len(s2orc1["pdf_parse"]["body_text"])
if val > best_score:
best_score = val
aligns[idx1] = (idx2, val)
if best_score > 1:
prev_cur_offset = cur_offset
cur_offset = aligns[idx1][0] - idx1
continue
# IF we didn't get an exact match, do the more expensive checks
ng1 = collections.Counter(ngrams(rec1["text"].split(), 2))
for idx2 in idx_range:
rec2 = s2orc2["pdf_parse"]["body_text"][idx2]
val = txtcmp(rec1["text"], rec2["text"], txt1_bigram_counter=ng1)
val -= abs(idx1 + cur_offset - idx2) / len(s2orc1["pdf_parse"]["body_text"])
if val > best_score:
best_score = val
aligns[idx1] = (idx2, val)
if best_score > shortcut_threshold and best_score < 1.0:
break
if best_score < min_threshold and idx1 in aligns:
del aligns[idx1]
if idx1 in aligns:
prev_cur_offset = cur_offset
cur_offset = aligns[idx1][0] - idx1
return aligns
def _should_merge_edit_pair(edit1, edit2):
# For now, we require one of the edits to be a full addition or full
# deletion, since otherwise the corner cases get complicated
if not (edit1.is_full_addition() or edit1.is_full_deletion() or edit2.is_full_addition() or edit2.is_full_deletion()):
return False
if (edit1.is_full_addition() or edit1.is_full_deletion()) and (edit2.is_full_addition() or edit2.is_full_deletion()):
return False
# One of these should have similarity=None in theory.
sim_threshold = max(edit1.similarity or 0, edit2.similarity or 0)
new_source_idxs = edit1.source_idxs.copy() + edit2.source_idxs
new_txt1 = "".join([edit1.texts1[i] for i in new_source_idxs])
new_target_idxs = edit1.target_idxs.copy() + edit2.target_idxs
new_txt2 = "".join([edit1.texts2[i] for i in new_target_idxs])
if txtcmp(new_txt1, new_txt2) > sim_threshold:
return True
return False
def _make_merged_edit(edit1, edit2, docedits):
new_source_idxs = edit1.source_idxs.copy() + edit2.source_idxs
new_target_idxs = edit1.target_idxs.copy() + edit2.target_idxs
new_txt1 = "".join([edit1.texts1[i] for i in new_source_idxs])
new_txt2 = "".join([edit1.texts2[i] for i in new_target_idxs])
similarity = txtcmp(new_txt1, new_txt2)
preceding_sidx = None
if edit1.preceding_sidx == edit2.preceding_sidx:
preceding_sidx = edit1.preceding_sidx
new_edit = docedits.make_edit(new_source_idxs, new_target_idxs, similarity=similarity, preceding_source_idx=preceding_sidx)
return new_edit
def _adjust_bad_merges(aligns):
    # We want to check whether some paragraph has been split differently in the
    # two s2orcs. So, if we could join two source paras to better align to a
    # target para, or join two target paras to better align to a source para,
    # we should do that.
    # TODO: We could be much fancier with the algorithm here to handle
    # already-merged things and edited-but-also-split stuff; for now we do
    # a very basic check for easy corrections, which generally catches cases
    # where identical paras get split differently.
new_aligns = DocEdits(aligns.s2orc1, aligns.s2orc2)
# Because we might need to merge both forwards and backwards, we do
# in-place merges in the list of edits rather than building incrementally
new_edit_list = sorted(aligns.paragraph_edits, key=lambda x: min(x.source_idxs) if len(x.source_idxs) != 0 else x.preceding_sidx + 0.5)
edit_idx = 0
while edit_idx < len(new_edit_list):
edit = new_edit_list[edit_idx]
if edit.is_identical() or edit.is_full_addition() or edit.is_full_deletion():
edit_idx += 1
continue
# We have a partial edit, so we need to check if we can merge with preceding or following paras
prev_edit_idx = edit_idx - 1
while prev_edit_idx >= 0:
prev_edit = new_edit_list[prev_edit_idx]
if _should_merge_edit_pair(prev_edit, edit):
logger.debug("merging %s %s %s %s", edit.source_idxs, edit.target_idxs, prev_edit.source_idxs, prev_edit.target_idxs)
edit = _make_merged_edit(prev_edit, edit, new_aligns)
prev_edit_idx -= 1
else:
break
new_edit_list[prev_edit_idx + 1 : edit_idx + 1] = [edit]
edit_idx = prev_edit_idx + 1
next_edit_idx = edit_idx + 1
while next_edit_idx < len(new_edit_list):
next_edit = new_edit_list[next_edit_idx]
if _should_merge_edit_pair(edit, next_edit):
logger.debug("merging %s %s %s %s", edit.source_idxs, edit.target_idxs, next_edit.source_idxs, next_edit.target_idxs)
edit = _make_merged_edit(edit, next_edit, new_aligns)
next_edit_idx += 1
else:
break
new_edit_list[edit_idx:next_edit_idx] = [edit]
edit_idx += 1
for edit in new_edit_list:
new_aligns.add_edit(edit)
return new_aligns
def make_full_aligns(s2orc1, s2orc2):
aligns = make_full_aligns_v1(s2orc1, s2orc2)
aligns = _adjust_bad_merges(aligns)
for edit_idx, edit in enumerate(aligns.iter_source_edits()):
assert edit.edit_id is None
edit.edit_id = edit_idx
return aligns
def make_full_aligns_v1(s2orc1, s2orc2):
tmp_aligns = make_aligns(s2orc1, s2orc2)
revmap = {v[0]: [] for k, v in tmp_aligns.items()}
for k, v in tmp_aligns.items():
revmap[v[0]].append(k)
aligns = DocEdits(s2orc1, s2orc2)
for k, v in tmp_aligns.items():
if aligns.has_source_edit(k):
continue
aligns.add_edit(aligns.make_edit([kk for kk in revmap[v[0]]], [v[0]], similarity=v[1]))
for sidx in aligns.get_unmapped_source_idxs():
aligns.add_edit(aligns.make_edit([sidx], []))
for idx2 in aligns.get_unmapped_target_idxs():
tmpidx = idx2 - 1
while tmpidx >= 0 and not aligns.has_target_edit(tmpidx):
tmpidx -= 1
if tmpidx < 0:
nearidx = 0
else:
x = aligns.target_edit(tmpidx)
if len(x.source_idxs) != 0:
nearidx = max(x.source_idxs)
elif x.preceding_sidx is not None:
nearidx = x.preceding_sidx
else:
raise ValueError("Invalid mapping")
aligns.add_edit(aligns.make_edit([], [idx2], preceding_source_idx=nearidx))
return aligns
| aries-master | aries/alignment/doc_edits.py |
import collections
import json
import logging
import os
import sys
import numpy as np
import sklearn.exceptions
import sklearn.metrics
import transformers
from aries.util.data import index_by
from aries.util.logging import pprint_metrics
logger = logging.getLogger(__name__)
class AlignerEvalCallback(transformers.TrainerCallback):
def __init__(self, config, model, eval_records, model_selection_metric_fn=None, model_to_save=None):
self.config = config
self.model = model
self.model_to_save = model_to_save or model.model
self.eval_records = eval_records
# self.eval_precached_dataset = self.model._make_dataset(self.eval_records)
self.eval_precached_dataset = None
self.model_selection_metric_fn = model_selection_metric_fn
if isinstance(model_selection_metric_fn, str):
self.model_selection_metric_fn = lambda x: x[model_selection_metric_fn]
self._best_metric_val = float("-inf")
self._best_model_state = None
@staticmethod
def _clone_cpu_model_state_dict(model):
return collections.OrderedDict((k, v.clone().cpu().detach()) for k, v in model.state_dict().items())
def on_evaluate(self, args, state, control, **kwargs):
metrics, all_results, _ = do_model_eval(self.model, self.eval_records, eval_precached_dataset=self.eval_precached_dataset)
if self.config.get("write_examples_on_eval", False):
with open(os.path.join(self.config["output_dir"], "{}_inferences.jsonl".format("tmp_mid_eval")), "w") as f:
for res in all_results:
f.write(json.dumps(res) + "\n")
pprint_metrics(metrics, logger, name="dev (mid-train)")
metrics["global_step"] = state.global_step
metrics["epoch"] = state.epoch
metrics["total_flos"] = state.total_flos
with open(os.path.join(self.config["output_dir"], "{}_metrics.jsonl".format("mid_eval")), "a") as f:
f.write(json.dumps(metrics) + "\n")
if self.model_selection_metric_fn is not None:
metric_val = self.model_selection_metric_fn(metrics)
if metric_val > self._best_metric_val:
logger.info(
"Got new best model at global step {} (epoch {}, {:0.2f} TFLOs)".format(state.global_step, state.epoch, state.total_flos / 1e12)
)
state.best_metric = metric_val
self._best_metric_val = metric_val
self._best_model_state = AlignerEvalCallback._clone_cpu_model_state_dict(self.model_to_save)
def _get_possible_optimal_thresholds(all_candidates):
return _get_possible_optimal_thresholds_smart(all_candidates)
def _get_possible_optimal_thresholds_smart(all_candidates):
"""Gets the thresholds that have a chance of maximizing f1; that is,
thresholds at positive-negative boundaries (in descending order of score)
and thresholds at the extremes."""
# Sort by descending score
all_scored_candidates = sorted([x for x in all_candidates if x["score"] is not None], key=lambda x: x["score"], reverse=True)
if len(all_scored_candidates) == 0:
return []
# return list(range(min(x['score'] for x in all_scored_candidates), max(x['score'] for x in all_scored_candidates), 0.05))
# The possible thresholds should be the midpoints between each pos-label score and the next-lowest-scoring point, plus the endpoints
possible_thresholds = []
possible_thresholds.append(all_scored_candidates[0]["score"] + 0.0001)
possible_thresholds.append(all_scored_candidates[-1]["score"] - 0.0001)
# We only need to consider pos-neg boundaries; if there is a run of
# consecutive positive examples, it is never worse to include all of them.
    for candidx in range(1, len(all_scored_candidates)):
cand0 = all_scored_candidates[candidx - 1]
cand1 = all_scored_candidates[candidx]
if cand0["label"] == 1 and cand1["label"] == 0:
thresh = (cand0["score"] + cand1["score"]) / 2
if thresh not in possible_thresholds:
possible_thresholds.append(thresh)
return possible_thresholds
def get_pred_labels_for_threshold(thresh, all_candidates, rel_thresh=0.2):
pred_labels = []
for x in all_candidates:
if "score" not in x or x.get("base_pred", None) == 0:
pred_labels.append(x["pred"])
elif "best_group_score" in x:
pred_labels.append(1 if x["score"] > thresh and x["score"] >= (x["best_group_score"] - rel_thresh) else 0)
else:
pred_labels.append(1 if x["score"] > thresh else 0)
return pred_labels
def tune_optimal_f1_threshold(all_candidates):
"""Find the absolute decision threshold that maximizes F1."""
if len(all_candidates) == 0:
return None, []
possible_thresholds = _get_possible_optimal_thresholds(all_candidates)
if len(possible_thresholds) == 0:
logger.info("Couldn't get optimal threshold because there were no scores on positive examples")
return None, [x["pred"] for x in all_candidates]
possible_thresholds = sorted(possible_thresholds)
true_labels = [x["label"] for x in all_candidates]
best = (-float("inf"), None, None)
for thresh in possible_thresholds:
pred_labels = get_pred_labels_for_threshold(thresh, all_candidates)
f1 = sklearn.metrics.f1_score(true_labels, pred_labels)
if f1 > best[0]:
best = (f1, thresh, pred_labels)
return best[1], best[2]
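# Illustrative sketch (not part of the original source), with hypothetical
# candidate records:
#   cands = [
#       {"score": 0.9, "label": 1, "pred": 1},
#       {"score": 0.6, "label": 0, "pred": 1},
#       {"score": 0.4, "label": 1, "pred": 0},
#       {"score": 0.1, "label": 0, "pred": 0},
#   ]
#   tune_optimal_f1_threshold(cands)  # -> (0.25, [1, 1, 1, 0]); F1 = 0.8 at that threshold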
def full_tune_optimal_thresholds(all_candidates, min_recall=None, num_abs_thresholds=100, num_rel_thresholds=100, abs_thresh=None, rel_thresh=None):
"""Find the combination of absolute and relative decision thresholds that
maximize F1. If abs_thresh or rel_thresh are set, only the other one will
be tuned. However, note that this is less efficient and precise than
tune_optimal_f1_threshold if only the absolute threshold needs to be tuned.
To tune the relative threshold, records in all_candidates must have
a "best_group_score" field set."""
if len(all_candidates) == 0:
return None, None, []
if abs_thresh is not None and rel_thresh is not None:
raise ValueError("Cannot specify both abs_thresh and rel_thresh")
    if min_recall is None:
        min_recall = 0.0
    possible_abs_threshs = [abs_thresh]
possible_rel_threshs = [rel_thresh]
if abs_thresh is None:
# First, find the maximum pred_threshold that achieves the minimum recall
max_threshold = max(x["score"] for x in all_candidates)
if min_recall > 0:
# We can be efficient by just going down the list in score order
# until we have enough positives (min_recall
# * num positives in all_candidates)
all_candidates.sort(key=lambda x: x["score"], reverse=True)
num_positives = sum(x["label"] == 1 for x in all_candidates)
num_positives_needed = min_recall * num_positives
num_positives_found = 0
for idx, x in enumerate(all_candidates):
if x["label"] == 1:
num_positives_found += 1
if num_positives_found >= num_positives_needed:
max_threshold = x["score"]
break
if num_positives_found < num_positives_needed:
logger.warning("Unable to find enough positives to achieve tuning_minimum_recall of {}".format(min_recall))
# We're done; thresholds must be low enough to predict positive for everything
min_score = min(x["score"] for x in all_candidates)
max_score = max(x["score"] for x in all_candidates)
return min_score, (max_score - min_score), [1] * len(all_candidates)
possible_abs_threshs = np.linspace(0, max_threshold, num_abs_thresholds)
if rel_thresh is None:
max_rel_pred_threshold = max(x["score"] for x in all_candidates) - max_threshold
# Iterate rel thresholds from high to low; if we miss the recall target
# we can exit early
possible_rel_threshs = np.linspace(max_rel_pred_threshold, 0, num_rel_thresholds)
# Now find the combination of pred_threshold and rel_pred_threshold
# that maximizes f1 while achieving the minimum recall
best_f1 = 0
best_thresholds = (0, 0)
best_pred_labels = []
for pred_threshold in possible_abs_threshs:
for rel_pred_threshold in possible_rel_threshs:
labels = [x["label"] for x in all_candidates]
pred_labels = get_pred_labels_for_threshold(pred_threshold, all_candidates, rel_pred_threshold)
recall = sklearn.metrics.recall_score(labels, pred_labels)
            if min_recall is not None and recall < min_recall:
break
f1 = sklearn.metrics.f1_score(labels, pred_labels)
if f1 > best_f1:
best_f1 = f1
best_thresholds = (pred_threshold, rel_pred_threshold)
best_pred_labels = pred_labels
return best_thresholds[0], best_thresholds[1], best_pred_labels
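# Illustrative usage sketch (hypothetical records): each candidate needs "score", "pred", and
# "label"; "best_group_score" is only needed when the relative threshold is being tuned.
#   cands = [
#       {"score": 0.9, "best_group_score": 0.9, "pred": 1, "label": 1},
#       {"score": 0.3, "best_group_score": 0.9, "pred": 0, "label": 0},
#   ]
#   abs_t, rel_t, preds = full_tune_optimal_thresholds(cands, min_recall=0.8)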
def group_macro_prf1(labels, preds, group_ids, include_empty=False):
grouped_comments = {gid: [] for gid in set(group_ids)}
    if not (len(labels) == len(preds) == len(group_ids)):
        raise ValueError("need len(labels) ({}) == len(preds) ({}) == len(group_ids) ({})".format(len(labels), len(preds), len(group_ids)))
if len(labels) == 0:
return float("nan"), float("nan"), float("nan"), float("nan")
for idx in range(len(labels)):
grouped_comments[group_ids[idx]].append((labels[idx], preds[idx]))
group_prf1s = []
group_ps = []
group_rs = []
group_f1s = []
group_ems = []
for gid, group in sorted(grouped_comments.items()):
labels, preds = list(zip(*group))
if any(x == 1 for x in preds):
p = sklearn.metrics.precision_score(labels, preds)
group_ps.append(p)
else:
p = 1
if include_empty:
group_ps.append(p)
if any(x == 1 for x in labels):
r = sklearn.metrics.recall_score(labels, preds)
group_rs.append(r)
else:
r = 1
if include_empty:
group_rs.append(r)
if any(x == 1 for x in preds) or any(x == 1 for x in labels):
f1 = sklearn.metrics.f1_score(labels, preds, zero_division="warn")
group_f1s.append(f1)
else:
f1 = 1
if include_empty:
group_f1s.append(f1)
group_ems.append(1 if all(x == y for x, y in zip(labels, preds)) else 0)
group_prf1s.append(
(
p,
r,
sklearn.metrics.f1_score(labels, preds, zero_division=1),
)
)
if include_empty:
pmean, rmean, f1mean = np.mean(np.array(group_prf1s), axis=0).tolist()
else:
pmean = np.mean(group_ps).tolist()
rmean = np.mean(group_rs).tolist()
f1mean = np.mean(group_f1s).tolist()
return pmean, rmean, f1mean, np.mean(group_ems).tolist()
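# Illustrative sketch (hypothetical values): metrics are computed per group (e.g., per review
# comment) and then averaged; a group with no predicted or no true positives contributes its
# default score of 1 only when include_empty=True.
#   labels    = [1, 0, 0, 0]
#   preds     = [1, 0, 0, 0]
#   group_ids = ["c1", "c1", "c2", "c2"]
#   p, r, f1, em = group_macro_prf1(labels, preds, group_ids, include_empty=True)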
def do_model_eval(model, eval_records, eval_precached_dataset=None, custom_decision_threshold=None, custom_threshold_name="custom_threshold"):
for rec in eval_records:
rec["candidates"] = rec["positives"] + rec["negatives"] + rec.get("unknowns", [])
rec["candidate_labels"] = [1] * len(rec["positives"]) + [0] * len(rec["negatives"]) + [None] * len(rec.get("unknowns", []))
all_results = model.predict_many(eval_records)
if len(all_results) != len(eval_records):
raise ValueError("Number of results ({}) does not match number of records ({})".format(len(all_results), len(eval_records)))
comment2id = dict()
all_candidates = []
candidate_comment_ids = []
for rec in all_results:
if rec["input_record"]["review_comment"] not in comment2id:
comment2id[rec["input_record"]["review_comment"]] = len(comment2id)
for idx, ex in enumerate(rec["predictions"]):
ex["label"] = rec["input_record"]["candidate_labels"][idx]
all_candidates.append(ex)
candidate_comment_ids.append(comment2id[rec["input_record"]["review_comment"]])
true_labels = [x["label"] for x in all_candidates]
def metrics_for_predictions(pred_labels, prefix=""):
nonlocal true_labels
_, _, _, exactmatch = group_macro_prf1(true_labels, pred_labels, candidate_comment_ids, include_empty=False)
ie_macro_p, ie_macro_r, ie_macro_f1, _ = group_macro_prf1(true_labels, pred_labels, candidate_comment_ids, include_empty=True)
metrics = {
"accuracy": sklearn.metrics.accuracy_score(true_labels, pred_labels),
"precision": sklearn.metrics.precision_score(true_labels, pred_labels),
"recall": sklearn.metrics.recall_score(true_labels, pred_labels),
"f1": sklearn.metrics.f1_score(true_labels, pred_labels),
"macro_precision": ie_macro_p,
"macro_recall": ie_macro_r,
"macro_f1": ie_macro_f1,
"exact_match": exactmatch,
"n_pred_positive": sum(1 for x in pred_labels if x == 1),
}
return {(prefix + k): v for k, v in metrics.items()}
metrics = dict()
pred_labels = [x["pred"] for x in all_candidates]
metrics.update(metrics_for_predictions(pred_labels, prefix=""))
optimal_threshold, optimal_pred_labels = tune_optimal_f1_threshold(all_candidates)
if optimal_threshold is not None:
logger.info("Got optimal threshold: {:0.3f}".format(optimal_threshold))
metrics.update(metrics_for_predictions(optimal_pred_labels, prefix="optimal_"))
metrics["optimal_decision_threshold"] = optimal_threshold
if custom_decision_threshold is not None:
custom_pred_labels = get_pred_labels_for_threshold(custom_decision_threshold, all_candidates)
metrics.update(metrics_for_predictions(custom_pred_labels, prefix=(custom_threshold_name + "_")))
metrics[(custom_threshold_name + "_decision_threshold")] = custom_decision_threshold
metrics.update(
{
"n_true_positive": sum(1 for x in all_candidates if x["label"] == 1),
"n_candidates": len(all_candidates),
"n_comments": len(eval_records),
}
)
serializable_results = []
for res in all_results:
sres = dict()
for k, v in res["input_record"].items():
try:
json.dumps(v)
sres[k] = v
except TypeError:
pass
cands = []
for pred_rec in res["predictions"]:
edit = pred_rec["edit"]
scand = {k: v for k, v in pred_rec.items() if k not in ["edit"]}
scand["edit_source_idxs"] = edit.source_idxs
scand["edit_target_idxs"] = edit.target_idxs
if any(x >= len(edit.doc_edits.s2orc2["pdf_parse"]["body_text"]) and x != 9999 for x in edit.target_idxs):
raise KeyError(
"Out of bounds! {} {} {} {}".format(
edit.doc_edits.s2orc2["paper_id"],
len(edit.doc_edits.s2orc2["pdf_parse"]["body_text"]),
str(edit.target_idxs),
edit.get_target_text(),
)
)
scand["edit_source_pdf_id"] = edit.doc_edits.s2orc1["paper_id"]
scand["edit_target_pdf_id"] = edit.doc_edits.s2orc2["paper_id"]
cands.append(scand)
sres["candidates"] = cands
serializable_results.append(sres)
return (
metrics,
serializable_results,
# pair_results,
index_by(
serializable_results,
lambda x: (x["doc_id"], x["source_pdf_id"], x["target_pdf_id"], x["review_comment"]),
),
)
| aries-master | aries/alignment/eval.py |
import json
import logging
import os
import sys
from aries.util.data import index_by, openc
logger = logging.getLogger(__name__)
class PrecomputedEditsAligner:
def __init__(self, config):
self.config = config
def train(self, train_recs, dev_recs):
logger.warning("{} doesn't train; ignoring call to train()".format(self.__class__.__name__))
def predict_many(self, test_recs):
out_recs = []
predictions_by_docid = dict()
with openc(self.config["precomputed_predictions_jsonl_path"], "rt") as f:
predictions_by_docid = index_by(map(json.loads, f), "doc_id")
warned_docs = set()
for rec in test_recs:
outrec = {
"input_record": rec,
"predictions": [{"edit": cand, "pred": None, "score": None} for cand in rec["candidates"]],
}
out_recs.append(outrec)
if rec["doc_id"] not in predictions_by_docid:
if rec["doc_id"] not in warned_docs:
logger.warning("missing prediction for doc: {}".format(rec["doc_id"]))
warned_docs.add(rec["doc_id"])
for cand_rec in outrec["predictions"]:
cand_rec["pred"] = 0
cand_rec["score"] = 0
continue
pred_recs = predictions_by_docid[rec["doc_id"]]
pred_rec = None
for rec2 in pred_recs:
# if rec["review_comment"] == dset_rec["review_comment"]:
# if rec["review_comment"].strip(".\n ") == rec2["review_comment"].strip(".\n "):
if rec["review_comment"].strip() == rec2["comment"].strip():
pred_rec = rec2
break
            if pred_rec is None:
                logger.warning("Missing prediction match for comment: {}".format(rec["review_comment"]))
                for cand_rec in outrec["predictions"]:
                    cand_rec["pred"] = 0
                    cand_rec["score"] = 0
                continue
for cand_rec in outrec["predictions"]:
pred_label = 0
for edit_id in pred_rec["positive_edits"]:
if edit_id == cand_rec["edit"].edit_id:
pred_label = 1
break
if cand_rec["edit"].is_identical():
pred_label = 0
cand_rec["pred"] = pred_label
cand_rec["score"] = pred_label
return out_recs
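# Inferred shape of each line in precomputed_predictions_jsonl_path, based on the fields read
# above (the real files may carry additional fields):
#   {"doc_id": "...", "comment": "...", "positive_edits": [<edit_id>, ...]}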
| aries-master | aries/alignment/precomputed.py |
import functools
import logging
import os
import sys
import datasets
import numpy as np
import torch
import tqdm
import transformers
from aries.alignment.eval import AlignerEvalCallback
from aries.util.data import batch_iter
from aries.util.edit import make_word_diff
from aries.util.training import TrainLoggerCallback
logger = logging.getLogger(__name__)
class BiencoderTransformerAlignerModel(torch.nn.Module):
def __init__(self, embedding_model, projection_size=None):
super().__init__()
self.embedding_model = embedding_model
self.projector = None
if projection_size is not None:
self.projector = torch.nn.Linear(embedding_model.config.hidden_size, projection_size)
self.pred_threshold = 0
def forward(
self,
query_input_ids,
positive_input_ids,
negative_input_ids,
query_attention_mask=None,
positive_attention_mask=None,
negative_attention_mask=None,
):
if not len(query_input_ids.shape) == 2:
raise ValueError("Expected query_input_ids to be 2D, got {}".format(query_input_ids.shape))
if query_input_ids.shape[0] != positive_input_ids.shape[0] or query_input_ids.shape[0] != negative_input_ids.shape[0]:
raise ValueError(
"Expected query_input_ids, positive_input_ids, and negative_input_ids to have the same batch size, got {} vs {} vs {}".format(
query_input_ids.shape[0], positive_input_ids.shape[0], negative_input_ids.shape[0]
)
)
query_embeddings = self.embedding_model(query_input_ids, attention_mask=query_attention_mask).last_hidden_state[:, 0, :]
positive_embeddings = self.embedding_model(positive_input_ids, attention_mask=positive_attention_mask).last_hidden_state[:, 0, :]
negative_embeddings = self.embedding_model(negative_input_ids, attention_mask=negative_attention_mask).last_hidden_state[:, 0, :]
if self.projector:
query_embeddings = self.projector(query_embeddings)
positive_embeddings = self.projector(positive_embeddings)
negative_embeddings = self.projector(negative_embeddings)
loss = self._compute_loss(query_embeddings, positive_embeddings, negative_embeddings)
loss = torch.mean(loss)
return {"loss": loss}
def _compute_loss(self, query_embeddings, positive_embeddings, negative_embeddings):
"""Margin loss on the triplets"""
query_embeddings = query_embeddings / query_embeddings.norm(dim=-1, keepdim=True)
positive_embeddings = positive_embeddings / positive_embeddings.norm(dim=-1, keepdim=True)
negative_embeddings = negative_embeddings / negative_embeddings.norm(dim=-1, keepdim=True)
positive_scores = (query_embeddings * positive_embeddings).sum(dim=-1)
negative_scores = (query_embeddings * negative_embeddings).sum(dim=-1)
# TODO: Hacky; just gives us a rough estimate of the pos/neg class divider on a single train batch
# new_pred_threshold = (positive_scores.mean().item() + negative_scores.mean().item()) / 2
new_pred_threshold = min(positive_scores.tolist())
delta = new_pred_threshold - self.pred_threshold
delta = delta * 0.01 + np.sign(delta) * 0.01
self.pred_threshold += delta
# scores_as_logits = torch.stack([positive_scores, negative_scores], axis=1)
# loss = torch.nn.functional.cross_entropy(scores_as_logits, torch.zeros(positive_scores.shape[0], dtype=torch.long, device=scores_as_logits.device), reduction='none')
# loss = torch.nn.functional.relu(1.0 - positive_scores + negative_scores).mean()
loss = torch.nn.functional.relu(0.5 - positive_scores + negative_scores).mean()
return loss
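    # Toy illustration of the margin above (hypothetical numbers): with cosine similarities
    # pos=0.8 and neg=0.5, the hinge gives relu(0.5 - 0.8 + 0.5) = 0.2; once the positive
    # score exceeds the negative score by the 0.5 margin, the triplet contributes zero loss.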
class TripletDataCollator:
def __init__(self, tokenizer, max_length, pad_to_multiple_of=None):
self.tokenizer = tokenizer
self.max_length = max_length
self.pad_to_multiple_of = pad_to_multiple_of
def __call__(self, batch):
collated_batch = dict()
tensors = self.tokenizer.pad(
{"input_ids": [x["query_input_ids"] for x in batch]},
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
padding="longest",
)
collated_batch["query_input_ids"] = tensors["input_ids"]
collated_batch["query_attention_mask"] = tensors["attention_mask"]
tensors = self.tokenizer.pad(
{"input_ids": [x["positive_input_ids"] for x in batch]},
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
padding="longest",
)
collated_batch["positive_input_ids"] = tensors["input_ids"]
collated_batch["positive_attention_mask"] = tensors["attention_mask"]
tensors = self.tokenizer.pad(
{"input_ids": [x["negative_input_ids"] for x in batch]},
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
padding="longest",
)
collated_batch["negative_input_ids"] = tensors["input_ids"]
collated_batch["negative_attention_mask"] = tensors["attention_mask"]
return collated_batch
class BiencoderTransformerAligner:
def __init__(self, config, embedding_model, tokenizer):
self.config = config
self.model = BiencoderTransformerAlignerModel(embedding_model, projection_size=config.get("embedding_projection_size", None))
self.tokenizer = tokenizer
if self.config["add_diff_tokens"]:
self.tokenizer.add_tokens(["[+", "+]", "[-", "-]"])
embedding_model.resize_token_embeddings(len(self.tokenizer))
self.max_length = self.config["max_seq_length"]
@staticmethod
def preprocess_fn(examples, tokenizer, max_length):
model_inputs = dict()
query_inputs = tokenizer(
examples["query_text"],
max_length=max_length,
padding=False,
truncation=True,
)
model_inputs["query_input_ids"] = query_inputs["input_ids"]
positive_inputs = tokenizer(
examples["positive_text"],
max_length=max_length,
padding=False,
truncation=True,
)
model_inputs["positive_input_ids"] = positive_inputs["input_ids"]
negative_inputs = tokenizer(
examples["negative_text"],
max_length=max_length,
padding=False,
truncation=True,
)
model_inputs["negative_input_ids"] = negative_inputs["input_ids"]
return model_inputs
def _base_rec_example_fields(self, rec):
return {
"doc_id": rec["doc_id"],
"source_pdf_id": rec["source_pdf_id"],
"target_pdf_id": rec["target_pdf_id"],
"review_comment": rec["review_comment"],
}
def _make_example_for_rec_triplet(self, rec, triplet):
ex = self._base_rec_example_fields(rec)
ex.update(
{
"query_text": triplet["query"],
"positive_text": self._edit_to_input_text(triplet["positive"]),
"negative_text": self._edit_to_input_text(triplet["negative"]),
}
)
return ex
def _candidate_record_to_input_text(self, rec):
if self.config["query_input_format"] == "comment_only":
return "review comment: " + rec["review_comment"]
elif self.config["query_input_format"] == "comment_with_canonical":
return "review comment: " + rec["review_comment"] + "\ncanonicalized: " + rec["canonical"]["canonicalized"]
elif self.config["query_input_format"] == "reply_comment_or_extracted_comment":
return "review comment: " + rec.get("reply_comment_line", rec["review_comment"])
# return rec.get("reply_comment_line", rec["review_comment"])
elif self.config["query_input_format"] == "reply_comment_or_extracted_comment_with_canonical":
return "review comment: " + rec.get("reply_comment_line", rec["review_comment"]) + "\ncanonicalized: " + rec["canonical"]["canonicalized"]
elif self.config["query_input_format"] == "comment_with_context":
comment_str = rec["review_comment"].strip()
if rec.get("context_side", "none") == "left":
comment_str = rec["context"].strip() + " " + comment_str
else:
comment_str = comment_str + " " + rec["context"].strip()
return "review comment: " + comment_str
raise ValueError("Unknown query_input_format {}".format(self.config["query_input_format"]))
def _edit_to_input_text(self, edit):
if self.config["edit_input_format"] == "target_text":
return edit.get_target_text()
elif self.config["edit_input_format"] == "target_text_with_context":
context = "context: none"
if len(edit.target_idxs) != 0 and min(edit.target_idxs) != 0:
context = "context: " + edit.texts2[min(edit.target_idxs) - 1]
return edit.get_target_text() + "\n\n" + context
elif self.config["edit_input_format"] == "source_text":
return edit.get_source_text()
elif self.config["edit_input_format"] == "diff":
return make_word_diff(
edit.get_source_text(),
edit.get_target_text(),
color_format="none",
)
raise ValueError("Unknown edit_input_format {}".format(self.config["edit_input_format"]))
def _make_triplets_for_rec(self, rec, max_negatives=10000):
query_text = self._candidate_record_to_input_text(rec)
triplets = []
for positive_edit in rec["positives"]:
for negative_edit in sorted(rec["negatives"], key=lambda x: -len(x.get_target_text()))[:max_negatives]:
triplets.append({"query": query_text, "positive": positive_edit, "negative": negative_edit})
return triplets
def _make_dataset(self, recs, name="dataset", shuffle=False):
if isinstance(recs, dict):
recs = list(recs.values())
exs = []
for rec in tqdm.tqdm(recs, desc="building triplets for records"):
edit_with_labels = []
edit_with_labels.extend([(x, 1) for x in rec["positives"]])
edit_with_labels.extend([(x, 0) for x in rec["negatives"]])
for triplet in self._make_triplets_for_rec(rec):
exs.append(self._make_example_for_rec_triplet(rec, triplet))
        if len(exs) == 0:
            raise ValueError("Could not build any training triplets from the given records")
        tmp = {k: [] for k in exs[0].keys()}
for ex in exs:
for k, v in ex.items():
tmp[k].append(v)
dset = datasets.Dataset.from_dict(tmp)
if shuffle:
dset = dset.shuffle()
dset = dset.map(
functools.partial(BiencoderTransformerAligner.preprocess_fn, tokenizer=self.tokenizer, max_length=self.max_length),
batched=True,
num_proc=4,
load_from_cache_file=False,
desc="Processing {}".format(name),
)
return dset
def train(self, train_recs, dev_recs):
if len(train_recs) == 0:
raise ValueError("Got empty train_recs")
if len(dev_recs) == 0:
raise ValueError("Got empty dev_recs")
training_args_dict = transformers.TrainingArguments(output_dir=self.config["output_dir"], log_level="passive").to_dict()
training_args_dict.update(self.config.get("training_args", dict()))
training_args = transformers.HfArgumentParser(transformers.TrainingArguments).parse_dict(training_args_dict)[0]
if training_args.max_steps == 0 and training_args.num_train_epochs <= 0:
logger.info("Got 0 train steps; skipping training")
return
self.rng = np.random.default_rng(self.config["seed"])
train_dset = self._make_dataset(train_recs, shuffle=False)
self.rng = np.random.default_rng(self.config["seed"])
dev_dset = self._make_dataset(dev_recs, shuffle=False)
logger.info("Train data size: {} Dev data size: {}".format(len(train_dset), len(dev_dset)))
logger.info(
"{} | {} | {}\n".format(
self.tokenizer.decode(train_dset["query_input_ids"][0]),
self.tokenizer.decode(train_dset["positive_input_ids"][0]),
self.tokenizer.decode(train_dset["negative_input_ids"][0]),
)
)
data_collator = TripletDataCollator(
self.tokenizer,
self.max_length,
pad_to_multiple_of=None,
)
model_selector_callback = AlignerEvalCallback(
self.config,
self,
dev_recs,
model_selection_metric_fn=lambda x: x["optimal_f1"],
model_to_save=self.model.embedding_model,
)
logger.info("Subsampling dev_dset for loss computation")
sub_dev_dset = dev_dset.select(np.random.default_rng(42).choice(len(dev_dset), size=min(200, len(dev_dset)), replace=False))
trainer = transformers.Trainer(
model=self.model,
args=training_args,
train_dataset=train_dset,
eval_dataset=sub_dev_dset,
tokenizer=self.tokenizer,
data_collator=data_collator,
callbacks=[model_selector_callback, TrainLoggerCallback(logger)],
compute_metrics=None,
)
_ = trainer.train()
self.model.embedding_model.load_state_dict(model_selector_callback._best_model_state)
self.model.embedding_model.save_pretrained(os.path.join(self.config["output_dir"], "ptmodel"))
self.tokenizer.save_pretrained(os.path.join(self.config["output_dir"], "ptmodel"))
def _eval_batch_size(self):
td = self.config.get("training_args", dict())
return td.get("per_device_eval_batch_size", td.get("per_device_train_batch_size", 16))
def _embed_texts(self, texts, batch_size=None, cache_dict=None, use_tqdm=False):
"""cache_dict contains cached embeddings for some texts; missing ones
will be computed and the cache_dict will be updated"""
if len(texts) == 0:
return np.array([])
if batch_size is None:
batch_size = self._eval_batch_size()
if cache_dict is None:
cache_dict = dict()
final_embeddings = [cache_dict.get(x, None) for x in texts]
text_idxs = [idx for idx, x in enumerate(final_embeddings) if x is None]
orig_texts = texts
texts = [orig_texts[idx] for idx in text_idxs]
if len(text_idxs) != 0:
embeddings = []
pbar = tqdm.tqdm(
batch_iter(texts, batch_size=batch_size),
total=np.ceil(len(texts) / batch_size),
disable=(not use_tqdm),
desc="embedding text batches",
)
for text_batch in pbar:
inputs = self.tokenizer(
text_batch,
return_tensors="pt",
max_length=self.max_length,
padding="longest",
truncation=True,
)
inputs = {k: v.to(self.model.embedding_model.device) for k, v in inputs.items()}
batch_embeddings = self.model.embedding_model(**inputs).last_hidden_state[:, 0, :]
if self.model.projector:
batch_embeddings = self.model.projector(batch_embeddings)
batch_embeddings = batch_embeddings.detach().cpu().numpy()
embeddings.append(batch_embeddings)
embeddings = np.concatenate(embeddings, axis=0)
for idx, embed in zip(text_idxs, embeddings):
final_embeddings[idx] = embed
cache_dict[orig_texts[idx]] = embed
final_embeddings = np.stack(final_embeddings, axis=0)
return final_embeddings
def predict_many(self, *args, **kwargs):
was_training = self.model.training
self.model.eval()
with torch.no_grad():
results = self._predict_many(*args, **kwargs)
self.model.train(was_training)
return results
def _predict_many(self, test_recs, quiet=False):
out_recs = []
pred_threshold = self.config.get("fixed_pred_threshold", None)
if pred_threshold is None:
pred_threshold = self.model.pred_threshold
if not quiet:
logger.info("Predicting with pred_threshold={}".format(pred_threshold))
embed_cache_dict = dict()
for rec in tqdm.tqdm(test_recs, "predicting", disable=quiet):
outrec = {
"input_record": rec,
"predictions": [{"edit": cand, "pred": None, "score": None} for cand in rec["candidates"]],
}
query_embedding = self._embed_texts([self._candidate_record_to_input_text(rec)])[0]
query_embedding /= np.linalg.norm(query_embedding)
if len(outrec["predictions"]) != 0:
candidate_embeddings = self._embed_texts(
[self._edit_to_input_text(cand["edit"]) for cand in outrec["predictions"]], cache_dict=embed_cache_dict
)
candidate_embeddings /= np.linalg.norm(candidate_embeddings, axis=1, keepdims=True)
cosine_similarities = query_embedding.dot(candidate_embeddings.T).tolist()
best_candidxs = np.argsort(cosine_similarities).tolist()
best_candidx_score = cosine_similarities[best_candidxs[-1]]
for candidx, cand in enumerate(outrec["predictions"]):
cand["best_group_score"] = best_candidx_score
cand["cosine_score"] = cosine_similarities[candidx]
cand["pred"] = (
1 if cosine_similarities[candidx] > pred_threshold and cosine_similarities[candidx] >= (best_candidx_score - 0.2) else 0
)
cand["score"] = cosine_similarities[candidx]
out_recs.append(outrec)
return out_recs
| aries-master | aries/alignment/biencoder.py |
import itertools
import json
import logging
import os
import sys
import tqdm
from aries.util.data import index_by
from aries.util.edit import make_word_diff
from aries.util.gpt3 import Gpt3CacheClient
logger = logging.getLogger(__name__)
class GptChatAligner:
def __init__(self, config):
self.config = config
self.system_prompt = self.config["gpt_system_prompt"]
self.prompt_template = self.config["gpt_prompt_template"]
self.model_name = self.config["gpt_model"]
self.max_length = self.config["gpt_max_length"]
self.cache_db_path = self.config["cache_db_path"]
def train(self, train_recs, dev_recs):
logger.warning("GPT doesn't train; ignoring call to train()")
def _predict_one(self, comment, edit, gptcli):
tags = {
"{{review_comment}}": comment,
"{{target_paragraph}}": edit.get_target_text(),
"{{source_paragraph}}": edit.get_source_text(),
"{{diff_paragraph}}": make_word_diff(
edit.get_source_text(),
edit.get_target_text(),
color_format="none",
),
}
msg = self.prompt_template
for k, v in sorted(tags.items()):
msg = msg.replace(k, v)
messages = [
{"role": "system", "content": self.system_prompt},
{"role": "user", "content": msg},
]
resp = gptcli.chat_completion(
model=self.model_name,
messages=messages,
temperature=0.0,
max_tokens=self.max_length,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
result_text = resp["choices"][0]["message"]["content"]
result_words = set(result_text.lower().replace(".", " ").replace(",", " ").replace("\n", " ").replace('"', " ").replace("'", " ").split(" "))
# Extract yes/no answer from response text
has_yes = "yes" in result_words or "answer=yes" in result_words
has_no = "no" in result_words or "answer=no" in result_words
pred = None
        if has_yes and has_no:
            logger.error("Bad response: {}".format(result_text))
            raise ValueError("Got both yes and no in response")
elif has_yes:
pred = 1
elif has_no:
pred = 0
else:
logger.error("Bad response: {}".format(result_text))
raise ValueError("Got neither yes nor no in response")
return pred, resp
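    # Illustrative (hypothetical) gpt_prompt_template showing the placeholders substituted in
    # _predict_one above; the actual prompts used in experiments may differ:
    #   "Review comment: {{review_comment}}\n\nEdit (diff): {{diff_paragraph}}\n\n"
    #   "Does this edit address the review comment? Answer yes or no."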
def predict_many(self, test_recs):
out_recs = []
total_tokens, uncached_total_tokens = 0, 0
loopname = "{}.predict_many".format(self.__class__.__name__)
with tqdm.trange(sum(len(x["candidates"]) for x in test_recs), miniters=1, desc=loopname) as pbar:
with Gpt3CacheClient(self.cache_db_path) as gptcli:
for rec in test_recs:
outrec = {
"input_record": rec,
"predictions": [{"edit": cand, "pred": None, "score": None} for cand in rec["candidates"]],
}
out_recs.append(outrec)
for pred_rec in outrec["predictions"]:
pred_label, resp = self._predict_one(rec["review_comment"], pred_rec["edit"], gptcli)
total_tokens += resp["usage"]["total_tokens"]
uncached_total_tokens += resp["usage"]["uncached_total_tokens"]
pred_rec["pred"] = pred_label
pred_rec["score"] = pred_label
pbar.set_description(f"{loopname} tt={total_tokens} utt={uncached_total_tokens}", refresh=False)
pbar.update(1)
return out_recs
class GptChatFullPaperAligner:
def __init__(self, config):
self.config = config
self.system_prompt = self.config["gpt_system_prompt"]
self.prompt_template = self.config["gpt_prompt_template"]
self.model_name = self.config["gpt_model"]
self.max_length = self.config["gpt_max_length"]
self.cache_db_path = self.config["cache_db_path"]
self.output_dir = self.config.get("output_dir", None)
self.max_response_length = 500
self.raw_responses = []
def train(self, train_recs, dev_recs):
logger.warning("GPT doesn't train; ignoring call to train()")
def _make_chunked_paper_diff(self, doc_edits, chunk_size, gptcli):
full_diff_string, edits_by_id = doc_edits.make_paper_diff_string(
color_format="none",
print_ids_only=True,
return_edit_ids=True,
)
para_chunks = full_diff_string.split("\n\n")
diff_chunks = []
cur_chunk = []
cur_chunk_len = 0
# Note: we don't account for individual paras being bigger than
# chunk_size; that probably never happens anyway
for para_chunk in para_chunks:
# Add 2 for the stripped \n\n
new_chunk_len = gptcli.estimate_num_tokens(para_chunk, self.model_name) + 2
if cur_chunk_len + new_chunk_len > chunk_size:
diff_chunks.append("\n\n".join(cur_chunk))
cur_chunk = []
cur_chunk_len = 0
cur_chunk.append(para_chunk)
cur_chunk_len += new_chunk_len
if len(cur_chunk) != 0:
diff_chunks.append("\n\n".join(cur_chunk))
return diff_chunks, edits_by_id
def _make_comments_text_blob(self, recs):
comments_text_blob = ""
for idx, comment in enumerate(recs):
comments_text_blob += comment.replace("\n", " ") + "\ncomment id: {}\n\n".format(idx)
return comments_text_blob
def _predict_one_doc(self, doc_edits, comments, gptcli):
comments_text = self._make_comments_text_blob(comments)
base_length = gptcli.estimate_num_tokens(self.prompt_template, self.model_name) + gptcli.estimate_num_tokens(
self.system_prompt, self.model_name
)
if "{{review_comments}}" in self.prompt_template:
base_length += gptcli.estimate_num_tokens(comments_text, self.model_name)
chunk_size = self.max_length - base_length - self.max_response_length
diff_chunks, edits_by_id = self._make_chunked_paper_diff(doc_edits, chunk_size=chunk_size, gptcli=gptcli)
all_response_lines_by_comment = {idx: [] for idx in range(len(comments))}
total_tokens, uncached_total_tokens = 0, 0
for chunk in diff_chunks:
tags = {
"{{review_comments}}": comments_text,
"{{paper_diff_chunk}}": chunk,
}
msg = self.prompt_template
for k, v in sorted(tags.items()):
msg = msg.replace(k, v)
messages = [
{"role": "system", "content": self.system_prompt},
{"role": "user", "content": msg},
]
            if base_length + gptcli.estimate_num_tokens(chunk, self.model_name) + self.max_response_length > 8150:
                logger.warning(
                    "Prompt may exceed the model context window: base_length={}, chunk_tokens={}, max_response_tokens={}".format(
                        base_length, gptcli.estimate_num_tokens(chunk, self.model_name), self.max_response_length
                    )
                )
try:
resp = gptcli.chat_completion(
model=self.model_name,
messages=messages,
temperature=0.0,
# max_tokens=self.max_length,
max_tokens=self.max_response_length,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
            except Exception:
                logger.exception(
                    "GPT chat completion failed for pdf pair ({}, {})".format(
                        doc_edits.s2orc1["paper_id"], doc_edits.s2orc2["paper_id"]
                    )
                )
                raise
total_tokens += resp["usage"]["total_tokens"]
uncached_total_tokens += resp["usage"]["uncached_total_tokens"]
result_text = resp["choices"][0]["message"]["content"]
self.raw_responses.append(
{
# "doc_id": doc_id,
"source_pdf_id": doc_edits.s2orc1["paper_id"],
"target_pdf_id": doc_edits.s2orc2["paper_id"],
"comments": comments,
"comments_text": comments_text,
"response_text": result_text,
}
)
for line in result_text.split("\n"):
# Imperfect but good-enough detection of JSON lines
if not line.startswith("{"):
continue
# Hacky; fix some specific known failures
line = line.replace(" \\phi", " \\\\phi")
try:
obj = json.loads(line)
except json.JSONDecodeError as e:
logger.error("Failed to parse JSON line: {}".format(line))
# raise e
continue
all_response_lines_by_comment[obj["comment_id"]].append(obj)
results = []
for comment_id, resps in all_response_lines_by_comment.items():
# Ignore the abstract (9999) since it isn't diffed in DocEdits
all_edit_ids = sorted(set(itertools.chain(*[x["edit_ids"] for x in resps])) - {9999})
results.append(
{
"review_comment": comments[comment_id],
"predicted_positive_edits": [
{
"source_idxs": edits_by_id[x].source_idxs,
"target_idxs": edits_by_id[x].target_idxs,
}
for x in all_edit_ids
],
}
)
usage_info = {
"total_tokens": total_tokens,
"uncached_total_tokens": uncached_total_tokens,
}
return results, usage_info
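    # The parsing loop above expects the model to answer with one JSON object per line,
    # e.g. (hypothetical): {"comment_id": 0, "edit_ids": [3, 17]}
    # Lines that do not start with "{" or that fail to parse are skipped.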
def predict_many(self, test_recs):
out_recs = []
# We need to run the model for the pdf pair of each *candidate*, since
# it is possible to have candidates sampled from other documents than
# the one the comment was for.
comment_pdf_pairs = []
for rec in test_recs:
for edit in rec["candidates"]:
comment_pdf_pairs.append(
{
"comment": rec["review_comment"],
"pdf_pair": (edit.doc_edits.s2orc1["paper_id"], edit.doc_edits.s2orc2["paper_id"]),
"doc_edits": edit.doc_edits,
}
)
# For consistency, include comments in the many-to-many alignment
# even when no candidates are given
if "source_pdf_id" in rec and "target_pdf_id" in rec:
comment_pdf_pairs.append(
{
"comment": rec["review_comment"],
"pdf_pair": (rec["source_pdf_id"], rec["target_pdf_id"]),
"doc_edits": None,
}
)
comment_pdf_pairs_by_pdf = index_by(comment_pdf_pairs, "pdf_pair")
total_tokens, uncached_total_tokens = 0, 0
with Gpt3CacheClient(self.cache_db_path) as gptcli:
loopname = "{}.predict_many".format(self.__class__.__name__)
predictions_by_pdf = dict()
pbar = tqdm.tqdm(comment_pdf_pairs_by_pdf.items(), miniters=1, desc=loopname)
for pdf_pair, comment_recs in pbar:
if all(x["doc_edits"] is None for x in comment_recs):
# No candidates for this pdf pair, so skip it
continue
predictions_by_pdf[pdf_pair], token_usage = self._predict_one_doc(
[x for x in comment_recs if x["doc_edits"] is not None][0]["doc_edits"],
sorted(set([x["comment"] for x in comment_recs])),
gptcli,
)
predictions_by_pdf[pdf_pair] = index_by(predictions_by_pdf[pdf_pair], "review_comment", one_to_one=True)
total_tokens += token_usage["total_tokens"]
uncached_total_tokens += token_usage["uncached_total_tokens"]
pbar.set_description(f"{loopname} tt={total_tokens} utt={uncached_total_tokens}", refresh=False)
for rec in test_recs:
outrec = {
"input_record": rec,
"predictions": [{"edit": cand, "pred": None, "score": None} for cand in rec["candidates"]],
}
out_recs.append(outrec)
for pred in outrec["predictions"]:
pred_rec = predictions_by_pdf[(pred["edit"].doc_edits.s2orc1["paper_id"], pred["edit"].doc_edits.s2orc2["paper_id"])][
rec["review_comment"]
]
pos_edits = [] if pred_rec is None else pred_rec["predicted_positive_edits"]
pred_label = 0
for edit in pos_edits:
if (sorted(edit["source_idxs"]) == sorted(pred["edit"].source_idxs)) and (
sorted(edit["target_idxs"]) == sorted(pred["edit"].target_idxs)
):
pred_label = 1
break
pred["pred"] = pred_label
pred["score"] = pred_label
if self.output_dir is not None:
with open(os.path.join(self.output_dir, "raw_gpt_outputs.json"), "w") as f:
json.dump(self.raw_responses, f)
return out_recs
| aries-master | aries/alignment/gpt.py |
import collections
import itertools
import json
import logging
import os
import re
import sys
import time
import nltk
import nltk.util
import numpy as np
import tqdm
from nltk.util import ngrams
import aries.util.data
import aries.util.s2orc
from aries.alignment.doc_edits import make_full_aligns
from aries.util.data import deduplicate_by, index_by, iter_jsonl_files
from aries.util.edit import find_overlapping_substrings
from aries.util.logging import init_logging
logger = logging.getLogger(__name__)
STOPWORDS = set(nltk.corpus.stopwords.words("english")) | set(",./<>?!@#$%^&*()_+-={}|[]\\,")
def get_author_replies(review_note, forum_notes):
replies = [x for x in forum_notes if x["replyto"] == review_note["id"] and any("author" in xx.lower() for xx in x["writers"])]
# Sometimes authors break up their response by replying to their own message
nested_replies = []
for reply in replies:
nested_replies.extend(get_author_replies(reply, forum_notes))
return replies + nested_replies
def _get_combined_text2_overlap_spans_overlapping_span(all_overlaps, span, is_sorted=False):
"""Given a set of overlap span pairs, find all of the ones for which the
text2 span overlaps the given span, and return a list of just the text2
spans of those overlaps, merged such that any partially-overlapping spans
are collapsed into a single span in the list."""
if not is_sorted:
# Sort by text2 idxs; this allows fast lookups for overlaps contained within a span
all_overlaps = sorted(all_overlaps, key=lambda x: x[1])
overlaps = []
for ov in all_overlaps:
if ov[1][0] >= span[0] and ov[1][0] < span[1]:
overlaps.append(ov)
elif ov[1][1] > span[0] and ov[1][1] <= span[1]:
overlaps.append(ov)
if len(overlaps) <= 1:
return [x[1] for x in overlaps]
combined = []
last_span = overlaps[0][1]
for ov in overlaps[1:]:
if ov[1][0] < last_span[1]:
last_span = (last_span[0], ov[1][1])
else:
combined.append(last_span)
last_span = ov[1]
combined.append(last_span)
return combined
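# Worked example (hypothetical spans): if the text2 sides of the overlaps are (0, 5), (4, 9),
# and (20, 30), then querying span=(0, 10) keeps the first two and merges them into [(0, 9)].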
TextReplyMatch = collections.namedtuple("TextReplyMatch", "doc_id review_id reply_id match_text spans line_span next_line_span reply_text")
def get_tight_span(toks, line_overlaps, prevnewline, nextnewline):
# There are two main ways authors mess with the text at this
# point: (1) they correct typos, causing non-exact matches, and
# (2) they add prefixes or quotation marks to the text (e.g.,
# "Comment 1:"). To deal with the first case, we want to
# include the whole line even if there are some non-matched
# spans in the middle. But, to deal with the second case we
# want to omit the start or end of the line if those aren't
# matched and they don't occur in the middle of a word.
tight_span = [max(min(x[0] for x in line_overlaps), prevnewline), min(max(x[1] for x in line_overlaps), nextnewline)]
while toks[tight_span[0]].isspace() or toks[tight_span[0]] in ".:)*":
tight_span[0] += 1
# Expand back if the span started mid-word (usually a capitalization difference)
while toks[tight_span[0] - 1].isalpha():
tight_span[0] -= 1
# Citations are weird; never prune when we have one
if re.search(r" et |[0-9]{4}", toks[prevnewline : tight_span[0]]):
tight_span[0] = prevnewline
while toks[tight_span[1] - 1].isspace():
tight_span[1] -= 1
if tight_span[0] > prevnewline + 20:
tight_span[0] = prevnewline
if tight_span[1] < nextnewline - 10:
tight_span[1] = nextnewline
return tight_span
def _get_match_for_overlap(toks1, toks2, overlap, all_overlaps, min_line_overlap_ratio, doc_id, review_id, reply_id):
# Check if it takes up most of a line (quotes usually do)
prevnewline = max(0, toks2.rfind("\n", 0, overlap[1][0]))
nextnewline = toks2.find("\n", overlap[1][1] - 1)
nextnewline = nextnewline if nextnewline >= 0 else len(toks2)
while nextnewline > prevnewline and toks2[nextnewline - 1] == "\n":
nextnewline -= 1
    if nextnewline == prevnewline:
        logger.warning(
            "Empty line span while matching overlap: prevnewline={}, nextnewline={}, overlap={}, len(toks2)={}".format(
                prevnewline, nextnewline, overlap, len(toks2)
            )
        )
line_overlaps = _get_combined_text2_overlap_spans_overlapping_span(all_overlaps, (prevnewline, nextnewline))
total_line_overlap = sum(max(x[1], prevnewline) - min(x[0], nextnewline) for x in line_overlaps)
lineratio = total_line_overlap / (nextnewline - prevnewline)
if lineratio < min_line_overlap_ratio:
return None
tight_span = get_tight_span(toks2, line_overlaps, prevnewline, nextnewline)
# if abs(tight_span[0] - prevnewline) > 2 or abs(tight_span[0] - nextnewline) > 2:
# print('difference! oldline={},\nnewline={}'.format(toks2[prevnewline:nextnewline], toks2[tight_span[0]:tight_span[1]],))
nextnextnewline = nextnewline
while nextnextnewline < len(toks2) and toks2[nextnextnewline] == "\n":
nextnextnewline += 1
nnlend = nextnextnewline
while nextnextnewline < len(toks2) and toks2[nextnextnewline] != "\n":
nextnextnewline += 1
# print(toks1[overlap[0][0]:overlap[0][1]])
# all_matches.append((toks1[overlap[0][0]:overlap[0][1]], docrec['docid'], revrec['id'], reply['id'], overlap))
# all_matches.append((toks2[prevnewline:nextnewline], docrec["docid"], revrec["id"], reply["id"], overlap, (prevnewline, nextnewline), toks2))
return TextReplyMatch(
doc_id,
review_id,
reply_id,
# None,
# None,
# None,
# docrec["docid"],
# revrec["id"],
# reply["id"],
# toks2[prevnewline:nextnewline],
toks2[tight_span[0] : tight_span[1]],
overlap,
# (prevnewline, nextnewline),
tuple(tight_span),
(nnlend, nextnextnewline),
toks2,
)
def get_author_comment_replies_for_doc(forum_id, review_replies, min_length=80, min_line_overlap_ratio=0.9):
all_matches = []
for review_rec in review_replies:
replies = review_rec["author_replies"]
used_spans = set()
for reply in replies:
toks1 = "\n".join([str(x) for x in review_rec["content"].values()])
toks2 = reply["content"]["comment"]
overlaps = find_overlapping_substrings(toks1, toks2, min_length=min_length)
overlaps.sort(key=lambda x: x[1])
for overlap in overlaps:
m = _get_match_for_overlap(
toks1, toks2, overlap, overlaps, min_line_overlap_ratio, review_rec["forum"], review_rec["id"], reply["id"]
)
if m is not None:
sp = (m.doc_id, m.review_id, m.reply_id, m.line_span)
if sp not in used_spans:
all_matches.append(m)
used_spans.add(sp)
else:
logger.debug("Skipping duplicate match: %s", sp)
return all_matches
def make_bow(txt):
return collections.Counter(ngrams([x for x in txt.split() if x.lower() not in STOPWORDS], 1))
def _similarity_many_many_minl(txts1, txts2, match_denom=False):
ngs1 = [make_bow(txt) for txt in txts1]
ngs2 = [make_bow(txt) for txt in txts2]
sim_mat = np.zeros((len(txts1), len(txts2)))
if match_denom:
denom = max(sum(x.values()) for x in itertools.chain(ngs1, ngs2))
def sim_fn(counter1, counter2):
return sum((counter1 & counter2).values()) / denom
else:
def sim_fn(counter1, counter2):
return sum((counter1 & counter2).values()) / max(40, min(sum(counter1.values()), sum(counter2.values())))
for idx1, idx2 in itertools.product(range(len(txts1)), range(len(txts2))):
ng1 = ngs1[idx1]
ng2 = ngs2[idx2]
        if len(ng1) == 0 and len(ng2) == 0:
            # Fall back to character-level overlap when both texts are entirely stopwords
            sim_mat[idx1, idx2] = sim_fn(collections.Counter(txts1[idx1]), collections.Counter(txts2[idx2]))
        else:
            sim_mat[idx1, idx2] = sim_fn(ng1, ng2)
return sim_mat
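# Illustrative sketch (hypothetical texts): with match_denom=False the score is the count of
# shared non-stopword unigrams divided by max(40, the smaller bag's total), so very short texts
# are penalized by the fixed floor of 40.
#   _similarity_many_many_minl(["we added an ablation table"], ["added a new ablation table"])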
def _get_high_similarity_comment_edit_texts(comments, edits, sim_threshold):
output_matches = []
t2s = []
for edit_idx, edit in enumerate(edits):
try:
output_text = " ".join(edit.get_added_tokens())
except RecursionError:
logger.error("Recursion error for edit %s", edit_idx)
output_text = ""
t2s.append(output_text)
sim_mat = _similarity_many_many_minl(comments, t2s, match_denom=False)
for cidx, rec in enumerate(comments):
# If there are multiple matches, take only the best; others are sometimes spurious
best = None
for eidx in range(len(t2s)):
if sim_mat[cidx, eidx] <= sim_threshold:
continue
edit = edits[eidx]
# We allow a little wiggle room for off-by-1's (could come from bad splits/parses),
# but it's unlikely to be correct if there were many non-consecutive matches
if (sorted(edit.target_idxs)[-1] - sorted(edit.target_idxs)[0]) >= (len(edit.target_idxs) * 2 - 1):
continue
if edits[eidx].is_full_deletion() or edits[eidx].is_identical() or len(edits[eidx].get_added_tokens()) < 5:
continue
if best is None or best[2] < sim_mat[cidx, eidx]:
best = (cidx, eidx, sim_mat[cidx, eidx])
if best is not None:
output_matches.append(best)
return output_matches
def get_high_precision_reply_based_alignments_for_doc(
pdf_pair_record,
review_replies,
sim_threshold,
min_reply_overlap_chars,
min_line_overlap_ratio,
s2orc_fetcher,
):
doc_id = pdf_pair_record["doc_id"]
s2orc1 = aries.util.s2orc.load_s2orc(pdf_pair_record["source_pdf_id"], s2orc_fetcher)
s2orc2 = aries.util.s2orc.load_s2orc(pdf_pair_record["target_pdf_id"], s2orc_fetcher)
    if s2orc1 is None or s2orc2 is None or s2orc1["paper_id"] == s2orc2["paper_id"]:
        return None, [], [], []
forum_replies = get_author_comment_replies_for_doc(
doc_id,
review_replies,
min_length=min_reply_overlap_chars,
min_line_overlap_ratio=min_line_overlap_ratio,
)
review_recs = [
{
"review_id": x.review_id,
"review_comment": x.match_text,
"reply": x.reply_text[x.next_line_span[0] : x.next_line_span[1]],
"full_match": x,
}
for x in forum_replies
]
# If we don't even have enough tokens to form a sentence, it's probably invalid
review_recs = [x for x in review_recs if len(x["review_comment"].split()) >= 4]
review_recs = deduplicate_by(review_recs, "review_comment")
aligns = make_full_aligns(s2orc1, s2orc2)
aug_review_comments = [x["reply"] for x in review_recs]
output_matches = []
for cidx, eidx, sim in _get_high_similarity_comment_edit_texts(aug_review_comments, aligns.paragraph_edits, sim_threshold):
edit = aligns.paragraph_edits[eidx]
output_matches.append(
{
"doc_id": doc_id,
"source_pdf_id": pdf_pair_record["source_pdf_id"],
"target_pdf_id": pdf_pair_record["target_pdf_id"],
"review_id": review_recs[cidx]["review_id"],
"edit": edit,
"review_comment": review_recs[cidx]["review_comment"],
"reply": review_recs[cidx]["reply"],
"forum_reply": review_recs[cidx]["full_match"],
"similarity": sim,
}
)
paper_edit_record = aligns.to_json()
paper_edit_record["doc_id"] = doc_id
review_comment_records = [
{
"comment_id": cidx,
"doc_id": doc_id,
"annotation": "synthetic",
"comment": x["review_comment"],
"comment_context": "",
"review_id": x["review_id"],
}
for cidx, x in enumerate(output_matches)
]
edit_label_records = [
{"doc_id": doc_id, "comment_id": cidx, "positive_edits": [x["edit"].edit_id], "negative_edits": [], "annotation": "synthetic"}
for cidx, x in enumerate(output_matches)
]
return paper_edit_record, review_comment_records, edit_label_records, output_matches
def augment_config(config):
config_defaults = {
"similarity_threshold": 0.26,
"min_reply_overlap_chars": 40,
"min_line_overlap_ratio": 0.9,
"seed": 42,
}
for k, v in config_defaults.items():
config[k] = config.get(k, v)
NEEDED_KEYS = ["s2orc_base_path", "output_dir", "split_ids_file", "split_name", "review_replies_file"]
missing_keys = [x for x in NEEDED_KEYS if x not in config]
if len(missing_keys) > 0:
raise ValueError("Missing config keys: %s" % missing_keys)
def main():
with open(sys.argv[1]) as f:
config = json.load(f)
augment_config(config)
    os.makedirs(config["output_dir"], exist_ok=True)
    init_logging(
        logfile=os.path.join(config["output_dir"], "logging_output.log"),
        level=logging.INFO,
    )
with open(config["split_ids_file"]) as f:
pdf_pair_ids = json.load(f)[config["split_name"]]
review_replies = list(iter_jsonl_files([config["review_replies_file"]]))
review_replies_by_docid = index_by(review_replies, "forum")
paper_edit_records = []
review_comment_records = []
edit_label_records = []
full_match_records = []
pbar = tqdm.tqdm(pdf_pair_ids)
with aries.util.s2orc.S2orcFetcherSqlite(
config.get("s2orc_db_path", ":memory:"),
fallback_fetcher=aries.util.s2orc.S2orcFetcherFilesystem(config["s2orc_base_path"]),
update_db=False,
) as fetcher:
for pdf_pair in pbar:
if pdf_pair["doc_id"] not in review_replies_by_docid:
continue
per, rcr, elr, fmr = get_high_precision_reply_based_alignments_for_doc(
pdf_pair,
review_replies_by_docid[pdf_pair["doc_id"]],
sim_threshold=config["similarity_threshold"],
min_reply_overlap_chars=config["min_reply_overlap_chars"],
min_line_overlap_ratio=config["min_line_overlap_ratio"],
s2orc_fetcher=fetcher,
)
                if per is not None:
                    paper_edit_records.append(per)
review_comment_records.extend(rcr)
for elr_i in elr:
elr_i["split"] = config["split_name"]
edit_label_records.extend(elr)
full_match_records.extend(fmr)
pbar.set_description("n_results={}".format(len(edit_label_records)), refresh=False)
with open(os.path.join(config["output_dir"], "paper_edits.jsonl"), "w") as f:
for rec in paper_edit_records:
f.write(json.dumps(rec) + "\n")
with open(os.path.join(config["output_dir"], "review_comments.jsonl"), "w") as f:
for rec in review_comment_records:
f.write(json.dumps(rec) + "\n")
with open(os.path.join(config["output_dir"], "edit_labels.jsonl"), "w") as f:
for rec in edit_label_records:
f.write(json.dumps(rec) + "\n")
if __name__ == "__main__":
main()
| aries-master | scripts/generate_synthetic_data.py |
import collections
import itertools
import json
import logging
import os
import sys
import numpy as np
import sacrebleu
import torch
import tqdm
import transformers
import aries.util.data
import aries.util.s2orc
from aries.alignment.biencoder import BiencoderTransformerAligner
from aries.alignment.bm25 import BM25Aligner
from aries.alignment.cross_encoder import PairwiseTransformerAligner
from aries.alignment.doc_edits import DocEdits
from aries.alignment.eval import do_model_eval
from aries.alignment.gpt import GptChatAligner, GptChatFullPaperAligner
from aries.alignment.other import MultiStageAligner
from aries.alignment.precomputed import PrecomputedEditsAligner
from aries.util.data import index_by, iter_jsonl_files
from aries.util.logging import init_logging, pprint_metrics
logger = logging.getLogger(__name__)
try:
# Needed for SLED models
import sled
except ImportError:
sled = None
def _load_transformer(config, cls):
transformer_model = cls.from_pretrained(config["model_name_or_path"])
if torch.cuda.device_count() > 0:
transformer_model = transformer_model.to(torch.device("cuda"))
if config.get("model_adapter", None) is not None:
logger.info("initializing adapter: {}".format(config["model_adapter"]))
transformer_model.load_adapter(config["model_adapter"], source="hf", load_as="adapter", set_active=True)
logger.info(transformer_model.adapter_summary())
tokenizer = transformers.AutoTokenizer.from_pretrained(config["model_name_or_path"])
return transformer_model, tokenizer
def init_model_from_config(config):
model = None
if config["model_type"] == "cross_encoder":
transformer_model, tokenizer = _load_transformer(config, transformers.AutoModelForSequenceClassification)
model = PairwiseTransformerAligner(config, transformer_model, tokenizer)
elif config["model_type"] == "biencoder":
transformer_model, tokenizer = _load_transformer(config, transformers.AutoModel)
model = BiencoderTransformerAligner(config, transformer_model, tokenizer)
elif config["model_type"] == "gpt":
model = GptChatAligner(config)
elif config["model_type"] == "gpt_full_paper":
model = GptChatFullPaperAligner(config)
elif config["model_type"] == "bm25":
model = BM25Aligner(config)
elif config["model_type"] == "precomputed":
model = PrecomputedEditsAligner(config)
else:
raise ValueError("Unknown model type: {}".format(config["model_type"]))
return model
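# Illustrative (hypothetical) entry for the "model_pipeline" list consumed below; only
# "model_type" is interpreted here, and the remaining keys are passed through to whichever
# aligner class is constructed (exact keys depend on the model type):
#   {"model_type": "cross_encoder", "model_name_or_path": "bert-base-uncased", "model_adapter": None}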
def train_eval_main(config, split_data):
models = []
# Initial models are treated as pre-processing filters and do not train
for model_conf in config["model_pipeline"][:-1]:
model_conf["output_dir"] = config["output_dir"]
model_conf["seed"] = config["seed"]
model = init_model_from_config(model_conf)
logger.info("model_cls: {}".format(str(model.__class__.__name__)))
models.append(model)
model_conf = config["model_pipeline"][-1]
model_conf["output_dir"] = config["output_dir"]
model_conf["seed"] = config["seed"]
model = init_model_from_config(model_conf)
logger.info("main model_cls: {}".format(str(model.__class__.__name__)))
model.train(split_data["train"], split_data["dev"])
models.append(model)
model = MultiStageAligner(config, models)
eval_splits = ["dev", "test"]
if config.get("do_final_evaluation_on_train", False):
eval_splits.append("train")
dev_threshold = None
for name in eval_splits:
recs = split_data[name]
metrics, all_results, by_review = do_model_eval(model, recs, custom_decision_threshold=dev_threshold, custom_threshold_name="devthresh")
if name == "dev":
dev_threshold = metrics.get("optimal_decision_threshold", None)
logger.info("Done. Writing output...")
with open(os.path.join(config["output_dir"], "{}_inferences.jsonl".format(name)), "w") as f:
for res in all_results:
f.write(json.dumps(res) + "\n")
logger.info("Final {} metrics:".format(name))
pprint_metrics(metrics, logger, name=name)
with open(os.path.join(config["output_dir"], "{}_metrics.json".format(name)), "w") as f:
if "bleu" in metrics and isinstance(metrics["bleu"], sacrebleu.metrics.bleu.BLEUScore):
metrics["bleu"] = metrics["bleu"].score
json.dump(metrics, f)
with open(os.path.join(config["output_dir"], "{}_inferences_by_review.jsonl".format(name)), "w") as f:
for rec in by_review.values():
f.write(json.dumps(rec) + "\n")
def make_revision_alignment_prediction_data(
config,
review_comments_by_doc,
paper_edits_by_doc,
edit_labels_file,
max_negatives,
negative_sample_method="same_doc",
hard_negative_ratio=0.0,
seed=None,
):
if seed is None:
seed = config["seed"]
edit_labels = list(iter_jsonl_files([edit_labels_file]))
edit_labels_by_doc = index_by(edit_labels, "doc_id")
all_split_edits = list(
itertools.chain(*[[(doc_id, x) for x in paper_edits_by_doc[doc_id].paragraph_edits] for doc_id in edit_labels_by_doc.keys()])
)
examples = []
rng = np.random.default_rng(seed)
for doc_id in edit_labels_by_doc.keys():
distractor_idxs = rng.choice(
len(all_split_edits),
size=min(len(all_split_edits), config["distractor_reservoir_size"]),
replace=False,
)
distractor_pool = [all_split_edits[i][1] for i in distractor_idxs if all_split_edits[i][0] != doc_id]
doc_examples = get_alignments_for_revision(
config,
rng,
doc_id,
review_comments_by_doc[doc_id],
paper_edits_by_doc[doc_id],
edit_labels=edit_labels_by_doc[doc_id],
max_negatives=max_negatives,
distractor_pool=distractor_pool,
negative_sample_method=negative_sample_method,
hard_negative_ratio=hard_negative_ratio,
)
examples.extend(doc_examples)
return examples
def _filter_edits_by_type(edit_list, keep_type, min_length=0):
newlist = None
if keep_type == "full_additions":
newlist = [edit for edit in edit_list if edit.is_full_addition()]
elif keep_type == "diffs":
newlist = [edit for edit in edit_list if not edit.is_identical()]
elif keep_type == "source_diffs":
newlist = [edit for edit in edit_list if (len(edit.source_idxs) != 0 and not edit.is_identical())]
else:
raise ValueError("Invalid candidate edit type {}".format(keep_type))
if min_length > 0:
newlist = [edit for edit in newlist if len(edit.get_source_text() + edit.get_target_text()) >= min_length]
return newlist
def get_alignments_for_revision(
config,
rng,
doc_id,
review_comments,
edits,
edit_labels,
max_negatives=999999,
distractor_pool=None,
negative_sample_method="same_doc",
hard_negative_ratio=0.0,
):
review_comments_by_id = index_by(review_comments, "comment_id", one_to_one=True)
examples = []
for record in edit_labels:
positives = [edits.by_id(x) for x in record["positive_edits"]]
positives = _filter_edits_by_type(positives, config["candidate_edit_type"], min_length=config["candidate_min_chars"])
pos_ids = set([x.edit_id for x in positives])
if negative_sample_method == "same_doc":
# Assume all non-positive the paras from the same doc are negatives (appropriate when positives are high-recall)
negatives = [x for idx, x in enumerate(edits.paragraph_edits) if x.edit_id not in pos_ids]
elif negative_sample_method == "other_docs":
# Only sample negatives from edits to other docs (appropriate when positives are low-recall)
if distractor_pool is None:
raise ValueError("Need distractor edits from other docs to use other_doc_edits negative sampling")
            negatives = distractor_pool.copy()
        else:
            raise ValueError("Unknown negative_sample_method {}".format(negative_sample_method))
        negatives = _filter_edits_by_type(negatives, config["candidate_edit_type"], min_length=config["candidate_min_chars"])
rng.shuffle(negatives)
if len(negatives) <= max_negatives:
final_negatives = negatives
elif config["hard_negative_strategy"] == "none" or hard_negative_ratio == 0:
final_negatives = negatives
if hard_negative_ratio != 0:
logger.warning(
"hard_negative_ratio was {} but hard_negative_strategy is {}; no hard negatives will be used".format(
hard_negative_ratio, config["hard_negative_strategy"]
)
)
else:
hard_negatives = _get_hard_negatives(negatives, positives, strategy=config["hard_negative_strategy"])[:max_negatives]
n_hard = min(len(hard_negatives), int(max_negatives * hard_negative_ratio))
n_easy = max_negatives - n_hard
# note: Could potentially duplicate an example between easy and
# hard negatives since hard are just sorted; maybe try to dedup
final_negatives = negatives[:n_easy] + hard_negatives[:n_hard]
final_negatives = final_negatives[:max_negatives]
comment = review_comments_by_id[record["comment_id"]]
example = {
"source_pdf_id": edits.s2orc1["paper_id"],
"target_pdf_id": edits.s2orc2["paper_id"],
"doc_id": doc_id,
"comment_id": comment["comment_id"],
"review_comment": comment["comment"],
"context": comment["comment_context"],
"context_side": comment.get("context_side", None),
"positives": positives,
"negatives": final_negatives,
}
if example["context_side"] is None:
if example["context"] != "" and example["context"].strip().startswith("["):
example["context_side"] = "right"
else:
example["context_side"] = "left"
examples.append(example)
return examples
def _get_hard_negatives(negatives, positives, strategy="none"):
"""Returns the negatives sorted by hardness, and possibly also filtered by hardness"""
if len(positives) == 0 or strategy == "none":
return []
elif strategy == "length":
pos_lengths = [len(x.get_target_text()) for x in positives]
return sorted(negatives, key=lambda x: min(abs(len(x.get_target_text()) - pl) for pl in pos_lengths))
elif strategy == "aggregate_unigram_overlap":
all_pos_tokens = collections.Counter(itertools.chain(*[x.get_target_text().lower().split() for x in positives]))
return sorted(
negatives, key=lambda x: -aries.util.data.counter_jaccard(all_pos_tokens, collections.Counter(x.get_target_text().lower().split()))
)
raise ValueError("Unknown strategy {}".format(strategy))
def init_data(config):
review_comments = list(iter_jsonl_files([config["review_comments_file"]]))
review_comments_by_doc = index_by(review_comments, "doc_id")
paper_edits = iter_jsonl_files([config["paper_edits_file"]])
paper_edits_by_doc = index_by(paper_edits, "doc_id", one_to_one=True)
for doc_id, s2orc1, s2orc2 in aries.util.s2orc.iter_s2orc_pairs(config["s2orc_base_path"], [x[1] for x in sorted(paper_edits_by_doc.items())]):
paper_edits_by_doc[doc_id] = DocEdits.from_list(s2orc1, s2orc2, paper_edits_by_doc[doc_id]["edits"])
all_data = dict()
all_data["dev"] = make_revision_alignment_prediction_data(
config,
review_comments_by_doc,
paper_edits_by_doc,
config["dev_edit_labels_file"],
max_negatives=config.get("dev_max_negatives", config["max_negatives"]),
seed=config.get("dev_seed", config["seed"]),
negative_sample_method=config.get("dev_negative_sample_method", config["default_negative_sample_method"]),
)
logger.info("dev data size: {}".format(len(all_data["dev"])))
all_data["test"] = make_revision_alignment_prediction_data(
config,
review_comments_by_doc,
paper_edits_by_doc,
config["test_edit_labels_file"],
max_negatives=9999,
seed=config["seed"],
negative_sample_method=config.get("test_negative_sample_method", config["default_negative_sample_method"]),
)
logger.info("test data size: {}".format(len(all_data["test"])))
all_data["train"] = make_revision_alignment_prediction_data(
config,
review_comments_by_doc,
paper_edits_by_doc,
config["train_edit_labels_file"],
max_negatives=config["max_negatives"],
seed=config["seed"],
negative_sample_method=config.get("train_negative_sample_method", config["default_negative_sample_method"]),
)
logger.info("train data size: {}".format(len(all_data["train"])))
return all_data
def augment_config(config):
config_defaults = {
"max_negatives": 999999,
"candidate_edit_type": "diffs",
"candidate_min_chars": 100,
"prune_candidates": False,
"write_examples_on_eval": True,
"distractor_reservoir_size": 1000,
"default_negative_sample_method": "same_doc",
"train_hard_negative_ratio": 0.0,
"hard_negative_strategy": ("length" if config.get("train_hard_negative_ratio", 0.0) != 0 else "none"),
}
for k, v in config_defaults.items():
config[k] = config.get(k, v)
NEEDED_KEYS = [
"dev_edit_labels_file",
"test_edit_labels_file",
"train_edit_labels_file",
"model_pipeline",
"output_dir",
"paper_edits_file",
"review_comments_file",
"s2orc_base_path",
"seed",
]
missing_keys = [x for x in NEEDED_KEYS if x not in config]
if len(missing_keys) > 0:
raise ValueError("Missing config keys: %s" % missing_keys)
def main():
with open(sys.argv[1]) as f:
config = json.load(f)
augment_config(config)
os.makedirs(config["output_dir"], exist_ok=True)
init_logging(
logfile=os.path.join(config["output_dir"], "logging_output.log"),
level=logging.INFO,
)
transformers.set_seed(config["seed"])
all_data = init_data(config)
train_eval_main(config, all_data)
if __name__ == "__main__":
main()
| aries-master | scripts/train_revision_alignment.py |
import json
import logging
import os
import sys
import tqdm
import aries.util.data
import aries.util.s2orc
from aries.util.data import index_by, iter_jsonl_files
from aries.util.gpt3 import Gpt3CacheClient
from aries.util.logging import init_logging
logger = logging.getLogger(__name__)
def generate_edits_for_doc_comment(
doc_s2orc,
comment_record,
prompt_template,
gptcli,
):
prompt = make_gpt_prompt(doc_s2orc, comment_record, prompt_template, gptcli)
messages = [
{
"role": "system",
"content": "You are SciGPT, a research assistant that specializes in helping authors to improve their scientific papers. Follow the user's instructions carefully.",
},
{
"role": "user",
"content": prompt,
},
]
result = {
"source_pdf_id": doc_s2orc["paper_id"],
"comment_record": comment_record,
"openreview_base_pdf": "https://openreview.net/references/pdf?id={}".format(doc_s2orc["paper_id"]),
"gpt_edit": None,
}
try:
response = gptcli.chat_completion(
model="gpt-4-0314",
messages=messages,
temperature=0,
max_tokens=1024,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
result_text = response["choices"][0]["message"]["content"]
except Exception:
logging.exception("Error generating edit for doc_id={}".format(doc_s2orc["paper_id"]))
return result, None
parsed_result = parse_result_text(result_text)
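    # The structured parse is computed but not stored; the record keeps the raw model output from the first "Location:" tag onward.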
result["gpt_edit"] = result_text[result_text.find("Location:") :]
return result, response
def make_gpt_prompt(doc_s2orc, comment_record, template, gptcli):
abstract = doc_s2orc["pdf_parse"]["abstract"][0]["text"]
body_text_blob = ""
prev_section = "unknown"
for idx, x in enumerate(doc_s2orc["pdf_parse"]["body_text"]):
secheader = ""
secname = x["section"] if x["section"] else "unknown"
if secname != prev_section:
secheader = "section: {}\n".format(secname)
prev_section = secname
newtext = "{}{}\nparagraph id: {}\n\n".format(secheader, x["text"], idx)
if gptcli.estimate_num_tokens(body_text_blob + newtext, model="gpt-4") < 6 * (2**10):
body_text_blob += newtext
comment_with_context = comment_record["comment"].strip()
if comment_record["comment_context"] != "":
comment_with_context += "\ncontext: {}".format(comment_record["comment_context"])
variables = {
"__abstract": abstract,
#'__comment': comment_record['comment'],
"__comment_with_context": comment_with_context,
"__body_chunk": body_text_blob,
#'__full_review': None,
}
s = template.format(**variables)
return s
def parse_result_text(result_text):
result = {"response": "", "edits": []}
lines = result_text.split("\n")
i = 0
while i < len(lines):
line = lines[i].strip()
if line.startswith("Response:"):
if result["response"] != "":
raise ValueError("Multiple 'Response' tags")
result["response"] = line[9:].strip()
i += 1
elif line.startswith("Location:"):
location = line[9:].strip()
i += 1
while i < len(lines) and not lines[i].strip().startswith("Edit:"):
location += "\n" + lines[i].strip()
i += 1
if i < len(lines) and lines[i].strip().startswith("Edit:"):
edit = lines[i][5:].strip()
i += 1
while i < len(lines) and not lines[i].strip().startswith("Location:"):
edit += "\n" + lines[i].strip()
i += 1
result["edits"].append({"location": location.strip(), "edit": edit.strip()})
else:
i += 1
return result
def augment_config(config):
config_defaults = {
"seed": 42,
}
for k, v in config_defaults.items():
config[k] = config.get(k, v)
NEEDED_KEYS = ["s2orc_base_path", "cache_db_path", "output_dir", "split_ids_file", "split_name", "review_comments_file"]
missing_keys = [x for x in NEEDED_KEYS if x not in config]
if len(missing_keys) > 0:
raise ValueError("Missing config keys: %s" % missing_keys)
def main():
with open(sys.argv[1]) as f:
config = json.load(f)
augment_config(config)
os.makedirs(config["output_dir"], exist_ok=True)
init_logging(
logfile=os.path.join(config["output_dir"], "logging_output.log"),
level=logging.INFO,
)
with open(config["split_ids_file"]) as f:
pdf_pair_ids = json.load(f)[config["split_name"]]
pair_ids_by_doc = index_by(pdf_pair_ids, "doc_id", one_to_one=True)
review_comments = [x for x in iter_jsonl_files([config["review_comments_file"]]) if x["doc_id"] in pair_ids_by_doc]
review_comments_by_docid = index_by(review_comments, "doc_id")
all_outputs = []
tt = 0
utt = 0
with aries.util.s2orc.S2orcFetcherSqlite(
config.get("s2orc_db_path", ":memory:"),
fallback_fetcher=aries.util.s2orc.S2orcFetcherFilesystem(config["s2orc_base_path"]),
update_db=False,
) as fetcher:
with Gpt3CacheClient(config["cache_db_path"]) as gptcli:
with tqdm.trange(sum(map(len, review_comments_by_docid.values()))) as pbar:
for doc_id, comment_records in review_comments_by_docid.items():
doc_s2orc = fetcher.get(pair_ids_by_doc[doc_id]["source_pdf_id"])
for idx, comment_record in enumerate(comment_records):
record, response = generate_edits_for_doc_comment(doc_s2orc, comment_record, config["prompt_template"], gptcli)
all_outputs.append(record)
if response is None:
raise ValueError("GPT returned no response")
tt += response["usage"]["total_tokens"]
utt += response["usage"]["uncached_total_tokens"]
pbar.set_description("tt={} utt={}, doc={}".format(tt, utt, doc_s2orc["paper_id"]))
pbar.update(1)
with open(os.path.join(config["output_dir"], "edits.jsonl"), "w") as f:
for record in all_outputs:
f.write(json.dumps(record) + "\n")
if __name__ == "__main__":
main()
| aries-master | scripts/generate_edits.py |
import unittest
import datastore
from typing import *
class TestDatastore(unittest.TestCase):
"""These tests require access to the public AI2 datastore."""
def test_download_file(self):
p = datastore.public.file("org.allenai.datastore", "DatastoreCli.jar", 1)
assert p.is_file()
def test_download_directory(self):
p = datastore.public.directory("org.allenai.datastore", "TestDirectory", 1)
assert p.is_dir()
assert (p / "1.gif").is_file()
| datastore-master | python/datastore_test.py |
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='datastore',
version='3.2.1',
description='Store for immutable objects in S3',
long_description=long_description,
url='https://github.com/allenai/datastore',
author='Dirk Groeneveld',
author_email='[email protected]',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3'
],
py_modules=['datastore'],
test_suite="datastore_test",
install_requires=['botocore', 'boto3'],
project_urls={
'Bug Reports': 'https://github.com/allenai/datastore/issues',
'Funding': 'https://allenai.org',
'Source': 'https://github.com/allenai/datastore',
},
python_requires='>=3'
)
| datastore-master | python/setup.py |
#!/usr/bin/python3
import logging
from typing import *
import boto3
import platform
from pathlib import Path
import os
import time
import atexit
import shutil
import tempfile
import zipfile
import botocore
def _mkpath(p: Path) -> None:
p.mkdir(mode=0o755, parents=True, exist_ok=True)
#
# Cleanup stuff
#
_cleanup_paths: Set[Path] = set()
def _cleanup_cleanup_paths() -> None:
global _cleanup_paths
for path in _cleanup_paths:
assert path.is_absolute() # safety
shutil.rmtree(path)
_cleanup_paths = set()
atexit.register(_cleanup_cleanup_paths)
def remember_cleanup(p: Union[Path, str]) -> None:
global _cleanup_paths
if type(p) is str:
p = Path(p)
_cleanup_paths.add(p.absolute())
def forget_cleanup(p: Union[Path, str]) -> None:
global _cleanup_paths
if type(p) is str:
p = Path(p)
_cleanup_paths.remove(p)
#
# Datastore stuff
#
_s3 = boto3.resource('s3')
_s3_public = boto3.resource('s3')
from botocore.handlers import disable_signing
_s3_public.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
class Locator(NamedTuple):
group: str
name: str
version: int
directory: bool
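    # S3 object names encode the version: directories are stored as "<name>-d<version>.zip"; files get "-v<version>" inserted before their extension (or appended if there is none).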
def name_with_version(self) -> str:
if self.directory:
return f"{self.name}-d{self.version}.zip"
else:
try:
last_dot_index = self.name.rindex('.')
except ValueError:
return f"{self.name}-v{self.version}"
return f"{self.name[:last_dot_index]}-v{self.version}{self.name[last_dot_index:]}"
def s3_key(self):
return f"{self.group}/{self.name_with_version()}"
def local_cache_key(self):
r = self.s3_key()
if self.directory and r.endswith(".zip"):
r = r[:-(len(".zip"))]
return r
def flat_local_cache_key(self):
return self.local_cache_key().replace('/', '%')
class DsError(Exception):
def __init__(self, locator: Locator):
self.locator = locator
class DoesNotExistError(DsError):
pass
class AlreadyExistsError(DsError):
pass
class AccessDeniedError(DsError):
pass
class Datastore:
def __init__(self, name: str):
self.cache_dir = os.environ.get('AI2_DATASTORE_DIR')
if self.cache_dir is not None:
self.cache_dir = Path(self.cache_dir)
else:
self.cache_dir = Path.home()
if platform.system() == 'Darwin':
self.cache_dir = self.cache_dir / "Library" / "Caches" / "org.allenai.datastore"
elif platform.system() == 'Linux':
self.cache_dir = self.cache_dir / ".ai2" / "datastore"
else:
raise ValueError("Unsupported platform: %s" % platform.system())
self.temp_dir = self.cache_dir / "tmp" # temp has to be on the same filesystem as the cache itself, so that's why we put it here
self.cache_dir = self.cache_dir / name
_mkpath(self.temp_dir)
s3 = _s3
if name == "public":
s3 = _s3_public
self.bucket = s3.Bucket(f"{name}.store.dev.allenai.org")
self.logger = logging.getLogger(f"org.allenai.datastore.Datastore.{name}")
def _local_cache_path(self, locator: Locator) -> Path:
return self.cache_dir / locator.local_cache_key()
def _lockfile_path(self, locator: Locator) -> Path:
return self.cache_dir / (locator.local_cache_key() + ".lock")
def _wait_for_lockfile(self, lockfile_path: Path) -> None:
"""Wait until the given lockfile no longer exists."""
if not lockfile_path.exists():
return
# The first second is free.
start = time.time()
time.sleep(1)
if not lockfile_path.exists():
return
        # After the first second, we print one message, then we stay silent until 16 minutes have
        # passed, at which point we print a message every minute.
def time_elapsed() -> float:
return time.time() - start
self.logger.info("Starting to wait for %s", lockfile_path)
next_message_time = time.time() + 16 * 60
while lockfile_path.exists():
if next_message_time - time.time() < 0:
self.logger.warning(
"Lockfile %s has been blocked for %.0f seconds",
lockfile_path,
time_elapsed())
next_message_time = time.time() + 60
time.sleep(1)
def exists(self, locator: Locator) -> bool:
try:
object = self.bucket.Object(locator.s3_key())
object.load()
except botocore.exceptions.ClientError as e:
e = e.response
if e is None:
raise
e = e.get('Error')
if e is None:
raise
e = e.get('Code')
if e is None:
raise
if e == '404':
return False
else:
raise
return True
def file(self, group: str, name: str, version: int) -> Path:
return self.path(Locator(group, name, version, False))
def directory(self, group: str, name: str, version: int) -> Path:
return self.path(Locator(group, name, version, True))
def path(self, locator: Locator) -> Path:
_mkpath(self.cache_dir)
lockfile_path = self._lockfile_path(locator)
_mkpath(lockfile_path.parent)
self._wait_for_lockfile(lockfile_path)
local_cache_path = self._local_cache_path(locator)
if locator.directory and local_cache_path.is_dir():
return local_cache_path
elif not locator.directory and local_cache_path.is_file():
return local_cache_path
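        # Not cached yet: atomically create the lockfile; if another process already holds it, retry, which waits on the lockfile again.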
try:
lockfile_path.touch(mode=0o644, exist_ok=False)
except FileExistsError:
return self.path(locator)
remember_cleanup(lockfile_path)
try:
# We're downloading to a temp file first. If we were downloading into the file directly,
# and we died half-way through the download, we'd leave half a file, and that's not
# good.
temp_file = tempfile.NamedTemporaryFile(
dir=self.temp_dir,
prefix=f"ai2-datastore-{locator.flat_local_cache_key()}",
suffix=".tmp",
delete=False)
try:
remember_cleanup(temp_file.name)
try:
# TODO: retries
self.bucket.download_fileobj(locator.s3_key(), temp_file)
except botocore.exceptions.ClientError as e:
e = e.response
if e is None:
raise
e = e.get('Error')
if e is None:
raise
e = e.get('Code')
if e is None:
raise
if e == '404':
raise DoesNotExistError(locator)
else:
raise
temp_file.seek(0)
if locator.directory:
temp_zip_dir = tempfile.mkdtemp(
dir=self.temp_dir,
prefix=f"ai2-datastore-{locator.flat_local_cache_key()}")
remember_cleanup(temp_zip_dir)
with zipfile.ZipFile(temp_file) as zip_file:
zip_file.extractall(temp_zip_dir)
Path(temp_zip_dir).rename(local_cache_path)
forget_cleanup(temp_zip_dir)
else:
_mkpath(local_cache_path.parent)
temp_file.close()
Path(temp_file.name).rename(local_cache_path)
forget_cleanup(temp_file.name)
temp_file = None
finally:
if temp_file is not None:
temp_file.close()
os.remove(temp_file.name)
forget_cleanup(temp_file.name)
finally:
lockfile_path.unlink()
forget_cleanup(lockfile_path)
return local_cache_path
def publish_file(self, file: Path, group: str, name: str, version: int, overwrite: bool):
self.publish(file, Locator(group, name, version, False), overwrite)
def publish_directory(self, file: Path, group: str, name: str, version: int, overwrite: bool):
self.publish(file, Locator(group, name, version, True), overwrite)
def publish(self, file: Path, locator: Locator, overwrite: bool):
if not overwrite and self.exists(locator):
raise AlreadyExistsError(locator)
if locator.directory:
with tempfile.NamedTemporaryFile(
dir=self.temp_dir,
prefix=locator.flat_local_cache_key(),
suffix=".ai2-datastore.upload.zip"
) as zip_file_base:
with zipfile.ZipFile(zip_file_base, mode="w", compression=zipfile.ZIP_DEFLATED) as zip_file:
# This does not write out empty directories. I don't know if that's possible in
# zip files, but the Scala code suggests that it is.
for path in file.rglob("*"):
zip_file.write(str(path), arcname=str(path.relative_to(file)))
zip_file_base.flush()
self.bucket.upload_file(zip_file_base.name, locator.s3_key())
else:
self.bucket.upload_file(file, locator.s3_key())
# Make sure we have the file in the local cache right away.
lockfile_path = self._lockfile_path(locator)
_mkpath(lockfile_path.parent)
remember_cleanup(lockfile_path)
try:
shutil.copy(file, lockfile_path)
lockfile_path.rename(self._local_cache_path(locator))
finally:
if lockfile_path.exists():
lockfile_path.unlink()
forget_cleanup(lockfile_path)
public = Datastore("public")
private = Datastore("private")
import re
_datastore_file_with_extension = re.compile(r"^datastore://([^/]+)/([^/]+)/(.+)-v(\d+)\.(.*)$")
_datastore_file_without_extension = re.compile(r"^datastore://([^/]+)/([^/]+)/(.+)-v(\d+)$")
_datastore_directory = re.compile(r"^datastore://([^/]+)/([^/]+)/(.+)-d(\d+)(?:/(.*))?$")
_datastore_map = {
"public": public,
"private": private
}
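# resolve_url() understands URLs of the form datastore://<public|private>/<group>/<name>-v<version>[.<ext>] for files
# and datastore://<public|private>/<group>/<name>-d<version>[/<path inside the directory>] for directories;
# anything that does not match is returned unchanged.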
def resolve_url(url_or_filename: str) -> str:
match = _datastore_file_with_extension.match(url_or_filename)
if match:
(ds, group, name, version, extension) = match.groups()
ds = _datastore_map[match.groups()[0]]
name = "%s.%s" % (name, extension)
version = int(version)
return str(ds.file(group, name, version))
match = _datastore_file_without_extension.match(url_or_filename)
if match:
(ds, group, name, version) = match.groups()
ds = _datastore_map[match.groups()[0]]
version = int(version)
return str(ds.file(group, name, version))
match = _datastore_directory.match(url_or_filename)
if match:
(ds, group, name, version, file_inside_directory) = match.groups()
ds = _datastore_map[match.groups()[0]]
version = int(version)
return str(ds.directory(group, name, version) / file_inside_directory)
return url_or_filename
| datastore-master | python/datastore.py |
import sys
import os
sys.path.append(os.path.abspath(os.path.join("..", "nla_semparse")))
from nla_semparse.nla_metric import NlaMetric
def test_metric_basic():
metric = NlaMetric()
metric(['2'], ['2'])
assert metric.get_metric() == {"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 1.0}
metric.reset()
def test_metric_one_operation():
metric = NlaMetric()
metric(['(add 2 3)'], ['(add 2 3)'])
assert metric.get_metric() == {"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 1.0}
metric.reset()
metric(['(add 2 3)'], ['5'])
assert metric.get_metric() == {"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 0.0}
metric.reset()
metric(['(add 2 3)'], ['(add 1 4)'])
assert metric.get_metric() == {"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 0.0}
metric.reset()
metric(['(add 2 3)'], ['(subtract 1 4)'])
assert metric.get_metric() == {"well_formedness": 1.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0}
metric.reset()
def test_metric_ill_formed_sequences():
metric = NlaMetric()
metric(['(add 2)'], ['(add 2 3)'])
assert metric.get_metric() == {"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0}
metric.reset()
metric(['(add 2))'], ['(add 2 3)'])
assert metric.get_metric() == {"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0}
metric.reset()
metric(['()'], ['(add 2 3)'])
assert metric.get_metric() == {"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0}
metric.reset()
def test_metric_real_cases():
predictions1 = ['(subtract (multiply (((((((((())))))',
'(subtract (add ((multiply (((()))))))))']
predictions2 = ['9', '9']
predictions3 = ['(subtract (multiply (((((((((())))))', '9']
targets = ['(add (add (multiply 5 2) (divide 2 7)) (add (add 7 7) (multiply 3 (multiply 6 6))))',
'(subtract (add 8 7) (subtract (add (add 6 (divide 7 7)) 7) (multiply (divide 5 4) 8)))']
metric = NlaMetric()
metric(predictions1, targets)
assert metric.get_metric() == {"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0}
metric.reset()
metric(predictions2, targets)
assert metric.get_metric() == {"well_formedness": 1.0,
"denotation_accuracy": 0.5,
"sequence_accuracy": 0.0}
metric.reset()
metric(predictions3, targets)
assert metric.get_metric() == {"well_formedness": 0.5,
"denotation_accuracy": 0.5,
"sequence_accuracy": 0.0}
metric.reset()
metric(targets, targets)
assert metric.get_metric() == {"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 1.0}
metric.reset()
| allennlp-guide-examples-master | nla_semparse/tests/nla_metric_test.py |
allennlp-guide-examples-master | nla_semparse/nla_semparse/__init__.py |
|
from allennlp_semparse import DomainLanguage, predicate
class NlaLanguage(DomainLanguage):
def __init__(self):
super().__init__(
start_types={int},
allowed_constants={
"0": 0,
"1": 1,
"2": 2,
"3": 3,
"4": 4,
"5": 5,
"6": 6,
"7": 7,
"8": 8,
"9": 9
}
)
@predicate
def add(self, num1: int, num2: int) -> int:
return num1 + num2
@predicate
def subtract(self, num1: int, num2: int) -> int:
return num1 - num2
@predicate
def multiply(self, num1: int, num2: int) -> int:
return num1 * num2
@predicate
def divide(self, num1: int, num2: int) -> int:
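        # Integer division; division by zero is defined to return 0 rather than raising.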
return num1 // num2 if num2 != 0 else 0
| allennlp-guide-examples-master | nla_semparse/nla_semparse/nla_language.py |
from typing import Dict, List, Optional
from overrides import overrides
from allennlp.training.metrics.metric import Metric
from allennlp_semparse.domain_languages.domain_language import ExecutionError
from .nla_language import NlaLanguage
@Metric.register('nla_metric')
class NlaMetric(Metric):
"""
Metric for evaluating prefix arithmetic sequences against targets, useful for Natural Language Arithmetic
    parsing. This metric evaluates predicted sequences on three things: 1) whether the predicted sequence is a
    well-formed prefix arithmetic expression, 2) whether the predicted sequence and the target sequence evaluate
to the same value, 3) whether the predicted sequence and the target sequence are identical.
"""
def __init__(self):
self._language = NlaLanguage()
self._num_well_formed = 0
self._num_correct_denotation = 0
self._num_same_sequence = 0
self._num_all_sequences = 0
@overrides
def __call__(self, predictions, targets) -> None:
for prediction, target in zip(predictions, targets):
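            # Predictions and targets may arrive as token lists with spaced parentheses; normalize them into executable expression strings first.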
if isinstance(prediction, list):
prediction = " ".join(prediction).replace("( ", "(").replace(" )", ")")
target = " ".join(target).replace("( ", "(").replace(" )", ")")
if isinstance(prediction, str) and not prediction.startswith('('):
prediction = f"({prediction})"
if isinstance(target, str) and not target.startswith('('):
target = f"({target})"
evaluated_prediction = None
evaluated_target = None
try:
evaluated_target = self._language.execute(target)
evaluated_prediction = self._language.execute(prediction)
except (TypeError, ExecutionError, IndexError):
pass
if isinstance(evaluated_prediction, int):
self._num_well_formed += 1
if evaluated_prediction == evaluated_target:
self._num_correct_denotation += 1
if prediction == target:
self._num_same_sequence += 1
self._num_all_sequences += 1
@overrides
def get_metric(self, reset: bool=False) -> Dict[str, float]:
if self._num_all_sequences == 0:
metrics = {"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0}
else:
metrics = {"well_formedness": self._num_well_formed / self._num_all_sequences,
"denotation_accuracy": self._num_correct_denotation / self._num_all_sequences,
"sequence_accuracy": self._num_same_sequence / self._num_all_sequences}
if reset:
self.reset()
return metrics
@overrides
def reset(self):
self._num_well_formed = 0
self._num_same_sequence = 0
self._num_correct_denotation = 0
self._num_all_sequences = 0
| allennlp-guide-examples-master | nla_semparse/nla_semparse/nla_metric.py |
import sys
import os
import random
import math
import argparse
from typing import List, Dict
sys.path.append(os.path.abspath(os.path.join('..', 'nla_semparse')))
from nla_semparse.nla_language import NlaLanguage
class DataGenerator:
"""
Generator for data points for natural language arithmetic.
"""
def __init__(self):
self.language = NlaLanguage()
self.numbers = [
{'meaning': '0', 'translation': 'zero'},
{'meaning': '1', 'translation': 'one'},
{'meaning': '2', 'translation': 'two'},
{'meaning': '3', 'translation': 'three'},
{'meaning': '4', 'translation': 'four'},
{'meaning': '5', 'translation': 'five'},
{'meaning': '6', 'translation': 'six'},
{'meaning': '7', 'translation': 'seven'},
{'meaning': '8', 'translation': 'eight'},
{'meaning': '9', 'translation': 'nine'},
]
# The order below defines precedence (in ascending order).
self.operators = [
{'meaning': 'subtract', 'translation': 'minus'},
{'meaning': 'add', 'translation': 'plus'},
{'meaning': 'multiply', 'translation': 'times'},
{'meaning': 'divide', 'translation': 'over'},
]
def generate_expression(self,
num_operations: int,
allowed_operators: List[Dict] = None):
"""
Generates a single expression that contains the given number of operations.
"""
if num_operations == 0:
return random.sample(self.numbers, 1)[0]
# Expressions will be of the type (OP EXP1 EXP2)
if allowed_operators is None:
allowed_operators = self.operators
operator_index = random.randint(0, len(allowed_operators) - 1)
operator = allowed_operators[operator_index]
# Decide how many operators will be in each of EXP1 and EXP2
random_value = random.random()
num_operations_for_first = int(num_operations * random_value)
num_operations_for_second = num_operations - num_operations_for_first - 1
# The operations in the children will be the same as the operator already sampled, or one of a higher
# precedence.
first_argument = self.generate_expression(num_operations_for_first,
allowed_operators[operator_index:])
second_argument = self.generate_expression(num_operations_for_second,
allowed_operators[operator_index:])
meaning_representation_parts = [operator["meaning"],
first_argument["meaning"],
second_argument["meaning"]]
meaning_representation = '(' + " ".join(meaning_representation_parts) + ')'
return {"meaning": meaning_representation,
"translation": " ".join([first_argument["translation"], operator["translation"],
second_argument["translation"]]),
"denotation": self.language.execute(meaning_representation)}
def generate_data(self,
num_expressions: int,
min_num_operations: int = 1,
max_num_operations: int = 10,
split_data: bool = False,
train_proportion: float = 0.8,
test_proportion: float = 0.1):
"""
        Returns ``num_expressions`` expressions, each containing a number of operations in the range
        ``(min_num_operations, max_num_operations)``. Optionally, you can also have the data split into
        train, test, and dev sets, and specify their proportions.
"""
data = []
while len(data) < num_expressions:
num_operations = random.randint(min_num_operations, max_num_operations)
try:
expression = self.generate_expression(num_operations)
data.append(expression)
except ZeroDivisionError:
pass
if not split_data:
return {"data": data}
test_size = math.ceil(test_proportion * num_expressions)
if train_proportion + test_proportion < 1.0:
dev_size = math.ceil((1 - (train_proportion + test_proportion)) * num_expressions)
else:
dev_size = 0
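        # The generated list is carved up in order: the first chunk is test, the next is dev, and the remainder is train.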
return {"test": data[:test_size],
"dev": data[test_size:test_size+dev_size],
"train": data[test_size+dev_size:]}
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--num-expressions', type=int, required=True, dest="num_expressions",
help="Number of expressions to generate")
parser.add_argument('--min-num-operations', type=int, dest="min_num_operations", default=1)
parser.add_argument('--max-num-operations', type=int, dest="max_num_operations", default=10)
parser.add_argument('--output', type=str, required=True,
help="""Location where output will be written. If splitting data, the name of the split
will be appended to the file name""")
parser.add_argument('--split-data', action="store_true", dest="split")
parser.add_argument('--train-proportion', type=float, dest="train_proportion",
help='How big should the train split be? (Between 0 and 1)')
parser.add_argument('--test-proportion', type=float, dest="test_proportion",
help="""How big should the test split be? (Between 0 and 1). Will also make a dev split
if train_proportion + test_proportion < 1""")
parser.add_argument('--no-meaning', action="store_true", dest="no_meaning",
help="Generated data will have denotations instead of meaning")
args = parser.parse_args()
if args.no_meaning:
raise NotImplementedError
data_generator = DataGenerator()
data = data_generator.generate_data(num_expressions=args.num_expressions,
min_num_operations=args.min_num_operations,
max_num_operations=args.max_num_operations,
split_data=args.split,
train_proportion=args.train_proportion,
test_proportion=args.test_proportion)
if args.split:
filename_parts = args.output.split(".")
assert len(filename_parts) == 2, "Cannot decide how to alter the file name. Expected just one ."
train_file_name = f"{filename_parts[0]}_train.{filename_parts[1]}"
dev_file_name = f"{filename_parts[0]}_dev.{filename_parts[1]}"
test_file_name = f"{filename_parts[0]}_test.{filename_parts[1]}"
with open(train_file_name, "w") as output_file:
for datum in data["train"]:
source = datum["translation"]
target = datum["meaning"].replace("(", "( ").replace(")", " )")
print(f"{source}\t{target}", file=output_file)
with open(dev_file_name, "w") as output_file:
for datum in data["dev"]:
source = datum["translation"]
target = datum["meaning"].replace("(", "( ").replace(")", " )")
print(f"{source}\t{target}", file=output_file)
with open(test_file_name, "w") as output_file:
for datum in data["test"]:
source = datum["translation"]
target = datum["meaning"].replace("(", "( ").replace(")", " )")
print(f"{source}\t{target}", file=output_file)
else:
with open(args.output, "w") as output_file:
for datum in data["data"]:
source = datum["translation"]
target = datum["meaning"].replace("(", "( ").replace(")", " )")
print(f"{source}\t{target}", file=output_file)
if __name__ == "__main__":
main()
| allennlp-guide-examples-master | nla_semparse/scripts/generate_data.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
import allennlp
from allennlp.common import JsonDict
from allennlp.data import DataLoader, DatasetReader, Instance
from allennlp.data import Vocabulary
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.predictors import Predictor
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.trainer import Trainer, GradientDescentTrainer
from allennlp.training.optimizers import AdamOptimizer
class ClassificationTsvReader(DatasetReader):
def __init__(self,
lazy: bool = False,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None):
super().__init__(lazy)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance:
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[:self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields = {'text': text_field}
if label:
fields['label'] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, 'r') as lines:
for line in lines:
text, sentiment = line.strip().split('\t')
yield self.text_to_instance(text, sentiment)
class SimpleClassifier(Model):
def __init__(self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
encoder: Seq2VecEncoder):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(self,
text: Dict[str, torch.Tensor],
label: torch.Tensor = None) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
output = {'probs': probs}
if label is not None:
self.accuracy(logits, label)
# Shape: (1,)
output['loss'] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(
reader: DatasetReader
) -> Tuple[Iterable[Instance], Iterable[Instance]]:
print("Reading data")
training_data = reader.read("quick_start/data/movie_review/train.tsv")
validation_data = reader.read("quick_start/data/movie_review/dev.tsv")
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)})
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def build_data_loaders(
train_data: torch.utils.data.Dataset,
dev_data: torch.utils.data.Dataset,
) -> Tuple[allennlp.data.DataLoader, allennlp.data.DataLoader]:
# Note that DataLoader is imported from allennlp above, *not* torch.
# We need to get the allennlp-specific collate function, which is
# what actually does indexing and batching.
train_loader = DataLoader(train_data, batch_size=8, shuffle=True)
dev_loader = DataLoader(dev_data, batch_size=8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader
) -> Trainer:
parameters = [
[n, p]
for n, p in model.named_parameters() if p.requires_grad
]
optimizer = AdamOptimizer(parameters)
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
def run_training_loop():
dataset_reader = build_dataset_reader()
# These are a subclass of pytorch Datasets, with some allennlp-specific
# functionality added.
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
# This is the allennlp-specific functionality in the Dataset object;
# we need to be able convert strings in the data to integers, and this
# is how we do it.
train_data.index_with(vocab)
dev_data.index_with(vocab)
# These are again a subclass of pytorch DataLoaders, with an
# allennlp-specific collate function, that runs our indexing and
# batching code.
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this course, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(
model,
serialization_dir,
train_loader,
dev_loader
)
trainer.train()
return model, dataset_reader
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
# We've copied the training loop from an earlier example, with updated model
# code, above in the Setup section. We run the training loop to get a trained
# model.
model, dataset_reader = run_training_loop()
vocab = model.vocab
predictor = SentenceClassifierPredictor(model, dataset_reader)
output = predictor.predict('A good movie!')
print([(vocab.get_token_from_index(label_id, 'labels'), prob)
for label_id, prob in enumerate(output['probs'])])
output = predictor.predict('This was a monstrous waste of time.')
print([(vocab.get_token_from_index(label_id, 'labels'), prob)
for label_id, prob in enumerate(output['probs'])])
| allennlp-guide-examples-master | quick_start/predict.py |
allennlp-guide-examples-master | quick_start/__init__.py |
|
import tempfile
from typing import Dict, Iterable, List, Tuple
import allennlp
import torch
from allennlp.data import DataLoader, DatasetReader, Instance, Vocabulary
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.nn import util
from allennlp.training.trainer import GradientDescentTrainer, Trainer
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.metrics import CategoricalAccuracy
class ClassificationTsvReader(DatasetReader):
def __init__(self,
lazy: bool = False,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None):
super().__init__(lazy)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, 'r') as lines:
for line in lines:
text, sentiment = line.strip().split('\t')
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[:self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(sentiment)
fields = {'text': text_field, 'label': label_field}
yield Instance(fields)
class SimpleClassifier(Model):
def __init__(self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
encoder: Seq2VecEncoder):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
def forward(self,
text: Dict[str, torch.Tensor],
label: torch.Tensor) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
return {'loss': loss, 'probs': probs}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(
reader: DatasetReader
) -> Tuple[Iterable[Instance], Iterable[Instance]]:
print("Reading data")
training_data = reader.read("quick_start/data/movie_review/train.tsv")
validation_data = reader.read("quick_start/data/movie_review/dev.tsv")
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)})
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def run_training_loop():
dataset_reader = build_dataset_reader()
# These are a subclass of pytorch Datasets, with some allennlp-specific
# functionality added.
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
# This is the allennlp-specific functionality in the Dataset object;
# we need to be able convert strings in the data to integers, and this
# is how we do it.
train_data.index_with(vocab)
dev_data.index_with(vocab)
# These are again a subclass of pytorch DataLoaders, with an
# allennlp-specific collate function, that runs our indexing and
# batching code.
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this course, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(
model,
serialization_dir,
train_loader,
dev_loader
)
print("Starting training")
trainer.train()
print("Finished training")
# The other `build_*` methods are things we've seen before, so they are
# in the setup section above.
def build_data_loaders(
train_data: torch.utils.data.Dataset,
dev_data: torch.utils.data.Dataset,
) -> Tuple[allennlp.data.DataLoader, allennlp.data.DataLoader]:
# Note that DataLoader is imported from allennlp above, *not* torch.
# We need to get the allennlp-specific collate function, which is
# what actually does indexing and batching.
train_loader = DataLoader(train_data, batch_size=8, shuffle=True)
dev_loader = DataLoader(dev_data, batch_size=8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader
) -> Trainer:
parameters = [
[n, p]
for n, p in model.named_parameters() if p.requires_grad
]
optimizer = AdamOptimizer(parameters)
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
run_training_loop()
| allennlp-guide-examples-master | quick_start/train.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
import allennlp
from allennlp.data import DataLoader, DatasetReader, Instance, Vocabulary
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.trainer import Trainer, GradientDescentTrainer
from allennlp.training.util import evaluate
class ClassificationTsvReader(DatasetReader):
def __init__(self,
lazy: bool = False,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None):
super().__init__(lazy)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, 'r') as lines:
for line in lines:
text, sentiment = line.strip().split('\t')
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[:self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(sentiment)
fields = {'text': text_field, 'label': label_field}
yield Instance(fields)
class SimpleClassifier(Model):
def __init__(self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
encoder: Seq2VecEncoder):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(self,
text: Dict[str, torch.Tensor],
label: torch.Tensor) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
self.accuracy(logits, label)
output = {'loss': loss, 'probs': probs}
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(
reader: DatasetReader
) -> Tuple[Iterable[Instance], Iterable[Instance]]:
print("Reading data")
training_data = reader.read("quick_start/data/movie_review/train.tsv")
validation_data = reader.read("quick_start/data/movie_review/dev.tsv")
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)})
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def build_data_loaders(
train_data: torch.utils.data.Dataset,
dev_data: torch.utils.data.Dataset,
) -> Tuple[allennlp.data.DataLoader, allennlp.data.DataLoader]:
# Note that DataLoader is imported from allennlp above, *not* torch.
# We need to get the allennlp-specific collate function, which is
# what actually does indexing and batching.
train_loader = DataLoader(train_data, batch_size=8, shuffle=True)
dev_loader = DataLoader(dev_data, batch_size=8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader
) -> Trainer:
parameters = [
[n, p]
for n, p in model.named_parameters() if p.requires_grad
]
optimizer = AdamOptimizer(parameters)
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
def run_training_loop():
dataset_reader = build_dataset_reader()
# These are a subclass of pytorch Datasets, with some allennlp-specific
# functionality added.
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
# This is the allennlp-specific functionality in the Dataset object;
# we need to be able convert strings in the data to integers, and this
# is how we do it.
train_data.index_with(vocab)
dev_data.index_with(vocab)
# These are again a subclass of pytorch DataLoaders, with an
# allennlp-specific collate function, that runs our indexing and
# batching code.
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this course, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(
model,
serialization_dir,
train_loader,
dev_loader
)
trainer.train()
return model, dataset_reader
# We've copied the training loop from an earlier example, with updated model
# code, above in the Setup section. We run the training loop to get a trained
# model.
model, dataset_reader = run_training_loop()
# Now we can evaluate the model on a new dataset.
test_data = dataset_reader.read('quick_start/data/movie_review/test.tsv')
test_data.index_with(model.vocab)
data_loader = DataLoader(test_data, batch_size=8)
results = evaluate(model, data_loader)
print(results)
| allennlp-guide-examples-master | quick_start/evaluate.py |
from .dataset_readers import *
from .models import *
from .predictors import *
| allennlp-guide-examples-master | quick_start/my_text_classifier/__init__.py |
from .classification_tsv import ClassificationTsvReader
| allennlp-guide-examples-master | quick_start/my_text_classifier/dataset_readers/__init__.py |
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
@DatasetReader.register('classification-tsv')
class ClassificationTsvReader(DatasetReader):
def __init__(self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance:
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[:self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields = {'text': text_field}
if label:
fields['label'] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
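        # Each line of the TSV is "<text>\t<label>".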
with open(file_path, 'r') as lines:
for line in lines:
text, sentiment = line.strip().split('\t')
yield self.text_to_instance(text, sentiment)
| allennlp-guide-examples-master | quick_start/my_text_classifier/dataset_readers/classification_tsv.py |
from .sentence_classifier_predictor import SentenceClassifierPredictor
| allennlp-guide-examples-master | quick_start/my_text_classifier/predictors/__init__.py |
from allennlp.common import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors import Predictor
from overrides import overrides
@Predictor.register("sentence_classifier")
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
| allennlp-guide-examples-master | quick_start/my_text_classifier/predictors/sentence_classifier_predictor.py |
from .simple_classifier import SimpleClassifier
| allennlp-guide-examples-master | quick_start/my_text_classifier/models/__init__.py |
from typing import Dict
import torch
from allennlp.data import Vocabulary
from allennlp.data import TextFieldTensors
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register('simple_classifier')
class SimpleClassifier(Model):
def __init__(self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
encoder: Seq2VecEncoder):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(self,
text: TextFieldTensors,
label: torch.Tensor = None) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
        output = {'probs': probs}
        if label is not None:
            self.accuracy(logits, label)
            # Shape: (1,)
            output['loss'] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
| allennlp-guide-examples-master | quick_start/my_text_classifier/models/simple_classifier.py |
#!/usr/bin/python
import os
import setuptools
requirements_file = os.path.join(
os.path.dirname(__file__),
'requirements.txt')
requirements = open(requirements_file).read().split('\n')
requirements = [r for r in requirements if '-e' not in r]
setuptools.setup(
name='deepfigures-open',
version='0.0.1',
url='http://github.com/allenai/deepfigures-open',
packages=setuptools.find_packages(),
install_requires=requirements,
tests_require=[],
zip_safe=False,
test_suite='py.test',
entry_points='',
cffi_modules=['deepfigures/utils/stringmatch/stringmatch_builder.py:ffibuilder']
)
| deepfigures-open-master | setup.py |
"""Management commands for the deepfigures project.
``manage.py`` provides an interface to the scripts
automating development activities found in the `scripts`
directory.
See the ``scripts`` directory for examples.
"""
import logging
import sys
import click
from scripts import (
build,
detectfigures,
generatearxiv,
generatepubmed,
testunits)
logger = logging.getLogger(__name__)
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
@click.group(
context_settings={
'help_option_names': ['-h', '--help']
})
@click.option(
'--verbose', '-v',
is_flag=True,
help='Turn on verbose logging for debugging purposes.')
@click.option(
'--log-file', '-l',
type=str,
help='Log to the provided file path instead of stdout.')
def manage(verbose, log_file):
"""A high-level interface to admin scripts for deepfigures."""
log_level = logging.DEBUG if verbose else logging.INFO
if log_file:
logging.basicConfig(
filename=log_file,
filemode='a',
format=LOG_FORMAT,
level=log_level)
else:
logging.basicConfig(
stream=sys.stdout,
format=LOG_FORMAT,
level=log_level)
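# Register each admin script as a subcommand of the top-level manage group.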
subcommands = [
build.build,
detectfigures.detectfigures,
generatearxiv.generatearxiv,
generatepubmed.generatepubmed,
testunits.testunits
]
for subcommand in subcommands:
manage.add_command(subcommand)
if __name__ == '__main__':
manage()
| deepfigures-open-master | manage.py |
"""Build docker images for deepfigures.
See ``build.py --help`` for more information.
"""
import logging
import click
from deepfigures import settings
from scripts import execute
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['-h', '--help']
})
def build():
"""Build docker images for deepfigures."""
for _, docker_img in settings.DEEPFIGURES_IMAGES.items():
tag = docker_img['tag']
dockerfile_path = docker_img['dockerfile_path']
execute(
'docker build'
' --tag {tag}:{version}'
' --file {dockerfile_path} .'.format(
tag=tag,
version=settings.VERSION,
dockerfile_path=dockerfile_path),
logger)
if __name__ == '__main__':
build()
| deepfigures-open-master | scripts/build.py |
"""Detect the figures in a PDF."""
import logging
import os
import click
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['-h', '--help']
})
@click.argument(
'output_directory',
type=click.Path(file_okay=False))
@click.argument(
'pdf_path',
type=click.Path(exists=True, dir_okay=False))
def rundetection(output_directory, pdf_path):
"""Detect figures from the pdf at PDF_PATH.
Detect the figures from the pdf located at PDF_PATH and write the
detection results to the directory specified by OUTPUT_DIRECTORY.
"""
# import lazily to speed up response time for returning help text
from deepfigures.extraction import pipeline
figure_extractor = pipeline.FigureExtractionPipeline()
figure_extractor.extract(pdf_path, output_directory)
if __name__ == '__main__':
rundetection()
| deepfigures-open-master | scripts/rundetection.py |
"""Generate pubmed data for deepfigures.
See ``generatepubmed.py --help`` for more information.
"""
import logging
import click
from deepfigures import settings
from scripts import build, execute
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['-h', '--help']
})
@click.option(
'--skip-dependencies', '-s',
is_flag=True,
help='skip running dependency commands.')
def generatepubmed(skip_dependencies=True):
"""Generate pubmed data for deepfigures.
Generate the pubmed data for deepfigures, which can involve pulling
the data from S3 (which the requestor has to pay for).
"""
if not skip_dependencies:
build.build.callback()
cpu_docker_img = settings.DEEPFIGURES_IMAGES['cpu']
execute(
'docker run'
' --rm'
' --env-file deepfigures-local.env'
' --volume {LOCAL_PUBMED_DISTANT_DATA_DIR}:{LOCAL_PUBMED_DISTANT_DATA_DIR}'
' {tag}:{version}'
' python3'
' /work/deepfigures/data_generation/pubmed_pipeline.py'.format(
tag=cpu_docker_img['tag'],
version=settings.VERSION,
LOCAL_PUBMED_DISTANT_DATA_DIR=settings.LOCAL_PUBMED_DISTANT_DATA_DIR),
logger,
raise_error=True)
if __name__ == '__main__':
generatepubmed()
| deepfigures-open-master | scripts/generatepubmed.py |
"""Run figure detection on a PDF.
See ``detectfigures.py --help`` for more information.
"""
import logging
import os
import click
from deepfigures import settings
from scripts import build, execute
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['-h', '--help']
})
@click.option(
'--skip-dependencies', '-s',
is_flag=True,
help='skip running dependency commands.')
@click.argument(
'output_directory',
type=click.Path(
file_okay=False,
dir_okay=True,
resolve_path=True))
@click.argument(
'pdf_path',
type=click.Path(
exists=True,
file_okay=True,
dir_okay=False,
resolve_path=True))
def detectfigures(
output_directory,
pdf_path,
skip_dependencies=False):
"""Run figure extraction on the PDF at PDF_PATH.
Run figure extraction on the PDF at PDF_PATH and write the results
to OUTPUT_DIRECTORY.
"""
if not skip_dependencies:
build.build.callback()
cpu_docker_img = settings.DEEPFIGURES_IMAGES['cpu']
pdf_directory, pdf_name = os.path.split(pdf_path)
internal_output_directory = '/work/host-output'
internal_pdf_directory = '/work/host-input'
internal_pdf_path = os.path.join(
internal_pdf_directory, pdf_name)
execute(
'docker run'
' --rm'
' --env-file deepfigures-local.env'
' --volume {output_directory}:{internal_output_directory}'
' --volume {pdf_directory}:{internal_pdf_directory}'
' {tag}:{version}'
' python3 /work/scripts/rundetection.py'
' {internal_output_directory}'
' {internal_pdf_path}'.format(
tag=cpu_docker_img['tag'],
version=settings.VERSION,
output_directory=output_directory,
internal_output_directory=internal_output_directory,
pdf_directory=pdf_directory,
internal_pdf_directory=internal_pdf_directory,
internal_pdf_path=internal_pdf_path),
logger,
raise_error=True)
if __name__ == '__main__':
detectfigures()
| deepfigures-open-master | scripts/detectfigures.py |
"""Scripts automating development tasks for deepfigures."""
import subprocess
def execute(
command,
logger,
quiet=False,
raise_error=True):
"""Execute ``command``.
Parameters
----------
command : str
The command to execute in the shell.
    logger : logging.Logger
The logger to use for logging output about the command.
quiet : bool
Prevent the subprocess from printing output to stdout.
raise_error : bool
If ``True`` then raise an error when the command returns a
non-zero exit status, else log the error as a warning.
Returns
-------
None
"""
    if quiet:
        logger.info(
            'Executing command and suppressing stdout: {command}'.format(
command=command))
p = subprocess.Popen(
command,
stdout=subprocess.DEVNULL,
shell=True)
else:
logger.info(
'Executing: {command}'.format(
command=command))
p = subprocess.Popen(
command,
shell=True)
p.communicate()
returncode = p.returncode
if raise_error and returncode != 0:
raise subprocess.CalledProcessError(
returncode=returncode,
cmd=command)
elif not raise_error and returncode != 0:
logger.warning(
'Command: "{command}" exited with returncode'
' {returncode}'.format(
command=command,
returncode=returncode))
return None
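# Illustrative usage sketch (an addition, not part of the original module):
# how `execute` might be invoked from another script. The command shown is a
# placeholder assumption.
def _example_execute_usage():
    import logging
    example_logger = logging.getLogger('example')
    # Runs a harmless shell command; raises CalledProcessError on failure.
    execute('echo "hello deepfigures"', example_logger, quiet=False, raise_error=True)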
| deepfigures-open-master | scripts/__init__.py |
"""Generate arxiv data for deepfigures.
Generate the arxiv data for deepfigures. This data generation process
requires pulling down all the arxiv source files from S3 which the
requester (person executing this script) must pay for.
See ``generatearxiv.py --help`` for more information.
"""
import logging
import click
from deepfigures import settings
from scripts import build, execute
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['-h', '--help']
})
@click.option(
'--skip-dependencies', '-s',
is_flag=True,
help='skip running dependency commands.')
def generatearxiv(skip_dependencies=True):
"""Generate arxiv data for deepfigures.
Generate the arxiv data for deepfigures, which involves pulling the
data from S3 (which the requestor has to pay for).
"""
if not skip_dependencies:
build.build.callback()
cpu_docker_img = settings.DEEPFIGURES_IMAGES['cpu']
execute(
'docker run'
' --rm'
' --env-file deepfigures-local.env'
' --volume {ARXIV_DATA_TMP_DIR}:{ARXIV_DATA_TMP_DIR}'
' --volume {ARXIV_DATA_OUTPUT_DIR}:{ARXIV_DATA_OUTPUT_DIR}'
' {tag}:{version}'
' python3'
' /work/deepfigures/data_generation/arxiv_pipeline.py'.format(
tag=cpu_docker_img['tag'],
version=settings.VERSION,
ARXIV_DATA_TMP_DIR=settings.ARXIV_DATA_TMP_DIR,
ARXIV_DATA_OUTPUT_DIR=settings.ARXIV_DATA_OUTPUT_DIR),
logger,
raise_error=True)
if __name__ == '__main__':
generatearxiv()
| deepfigures-open-master | scripts/generatearxiv.py |
"""Run tests for deepfigures."""
import logging
import click
from scripts import execute
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['-h', '--help']
})
def runtests():
"""Run tests for deepfigures."""
# init logging
logger.setLevel(logging.INFO)
logging.basicConfig()
logger.info('Running tests for deepfigures.')
execute(
'pytest -n auto /work/deepfigures',
logger)
if __name__ == '__main__':
runtests()
| deepfigures-open-master | scripts/runtests.py |
"""Run unit tests for deepfigures.
Run unit tests for deepfigures locally in a docker container, building
the required docker images before hand.
See ``testunits.py --help`` for more information.
"""
import logging
import click
from deepfigures import settings
from scripts import build, execute
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['-h', '--help']
})
@click.option(
'--skip-dependencies', '-s',
is_flag=True,
help='skip running dependency commands.')
def testunits(skip_dependencies=False):
"""Run unit tests for deepfigures."""
if not skip_dependencies:
build.build.callback()
cpu_docker_img = settings.DEEPFIGURES_IMAGES['cpu']
execute(
'docker run'
' --rm'
' --env-file deepfigures-local.env'
' {tag}:{version}'
' python3 /work/scripts/runtests.py'.format(
tag=cpu_docker_img['tag'],
version=settings.VERSION),
logger,
raise_error=True)
if __name__ == '__main__':
testunits()
| deepfigures-open-master | scripts/testunits.py |
deepfigures-open-master | deepfigures/__init__.py |
|
"""Constants and settings for deepfigures."""
import logging
import os
logger = logging.getLogger(__name__)
# path to the deepfigures project root
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
# version number for the current release
VERSION = '0.0.1'
# descriptions of the docker images deepfigures builds
DEEPFIGURES_IMAGES = {
'cpu': {
'tag': 'deepfigures-cpu',
'dockerfile_path': os.path.join(BASE_DIR, 'dockerfiles/cpu/Dockerfile')
},
'gpu': {
'tag': 'deepfigures-gpu',
'dockerfile_path': os.path.join(BASE_DIR, 'dockerfiles/gpu/Dockerfile')
}
}
# path to the directory containing all the project-level test data.
TEST_DATA_DIR = os.path.join(BASE_DIR, 'tests/data')
# settings for PDFRenderers
DEFAULT_INFERENCE_DPI = 100
DEFAULT_CROPPED_IMG_DPI = 200
BACKGROUND_COLOR = 255
# weights for the model
TENSORBOX_MODEL = {
'save_dir': os.path.join(BASE_DIR, 'weights/'),
'iteration': 500000
}
# paths to binary dependencies
PDFFIGURES_JAR_NAME = 'pdffigures2-assembly-0.0.12-SNAPSHOT.jar'
PDFFIGURES_JAR_PATH = os.path.join(
BASE_DIR,
'bin/',
PDFFIGURES_JAR_NAME)
# PDF Rendering backend settings
DEEPFIGURES_PDF_RENDERER = 'deepfigures.extraction.renderers.GhostScriptRenderer'
# settings for data generation
# The location to temporarily store arxiv source data
ARXIV_DATA_TMP_DIR = ''
# The location to store the final output labels
ARXIV_DATA_OUTPUT_DIR = ''
# The location of the PMC open access data
PUBMED_INPUT_DIR = ''
# A directory for storing intermediate results
PUBMED_INTERMEDIATE_DIR = ''
# A directory for storing the output pubmed data
PUBMED_DISTANT_DATA_DIR = ''
# a local directory for storing the output data
LOCAL_PUBMED_DISTANT_DATA_DIR = ''
| deepfigures-open-master | deepfigures/settings.py |
"""Miscellaneous utilities."""
import hashlib
def read_chunks(input_path, block_size):
"""Iterate over ``block_size`` chunks of file at ``input_path``.
:param str input_path: the path to the input file to iterate over.
:param int block_size: the size of the chunks to return at each
iteration.
:yields: a binary chunk of the file at ``input_path`` of size
``block_size``.
"""
with open(input_path, 'rb') as f_in:
while True:
chunk = f_in.read(block_size)
if chunk:
yield chunk
else:
return
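# Illustrative usage sketch (an addition, not part of the original module):
# re-assembling a file from its chunks; the path is a placeholder assumption.
def _example_read_chunks(input_path='/tmp/example.bin'):
    # Joining every chunk reproduces the file's bytes exactly.
    return b''.join(read_chunks(input_path, block_size=4096))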
def hash_out_of_core(hash_func, input_path):
"""Return hexdigest of file at ``input_path`` using ``hash_func``.
Hash the file at ``input_path`` using ``hash_func`` in an
out-of-core way, allowing hashing of arbitrarily large files, and
then return the hexdigest.
:param _hashlib.HASH hash_func: a hashing function from hashlib such
as sha1 or md5.
:param str input_path: path to the input file.
:returns: the hexdigest of the file at ``input_path`` hashed using
``hash_func``.
Example
-------
To use SHA256 to compute the hash of a file out of core:
hash_out_of_core(hashlib.sha256, '/path/to/file')
"""
hf = hash_func()
for chunk in read_chunks(input_path, 256 * (128 * hf.block_size)):
hf.update(chunk)
return hf.hexdigest()
| deepfigures-open-master | deepfigures/utils/misc.py |
"""Classes for serializing data to json."""
import typing
import traitlets
JsonData = typing.Union[list, dict, str, int, float]
class JsonSerializable(traitlets.HasTraits):
def to_dict(self) -> dict:
"""Recursively convert objects to dicts to allow json serialization."""
return {
JsonSerializable.serialize(k): JsonSerializable.serialize(v)
for (k, v) in self._trait_values.items()
}
@staticmethod
def serialize(obj: typing.Union['JsonSerializable', JsonData]):
if isinstance(obj, JsonSerializable):
return obj.to_dict()
elif isinstance(obj, list):
return [JsonSerializable.serialize(v) for v in obj]
elif isinstance(obj, dict):
res_dict = dict()
for (key, value) in obj.items():
assert type(key) == str
res_dict[key] = JsonSerializable.serialize(value)
return res_dict
else:
return obj
@classmethod
def from_dict(cls, json_data: dict):
assert (type(json_data) == dict)
args = {}
for (k, v) in cls.class_traits().items():
args[k] = JsonSerializable.deserialize(v, json_data[k])
return cls(**args)
@staticmethod
def deserialize(target_trait: traitlets.TraitType, json_data: JsonData):
"""
N.B. Using this function on complex objects is not advised; prefer to use an explicit serialization scheme.
"""
# Note: calling importlib.reload on this file breaks issubclass (http://stackoverflow.com/a/11461574/6174778)
if isinstance(target_trait, traitlets.Instance
) and issubclass(target_trait.klass, JsonSerializable):
return target_trait.klass.from_dict(json_data)
elif isinstance(target_trait, traitlets.List):
assert isinstance(json_data, list)
return [
JsonSerializable.deserialize(target_trait._trait, element) for element in json_data
]
elif isinstance(target_trait, traitlets.Dict):
# Assume all dictionary keys are strings
assert isinstance(json_data, dict)
res_dict = dict()
for (key, value) in json_data.items():
assert type(key) == str
res_dict[key] = JsonSerializable.deserialize(target_trait._trait, value)
return res_dict
else:
return json_data
def __repr__(self):
traits_list = ['%s=%s' % (k, repr(v)) for (k, v) in self._trait_values.items()]
return type(self).__name__ + '(' + ', '.join(traits_list) + ')'
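# Illustrative sketch (an addition, not part of the original module): a minimal
# JsonSerializable subclass round-tripped through to_dict / from_dict. The
# `_ExamplePoint` class and its fields are hypothetical.
class _ExamplePoint(JsonSerializable):
    x = traitlets.Float()
    y = traitlets.Float()

def _example_point_roundtrip():
    point = _ExamplePoint(x=1.0, y=2.0)
    as_dict = point.to_dict()  # {'x': 1.0, 'y': 2.0}
    return _ExamplePoint.from_dict(as_dict)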
| deepfigures-open-master | deepfigures/utils/config.py |
deepfigures-open-master | deepfigures/utils/__init__.py |
|
import errno
import io
import json
import logging
import os
import pickle
import string
import random
import tarfile
import typing
import tempfile
import hashlib
import subprocess
from os.path import abspath, dirname, join
from gzip import GzipFile
import arrow
import boto3
ROOT = abspath(dirname(dirname(dirname(__file__))))
class S3FileNotFoundError(FileNotFoundError):
pass
def _expand(filename):
return os.path.expanduser(filename)
def _is_okay_cache_dir(name):
if os.path.exists(name) or os.system('mkdir -p %s' % name) == 0:
return name
def _cache_dir():
dirs = [
'/tmp/deepfigures-cache/',
]
for name in dirs:
if _is_okay_cache_dir(name):
logging.info('Using %s for caching', name)
return name
assert False, 'Failed to find suitable cache directory'
def _s3_key(filename):
s3 = boto3.resource('s3')
bucket_name, key_name = filename[5:].split('/', 1)
key = s3.Object(bucket_name, key_name)
try:
key.last_modified
except:
return None
return key
def last_modified(filename):
if filename.startswith('s3://'):
return S3File.last_modified(filename)
else:
if os.path.exists(filename):
return arrow.get(os.path.getmtime(filename))
else:
return None
class StreamingS3File(object):
def __init__(self, name, mode, encoding):
assert 'w' not in mode and 'a' not in mode, 'Streaming writes not supported.'
key = _s3_key(name)
if key is None:
raise FileNotFoundError(name)
streaming_file = key.get()['Body']
def _readinto(buf):
bytes_read = streaming_file.read(len(buf))
buf[:len(bytes_read)] = bytes_read
return len(bytes_read)
streaming_file.readinto = _readinto
streaming_file.readable = lambda: True
streaming_file.writable = lambda: False
streaming_file.seekable = lambda: False
streaming_file.closeable = lambda: False
streaming_file.closed = False
streaming_file.flush = lambda: 0
self._file = io.BufferedReader(streaming_file, buffer_size=512000)
if encoding is not None or 't' in mode:
# The S3 file interface from boto doesn't conform to the standard python file interface.
# Add dummy methods to make the text wrapper happy.
self._file = io.TextIOWrapper(self._file, encoding=encoding)
def readable(self):
return True
def writeable(self):
return False
def seekable(self):
return False
def closeable(self):
return False
@property
def closed(self):
return False
def flush(self):
return 0
def read(self, *args):
return self._file.read(*args)
def readline(self):
return self._file.readline()
def close(self):
return self._file.close()
def __enter__(self):
return self._file
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
def cache_file(name):
if not name.startswith('s3://'):
return name
s3_last_modified = last_modified(name)
cleaned_name = name[5:].replace('/', '_')
target_filename = os.path.join(_cache_dir(), cleaned_name)
if os.path.exists(target_filename):
if s3_last_modified is None or last_modified(
target_filename
) >= s3_last_modified:
return target_filename
logging.info('Cache file for %s does not exist, copying.', name)
parse = _parse_s3_location(name)
retcode = subprocess.call(
'aws s3api get-object --bucket "%s" --key "%s" "%s.tmp.%d" --request-payer=requester'
% (parse['bucket'], parse['key'], target_filename, os.getpid()),
stdout=subprocess.DEVNULL,
shell=True
)
if retcode != 0:
raise FileNotFoundError('Failed to copy %s' % name)
assert os.system(
'mv "%s.tmp.%d" "%s"' %
(target_filename, os.getpid(), target_filename)
) == 0
assert os.system('chmod 777 "%s"' % (target_filename)) == 0
return target_filename
def s3_location_to_object(path):
s3 = boto3.resource('s3')
parse = _parse_s3_location(path)
bucket_name = parse['bucket']
key = parse['key']
return s3.Object(bucket_name, key)
def _parse_s3_location(path):
logging.debug('Parsing path %s' % path)
if not path.startswith('s3://'):
raise ValueError('s3 location must start with s3://')
path = path[5:]
parts = path.split('/', 1)
if len(parts) == 1:
bucket = parts[0]
key = None
else:
bucket, key = parts
return {'bucket': bucket, 'key': key}
# Yield S3 objects with a given prefix.
def iterate_s3_objects(path, max_files=None):
import boto3
# Check if path exists on S3
if path.startswith('s3://'):
parsed_location = _parse_s3_location(path)
bucket = parsed_location['bucket']
folder_key = parsed_location['key']
s3 = boto3.resource('s3')
client = boto3.client('s3')
s3_bucket = s3.Bucket(bucket)
if max_files:
s3_obj_iterator = \
s3_bucket.objects.filter(Prefix=folder_key, RequestPayer='requester').limit(max_files)
else:
s3_obj_iterator = s3_bucket.objects.filter(
Prefix=folder_key, RequestPayer='requester'
).all()
yield from s3_obj_iterator
# Deprecated. For backward compatibility.
def iterate_s3(path):
yield from iterate_s3_objects(path)
def iterate_s3_files(path_prefix, max_files=None):
"""Yield s3 filenames with a given prefix."""
# return the full name of each file.
for s3_object in iterate_s3_objects(path_prefix, max_files):
yield 's3://{}/{}'.format(s3_object.bucket_name, s3_object.key)
def iterate_files(path_prefix: str) -> typing.Iterable[str]:
"""Yield filenames with a given prefix."""
if path_prefix.startswith('s3://'):
yield from iterate_s3_files(path_prefix)
else:
for (root, directories, filenames) in os.walk(path_prefix):
for filename in filenames:
yield os.path.join(root, filename)
class S3File(object):
def __init__(self, name, mode, encoding):
self.name = name
self.mode = mode
self.encoding = encoding
if 'r' in mode:
self._local_name = self._cache()
self._local_file = io.open(self._local_name, mode)
else:
prefix = self.name.split('//')[1].replace('/', '_')
self._local_name = join(_cache_dir(), '.tmp_' + prefix)
self._local_file = io.open(
self._local_name, mode=mode, encoding=encoding
)
@staticmethod
def last_modified(filename):
key = _s3_key(filename)
if key is None:
return None
return arrow.get(key.last_modified)
def flush(self):
logging.info('Syncing "%s" to S3' % self.name)
self._local_file.flush()
assert os.system(
'aws s3 cp "%s" "%s"' % (self._local_name, self.name)
) == 0
def write(self, *args):
return self._local_file.write(*args)
def read(self, *args):
return self._local_file.read(*args)
def read_lines(self, *args):
return self._local_file.read(*args)
def _cache(self):
return cache_file(self.name)
def seekable(self):
return True
def close(self):
if 'w' in self.mode or 'a' in self.mode:
self.flush()
os.unlink(self._local_name)
else:
self._local_file.close()
def __enter__(self):
return self._local_file
def __exit__(self, type, value, traceback):
self.close()
def _gzip_file(fileobj, mode, encoding):
def _fix_fileobj(gzip_file):
"""
Terrible hack to ensure that GzipFile actually calls close on the fileobj passed into it.
"""
gzip_file.myfileobj = gzip_file.fileobj
return gzip_file
if 't' in mode or encoding is not None:
mode = mode.replace('t', '')
f = _fix_fileobj(GzipFile(fileobj=fileobj, mode=mode))
return io.TextIOWrapper(f, encoding=encoding)
else:
f = _fix_fileobj(GzipFile(fileobj=fileobj, mode=mode))
if 'r' in mode:
return io.BufferedReader(f)
else:
return io.BufferedWriter(f)
def _bzip_file(fileobj, mode, encoding):
import bz2
if 't' in mode:
bz2_file = bz2.BZ2File(fileobj, mode=mode.replace('t', 'b'))
bz2_file._closefp = True
return io.TextIOWrapper(bz2_file, encoding)
else:
bz2_file = bz2.BZ2File(fileobj, mode=mode)
bz2_file._closefp = True
return bz2_file
def slurp(filename, mode='r', encoding=None):
"""Read all content from `filename`"""
with open(
_expand(filename), mode=mode, encoding=encoding, streaming=True
) as f:
return f.read()
def read_json(filename):
"""Read JSON from `filename`."""
with open(_expand(filename), 'rt') as f:
return json.load(f)
def write_json(filename, obj, indent=None, sort_keys=None):
"""Write JSON to `filename`"""
with open(_expand(filename), 'w') as f:
json.dump(obj, f, indent=indent, sort_keys=sort_keys)
def write_json_atomic(filename, obj, indent=None, sort_keys=None):
"""Write JSON to `filename` such that `filename` never exists in a partially written state."""
filename = _expand(filename)
if filename.startswith('s3://'):
        write_json(
            filename, obj, indent, sort_keys
        )  # s3 operations are already atomic
        return
with tempfile.NamedTemporaryFile(
'w', dir=os.path.dirname(filename), delete=False
) as f:
json.dump(obj, f, indent=indent, sort_keys=sort_keys)
tempname = f.name
os.rename(tempname, filename)
def read_pickle(filename, streaming=False):
"""Read pickled data from `name`."""
with open(_expand(filename), 'rb', streaming=streaming) as f:
return pickle.load(f)
def write_pickle(filename, obj):
with open(_expand(filename), 'wb') as f:
pickle.dump(obj, f, -1)
def write_file(filename, value: typing.Union[bytes, str], mode='w'):
with open(_expand(filename), mode) as f:
f.write(value)
def write_file_if_not_exists(
filename, value: typing.Union[bytes, str], mode='w'
):
if os.path.exists(_expand(filename)):
return
write_file(filename, value, mode)
def write_file_atomic(
filename: str, value: typing.Union[bytes, str], mode='w'
) -> None:
if filename.startswith('s3://'):
write_file(filename, value, mode)
else:
with tempfile.NamedTemporaryFile(
'w', dir=os.path.dirname(filename), delete=False
) as f:
f.write(value)
tempname = f.name
os.rename(tempname, filename)
def read_lines(filename, comment=None, streaming=False):
"""
Read all non-blank lines from `filename`.
Skip any lines that begin the comment character.
:param filename: Filename to read from.
:param comment: If defined, ignore lines starting with this text.
:return:
"""
with open(_expand(filename), 'rt', streaming=streaming) as f:
for l in f:
if comment and not l.startswith(comment):
continue
yield l.strip()
def read_json_lines(filename, streaming=False):
for line in read_lines(filename, streaming=streaming):
yield json.loads(line)
def exists(filename):
return last_modified(filename) is not None
def open(filename, mode='rb', encoding=None, **kw):
"""
Open `filename` for reading. If filename is compressed with a known format,
it will be transparently decompressed.
Optional keyword args:
`streaming`: if true, remote files will be streamed directly; no local cache
will be generated.
    `no_decompress`: do not try to automatically decompress the input file
"""
if filename.endswith('.gz') and 'no_decompress' not in kw:
if 'r' in mode:
target_mode = 'rb'
else:
target_mode = 'wb'
target = open(
filename,
no_decompress=True,
mode=target_mode,
encoding=None,
**kw
)
return _gzip_file(target, mode, encoding)
if filename.endswith('.bz2') and 'no_decompress' not in kw:
if 'r' in mode:
target_mode = 'rb'
else:
target_mode = 'wb'
target = open(
filename,
no_decompress=True,
mode=target_mode,
encoding=None,
**kw
)
return _bzip_file(target, mode, encoding)
if filename.startswith('s3://'):
if kw.get('streaming', False):
return StreamingS3File(filename, mode, encoding)
else:
return S3File(filename, mode, encoding)
import io
return io.open(filename, mode, encoding=encoding)
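# Illustrative usage sketch (an addition, not part of the original module):
# reading a local gzipped text file through the transparent decompression in
# `open` above. The path is a placeholder assumption.
def _example_open_gzipped(path='/tmp/example.txt.gz'):
    with open(path, mode='rt', encoding='utf-8') as f:
        return f.read()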
def safe_makedirs(dir_path: str) -> None:
"""Create a directory if it doesn't already exist, avoiding race conditions if called from multiple processes."""
dir_path = _expand(dir_path)
try:
os.makedirs(dir_path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dir_path):
pass
else:
raise
def copy(src: str, dst: str) -> None:
"""Copy src to dst."""
src = _expand(src)
dst = _expand(dst)
with open(src, 'rb') as src_f, open(dst, 'wb') as dst_f:
while True:
chunk = src_f.read(4096)
if chunk is None or len(chunk) == 0:
break
dst_f.write(chunk)
def extract_tarfile_from_bytes(b: bytes, dst: str, mode='r') -> None:
seekable_f = io.BytesIO(b)
safe_makedirs(os.path.dirname(dst))
with tarfile.open(fileobj=seekable_f, mode=mode) as t:
t.extractall(path=dst)
def extract_tarfile(src: str, dst: str, streaming=True) -> None:
"""Extract a tarfile at 'src' to 'dst'."""
src = _expand(src)
dst = _expand(dst)
with open(src, mode='rb', streaming=streaming) as f:
b = f.read()
extract_tarfile_from_bytes(b, dst)
def compute_sha1(filename: str, buf_size=int(1e6)) -> str:
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(buf_size)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
| deepfigures-open-master | deepfigures/utils/file_util.py |
"""Utilities for tests with deepfigures."""
import json
import logging
logger = logging.getLogger(__name__)
def test_deepfigures_json(
self,
expected_json,
actual_json):
"""Run tests comparing two deepfigures JSON files.
Compare two json files outputted from deepfigures and verify that
they are sufficiently similar, this includes comparing the general
structure of the files as well as specific values like the figure
captions, intersection over union for the bounding boxes, etc.
:param unittest.TestCase self: the TestCase to use for running the
        comparisons.
:param str expected_json: a file path string to the
expected / baseline deepfigures JSON on disk.
:param str actual_json: a file path string to the
actual / to be tested deepfigures JSON on disk.
:returns: None
"""
with open(expected_json, 'r') as expected_file:
expected = json.load(expected_file)
with open(actual_json, 'r') as actual_file:
actual = json.load(actual_file)
# make sure keys are the same
self.assertEqual(
expected.keys(),
actual.keys())
# compare top level attributes
self.assertEqual(
expected['dpi'],
actual['dpi'])
self.assertEqual(
expected['error'],
actual['error'])
self.assertEqual(
len(expected['figures']),
len(actual['figures']))
# compare generated figures
for expected_figure, actual_figure in zip(
expected['figures'],
actual['figures']):
exact_match_attrs = [
'caption_text',
'dpi',
'figure_type',
'name',
'page',
'page_height',
'page_width'
]
for attr in exact_match_attrs:
self.assertEqual(
expected_figure[attr],
actual_figure[attr])
bounding_box_attrs = [
'caption_boundary',
'figure_boundary'
]
for attr in bounding_box_attrs:
intersection = {
'x1': max(expected_figure[attr]['x1'], actual_figure[attr]['x1']),
'x2': min(expected_figure[attr]['x2'], actual_figure[attr]['x2']),
'y1': max(expected_figure[attr]['y1'], actual_figure[attr]['y1']),
'y2': min(expected_figure[attr]['y2'], actual_figure[attr]['y2'])
}
# check that the boxes actually do overlap
self.assertLess(
intersection['x1'],
intersection['x2'],
msg="expected and actual box for {attr} in {figname}"
"don't overlap".format(attr=attr, figname=expected_figure['name']))
self.assertLess(
intersection['y1'],
intersection['y2'],
msg="expected and actual box for {attr} in {figname}"
"don't overlap".format(attr=attr, figname=expected_figure['name']))
union = {
'x1': min(expected_figure[attr]['x1'], actual_figure[attr]['x1']),
'x2': max(expected_figure[attr]['x2'], actual_figure[attr]['x2']),
'y1': min(expected_figure[attr]['y1'], actual_figure[attr]['y1']),
'y2': max(expected_figure[attr]['y2'], actual_figure[attr]['y2'])
}
i_area = (
(intersection['x2'] - intersection['x1']) *
(intersection['y2'] - intersection['y1'])
)
u_area = (
(union['x2'] - union['x1']) *
(union['y2'] - union['y1'])
)
iou = i_area / u_area
self.assertGreater(
iou,
0.8,
msg="intersection over union for {attr} on {figname} has"
"dropped below acceptable thresholds.".format(
attr=attr,
figname=expected_figure['name']))
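# Illustrative sketch (an addition, not part of the original module): the same
# box-overlap computation as above, for two boxes given as dicts with 'x1',
# 'y1', 'x2', 'y2' keys. The test uses the enclosing box as the "union"; this
# helper mirrors that choice. Inputs are hypothetical.
def _example_enclosing_iou(box_a, box_b):
    i_x1, i_y1 = max(box_a['x1'], box_b['x1']), max(box_a['y1'], box_b['y1'])
    i_x2, i_y2 = min(box_a['x2'], box_b['x2']), min(box_a['y2'], box_b['y2'])
    u_x1, u_y1 = min(box_a['x1'], box_b['x1']), min(box_a['y1'], box_b['y1'])
    u_x2, u_y2 = max(box_a['x2'], box_b['x2']), max(box_a['y2'], box_b['y2'])
    i_area = max(i_x2 - i_x1, 0) * max(i_y2 - i_y1, 0)
    u_area = (u_x2 - u_x1) * (u_y2 - u_y1)
    return i_area / u_area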
| deepfigures-open-master | deepfigures/utils/test.py |
import os
import typing
import numpy as np
from scipy import misc
from deepfigures.utils import file_util
import logging
class FileTooLargeError(Exception):
pass
def read_tensor(path: str, maxsize: int=None) -> typing.Optional[np.ndarray]:
"""
    Load a saved tensor, stored either as an image file (for standard RGB images) or as a numpy
    archive (for more general tensors).
"""
path = file_util.cache_file(path)
if maxsize is not None:
if os.path.getsize(path) > maxsize:
raise FileTooLargeError
(_, ext) = os.path.splitext(path)
ext = ext.lower()
if ext in {'.png', '.jpg', '.jpeg'}:
res = misc.imread(path, mode='RGB')
assert len(res.shape) == 3
assert res.shape[2] == 3
return res
elif ext in {'.npz'}:
try:
data = np.load(path)
assert len(list(data.items())) == 1
except Exception as e:
logging.exception('Error unzipping %s' % path)
return None
return data['arr_0']
else:
raise RuntimeError('Extension %s for file %s not supported' % (ext, path))
def write_tensor(dst: str, value: np.ndarray) -> None:
"""Save a numpy tensor to a given location."""
(_, ext) = os.path.splitext(dst)
assert (ext == '' or ext == '.npz')
with open(dst, 'wb') as f:
np.savez_compressed(f, value)
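# Illustrative usage sketch (an addition, not part of the original module):
# round-tripping a tensor through write_tensor / read_tensor. The path is a
# placeholder assumption.
def _example_tensor_roundtrip(path='/tmp/example_tensor.npz'):
    original = np.zeros((4, 4, 3), dtype=np.uint8)
    write_tensor(path, original)
    restored = read_tensor(path)
    assert restored is not None and restored.shape == original.shape
    return restored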
def imresize_multichannel(im: np.ndarray, target_size: typing.Tuple[int, int],
**kwargs) -> np.ndarray:
n_channels = im.shape[2]
resized_channels = [
misc.imresize(im[:, :, n], target_size, **kwargs) for n in range(n_channels)
]
return np.stack(resized_channels, axis=2)
def imrescale_multichannel(im: np.ndarray, scale_factor: float, **kwargs) -> np.ndarray:
n_channels = im.shape[2]
resized_channels = [
misc.imresize(im[:, :, n], scale_factor, **kwargs) for n in range(n_channels)
]
return np.stack(resized_channels, axis=2)
| deepfigures-open-master | deepfigures/utils/image_util.py |
import typing
import traitlets
T1 = typing.TypeVar('T1')
T2 = typing.TypeVar('T2')
T3 = typing.TypeVar('T3')
T4 = typing.TypeVar('T4')
T = typing.TypeVar('T')
K = typing.TypeVar('K')
V = typing.TypeVar('V')
# Define wrappers for traitlets classes. These simply provide Python type hints
# that correspond to actual instance type that will result after a class is
# instantiated (e.g. Unicode() becomes a string).
#
# This allows PyCharm style type hinting to resolve types properly.
def Float(*args, **kw) -> float:
return traitlets.Float(*args, **kw)
def CFloat(*args, **kw) -> float:
return traitlets.CFloat(*args, **kw)
def Int(*args, **kw) -> int:
return traitlets.Int(*args, **kw)
def Bool(*args, **kw) -> bool:
return traitlets.Bool(*args, **kw)
def Enum(options: typing.List[T], **kw) -> T:
return traitlets.Enum(options, **kw)
def List(klass: T, **kw) -> typing.List[T]:
return traitlets.List(klass, **kw)
def Set(klass: T, **kw) -> typing.Set[T]:
return traitlets.Set(klass, **kw)
# N.B. traitlets.Dict does not check key types.
def Dict(val_class: V, **kw) -> typing.Dict[typing.Any, V]:
return traitlets.Dict(val_class, **kw)
def Tuple1(a: T1) -> typing.Tuple[T1]:
return traitlets.Tuple(a)
def Tuple2(a: T1, b: T2) -> typing.Tuple[T1, T2]:
return traitlets.Tuple(a, b)
def Unicode(*args, **kw) -> str:
return traitlets.Unicode(*args, **kw)
def Instance(klass: T, **kw) -> T:
return traitlets.Instance(klass, **kw)
def Array(**kw):
import numpy
return Instance(numpy.ndarray, **kw)
def DataFrameType(**kw):
import pandas
return Instance(pandas.DataFrame, **kw)
def Any(**kw) -> typing.Any:
return traitlets.Any(**kw)
# Just a direct copy for now to provide a consistent interface.
HasTraits = traitlets.HasTraits
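# Illustrative sketch (an addition, not part of the original module): a small
# HasTraits class declared with the typed wrappers above. The class and its
# fields are hypothetical.
class _ExampleDetection(HasTraits):
    score = Float()
    label = Unicode()
    box = List(Float())
# e.g. _ExampleDetection(score=0.9, label='Figure', box=[1.0, 2.0, 3.0, 4.0])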
| deepfigures-open-master | deepfigures/utils/traits.py |
"""Utilities for managing settings."""
from importlib import import_module
def import_setting(import_string):
"""Import and return the object defined by import_string.
This function is helpful because by the nature of settings files,
they often end up with circular imports, i.e. ``foo`` will import
``settings`` to get configuration information but ``settings`` will
have some setting set to an object imported from ``foo``. Because
python can't do circular imports, we make the settings strings and
then import them at runtime from the string using this function.
Parameters
----------
:param str import_string: the python path to the object you wish to
import. ``import_string`` should be a dot separated path the same
as you would use in a python import statement.
Returns
-------
:returns: any module or object located at ``import_string`` or
``None`` if no module exists.
"""
try:
module = import_module(import_string)
except ImportError:
module = None
if not module:
mod_string, obj_string = import_string.rsplit('.', 1)
obj = getattr(import_module(mod_string), obj_string)
return module or obj
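# Illustrative usage sketch (an addition, not part of the original module):
# resolving the configured PDF renderer class from its import string,
# mirroring how the data generation pipelines instantiate it.
def _example_import_renderer():
    from deepfigures import settings
    renderer_class = import_setting(settings.DEEPFIGURES_PDF_RENDERER)
    return renderer_class()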
| deepfigures-open-master | deepfigures/utils/settings_utils.py |
import cffi
import os
ffibuilder = cffi.FFI()
cur_dir = os.path.dirname(os.path.abspath(__file__))
with open(cur_dir + '/stringmatch.cpp') as f:
code = f.read()
ffibuilder.set_source(
'_stringmatch', code,
source_extension='.cpp',
)
ffibuilder.cdef('''
typedef struct {
int start_pos;
int end_pos;
int cost;
} MatchResult;
MatchResult match(const wchar_t* a, const wchar_t* b);
''')
if __name__ == '__main__':
ffibuilder.compile(verbose=True)
| deepfigures-open-master | deepfigures/utils/stringmatch/stringmatch_builder.py |
from _stringmatch import lib
def match(key: str, text: str):
'''
Find the location of the substring in text with the
minimum edit distance (Levenshtein) to key.
'''
return lib.match(key, text)
| deepfigures-open-master | deepfigures/utils/stringmatch/__init__.py |
#!/usr/bin/env python
from deepfigures.utils.stringmatch import match
def test_match():
m = match('hello', 'hello')
assert m.cost == 0
assert m.start_pos == 0
assert m.end_pos == 5
m = match('e', 'hello')
assert m.cost == 0
assert m.start_pos == 1
assert m.end_pos == 2
m = match('hello', 'e')
assert m.cost == 4
assert m.start_pos == 0
assert m.end_pos == 1
# Prefer character omissions over character edits in match bounds
m = match('bab', 'cac')
assert m.cost == 2
assert m.start_pos == 1
assert m.end_pos == 2
# Select first match in the text in case of ties
m = match('ab', 'ba')
assert m.cost == 1
assert m.start_pos == 0
assert m.end_pos == 1
m = match('hello', 'world')
assert m.cost == 4
assert m.start_pos == 1
assert m.end_pos == 2
def test_unicode_match():
m = match('æther', 'aether')
assert m.cost == 1
assert m.start_pos == 2
assert m.end_pos == 6
m = match('こんにちは世界', 'こんばんは世界')
assert m.cost == 2
assert m.start_pos == 0
assert m.end_pos == 7
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| deepfigures-open-master | deepfigures/utils/stringmatch/test_stringmatch.py |
"""Test miscellaneous utilities."""
import hashlib
import os
import unittest
from deepfigures.utils import misc
class TestReadChunks(unittest.TestCase):
"""Test deepfigures.utils.misc.read_chunks."""
def test_read_chunks(self):
"""Test read_chunks."""
chunks_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data/chunks.txt')
# read in the file as a string
with open(chunks_path, 'rb') as f_in:
contents = f_in.read()
# verify that we iterate through the file correctly
for i, chunk in enumerate(misc.read_chunks(chunks_path, block_size=1)):
self.assertEqual(chunk, contents[i:i+1])
for i, chunk in enumerate(misc.read_chunks(chunks_path, block_size=4)):
self.assertEqual(chunk, contents[4*i:4*(i+1)])
class TestHashOutOfCore(unittest.TestCase):
"""Test deepfigures.utils.misc.hash_out_of_core."""
def test_hash_out_of_core(self):
"""Test hash_out_of_core."""
bigfile_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data/bigfile.txt')
self.assertEqual(
misc.hash_out_of_core(hashlib.sha1, bigfile_path),
"329f37bbe1d7f23caf4f1868a4a256f168d84f15")
self.assertEqual(
misc.hash_out_of_core(hashlib.sha256, bigfile_path),
"cbe4b71d97967575d12084b3702467f9dec2b22859c9a2407ea671fe17ed3d4a")
self.assertEqual(
misc.hash_out_of_core(hashlib.md5, bigfile_path),
"ad4b675109d472d8c1ed006e395f8f14")
| deepfigures-open-master | deepfigures/utils/tests/test_misc.py |
import os
import glob
import datetime
import tempfile
import tarfile
import logging
import multiprocessing
import multiprocessing.pool
import re
import time
import functools
import collections
from typing import List, Optional, Tuple
import numpy as np
from scipy.ndimage import imread
from scipy.misc import imsave
from skimage import measure
from PIL import Image
import scipy as sp
import bs4
from deepfigures import settings
from deepfigures.utils import file_util, config, settings_utils
from deepfigures.extraction import figure_utils, renderers
from deepfigures.extraction.figure_utils import Figure, BoxClass
pdf_renderer = settings_utils.import_setting(
settings.DEEPFIGURES_PDF_RENDERER)()
IMPORT_STR = r'''
\usepackage{color}
\usepackage{floatrow}
\usepackage{tcolorbox}
\DeclareColorBox{figurecolorbox}{\fcolorbox{%s}{white}}
\DeclareColorBox{tablecolorbox}{\fcolorbox{%s}{white}}
\floatsetup[figure]{framestyle=colorbox, colorframeset=figurecolorbox, framearound=all, frameset={\fboxrule1pt\fboxsep0pt}}
\floatsetup[table]{framestyle=colorbox, colorframeset=tablecolorbox, framearound=all, frameset={\fboxrule1pt\fboxsep0pt}}
\usepackage[labelfont={color=%s},textfont={color=%s}]{caption}
'''
BEGIN_DOC = r'\begin{document}'
COLOR_STR = (IMPORT_STR % ('red', 'yellow', 'green', 'blue')) + BEGIN_DOC
BLACK_STR = (IMPORT_STR % ('white', 'white', 'black', 'black')) + BEGIN_DOC
ARXIV_SRC_DIR = os.path.join(
settings.ARXIV_DATA_OUTPUT_DIR,
'src/')
ARXIV_MODIFIED_SRC_DIR = os.path.join(
settings.ARXIV_DATA_OUTPUT_DIR,
'modified_src/')
ARXIV_DIFF_DIR = os.path.join(
settings.ARXIV_DATA_OUTPUT_DIR,
'diffs_%ddpi/' % settings.DEFAULT_INFERENCE_DPI)
ARXIV_FIGURE_JSON_DIR = os.path.join(
settings.ARXIV_DATA_OUTPUT_DIR,
'figure-jsons/')
MAX_PAGES = 50
ARXIV_TAR_SRC = 's3://arxiv/src/'
ARXIV_TAR_RE = re.compile(
ARXIV_TAR_SRC +
'arXiv_src_(?P<year>\d\d)(?P<month>\d\d)_(?P<chunk>\d\d\d).tar'
)
ARXIV_TAR_TEMPLATE = ARXIV_TAR_SRC + 'arXiv_src_%02d%02d_%03d.tar'
PDFLATEX_TIMEOUT = 120
def parse_arxiv_tarname(tarname: str) -> Tuple[int, int, int]:
match = ARXIV_TAR_RE.fullmatch(tarname)
assert match is not None, 'Failed to match %s' % tarname
return (
int(match.group('year')),
int(match.group('month')),
int(match.group('chunk'))
)
def generate_diffs(paper_src_dir: str,
dpi: int=settings.DEFAULT_INFERENCE_DPI) -> Optional[List[str]]:
"""
Given the directory of a latex source file, create a modified copy of the source that includes colored boxes
surrounding each figure and table.
"""
paper_tex = glob.glob(paper_src_dir + '/' + '*.tex')
if len(paper_tex) > 1:
logging.warning('Multiple .tex files found')
return None
elif len(paper_tex) < 1:
logging.warning('No .tex files found')
return None
texfile = paper_tex[0]
chunk_dir, paper_id = os.path.split(paper_src_dir)
chunk_id = os.path.basename(chunk_dir)
# Modify latex source
with open(texfile, 'rb') as f:
# Some files may cause a UnicodeDecodeError if read directly as text
# so use bs4 to fix them up
text = bs4.UnicodeDammit(f.read()).unicode_markup
paper_modified_src_dir = ARXIV_MODIFIED_SRC_DIR + chunk_id + '/' + paper_id
if not os.path.isdir(paper_modified_src_dir):
os.makedirs(paper_modified_src_dir)
color_filename = paper_modified_src_dir + '/color.tex'
black_filename = paper_modified_src_dir + '/black.tex'
with open(color_filename, 'w') as f:
print(text.replace(BEGIN_DOC, COLOR_STR), file=f)
with open(black_filename, 'w') as f:
print(text.replace(BEGIN_DOC, BLACK_STR), file=f)
result_dir = ARXIV_DIFF_DIR + chunk_id + '/' + paper_id + '/'
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
try:
# on some PDFs, call_pdflatex doesn't raise an exception even
# after the timeout, and instead hangs indefinitely (> 24
# hours).
color_pdf = figure_utils.call_pdflatex(
src_tex=color_filename,
src_dir=paper_src_dir,
dest_dir=result_dir,
timeout=PDFLATEX_TIMEOUT
)
black_pdf = figure_utils.call_pdflatex(
src_tex=black_filename,
src_dir=paper_src_dir,
dest_dir=result_dir,
timeout=PDFLATEX_TIMEOUT
)
except figure_utils.LatexException as e:
logging.warning('Pdflatex failure: %s' % e.stdout)
return None
color_ims = pdf_renderer.render(color_pdf, dpi=dpi, max_pages=MAX_PAGES)
black_ims = pdf_renderer.render(black_pdf, dpi=dpi, max_pages=MAX_PAGES)
diff_names = []
for (color_page, black_page) in zip(color_ims, black_ims):
assert os.path.isfile(color_page) and os.path.isfile(black_page)
color_page_im = imread(color_page)
black_page_im = imread(black_page)
assert color_page_im.shape == black_page_im.shape
diff_page = figure_utils.im_diff(color_page_im, black_page_im)
diff_name = result_dir + 'diff-' + os.path.basename(black_page)
imsave(diff_name, diff_page)
diff_names.append(diff_name)
return diff_names
CAPTION_LABEL_COLOR = [0, 255, 0]
CAPTION_TEXT_COLOR = [0, 0, 255]
FIGURE_BOX_COLOR = [255, 0, 0]
TABLE_BOX_COLOR = [255, 242, 0]
BACKGROUND_COLOR = [255, 255, 255]
CAPTION_OFFSET = 1
def proposal_up(full_box: BoxClass, caption_box: BoxClass) -> BoxClass:
return BoxClass(
x1=full_box.x1,
y1=full_box.y1,
x2=full_box.x2,
y2=caption_box.y1 - CAPTION_OFFSET
)
def proposal_down(full_box: BoxClass, caption_box: BoxClass) -> BoxClass:
return BoxClass(
x1=full_box.x1,
y1=caption_box.y2 + CAPTION_OFFSET,
x2=full_box.x2,
y2=full_box.y2
)
def proposal_left(full_box: BoxClass, caption_box: BoxClass) -> BoxClass:
return BoxClass(
x1=full_box.x1,
y1=full_box.y1,
x2=caption_box.x1 - CAPTION_OFFSET,
y2=full_box.y2
)
def proposal_right(full_box: BoxClass, caption_box: BoxClass) -> BoxClass:
return BoxClass(
x1=caption_box.x2 + CAPTION_OFFSET,
y1=full_box.y1,
x2=full_box.x2,
y2=full_box.y2
)
def get_figure_box(full_box: BoxClass, caption_box: BoxClass,
im: np.ndarray) -> Optional[BoxClass]:
"""Find the largest box inside the full figure box that doesn't overlap the caption."""
proposals = [
f(full_box, caption_box)
for f in [proposal_up, proposal_down, proposal_left, proposal_right]
]
proposal_areas = [p.get_area() for p in proposals]
proposal = proposals[np.argmax(proposal_areas)]
return proposal.crop_whitespace_edges(im)
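# Illustrative sketch (an addition, not part of the original module): how the
# caption-avoiding proposals relate to the full figure box. Coordinates are
# hypothetical and in the same pixel space as BoxClass.
def _example_proposals():
    full = BoxClass(x1=0.0, y1=0.0, x2=100.0, y2=100.0)
    caption = BoxClass(x1=0.0, y1=80.0, x2=100.0, y2=100.0)
    # With the caption along the bottom edge, proposal_up keeps the largest
    # caption-free region.
    candidates = [
        f(full, caption)
        for f in [proposal_up, proposal_down, proposal_left, proposal_right]
    ]
    return candidates[int(np.argmax([c.get_area() for c in candidates]))]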
def find_figures_and_captions(
diff_im: np.ndarray, im: np.ndarray, page_num: int
) -> List[Figure]:
figures = []
all_box_mask = (
np.logical_or(diff_im == FIGURE_BOX_COLOR, diff_im == TABLE_BOX_COLOR)
).all(axis=2)
all_caption_mask = (
np.logical_or(
diff_im == CAPTION_LABEL_COLOR, diff_im == CAPTION_TEXT_COLOR
)
).all(axis=2)
components = measure.label(all_box_mask)
# Component id 0 is for background
for component_id in np.unique(components)[1:]:
(box_ys, box_xs) = np.where(components == component_id)
assert (len(box_ys) > 0
) # It was found from np.unique so it must exist somewhere
assert (len(box_xs) > 0)
full_box = BoxClass(
x1=float(min(box_xs)),
y1=float(min(box_ys)),
x2=float(max(box_xs) + 1),
y2=float(max(box_ys) + 1)
)
caption_mask = all_caption_mask.copy()
caption_mask[:, :round(full_box.x1)] = 0
caption_mask[:, round(full_box.x2):] = 0
caption_mask[:round(full_box.y1), :] = 0
caption_mask[round(full_box.y2):, :] = 0
(cap_ys, cap_xs) = np.where(caption_mask)
if len(cap_ys) == 0:
continue # Ignore boxes with no captions
cap_box = BoxClass(
x1=float(min(cap_xs)),
y1=float(min(cap_ys)),
x2=float(max(cap_xs) + 1),
y2=float(max(cap_ys) + 1),
)
fig_box = get_figure_box(full_box, cap_box, im)
if fig_box is None:
continue
box_color = diff_im[box_ys[0], box_xs[0], :]
if np.all(box_color == FIGURE_BOX_COLOR):
figure_type = 'Figure'
else:
assert np.all(box_color == TABLE_BOX_COLOR), print(
'Bad box color: %s' % str(box_color)
)
figure_type = 'Table'
(page_height, page_width) = diff_im.shape[:2]
figures.append(
Figure(
figure_boundary=fig_box,
caption_boundary=cap_box,
figure_type=figure_type,
name='',
page=page_num,
caption='',
dpi=settings.DEFAULT_INFERENCE_DPI,
page_width=page_width,
page_height=page_height
)
)
return figures
def consume_diff_generate_figures(diff) -> Optional[List[Figure]]:
dirname = os.path.dirname(diff) + '/'
pagenum = figure_utils.pagename_to_pagenum(diff)
page_image_name = dirname + 'black.pdf-images/ghostscript/dpi100/black.pdf-dpi100-page%.04d.png' % (
pagenum + 1
)
try:
page_image = sp.ndimage.imread(page_image_name)
diff_im = imread(diff)
except Image.DecompressionBombWarning as e:
logging.warning('Image %s too large, failed to read' % page_image_name)
logging.warning(e)
return None
page_num = figure_utils.pagename_to_pagenum(page_image_name)
figures = find_figures_and_captions(diff_im, page_image, page_num)
return figures
def process_paper_tar(paper_tarname: str) -> None:
parts = paper_tarname.split('/')
partition_name = parts[-2]
paper_name = os.path.splitext(parts[-1])[0]
result_path = os.path.join(
ARXIV_FIGURE_JSON_DIR, partition_name, paper_name + '.json'
)
paper_dir = os.path.join(ARXIV_SRC_DIR, partition_name, paper_name)
if os.path.isfile(result_path):
return
print('.', end='', flush=True)
try:
file_util.extract_tarfile(paper_tarname, paper_dir)
except tarfile.ReadError:
logging.debug('File %s is not a tar' % paper_tarname)
return
diffs = generate_diffs(paper_dir)
if diffs is None:
return
figures_by_page = dict()
for diff in diffs:
figures = consume_diff_generate_figures(diff)
if figures is None:
continue
page_name = os.path.dirname(diff) + '/' + diff[diff.find('black.pdf-'):]
figures_by_page[page_name] = figures
file_util.safe_makedirs(os.path.dirname(result_path))
file_util.write_json_atomic(
result_path,
config.JsonSerializable.serialize(figures_by_page),
sort_keys=True
)
def download_and_extract_tar(
tarname: str, extract_dir: str, n_attempts: int=100
) -> None:
print('.', end='', flush=True)
logging.info('Downloading %s' % tarname)
for attempt in range(n_attempts):
try:
cached_file = file_util.cache_file(tarname)
break
except FileNotFoundError:
if attempt == n_attempts - 1:
raise
logging.exception('Download failed, retrying')
time.sleep(10)
file_util.extract_tarfile(cached_file, extract_dir)
os.remove(cached_file)
def run_on_all() -> None:
Image.MAX_IMAGE_PIXELS = int(1e8) # Don't render very large PDFs.
Image.warnings.simplefilter('error', Image.DecompressionBombWarning)
tarnames = [
tarname for tarname in file_util.iterate_s3_files(ARXIV_TAR_SRC)
if os.path.splitext(tarname)[1] == '.tar'
]
# Process all papers simultaneously to avoid blocking on the ones
# where pdflatex runs forever
grouped_tarnames = figure_utils.ordered_group_by(
tarnames, lambda x: True
)
for group_key, group_tars in grouped_tarnames.items():
print(datetime.datetime.now())
with tempfile.TemporaryDirectory(
prefix=settings.ARXIV_DATA_TMP_DIR
) as tmpdir:
tmpdir += '/'
f = functools.partial(download_and_extract_tar, extract_dir=tmpdir)
print(
'Downloading %d tarfiles in group %s' %
(len(group_tars), str(group_key))
)
with multiprocessing.Pool() as p:
p.map(f, group_tars)
paper_tarnames = glob.glob(tmpdir + '*/*.gz')
print(datetime.datetime.now())
print(
'Processing %d papers in group %s' %
(len(paper_tarnames), str(group_key))
)
with multiprocessing.Pool(processes=round(2 * os.cpu_count())
) as p:
p.map(process_paper_tar, paper_tarnames)
if __name__ == "__main__":
logging.basicConfig(filename='logger_arxiv.log', level=logging.WARNING)
run_on_all()
print('All done')
| deepfigures-open-master | deepfigures/data_generation/arxiv_pipeline.py |
import collections
import datetime
import glob
import logging
import math
import multiprocessing
import os
import re
import subprocess
from typing import List, Tuple, Optional, Dict, Iterable
import bs4
from bs4 import BeautifulSoup
import cv2
import editdistance
import numpy as np
import scipy as sp
from PIL import Image
from botocore.vendored.requests.exceptions import ReadTimeout
from deepfigures import settings
from deepfigures.extraction import (
figure_utils,
exceptions,
datamodels,
renderers)
from deepfigures.utils import (
stringmatch,
config,
traits,
file_util,
image_util,
settings_utils)
from deepfigures.settings import (
PUBMED_INPUT_DIR,
PUBMED_INTERMEDIATE_DIR,
PUBMED_DISTANT_DATA_DIR,
LOCAL_PUBMED_DISTANT_DATA_DIR)
LOCAL_INTERMEDIATE_DIR = LOCAL_PUBMED_DISTANT_DATA_DIR + 'intermediate/'
LOCAL_FIGURE_JSON_DIR = LOCAL_PUBMED_DISTANT_DATA_DIR + 'figure-jsons/'
PDFTOTEXT_DPI = 72
MAX_PAGES = 50
pdf_renderer = settings_utils.import_setting(
settings.DEEPFIGURES_PDF_RENDERER)()
def get_input_tars(suffix: str='') -> List[str]:
"""Returns a list of PMC source tarfiles, restricted to suffix (e.g. '00/')."""
dirname = PUBMED_INPUT_DIR + suffix
while True:
try:
return list(file_util.iterate_files(dirname))
except ReadTimeout as e:
logging.exception(
                'Timeout listing files in %s, retrying' % dirname
)
def get_result_jsons(prefix: str) -> List[str]:
    logging.info(datetime.datetime.now())
search_dir = LOCAL_FIGURE_JSON_DIR + prefix
jsons = sorted(glob.glob(search_dir))
logging.info('Found %d jsons at %s' % (len(jsons), search_dir))
    logging.info(datetime.datetime.now())
return jsons
def get_bin(pdf: str) -> str:
"""
    Get the bin of the pdf, e.g. './00/02/Br_J_Cancer_1977_Jan_35(1)_78-86.tar.gz'
returns '00/02'.
"""
parts = pdf.split('/')
return parts[-3] + '/' + parts[-2] + '/'
class MatchedString(config.JsonSerializable):
"""A typed object representing the result of running stringmatch."""
start_pos = traits.Int()
end_pos = traits.Int()
cost = traits.Int()
@staticmethod
def from_match(match) -> 'MatchedString':
return MatchedString(
start_pos=match.start_pos, end_pos=match.end_pos, cost=match.cost
)
class PubmedMatchedFigure(config.JsonSerializable):
"""
Contains data on a figure extracted from a PMC paper via caption matching with the included nxml file.
"""
fig_im = traits.Instance(np.ndarray)
page_image_name = traits.Unicode()
caption = traits.Unicode()
name = traits.Unicode()
matched_caption = traits.Unicode()
html_page = traits.Unicode()
start_pos = traits.Int()
end_pos = traits.Int()
pdf = traits.Unicode()
page_num = traits.Int()
def get_xml_soup(pdf: str) -> Optional[BeautifulSoup]:
xml = pdf[:-4] + '.nxml'
if not os.path.isfile(xml):
return None
with open(xml, 'r') as f:
xml_soup = BeautifulSoup(f, 'xml')
return xml_soup
def get_author_name(author: bs4.Tag) -> Optional[str]:
"""
Given an xml tag representing an author, return that author's name as it will appear in the PDF, with any given
names followed by surname.
"""
surname = author.surname
if surname is None:
return None
given_names = author.find_all('given-names')
return ' '.join([name.text for name in given_names + [surname]])
def find_str_words_in_pdf(
key: str,
html_pages: List[bs4.Tag],
pages: Optional[List[int]]=None,
max_dist: int=math.inf,
) -> Tuple[Optional[List[bs4.Tag]], int]:
if pages is None:
pages = list(range(len(html_pages)))
text_pages = [
re.sub('\n', ' ', html_page.text) for html_page in html_pages
]
clean_key = clean_str(key)
matches = [
MatchedString.
from_match(stringmatch.match(clean_key, clean_str(page)))
for (page_num, page) in enumerate(text_pages) if page_num in pages
]
page_num = int(np.argmin([match.cost for match in matches]))
match = matches[page_num]
if match.cost > max_dist:
matched_words = None
else:
matched_words = find_match_words(html_pages[page_num], match)
matched_word_text = ' '.join([word.text for word in matched_words])
if editdistance.eval(key, matched_word_text) > max_dist:
matched_words = None
return matched_words, page_num
def find_match_words(page: bs4.Tag, match: MatchedString) -> List[bs4.Tag]:
words = page.find_all('word')
start_pos = 0
start_token_idx = 0
while start_pos < match.start_pos:
# Start at the end of partially matching tokens
start_pos += len(clean_str(words[start_token_idx].text))
start_token_idx += 1
end_pos = start_pos
end_token_idx = start_token_idx
while end_pos < match.end_pos:
# Stop at the end of partially matching tokens
end_pos += len(clean_str(words[end_token_idx].text))
end_token_idx += 1
matching_words = words[start_token_idx:end_token_idx]
return matching_words
def words_to_box(
words: Optional[List[bs4.Tag]], target_dpi=settings.DEFAULT_INFERENCE_DPI
) -> Optional[datamodels.BoxClass]:
if words is None or len(words) == 0:
return None
word_boxes = [
datamodels.BoxClass.from_xml(word, target_dpi) for word in words
]
return datamodels.enclosing_box(word_boxes)
def tag_to_tokens(tag: bs4.Tag) -> Iterable[str]:
for c in tag.contents:
if type(c) == bs4.NavigableString:
s = c
elif hasattr(c, 'text'):
s = c.text
else:
s = ''
for token in s.split():
yield token
def match_figures(pdf: str, ignore_errors=False
) -> Optional[Dict[str, List[datamodels.Figure]]]:
print('.', end='', flush=True)
logging.info('Matching figures for %s' % pdf)
try:
page_names = pdf_renderer.render(
pdf_path=pdf,
output_dir=os.path.dirname(pdf),
dpi=settings.DEFAULT_INFERENCE_DPI,
max_pages=MAX_PAGES,
check_retcode=True
)
except subprocess.CalledProcessError:
logging.exception('Failed to render pdf: %s' % pdf)
return None
try:
xml_soup = get_xml_soup(pdf)
if xml_soup is None:
# This can be caused by files with multiple PDFs
logging.info('No xml soup found for %s' % pdf)
return None
html_soup = pdf_renderer.extract_text(pdf)
if html_soup is None:
# pdftotext fails on some corrupt pdfs
            logging.warning('Pdftotext failed, pdf corrupt: %s' % pdf)
            return None
html_pages = html_soup.findAll('page')
xml_figures = xml_soup.findAll('fig')
xml_tables = xml_soup.findAll('table-wrap')
matched_figures = []
for xml_fig in xml_figures + xml_tables:
matched_figure = match_figure(xml_fig, html_pages, pdf, page_names)
if matched_figure is None:
return None
else:
matched_figures.append(matched_figure)
if len(matched_figures) == 0:
# Some papers contain figures but don't use standard XML tags
return None
else:
figures_by_page = {page_name: []
for page_name in page_names
} # type: Dict[str, List[datamodels.Figure]]
for fig in matched_figures:
figures_by_page[page_names[fig.page]].append(fig)
return figures_by_page
except Exception:
logging.exception('Exception for pdf %s' % pdf)
if ignore_errors:
return None
else:
raise
def match_figure(
xml_fig: bs4.Tag,
html_pages: List[bs4.Tag],
pdf: str,
page_names: Optional[List[str]]=None
) -> Optional[datamodels.Figure]:
if xml_fig.caption is None or xml_fig.label is None:
# Some tables contain no caption
logging.warning(
'No caption or label found for %s in %s' % (xml_fig.name, pdf)
)
return None
label = xml_fig.label.text
caption = label + ' ' + xml_fig.caption.text
caption_words, page_num = find_str_words_in_pdf(caption, html_pages)
caption_boundary = words_to_box(caption_words)
if caption_boundary is None:
logging.warning('Failed to locate caption for %s in %s' % (label, pdf))
return None
html_page = html_pages[page_num]
page_words = html_page.find_all('word')
words_inside_box = [
word for word in page_words
if caption_boundary.contains_box(datamodels.BoxClass.from_xml(word))
]
if len(words_inside_box) / len(caption_words) > 1.5:
logging.warning(
'%s in %s includes too many non-caption words: %f' %
(label, pdf, len(words_inside_box) / len(caption_words))
)
if page_num >= MAX_PAGES: # page_num is 0 indexed
return None
page_im = image_util.read_tensor(page_names[page_num])
page_height, page_width = page_im.shape[:2]
if xml_fig.graphic is not None:
image_name = xml_fig.graphic.get('xlink:href')
if image_name is None:
image_name = xml_fig.graphic.get('href')
if image_name is None:
logging.warning('Figure graphic contains no image')
return None
fig_image_name = os.path.dirname(pdf) + '/' + image_name + '.jpg'
if not os.path.isfile(fig_image_name):
logging.warning('Image file not found for %s in %s' % (label, pdf))
return None
fig_im = image_util.read_tensor(fig_image_name)
figure_boundary = find_fig_box(fig_im, page_im)
if figure_boundary is None:
logging.warning(
'Failed to match figure for %s in %s' % (label, pdf)
)
return None
elif xml_fig.name == 'table-wrap':
# Need to search for footer and table separately since they can be separated in the token stream
table_header = xml_fig.find_all('th')
table_body = xml_fig.find_all('td')
table_footer = xml_fig.find_all('table-wrap-foot')
table_tokens = [
token
for t in table_header + table_body for token in tag_to_tokens(t)
]
footer_tokens = [
token for t in table_footer for token in t.text.split()
]
page_table_content_words, content_dist = find_page_table_words(
table_tokens, page_words
)
page_table_footer_words, footer_dist = find_page_table_words(
footer_tokens, page_words
)
total_dist = content_dist + footer_dist
total_tokens = len(table_tokens) + len(footer_tokens)
if total_tokens == 0:
logging.warning(
'Failed to match any table contents for %s in %s' %
(label, pdf)
)
return None
if total_dist / total_tokens > .5:
logging.warning(
'%s in %s table is too far from the xml table: %f' %
(label, pdf, total_dist / total_tokens)
)
return None
page_table_words = page_table_content_words + page_table_footer_words
figure_boundary = words_to_box(page_table_words)
words_inside_box = [
word for word in page_words
if
figure_boundary.contains_box(datamodels.BoxClass.from_xml(word))
]
if len(words_inside_box) / total_tokens > 1.2:
logging.warning(
'%s in %s includes too many non-table words: %f' %
(label, pdf, len(words_inside_box) / total_tokens)
)
return None
else:
logging.warning('No graphic found for %s' % pdf)
return None
if xml_fig.name == 'fig':
figure_type = 'Figure'
else:
assert xml_fig.name == 'table-wrap'
figure_type = 'Table'
return datamodels.Figure(
figure_boundary=figure_boundary,
caption_boundary=caption_boundary,
caption_text=caption, # Caption from the PubMed XML
name=label,
page=page_num,
figure_type=figure_type,
dpi=settings.DEFAULT_INFERENCE_DPI,
page_height=page_height,
page_width=page_width
)
def find_page_table_words(table_tokens: List[str], page_words: List[bs4.Tag]) -> \
Tuple[List[bs4.Tag], int]:
if len(table_tokens) == 0:
return [], 0
assert len(page_words) > 0
table_token_counter = collections.Counter(table_tokens)
table_token_count = sum(table_token_counter.values())
page_tokens = [word.text for word in page_words]
best_dist = math.inf
best_seq = None
for start_idx in range(len(page_words)):
diff_counter = table_token_counter.copy()
cur_dist = table_token_count
for end_idx in range(start_idx + 1, len(page_tokens)):
cur_token = page_tokens[end_idx - 1]
token_count = diff_counter[cur_token]
diff_counter[cur_token] = token_count - 1
if token_count <= 0:
cur_dist += 1
else:
cur_dist -= 1
if cur_dist < best_dist:
best_dist = cur_dist
best_seq = (start_idx, end_idx)
assert best_seq is not None
best_start, best_end = best_seq
return page_words[best_start:best_end], best_dist
def clean_str(s: str) -> str:
# Some figures have labels with mismatching cases
# so we should be case insensitive
return ''.join(s.split()).lower()
SCALE_FACTOR = .1
def find_template_in_image(fig_im: np.ndarray, page_im: np.ndarray, scales: List[float], use_canny: bool) -> \
Optional[Tuple[datamodels.BoxClass, float, float]]:
"""
Find the position of the best match for fig_im on page_im by checking at each of a list of scales.
Each scale is a float in (0,1] representing the ratio of the size of fig_im to page_im (maximum of height ratio and
width ratio).
"""
try:
template = sp.misc.imresize(fig_im, SCALE_FACTOR)
except ValueError:
            # Resizing can shrink very small figures to zero size, which raises a ValueError.
return None
(template_height, template_width) = template.shape[:2]
(page_height, page_width) = page_im.shape[:2]
if use_canny:
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 100, 200)
page_im = cv2.cvtColor(page_im, cv2.COLOR_BGR2GRAY)
found = None
best_scale = None
template_page_size_ratio = max(
template_height / page_height, template_width / page_width
)
# loop over the scales of the image
    for scale in scales[::-1]:
# resize the image according to the scale, and keep track of the ratio of the resizing.
page_resized = sp.misc.imresize(
page_im, template_page_size_ratio / scale
)
r = page_im.shape[1] / float(page_resized.shape[1])
assert (
page_resized.shape[0] >= template_height and
page_resized.shape[1] >= template_width
)
if use_canny:
page_resized = cv2.Canny(page_resized, 50, 200)
result = cv2.matchTemplate(
page_resized, template, cv2.TM_CCOEFF_NORMED
)
(_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
if found is None or maxVal > found[0]:
found = (maxVal, maxLoc, r)
best_scale = scale
logging.debug('Scale: %.03f, Score: %.03f' % (scale, maxVal))
assert found is not None
(score, maxLoc, r) = found
(startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
(endX, endY) = (
int((maxLoc[0] + template_width) * r),
int((maxLoc[1] + template_height) * r)
)
fig_box = datamodels.BoxClass(x1=startX, y1=startY, x2=endX, y2=endY)
return fig_box, score, best_scale
def find_fig_box(
fig_im: np.ndarray, page_im: np.ndarray, use_canny: bool=False
) -> Optional[datamodels.BoxClass]:
"""Find the position of the best match for fig_im on page_im through multi scale template matching."""
# If we get a score below this threshold, it's probably a bad detection
score_threshold = 0.8
scales = np.concatenate(
(
np.logspace(np.log10(.1), np.log10(.2), 5),
np.logspace(np.log10(.2), np.log10(.95), 40)
),
axis=0
) # type: List
res = find_template_in_image(fig_im, page_im, scales, use_canny)
if res is None:
return None
fig_box, score, best_scale = res
refined_scales = [
scale
for scale in np.linspace(.97 * best_scale, 1.03 * best_scale, 16)
if scale <= 1.0
]
(refined_fig_box, refined_score,
best_refined_scale) = find_template_in_image(
fig_im, page_im, refined_scales, use_canny
)
if refined_score < score_threshold:
return None
else:
return refined_fig_box
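# Illustrative sketch (not part of the original pipeline): how find_fig_box is
# typically driven. Both file paths are hypothetical, caller-supplied inputs.
def _example_locate_figure_on_page(
    fig_image_path: str, page_image_path: str
) -> Optional[datamodels.BoxClass]:
    """Locate a cropped figure graphic on a rendered page image.
    Returns the matched bounding box, or None when the multi-scale template
    match scores below the threshold in find_fig_box.
    """
    fig_im = image_util.read_tensor(fig_image_path)
    page_im = image_util.read_tensor(page_image_path)
    return find_fig_box(fig_im, page_im)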
def run_full_pipeline(
tarpath: str, skip_done: bool=True, save_intermediate: bool=False
) -> None:
foldername = str(os.path.basename(tarpath).split('.')[0])
result_path = LOCAL_FIGURE_JSON_DIR + get_bin(
tarpath
) + foldername + '.json'
if skip_done and file_util.exists(result_path):
return
d = LOCAL_INTERMEDIATE_DIR + get_bin(tarpath)
while True:
try:
file_util.extract_tarfile(tarpath, d, streaming=False)
# botocore.vendored.requests.packages.urllib3.exceptions.ReadTimeoutError can't be caught because it doesn't
# inherit from BaseException, so don't use streaming
break
except FileNotFoundError as e:
logging.exception('Failure reading %s, retrying' % tarpath)
except ReadTimeout as e:
logging.exception('Timeout reading %s, retrying' % tarpath)
pdfs = glob.glob(d + foldername + '/' + '*.pdf')
res = dict()
for pdf in pdfs:
sha1sum = file_util.compute_sha1(pdf)
with open(pdf + '.sha1', 'w') as f:
print(sha1sum, file=f)
paper_figures = match_figures(pdf)
if paper_figures is not None:
res.update(paper_figures)
if save_intermediate:
intermediate_path = PUBMED_INTERMEDIATE_DIR + get_bin(
tarpath
) + foldername + '/'
for file in glob.glob(d + '/' + foldername + '/' + '*'):
file_util.copy(file, intermediate_path + os.path.basename(file))
file_util.safe_makedirs(os.path.dirname(result_path))
file_util.write_json_atomic(
result_path,
config.JsonSerializable.serialize(res),
indent=2,
sort_keys=True
)
def run_on_all() -> None:
Image.MAX_IMAGE_PIXELS = int(1e8) # Don't render very large PDFs.
Image.warnings.simplefilter('error', Image.DecompressionBombWarning)
print(datetime.datetime.now())
print('Starting', flush=True)
topdirs = ['%.2x' % n for n in range(256)]
dirs_per_partition = 32
for partition in range(0, len(topdirs), dirs_per_partition):
curdirs = topdirs[partition:partition + dirs_per_partition]
print(datetime.datetime.now())
print('Processing dirs: %s' % str(curdirs))
with multiprocessing.Pool() as p:
nested_tarfiles = p.map(
get_input_tars, [topdir for topdir in curdirs]
)
tarfiles = [t for tarfiles in nested_tarfiles for t in tarfiles]
assert len(tarfiles) == len(set(tarfiles))
print(datetime.datetime.now())
print('Processing %d tarfiles in %s' % (len(tarfiles), str(curdirs)))
with multiprocessing.Pool(processes=round(1.5 * os.cpu_count())) as p:
p.map(run_full_pipeline, tarfiles)
print('All done')
if __name__ == "__main__":
logging.basicConfig(filename='logger_pubmed.log', level=logging.WARNING)
run_on_all()
| deepfigures-open-master | deepfigures/data_generation/pubmed_pipeline.py |
"""Functions for detecting and extracting figures."""
import os
from typing import List, Tuple, Iterable
import cv2 # Need to import OpenCV before tensorflow to avoid import error
from scipy.misc import imread, imsave
import numpy as np
from deepfigures.extraction import (
tensorbox_fourchannel,
pdffigures_wrapper,
figure_utils)
from deepfigures import settings
from deepfigures.extraction.datamodels import (
BoxClass,
Figure,
PdfDetectionResult,
CaptionOnly)
from deepfigures.utils import (
file_util,
settings_utils)
from deepfigures.utils import misc
PAD_FACTOR = 0.02
TENSORBOX_MODEL = settings.TENSORBOX_MODEL
# Holds a cached instantiation of TensorboxCaptionmaskDetector.
_detector = None
def get_detector() -> tensorbox_fourchannel.TensorboxCaptionmaskDetector:
"""
Get TensorboxCaptionmaskDetector instance, initializing it on the first call.
"""
global _detector
if not _detector:
_detector = tensorbox_fourchannel.TensorboxCaptionmaskDetector(
**TENSORBOX_MODEL)
return _detector
def extract_figures_json(
pdf_path,
page_image_paths,
pdffigures_output,
output_directory):
"""Extract information about figures to JSON and save to disk.
:param str pdf_path: path to the PDF from which to extract
figures.
:returns: path to the JSON file containing the detection results.
"""
page_images_array = np.array([
imread(page_image_path)
for page_image_path in page_image_paths
])
detector = get_detector()
figure_boxes_by_page = detector.get_detections(
page_images_array)
pdffigures_captions = pdffigures_wrapper.get_captions(
pdffigures_output=pdffigures_output,
target_dpi=settings.DEFAULT_INFERENCE_DPI)
figures_by_page = []
for page_num in range(len(page_image_paths)):
figure_boxes = figure_boxes_by_page[page_num]
pf_page_captions = [
caption
for caption in pdffigures_captions
if caption.page == page_num
]
caption_boxes = [
caption.caption_boundary
for caption in pf_page_captions
]
figure_indices, caption_indices = figure_utils.pair_boxes(
figure_boxes, caption_boxes)
page_image = page_images_array[page_num]
pad_pixels = PAD_FACTOR * min(page_image.shape[:2])
for (figure_idx, caption_idx) in zip(figure_indices, caption_indices):
figures_by_page.append(
Figure(
figure_boundary=figure_boxes[figure_idx].expand_box(
pad_pixels).crop_to_page(
page_image.shape).crop_whitespace_edges(
page_image),
caption_boundary=caption_boxes[caption_idx],
caption_text=pf_page_captions[caption_idx].caption_text,
name=pf_page_captions[caption_idx].name,
figure_type=pf_page_captions[caption_idx].figure_type,
page=page_num))
pdf_detection_result = PdfDetectionResult(
pdf=pdf_path,
figures=figures_by_page,
dpi=settings.DEFAULT_INFERENCE_DPI,
raw_detected_boxes=figure_boxes_by_page,
raw_pdffigures_output=pdffigures_output)
output_path = os.path.join(
output_directory,
os.path.basename(pdf_path)[:-4] + 'deepfigures-results.json')
file_util.write_json_atomic(
output_path,
pdf_detection_result.to_dict(),
indent=2,
sort_keys=True)
return output_path
| deepfigures-open-master | deepfigures/extraction/detection.py |
"""The model used to detect figures."""
import copy
import os
import tempfile
from typing import List, Tuple, Iterable
import numpy as np
import tensorflow as tf
from deepfigures import settings
from deepfigures.extraction.datamodels import (
BoxClass,
Figure,
PdfDetectionResult,
CaptionOnly)
from deepfigures.extraction import (
figure_utils,
pdffigures_wrapper,
renderers)
from deepfigures.extraction.pdffigures_wrapper import pdffigures_extractor
from deepfigures.utils import (
file_util,
image_util,
config,
traits,
settings_utils)
from tensorboxresnet import train
from tensorboxresnet.utils import train_utils
CAPTION_CHANNEL_BACKGROUND = 255
CAPTION_CHANNEL_MASK = 0
pdf_renderer = settings_utils.import_setting(
settings.DEEPFIGURES_PDF_RENDERER)()
class TensorboxCaptionmaskDetector(object):
"""Interface for using the neural network model to detect figures.
Instantiating this class creates a tensorflow session object as the
self.sess attribute. When done using the instance, remember to close
the session; however, do not open and close sessions every time you
extract a figure because the added overhead will very negatively
affect performance.
"""
def __init__(
self,
save_dir,
iteration,
batch_size=1 # Batch sizes greater than 1 will change results due to batch norm in inception_v1
):
self.save_dir = save_dir
self.iteration = iteration
self.hypes = self._get_hypes()
self.hypes['batch_size'] = batch_size
self.input_shape = [
self.hypes['image_height'], self.hypes['image_width'],
self.hypes['image_channels']
] # type: Tuple[float, float, float]
self.graph = tf.Graph()
with self.graph.as_default():
self.x_in = tf.placeholder(
tf.float32, name='x_in', shape=self.input_shape
)
assert (self.hypes['use_rezoom'])
pred_boxes, self.pred_logits, self.pred_confidences, self.pred_confs_deltas, pred_boxes_deltas = \
train.build_forward(self.hypes, tf.expand_dims(self.x_in, 0), 'test', reuse=None)
self.pred_boxes = pred_boxes + pred_boxes_deltas
grid_area = self.hypes['grid_height'] * self.hypes['grid_width']
pred_confidences = tf.reshape(
tf.nn.softmax(
tf.reshape(
self.pred_confs_deltas,
[grid_area * self.hypes['rnn_len'], 2]
)
), [grid_area, self.hypes['rnn_len'], 2]
)
assert (self.hypes['reregress'])
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
model_weights = self._get_weights()
saver.restore(self.sess, model_weights)
def _get_weights(self) -> str:
suffixes = ['.index', '.meta', '.data-00000-of-00001']
local_paths = [
file_util.cache_file(
self.save_dir + 'save.ckpt-%d' % self.iteration + suffix
) for suffix in suffixes
]
local_path = local_paths[0]
return local_path[:local_path.rfind(suffixes[0])]
def _get_hypes(self) -> dict:
return file_util.read_json(self.save_dir + 'hypes.json')
def detect_page(
self,
page_tensor: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
feed = {self.x_in: page_tensor}
(np_pred_boxes, np_pred_confidences) = self.sess.run(
[self.pred_boxes, self.pred_confidences],
feed_dict=feed)
return (np_pred_boxes, np_pred_confidences)
def get_detections(
self,
page_images: List[np.ndarray],
crop_whitespace: bool = True,
conf_threshold: float = .5) -> List[List[BoxClass]]:
page_datas = [
{
'page_image': page_image,
'orig_size': page_image.shape[:2],
'resized_page_image': image_util.imresize_multichannel(
page_image, self.input_shape),
}
for page_image in page_images
]
predictions = [
self.detect_page(page_data['resized_page_image'])
for page_data in page_datas
]
for (page_data, prediction) in zip(page_datas, predictions):
(np_pred_boxes, np_pred_confidences) = prediction
new_img, rects = train_utils.add_rectangles(
self.hypes,
page_data['resized_page_image'],
np_pred_confidences,
np_pred_boxes,
use_stitching=True,
min_conf=conf_threshold,
show_suppressed=False)
detected_boxes = [
BoxClass(x1=r.x1, y1=r.y1, x2=r.x2, y2=r.y2).resize_by_page(
self.input_shape, page_data['orig_size'])
for r in rects if r.score > conf_threshold
]
if crop_whitespace:
detected_boxes = [
box.crop_whitespace_edges(page_data['page_image'])
for box in detected_boxes
]
detected_boxes = list(filter(None, detected_boxes))
page_data['detected_boxes'] = detected_boxes
return [page_data['detected_boxes'] for page_data in page_datas]
def detect_figures(
pdf: str,
pdffigures_captions: List[CaptionOnly],
detector: TensorboxCaptionmaskDetector,
conf_threshold: float
) -> Tuple[List[Figure], List[List[BoxClass]]]:
page_image_files = pdf_renderer.render(pdf, dpi=settings.DEFAULT_INFERENCE_DPI)
page_tensors = []
for f in page_image_files:
page_im = image_util.read_tensor(f)
if detector.hypes['image_channels'] == 3:
page_tensors.append(page_im)
else:
im_with_mask = np.pad(
page_im,
pad_width=[(0, 0), (0, 0), (0, 1)],
mode='constant',
constant_values=CAPTION_CHANNEL_BACKGROUND
)
for caption in pdffigures_captions:
(x1, y1, x2, y2) = caption.caption_boundary.get_rounded()
im_with_mask[y1:y2, x1:x2, 3] = CAPTION_CHANNEL_MASK
page_tensors.append(im_with_mask)
figure_boxes_by_page = detector.get_detections(
page_tensors, conf_threshold=conf_threshold
)
figures_by_page = []
for page_num in range(len(page_image_files)):
# Page numbers are always 0 indexed
figure_boxes = figure_boxes_by_page[page_num]
pf_page_captions = [
cap for cap in pdffigures_captions if cap.page == page_num
]
caption_boxes = [cap.caption_boundary for cap in pf_page_captions]
(figure_indices, caption_indices) = figure_utils.pair_boxes(
figure_boxes, caption_boxes
)
figures_by_page.extend(
[
Figure(
figure_boundary=figure_boxes[figure_idx],
caption_boundary=caption_boxes[caption_idx],
caption_text=pf_page_captions[caption_idx].caption_text,
name=pf_page_captions[caption_idx].name,
figure_type=pf_page_captions[caption_idx].figure_type,
page=page_num,
)
for (figure_idx,
caption_idx) in zip(figure_indices, caption_indices)
]
)
return figures_by_page, figure_boxes_by_page
def detect_batch(
src_pdfs: List[str],
detector: TensorboxCaptionmaskDetector,
conf_threshold: float=.5) -> Iterable[PdfDetectionResult]:
for src_pdf in src_pdfs:
with tempfile.TemporaryDirectory(
prefix='deepfigures-tensorbox') as working_dir:
pdf_path = os.path.join(
working_dir,
src_pdf.replace('/', '_'))
file_util.copy(src_pdf, pdf_path)
pdffigures_output = pdffigures_extractor.extract(
pdf_path,
working_dir)
pdffigures_captions = pdffigures_wrapper.get_captions(
pdffigures_output)
figures_by_page, figure_boxes_by_page = detect_figures(
pdf_path,
pdffigures_captions,
detector,
conf_threshold=conf_threshold)
yield PdfDetectionResult(
pdf=src_pdf,
figures=figures_by_page,
dpi=settings.DEFAULT_INFERENCE_DPI,
raw_detected_boxes=figure_boxes_by_page,
raw_pdffigures_output=pdffigures_output)
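# Illustrative sketch (not part of the original module): running detection over a
# list of PDFs. Assumes the TensorBox weights configured in settings.TENSORBOX_MODEL
# are available locally; constructing the detector opens a TensorFlow session.
def _example_detect_batch(pdf_paths: List[str]) -> List[PdfDetectionResult]:
    detector = TensorboxCaptionmaskDetector(**settings.TENSORBOX_MODEL)
    return list(detect_batch(pdf_paths, detector, conf_threshold=.5))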
| deepfigures-open-master | deepfigures/extraction/tensorbox_fourchannel.py |
import collections
import os
import subprocess
from typing import Callable, Dict, Iterable, List, Tuple, TypeVar
from matplotlib import axes
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from deepfigures.utils import file_util
from deepfigures.extraction.renderers import PDFRenderer
from deepfigures.extraction.exceptions import LatexException
from deepfigures.extraction.datamodels import (BoxClass, Figure)
from deepfigures.settings import DEFAULT_INFERENCE_DPI
def call_pdflatex(
src_tex: str, src_dir: str, dest_dir: str, timeout: int=1200
) -> str:
"""
Call pdflatex on the tex source file src_tex, save its output to dest_dir, and return the path of the
resulting pdf.
"""
# Need to be in the same directory as the file to compile it
file_util.safe_makedirs(dest_dir)
# Shell-escape required due to https://www.scivision.co/pdflatex-error-epstopdf-output-filename-not-allowed-in-restricted-mode/
cmd = [
'pdflatex', '-interaction=nonstopmode', '-shell-escape',
'-output-directory=' + dest_dir, src_tex
]
# Run twice so that citations are built correctly
# Had some issues getting latexmk to work
try:
subprocess.run(
cmd, stdout=subprocess.PIPE, cwd=src_dir, timeout=timeout
)
res = subprocess.run(
cmd, stdout=subprocess.PIPE, cwd=src_dir, timeout=timeout
)
except subprocess.TimeoutExpired:
raise LatexException(
' '.join(cmd), -1, 'Timeout exception after %d' % timeout
)
if res.returncode != 0:
raise LatexException(' '.join(cmd), res.returncode, res.stdout)
paperid = os.path.splitext(os.path.basename(src_tex))[0]
return dest_dir + paperid + '.pdf'
def im_diff(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Returns a copy of image 'a' with all pixels where 'a' and 'b' are equal set to white."""
assert (np.array_equal(np.shape(a), np.shape(b)))
diff = a - b
mask = np.any(diff != 0, axis=2) # Check if any channel is different
rgb_mask = np.transpose(np.tile(mask, (3, 1, 1)), axes=[1, 2, 0])
diff_image = np.copy(a)
diff_image[np.logical_not(rgb_mask)] = 255
return diff_image
def pair_boxes(a_boxes: List[BoxClass],
b_boxes: List[BoxClass]) -> Tuple[List[int], List[int]]:
"""
Find the pairing between boxes with the lowest total distance, e.g. for matching figures to their captions.
This is an instance of the linear assignment problem and can be solved efficiently using the Hungarian algorithm.
Return the indices of matched boxes. If a_boxes and b_boxes are of unequal length, not all boxes will be paired.
Length of returned lists is min(len(a_boxes), len(b_boxes)).
"""
a_len = len(a_boxes)
b_len = len(b_boxes)
cost_matrix = np.zeros([a_len, b_len])
cost_matrix[:] = np.nan
for (a_idx, a_box) in enumerate(a_boxes):
for (b_idx, b_box) in enumerate(b_boxes):
cost_matrix[a_idx, b_idx] = a_box.distance_to_other(b_box)
    assert not np.isnan(cost_matrix).any()
(a_indices, b_indices) = sp.optimize.linear_sum_assignment(cost_matrix)
assert len(a_indices) == len(b_indices)
return a_indices, b_indices
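# Illustrative sketch (not part of the original module): pairing two figure boxes
# with two caption boxes. The coordinates are made up; the assignment matches each
# figure to the caption it is closest to (figure 0 -> caption 1, figure 1 -> caption 0).
def _example_pair_boxes() -> Tuple[List[int], List[int]]:
    figure_boxes = [
        BoxClass(x1=0.0, y1=0.0, x2=100.0, y2=100.0),
        BoxClass(x1=0.0, y1=300.0, x2=100.0, y2=400.0),
    ]
    caption_boxes = [
        BoxClass(x1=0.0, y1=410.0, x2=100.0, y2=430.0),
        BoxClass(x1=0.0, y1=110.0, x2=100.0, y2=130.0),
    ]
    return pair_boxes(figure_boxes, caption_boxes)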
def load_figures_json(filename: str) -> Dict[str, List[Figure]]:
d = file_util.read_json(filename)
res = {
page: [Figure.from_dict(dict_fig) for dict_fig in page_dicts]
for (page, page_dicts) in d.items()
}
return res
T = TypeVar('T')
S = TypeVar('S')
def group_by(l: Iterable[T],
key: Callable[[T], S]=lambda x: x) -> Dict[S, List[T]]:
"""Like itertools.groupby but doesn't require first sorting by the key function. Returns a dict."""
d = collections.defaultdict(list)
assert (callable(key))
for item in l:
d[key(item)].append(item)
return d
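# Illustrative sketch (not part of the original module): grouping integers by parity
# returns {1: [1, 3], 0: [2, 4]} (as a defaultdict).
def _example_group_by() -> Dict[int, List[int]]:
    return group_by([1, 2, 3, 4], key=lambda x: x % 2)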
def ordered_group_by(l: Iterable[T],
key: Callable[[T], S]=lambda x: x) -> Dict[S, List[T]]:
"""Keys are returned in order of first occurrence."""
d = collections.OrderedDict()
assert (callable(key))
for item in l:
k = key(item)
if k not in d:
d[k] = []
d[k].append(item)
return d
def group_figures_by_pagenum(figs: Iterable[Figure]
) -> Dict[int, List[Figure]]:
return group_by(figs, lambda x: x.page)
def make_axes(size: Tuple[float, float]=(20, 20)) -> axes.Subplot:
fig, ax = plt.subplots(1, figsize=size)
return ax
def pagename_to_pagenum(pagename: str) -> int:
"""Takes a page name with a 1-indexed number and returns the 0-indexed page number."""
return int(
PDFRenderer.IMAGE_FILENAME_RE.fullmatch(pagename).group('page_num')
) - 1
def pagenum_to_pagename(pdf: str, pagenum: int, dpi: int=DEFAULT_INFERENCE_DPI) -> str:
"""Takes a pdf and a page with 0-indexed number and returns the 1-indexed page image name."""
return os.path.join(
os.path.dirname(pdf),
(PDFRenderer.IMAGE_FILENAME_PREFIX_TEMPLATE +
'{page_num:04d}.png').format(
pdf_name=os.path.split(pdf)[-1], dpi=dpi, page_num=pagenum + 1
))
def pagename_to_pdf(pagename: str) -> str:
"""Takes a page image name and returns the name of the pdf it came from."""
return PDFRenderer.IMAGE_FILENAME_RE.fullmatch(pagename).group('pdf_name')
| deepfigures-open-master | deepfigures/extraction/figure_utils.py |
"""Data models for deepfigures.
This subpackage contains models for various data dealt with by the
deepfigures package.
"""
from typing import List, Optional, Tuple, Union
from matplotlib import patches
import numpy as np
from deepfigures.utils import traits
from deepfigures.utils.config import JsonSerializable
from deepfigures.settings import (DEFAULT_INFERENCE_DPI, BACKGROUND_COLOR)
# A box of the form (x1, y1, x2, y2) in pixel coordinates
IntBox = Tuple[int, int, int, int]
ImageSize = Union[Tuple[float, float], Tuple[float, float, float]
] # Page sizes may have a third color channel
class BoxClass(JsonSerializable):
x1 = traits.Float(allow_none=False)
y1 = traits.Float(allow_none=False)
x2 = traits.Float(allow_none=False)
y2 = traits.Float(allow_none=False)
@staticmethod
def from_tuple(t: Tuple[float, float, float, float]) -> 'BoxClass':
return BoxClass(x1=t[0], y1=t[1], x2=t[2], y2=t[3])
@staticmethod
def from_tensorbox_rect(r) -> 'BoxClass':
return BoxClass(
x1=r.cx - .5 * r.width,
x2=r.cx + .5 * r.width,
y1=r.cy - .5 * r.height,
y2=r.cy + .5 * r.height
)
@staticmethod
def from_xml(word, target_dpi=DEFAULT_INFERENCE_DPI) -> 'BoxClass':
        scale_factor = target_dpi / 72
return BoxClass(
x1=float(word.get('xMin')),
y1=float(word.get('yMin')),
x2=float(word.get('xMax')),
y2=float(word.get('yMax'))
).rescale(scale_factor)
def get_width(self) -> float:
return self.x2 - self.x1
def get_height(self) -> float:
return self.y2 - self.y1
def get_plot_box(
self, color: str='red', fill: bool=False, **kwargs
) -> patches.Rectangle:
"""Return a rectangle patch for plotting"""
return patches.Rectangle(
(self.x1, self.y1),
self.get_width(),
self.get_height(),
edgecolor=color,
fill=fill,
**kwargs
)
def get_area(self) -> float:
width = self.get_width()
height = self.get_height()
if width <= 0 or height <= 0:
return 0
else:
return width * height
def rescale(self, ratio: float) -> 'BoxClass':
return BoxClass(
x1=self.x1 * ratio,
y1=self.y1 * ratio,
x2=self.x2 * ratio,
y2=self.y2 * ratio
)
def resize_by_page(
self, cur_page_size: ImageSize, target_page_size: ImageSize
):
(orig_h, orig_w) = cur_page_size[:2]
(target_h, target_w) = target_page_size[:2]
height_scale = target_h / orig_h
width_scale = target_w / orig_w
return BoxClass(
x1=self.x1 * width_scale,
y1=self.y1 * height_scale,
x2=self.x2 * width_scale,
y2=self.y2 * height_scale
)
def get_rounded(self) -> IntBox:
return (
int(round(self.x1)), int(round(self.y1)), int(round(self.x2)),
int(round(self.y2))
)
def crop_image(self, image: np.ndarray) -> np.ndarray:
"""Return image cropped to the portion contained in box."""
(x1, y1, x2, y2) = self.get_rounded()
return image[y1:y2, x1:x2]
def crop_whitespace_edges(self, im: np.ndarray) -> Optional['BoxClass']:
(rounded_x1, rounded_y1, rounded_x2, rounded_y2) = self.get_rounded()
white_im = im.copy()
white_im[:, :rounded_x1] = BACKGROUND_COLOR
white_im[:, rounded_x2:] = BACKGROUND_COLOR
white_im[:rounded_y1, :] = BACKGROUND_COLOR
white_im[rounded_y2:, :] = BACKGROUND_COLOR
is_white = (white_im == BACKGROUND_COLOR).all(axis=2)
nonwhite_columns = np.where(is_white.all(axis=0) != 1)[0]
nonwhite_rows = np.where(is_white.all(axis=1) != 1)[0]
if len(nonwhite_columns) == 0 or len(nonwhite_rows) == 0:
return None
x1 = min(nonwhite_columns)
x2 = max(nonwhite_columns) + 1
y1 = min(nonwhite_rows)
y2 = max(nonwhite_rows) + 1
assert x1 >= rounded_x1, 'ERROR: x1:%d box[0]:%d' % (x1, rounded_x1)
assert y1 >= rounded_y1, 'ERROR: y1:%d box[1]:%d' % (y1, rounded_y1)
assert x2 <= rounded_x2, 'ERROR: x2:%d box[2]:%d' % (x2, rounded_x2)
assert y2 <= rounded_y2, 'ERROR: y2:%d box[3]:%d' % (y2, rounded_y2)
# np.where returns np.int64, cast back to python types
return BoxClass(x1=float(x1), y1=float(y1), x2=float(x2), y2=float(y2))
def distance_to_other(self, other: 'BoxClass') -> float:
x_distance = max([0, self.x1 - other.x2, other.x1 - self.x2])
y_distance = max([0, self.y1 - other.y2, other.y1 - self.y2])
return np.linalg.norm([x_distance, y_distance], 2)
def intersection(self, other: 'BoxClass') -> float:
intersection = BoxClass(
x1=max(self.x1, other.x1),
y1=max(self.y1, other.y1),
x2=min(self.x2, other.x2),
y2=min(self.y2, other.y2)
)
if intersection.x2 >= intersection.x1 and intersection.y2 >= intersection.y1:
return intersection.get_area()
else:
return 0
def iou(self, other: 'BoxClass') -> float:
intersection = self.intersection(other)
union = self.get_area() + other.get_area() - intersection
if union == 0:
return 0
else:
return intersection / union
def contains_box(self, other: 'BoxClass', overlap_threshold=.5) -> bool:
if other.get_area() == 0:
return False
else:
return self.intersection(other
) / other.get_area() >= overlap_threshold
def expand_box(self, amount: float) -> 'BoxClass':
return BoxClass(
x1=self.x1 - amount,
y1=self.y1 - amount,
x2=self.x2 + amount,
y2=self.y2 + amount,
)
def crop_to_page(self, page_shape: ImageSize) -> 'BoxClass':
page_height, page_width = page_shape[:2]
return BoxClass(
x1=max(self.x1, 0),
y1=max(self.y1, 0),
x2=min(self.x2, page_width),
y2=min(self.y2, page_height),
)
def enclosing_box(boxes: List[BoxClass]) -> BoxClass:
assert len(boxes) > 0
return BoxClass(
x1=min([box.x1 for box in boxes]),
y1=min([box.y1 for box in boxes]),
x2=max([box.x2 for box in boxes]),
y2=max([box.y2 for box in boxes])
)
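# Illustrative sketch (not part of the original module): basic BoxClass geometry.
# Two 2x2 boxes offset by one unit overlap in a 1x1 square, so the IOU is 1 / 7.
def _example_box_geometry() -> float:
    a = BoxClass(x1=0.0, y1=0.0, x2=2.0, y2=2.0)
    b = BoxClass(x1=1.0, y1=1.0, x2=3.0, y2=3.0)
    assert a.intersection(b) == 1.0
    return a.iou(b)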
class Figure(JsonSerializable):
figure_boundary = traits.Instance(BoxClass)
caption_boundary = traits.Instance(BoxClass)
caption_text = traits.Unicode()
name = traits.Unicode()
page = traits.Int()
figure_type = traits.Unicode()
dpi = traits.Int()
page_width = traits.Int()
page_height = traits.Int()
# URI to cropped image of the figure
uri = traits.Unicode(
default_value=None, allow_none=True)
def page_size(self) -> Tuple[int, int]:
return self.page_height, self.page_width
@staticmethod
def from_pf_ann(ann: dict, target_page_size: Tuple[int, int]) -> 'Figure':
"""Convert an annotation in the pdffigures format"""
cur_page_size = ann['page_height'], ann['page_width']
if cur_page_size[0] is None:
cur_page_size = [
d * DEFAULT_INFERENCE_DPI / ann['dpi'] for d in target_page_size
]
return Figure(
figure_boundary=BoxClass.from_tuple(ann['region_bb'])
.resize_by_page(cur_page_size, target_page_size),
caption_boundary=BoxClass.from_tuple(ann['caption_bb'])
.resize_by_page(cur_page_size, target_page_size),
caption_text=ann['caption'],
name=ann['name'],
page=ann['page'],
figure_type=ann['figure_type'],
page_width=target_page_size[
1
],
page_height=target_page_size[
0
]
)
@staticmethod
def from_pf_output(res: dict, target_dpi=DEFAULT_INFERENCE_DPI) -> 'Figure':
"""Convert a pdffigures output figure to a Figure object"""
scale_factor = target_dpi / 72
return Figure(
figure_boundary=BoxClass.from_dict(res['regionBoundary']
).rescale(scale_factor),
caption_boundary=BoxClass.from_dict(res['captionBoundary'])
.rescale(scale_factor),
caption_text=res['caption'],
name=res['name'],
page=res['page'],
figure_type=res['figType']
)
class CaptionOnly(JsonSerializable):
caption_boundary = traits.Instance(BoxClass)
caption_text = traits.Unicode()
name = traits.Unicode()
page = traits.Int()
figure_type = traits.Unicode()
dpi = traits.Int()
class PdfDetectionResult(JsonSerializable):
pdf = traits.Unicode()
figures = traits.List(traits.Instance(Figure))
dpi = traits.Int()
raw_detected_boxes = traits.List(
traits.List(traits.Instance(BoxClass)), allow_none=True
) # type: Optional[List[List[BoxClass]]]
raw_pdffigures_output = traits.Dict(
traits.Any(), allow_none=True
) # type: Optional[dict]
error = traits.Unicode(
default_value=None, allow_none=True
) # type: Optional[str]
class AuthorInfo(JsonSerializable):
bounding_box = traits.Instance(BoxClass)
name = traits.Unicode()
class TitleAndAuthorInfo(JsonSerializable):
pdf = traits.Unicode()
pdf_sha1 = traits.Unicode()
image_path = traits.Unicode()
title_bounding_box = traits.Instance(BoxClass)
title_text = traits.Unicode()
authors = traits.List(traits.Instance(AuthorInfo))
| deepfigures-open-master | deepfigures/extraction/datamodels.py |
"""Code for extracting figures from PDFs.
This subpackage implements the main functionality for deepfigures,
running deep models to detect figures as well as other code to render
the pages, etc.
"""
| deepfigures-open-master | deepfigures/extraction/__init__.py |
"""PDF Rendering engines for deepfigures."""
import glob
import json
import logging
import lxml
import os
import re
import shutil
import string
import subprocess
import typing
import bs4
from deepfigures.utils import file_util
from deepfigures.extraction import exceptions
from deepfigures import settings
logger = logging.getLogger(__name__)
# constant for use in the isprintable function
_PRINTABLES = set(string.printable)
def isprintable(s):
"""Return True if all characters in s are printable, else False.
Parameters
----------
:param str s: a string.
Returns
-------
:return: True if s has only printable characters, otherwise False.
"""
return set(s).issubset(_PRINTABLES)
class PDFRenderer(object):
"""Render PDFs and extract text from them.
PDFRenderers are used to generate data and as part of the figure
extraction pipeline for deepfigures. PDFRenderers must implement
methods to render PDFs as images to disk and to extract text with
bounding boxes that may later be parsed into classes from
deepfigures.datamodels.
Usage
-----
Subclass PDFRenderer and override:
- RENDERING_ENGINE_NAME: a class variable giving a unique name
that signals what backend was used to process the PDFs.
- _rasterize_pdf: a method (see _rasterize_pdf on this class for
details).
- _extract_text: a method (see _extract_text on this class for
details).
"""
RENDERING_ENGINE_NAME = None
IMAGE_FILENAME_RE = re.compile(
        r'(?P<pdf_name>.*)-dpi(?P<dpi>\d+)-page(?P<page_num>\d+)\.(?P<ext>png|jpg)'
)
IMAGE_FILENAME_PREFIX_TEMPLATE = \
'{pdf_name}-dpi{dpi:d}-page'
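    # For example, page 1 of paper.pdf rendered at 100 DPI is saved as
    # 'paper.pdf-dpi100-page0001.png' and parsed by IMAGE_FILENAME_RE above.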
def __init__(self):
"""Initialize the PDFRenderer."""
# check that subclasses override cls.RENDERING_ENGINE_NAME
assert self.RENDERING_ENGINE_NAME is not None, (
"class variable RENDERING_ENGINE_NAME must not be None"
)
def render(
self,
pdf_path: str,
output_dir: typing.Optional[str]=None,
dpi: int=settings.DEFAULT_INFERENCE_DPI,
ext: str='png',
max_pages: typing.Optional[int]=None,
use_cache: bool=True,
check_retcode: bool=False
) -> typing.List[str]:
"""Render pdf_path, save to disk and return the file paths.
Render the pdf at pdf_path, save the generated image to disk
in output dir using a file name matching the
PDFRenderer.IMAGE_FILENAME_RE pattern, and return a list of
paths to the generated files.
Parameters
----------
:param str pdf_path: path to the pdf that should be rendered.
:param Optional[str] output_dir: path to the directory in which
to save output. If None, then output is saved in the same
directory as the PDF.
:param int dpi: the dpi at which to render the PDF.
:param str ext: the extension or file type of the generated
image, should be either 'png' or 'jpg'.
:param Optional[int] max_pages: the maximum number of pages to
render from the PDF.
:param bool use_cache: whether or not to skip the rendering
operation if the pdf has already been rendered.
:param bool check_retcode: whether or not to check the return
code from the subprocess used to render the PDF.
Returns
-------
:return: the list of generated paths
"""
image_types = ['png', 'jpg']
if ext not in image_types:
raise ValueError(
"ext must be one of {}".format(', '.join(image_types)))
if output_dir is None:
output_dir = os.path.dirname(pdf_path)
if not os.path.isdir(output_dir):
raise IOError(
"Output directory ({}) does not exist.".format(output))
pdf_name = os.path.basename(pdf_path)
# engines_dir: directory used for storing the output from
# different rendering engines.
engines_dir = os.path.join(
output_dir, '{pdf_name}-images'.format(pdf_name=pdf_name))
# images_dir: directory used for storing images output by this
# specific PDFRenderer / engine.
images_dir = os.path.join(
engines_dir,
self.RENDERING_ENGINE_NAME,
'dpi{}'.format(dpi))
image_filename_prefix = self.IMAGE_FILENAME_PREFIX_TEMPLATE.format(
pdf_name=pdf_name, dpi=dpi)
image_output_path_prefix = os.path.join(
images_dir, image_filename_prefix)
success_file_path = os.path.join(images_dir, '_SUCCESS')
if not os.path.exists(success_file_path) or not use_cache:
if os.path.exists(images_dir):
logger.info("Overwriting {}.".format(images_dir))
shutil.rmtree(images_dir)
os.makedirs(images_dir)
self._rasterize_pdf(
pdf_path=pdf_path,
image_output_path_prefix=image_output_path_prefix,
dpi=dpi,
ext=ext,
max_pages=max_pages,
check_retcode=check_retcode)
# add a success file to verify that the operation completed
with open(success_file_path, 'w') as f_out:
f_out.write('')
generated_image_paths = glob.glob(
image_output_path_prefix + '*.' + ext)
return sort_by_page_num(generated_image_paths)
def _rasterize_pdf(
self,
pdf_path: str,
image_output_path_prefix: str,
dpi: int,
ext: str,
max_pages: typing.Optional[int],
check_retcode: bool,
    ) -> None:
"""Rasterize the PDF at PDF path and save it to disk.
Rasterize the PDF at PDF path and save it to disk using
image_output_path_prefix. Each page of the PDF should be
rasterized separately and saved to the path formed by
appending '{page_num:04d}.{ext}' to
image_output_path_prefix.
Parameters
----------
:param str pdf_path: path to the pdf that should be rendered.
:param str image_output_path_prefix: prefix for the output
path of each rendered pdf page.
:param int dpi: the dpi at which to render the pdf.
:param int max_pages: the maximum number of pages to render
from the pdf.
Returns
-------
:return: None
"""
raise NotImplementedError(
"Subclasses of PDFRenderer must implement _rasterize_pdf."
)
def extract_text(self, pdf_path: str, encoding: str='UTF-8'
) -> typing.Optional[bs4.BeautifulSoup]:
"""Extract info about a PDF as XML returning the parser for it.
Extract information about the text, bounding boxes and pages of
a PDF as XML, saving the XML to disk and returning a parser for
it.
Parameters
----------
:param str pdf_path: the path to the pdf from which to extract
information.
:param str encoding: the encoding to use for the XML.
Returns
-------
:return: A parser for the XML that is saved to disk.
"""
# generate the html files
self._extract_text(pdf_path=pdf_path, encoding=encoding)
html = pdf_path[:-4] + '.html'
        if not os.path.isfile(html):
            html_soup = None
        else:
            try:
                with open(html, 'r') as f:
                    html_soup = bs4.BeautifulSoup(f, 'xml')
            except UnicodeDecodeError:
                html_soup = None
if html_soup is None:
raise exceptions.PDFProcessingError(
"Error in extracting xml for {}.".format(pdf_path)
)
return html_soup
def _extract_text(self, pdf_path: str, encoding: str='UTF-8') -> None:
"""Extract text from a PDF and save to disk as xml.
Parameters
----------
:param str pdf_path: path to the PDF to be extracted.
:param str encoding: the encoding to use for saving the XML.
Returns
-------
:return: None
"""
raise NotImplementedError(
"Subclasses of PDFRenderer must implement _extract_text."
)
class GhostScriptRenderer(PDFRenderer):
"""Render PDFs using GhostScript."""
RENDERING_ENGINE_NAME = 'ghostscript'
def _rasterize_pdf(
self,
pdf_path: str,
image_output_path_prefix: str,
dpi: int,
ext: str,
max_pages: typing.Optional[int],
check_retcode: bool
    ) -> None:
"""Rasterize a PDF using GhostScript."""
# ghostscript requires a template string for the output path
image_output_path_template = image_output_path_prefix + '%04d.{ext}'.format(
ext=ext)
sdevice = 'png16m' if ext == 'png' else 'jpeg'
gs_args = [
'gs', '-dGraphicsAlphaBits=4', '-dTextAlphaBits=4', '-dNOPAUSE', '-dBATCH', '-dSAFER', '-dQUIET',
'-sDEVICE=' + sdevice,
'-r%d' % dpi, '-sOutputFile=' + image_output_path_template,
'-dBufferSpace=%d' % int(1e9),
'-dBandBufferSpace=%d' % int(5e8), '-sBandListStorage=memory',
'-c',
'%d setvmthreshold' % int(1e9), '-dNOGC',
'-dNumRenderingThreads=4', "-f", pdf_path
]
if max_pages is not None:
gs_args.insert(-2, '-dLastPage=%d' % max_pages)
subprocess.run(gs_args, check=check_retcode)
def _extract_text(self, pdf_path: str, encoding: str) -> None:
"""Extract text using pdftotext."""
subprocess.run(['pdftotext', '-bbox', '-enc', encoding, pdf_path])
def sort_by_page_num(file_paths: typing.List[str]) -> typing.List[str]:
"""Sort file_paths by the page number.
Sort file_paths by the page number where file_paths is a list
of rendered output image file paths generated by a
PDFRenderer.
Parameters
----------
:param List[str] file_paths: a list of file paths generated by
a PDFRenderer.
Returns
-------
file_paths sorted by page number.
"""
return sorted(
file_paths,
key=lambda file_path: int(PDFRenderer.IMAGE_FILENAME_RE.fullmatch(
os.path.split(file_path)[-1]).group('page_num')))
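# Illustrative sketch (not part of the original module): file names follow
# PDFRenderer.IMAGE_FILENAME_RE, so sorting is by the parsed page number and the
# page-0001 image is returned first.
def _example_sort_by_page_num() -> typing.List[str]:
    return sort_by_page_num([
        'paper.pdf-dpi100-page0002.png',
        'paper.pdf-dpi100-page0001.png',
    ])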
| deepfigures-open-master | deepfigures/extraction/renderers.py |
import os
import subprocess
from typing import List, Optional, Iterable
import tempfile
from deepfigures.utils import file_util
from deepfigures.extraction import datamodels
from deepfigures import settings
import logging
import shlex
import contextlib
import more_itertools
# DPI used by pdffigures for json outputs; this is hard-coded as 72
PDFFIGURES_DPI = 72
class PDFFiguresExtractor(object):
"""Extract figure and caption information from a PDF."""
def extract(self, pdf_path, output_dir, use_cache=True):
"""Return results from extracting a PDF with pdffigures2.
:param str pdf_path: path to the PDF to extract.
:param str output_dir: path to the output directory.
:param bool use_cache: whether or not to use cached data from
disk if it's available.
:returns: results from running pdffigures2 on the PDF.
"""
pdffigures_dir = os.path.join(output_dir, 'pdffigures/')
if not os.path.exists(pdffigures_dir):
os.makedirs(pdffigures_dir)
success_file_path = os.path.join(pdffigures_dir, '_SUCCESS')
pdffigures_jar_path = file_util.cache_file(
settings.PDFFIGURES_JAR_PATH)
if not os.path.exists(success_file_path) or not use_cache:
subprocess.check_call(
'java'
' -jar {pdffigures_jar_path}'
' --figure-data-prefix {pdffigures_dir}'
' --save-regionless-captions'
' {pdf_path}'.format(
pdffigures_jar_path=pdffigures_jar_path,
pdf_path=pdf_path,
pdffigures_dir=pdffigures_dir),
shell=True)
# add a success file to verify that the operation completed
with open(success_file_path, 'w') as f_out:
f_out.write('')
return file_util.read_json(
os.path.join(
pdffigures_dir,
os.path.basename(pdf_path)[:-4] + '.json'))
pdffigures_extractor = PDFFiguresExtractor()
def figure_to_caption(figure: dict) -> datamodels.CaptionOnly:
return datamodels.CaptionOnly(
caption_boundary=datamodels.BoxClass.
from_dict(figure['captionBoundary']),
page=figure['page'],
caption_text=figure['caption'],
name=figure['name'],
figure_type=figure['figType'],
)
def regionless_to_caption(regionless: dict) -> datamodels.CaptionOnly:
return datamodels.CaptionOnly(
caption_boundary=datamodels.BoxClass.from_dict(regionless['boundary']),
page=regionless['page'],
caption_text=regionless['text'],
name=regionless['name'],
figure_type=regionless['figType'],
)
def get_captions(
pdffigures_output: dict, target_dpi: int=settings.DEFAULT_INFERENCE_DPI
) -> List[datamodels.CaptionOnly]:
figures = pdffigures_output.get('figures', [])
regionless_captions = pdffigures_output.get('regionless-captions', [])
captions = (
[figure_to_caption(fig) for fig in figures] +
[regionless_to_caption(reg) for reg in regionless_captions]
)
for caption in captions:
caption.caption_boundary = caption.caption_boundary.rescale(
target_dpi / PDFFIGURES_DPI
)
return captions
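# Illustrative sketch (not part of the original module): captions for one PDF.
# Both paths are caller-supplied; the pdffigures2 jar configured in settings must
# be available. Caption boxes are rescaled from 72 DPI to the inference DPI.
def _example_extract_captions(
    pdf_path: str, output_dir: str
) -> List[datamodels.CaptionOnly]:
    raw_output = pdffigures_extractor.extract(pdf_path, output_dir)
    return get_captions(raw_output, target_dpi=settings.DEFAULT_INFERENCE_DPI)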
def get_figures(pdffigures_output: dict, target_dpi: int=settings.DEFAULT_INFERENCE_DPI
) -> List[datamodels.Figure]:
return [
datamodels.Figure.from_pf_output(figure, target_dpi)
for figure in pdffigures_output.get('figures', [])
]
def detect_batch(src_pdfs: List[str], target_dpi: int = settings.DEFAULT_INFERENCE_DPI, chunksize=1) -> \
Iterable[datamodels.PdfDetectionResult]:
for chunk in more_itertools.chunked(src_pdfs, chunksize):
results = [
pdffigures_extractor.extract(pdf_path, os.path.dirname(pdf_path))
for pdf_path in chunk
]
for (result, pdf) in zip(results, chunk):
figs = get_figures(result, target_dpi=target_dpi)
yield datamodels.PdfDetectionResult(
pdf=pdf,
figures=figs,
dpi=target_dpi,
raw_detected_boxes=None,
raw_pdffigures_output=None,
error=None
)
| deepfigures-open-master | deepfigures/extraction/pdffigures_wrapper.py |
"""The figure extraction pipeline for deepfigures.
The ``deepfigures.extraction.pipeline`` module defines the figure
extraction pipeline for deepfigures, including copying the PDF to a
location, rendering a PDF, finding captions for figures, detecting the
figures and cropping out the images.
"""
import hashlib
import os
import shutil
from PIL import Image
from deepfigures import settings
from deepfigures.extraction import (
detection,
pdffigures_wrapper,
renderers)
from deepfigures.utils import (
misc,
settings_utils)
class FigureExtraction(object):
"""A class representing the data extracted from a PDF.
The ``FigureExtraction`` class represents the data extracted from
a single PDF and is generated through the ``extract`` method of
the ``FigureExtractionPipeline`` class.
The data stored for a ``FigureExtraction`` instance sits on disk
in a directory. See `Attributes`_ for more information.
Attributes
----------
path_templates : Dict[str, str]
A class attribute providing the templates for the paths to the
extracted data on disk, relative to the data directory.
paths : Dict[str, str]
A dictionary mapping path names to their actual absolute paths
on disk.
parent_directory : str
The parent directory for the directory containing the extracted
data.
low_res_rendering_paths : Optional[str]
Paths to the low resolution renderings of the PDF (used for
predicting the bounding boxes).
hi_res_rendering_paths : Optional[str]
Paths to the high resolution renderings of the PDF (used for
cropping out the figure images).
pdffigures_output_path : Optional[str]
Path to the output of running pdffigures2 on the PDF.
deepfigures_json_path : Optional[str]
Path to the deepfigures JSON predicting the bounding boxes.
"""
"""Templates for paths to the data extracted from a PDF."""
path_templates = {
'BASE': '{pdf_hash}',
'PDF_PATH': '{base}/{pdf_name}',
'RENDERINGS_PATH': '{base}/page-renderings',
'PDFFIGURES_OUTPUT_PATH': '{base}/pdffigures-output',
'DEEPFIGURES_OUTPUT_PATH': '{base}/deepfigures-output',
'FIGURE_IMAGES_PATH': '{base}/figure-images'
}
def __init__(self, pdf_path, parent_directory):
"""Initialize a ``FigureExtraction`` instance.
Parameters
----------
pdf_path : str
The path to the PDF locally on disk.
parent_directory : str
The parent directory for the directory in which the figure
extraction results will be stored.
"""
# compute strings to fill in the path templates
pdf_hash = misc.hash_out_of_core(hashlib.sha1, pdf_path)
pdf_name = os.path.basename(pdf_path)
base = self.path_templates['BASE'].format(pdf_hash=pdf_hash)
template_vars = {
'pdf_hash': pdf_hash,
'pdf_name': pdf_name,
'base': base
}
# set the paths attribute
self.paths = {
k: os.path.join(parent_directory, v.format(**template_vars))
for k, v in self.path_templates.items()
}
self.parent_directory = parent_directory
self.low_res_rendering_paths = None
self.hi_res_rendering_paths = None
        self.pdffigures_output_path = None
self.deepfigures_json_path = None
class FigureExtractionPipeline(object):
"""A class for extracting figure data from PDFs.
The ``FigureExtractionPipeline`` class's main function is to
generate instances of ``FigureExtraction``. Each instance of a
``FigureExtraction`` represents the data extracted from processing a
single PDF.
See the ``FigureExtraction`` class's doc string for details on
the format that this extracted data takes.
"""
def extract(self, pdf_path, output_directory):
"""Return a ``FigureExtraction`` instance for ``pdf_path``.
Extract the figures and additional information from the PDF at
``pdf_path``, saving the results to disk in ``output_directory``
and returning the corresponding ``FigureExtraction`` instance.
Parameters
----------
pdf_path : str
The path to the PDF.
output_directory : str
The directory in which to save the results from extraction.
Returns
-------
FigureExtraction
A ``FigureExtraction`` instance for the PDF at ``pdf_path``.
"""
figure_extraction = FigureExtraction(
pdf_path=pdf_path,
parent_directory=output_directory)
# create the extraction results directory
os.makedirs(figure_extraction.paths['BASE'])
# copy the PDF into the extraction results directory
shutil.copy(pdf_path, figure_extraction.paths['PDF_PATH'])
pdf_renderer = settings_utils.import_setting(
settings.DEEPFIGURES_PDF_RENDERER)()
# render the PDF into low-res images
figure_extraction.low_res_rendering_paths = \
pdf_renderer.render(
pdf_path=figure_extraction.paths['PDF_PATH'],
output_dir=figure_extraction.paths['BASE'],
dpi=settings.DEFAULT_INFERENCE_DPI)
# render the PDF into hi-res images
figure_extraction.hi_res_rendering_paths = \
pdf_renderer.render(
pdf_path=figure_extraction.paths['PDF_PATH'],
output_dir=figure_extraction.paths['BASE'],
dpi=settings.DEFAULT_CROPPED_IMG_DPI)
# extract captions from PDF using pdffigures2
figure_extraction.pdffigures_output_path = \
pdffigures_wrapper.pdffigures_extractor.extract(
pdf_path=figure_extraction.paths['PDF_PATH'],
output_dir=figure_extraction.paths['BASE'])
# run deepfigures / neural networks on the PDF images
figure_extraction.deepfigures_json_path = \
detection.extract_figures_json(
pdf_path=figure_extraction.paths['PDF_PATH'],
page_image_paths=figure_extraction.low_res_rendering_paths,
pdffigures_output=figure_extraction.pdffigures_output_path,
output_directory=figure_extraction.paths['BASE'])
return figure_extraction
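# Illustrative sketch (not part of the original module): extracting one PDF end to
# end. Both arguments are caller-supplied paths; running this requires the full
# deepfigures runtime (PDF renderer, pdffigures2 jar, and TensorBox model weights).
def _example_extract(pdf_path: str, output_directory: str) -> FigureExtraction:
    extractor = FigureExtractionPipeline()
    return extractor.extract(pdf_path=pdf_path, output_directory=output_directory)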
| deepfigures-open-master | deepfigures/extraction/pipeline.py |
"""Exceptions for deepfigures."""
class LatexException(OSError):
"""An exception thrown for errors in rendering LaTeX."""
def __init__(self, cmd, code, stdout):
self.code = code
self.stdout = stdout
def __str__(self):
return (
'Return code: %s, stdout: %s' %
(repr(self.code), repr(self.stdout))
)
class PDFProcessingError(OSError):
"""An exception thrown for errors in processsing a PDF."""
| deepfigures-open-master | deepfigures/extraction/exceptions.py |
"""Tests for deepfigures.extraction.renderers"""
import contextlib
import logging
import os
import shutil
import time
import tempfile
import unittest
import numpy as np
from scipy.misc import imread
import pytest
from deepfigures.extraction import renderers
from deepfigures import settings
logger = logging.getLogger(__name__)
class IsPrintableTest(unittest.TestCase):
"""Test deepfigures.renderers.isprintable."""
def test_returns_correct_values(self):
"""Test isprintable returns the correct values."""
# test empty string
self.assertTrue(renderers.isprintable(''))
# test single printable characters
self.assertTrue(renderers.isprintable('a'))
self.assertTrue(renderers.isprintable('#'))
self.assertTrue(renderers.isprintable('!'))
self.assertTrue(renderers.isprintable('|'))
# test multicharacter strings
self.assertTrue(renderers.isprintable('aasfd'))
self.assertTrue(renderers.isprintable('a*&($#asdf!'))
# test nonprintable chars
self.assertFalse(renderers.isprintable('\x0e'))
self.assertFalse(renderers.isprintable('afj\x0eqq'))
class PDFRendererTest(unittest.TestCase):
"""Tests for deepfigures.renderers.PDFRenderer.
Since PDFRenderer is only meant as a base class for classes that
    actually use a rendering backend, most of its functionality is
    tested through tests of its subclasses (GhostScriptRenderer).
"""
def test_init(self):
"""Test init asserts RENDERING_ENGINE_NAME exists."""
with self.assertRaises(AssertionError):
renderers.PDFRenderer()
class PDFRendererSubclassTestMixin(object):
"""A mixin for making tests of PDFRenderer subclasses.
Usage
-----
To test a PDFRenderer, mix this class into a unittest.TestCase
subclass, provide PDF_RENDERER and MANUALLY_INSPECTED_RENDERINGS_DIR
class attributes on that subclass, and render / manually inspect
images of each page for
deepfigures/tests/data/pdfrenderer/paper.pdf.
PDF_RENDERER should be an instance of the pdf renderer class you
wish to test, and MANUALLY_INSPECTED_RENDERINGS_DIR should be a
directory containing renderings using PDF_RENDERER that have been
manually inspected and match the paths in
deepfigures/tests/data/pdfrenderer/pdfbox-renderings/.
Example
-------
class GhostScriptRendererTest(
PDFRendererSubclassTestMixin,
unittest.TestCase):
'''... documentation ...'''
PDF_RENDERER = GhostScriptRenderer()
MANUALLY_INSPECTED_RENDERINGS_DIR = os.path.join(
settings.TEST_DATA_DIR,
'pdfrenderer/ghostscript-renderings/')
def ghostscript_renderer_specific_test(self):
...
"""
PDF_RENDERER = None
MANUALLY_INSPECTED_RENDERINGS_DIR = None
def mixin_test_setup(self, ext):
"""Set up for unittests.
Parameters
----------
:param str ext: 'png' or 'jpg', the extension for the image type
for which you wish to setup a test.
"""
# implement this test setup as a method that is explicitly
# called rather than trying to use setUp from unittest.TestCase
# because we don't want to require users to call super in their
# setUp methods.
self.pdf_renderer = self.PDF_RENDERER
self.pdf_path = os.path.join(
settings.TEST_DATA_DIR,
'pdfrenderer/paper.pdf')
self.pdf_num_pages = 6
self.pdf_rendered_page_template = \
'paper.pdf-dpi100-page{page_num:04d}.{ext}'
# add random bits to the path so that separate instances
# of this test writing in parallel don't collide.
self.tmp_output_dir = tempfile.mkdtemp()
self.expected_dir_structure = [
os.path.join(
self.tmp_output_dir,
'paper.pdf-images',
self.pdf_renderer.RENDERING_ENGINE_NAME,
'dpi{}'.format(settings.DEFAULT_INFERENCE_DPI),
'_SUCCESS')
]
self.expected_dir_structure.extend([
os.path.join(
self.tmp_output_dir,
'paper.pdf-images/',
self.pdf_renderer.RENDERING_ENGINE_NAME,
'dpi{}'.format(settings.DEFAULT_INFERENCE_DPI),
self.pdf_rendered_page_template.format(
page_num=i, ext=ext))
for i in range(1, 7)
])
def mixin_test_teardown(self):
"""Tear down for unittests."""
shutil.rmtree(self.tmp_output_dir)
@contextlib.contextmanager
def setup_and_teardown(self, ext):
"""Setup and tear down resources for a test as a context manager.
Parameters
----------
:param str ext: either 'png' or 'jpg', the type of image for
which you want to write the test.
"""
try:
self.mixin_test_setup(ext=ext)
yield
finally:
self.mixin_test_teardown()
def _test_render_image_ext(self, ext):
"""Test the render method with a png extension."""
self.pdf_renderer.render(
pdf_path=self.pdf_path,
output_dir=self.tmp_output_dir,
ext=ext,
check_retcode=True)
# check that all and only the expected paths are in the output
# dir
output_dir_paths = [
os.path.join(dir_path, file_name)
for dir_path, dir_names, file_names in os.walk(
self.tmp_output_dir)
for file_name in file_names
]
self.assertEqual(
sorted(output_dir_paths),
sorted(self.expected_dir_structure))
# since it's a little complicated to debug bad renderings,
# provide a useful help message.
bad_render_help_msg = (
"\n"
"\n HINT!: Use the render method on {pdf_renderer} to generate"
"\n and inspect renderered output, and if the rendered"
"\n output looks good move it into "
"\n ``{renderings_dir}`` in place of"
"\n the existing files. If using docker you'll need to run"
"\n the following command after mounting ``/tmp`` as a volume:"
"\n"
"\n python3 -c 'from deepfigures.extraction import renderers;"
" renderers.{pdf_renderer}().render("
"\"tests/data/pdfrenderer/paper.pdf\","
" \"/tmp/\","
" ext=\"{ext}\","
" use_cache=False)'".format(
renderings_dir=self.MANUALLY_INSPECTED_RENDERINGS_DIR,
pdf_renderer=self.pdf_renderer.__class__.__name__,
ext=ext))
# compare the renderings against manually inspected renderings
for path in output_dir_paths:
if path[-3:] == ext:
test_image = imread(path)
reference_image = imread(
os.path.join(
self.MANUALLY_INSPECTED_RENDERINGS_DIR,
os.path.split(path)[-1]))
# test that the average absolute difference between the pixels is
# less than 5.
                # Cast to a signed type so pixel differences don't wrap around.
                self.assertLess(
                    np.sum(np.abs(
                        test_image.astype(np.int64) -
                        reference_image.astype(np.int64)
                    )) / test_image.size, 5.0,
                    msg=bad_render_help_msg)
def test_render_png(self):
"""Test the render method with a png extension."""
ext = 'png'
with self.setup_and_teardown(ext=ext):
self._test_render_image_ext(ext=ext)
def test_render_jpg(self):
"""Test the render method with a jpg extension."""
ext = 'jpg'
with self.setup_and_teardown(ext=ext):
self._test_render_image_ext(ext=ext)
def test_uses_cache(self):
"""Test that the rendered uses existing copies of the files."""
ext = 'png'
with self.setup_and_teardown(ext=ext):
self.pdf_renderer.render(
pdf_path=self.pdf_path,
output_dir=self.tmp_output_dir,
ext=ext,
check_retcode=True)
output_dir_paths = [
os.path.join(dir_path, file_name)
for dir_path, dir_names, file_names in os.walk(
self.tmp_output_dir)
for file_name in file_names
]
mtimes = {}
for path in output_dir_paths:
mtimes[path] = os.path.getmtime(path)
time.sleep(1)
# render the PDF again and verify the mtimes haven't changed
self.pdf_renderer.render(
pdf_path=self.pdf_path,
output_dir=self.tmp_output_dir,
ext=ext,
check_retcode=True)
output_dir_paths = [
os.path.join(dir_path, file_name)
for dir_path, dir_names, file_names in os.walk(
self.tmp_output_dir)
for file_name in file_names
]
for path in output_dir_paths:
self.assertEqual(mtimes[path], os.path.getmtime(path))
def test_busts_cache(self):
"""Test that passing use_cache False busts the cache."""
ext = 'png'
with self.setup_and_teardown(ext=ext):
self.pdf_renderer.render(
pdf_path=self.pdf_path,
output_dir=self.tmp_output_dir,
ext=ext,
check_retcode=True)
output_dir_paths = [
os.path.join(dir_path, file_name)
for dir_path, dir_names, file_names in os.walk(
self.tmp_output_dir)
for file_name in file_names
]
mtimes = {}
for path in output_dir_paths:
mtimes[path] = os.path.getmtime(path)
# render the PDF again and verify the mtimes have changed
time.sleep(1)
self.pdf_renderer.render(
pdf_path=self.pdf_path,
output_dir=self.tmp_output_dir,
ext=ext,
use_cache=False,
check_retcode=True)
output_dir_paths = [
os.path.join(dir_path, file_name)
for dir_path, dir_names, file_names in os.walk(
self.tmp_output_dir)
for file_name in file_names
]
for path in output_dir_paths:
if path[-3:] == 'png' or path[-8:] == '_SUCCESS':
self.assertNotEqual(
mtimes[path],
os.path.getmtime(path),
msg="{path} mtime did not change.".format(path=path))
class GhostScriptRendererTest(
PDFRendererSubclassTestMixin,
unittest.TestCase):
"""Test deepfigures.renderers.GhostScriptRenderer."""
PDF_RENDERER = renderers.GhostScriptRenderer()
MANUALLY_INSPECTED_RENDERINGS_DIR = os.path.join(
settings.TEST_DATA_DIR,
'pdfrenderer/ghostscript-renderings/')
| deepfigures-open-master | deepfigures/extraction/tests/test_renderers.py |
"""Test deepfigures.extraction.pipeline"""
import logging
import tempfile
import unittest
from deepfigures.extraction import pipeline
from deepfigures.utils import test
logger = logging.getLogger(__name__)
class TestFigureExtractionPipeline(unittest.TestCase):
"""Test ``FigureExtractionPipeline``."""
def test_extract(self):
"""Test extract against a known extraction."""
pdf_path = "/work/tests/data/endtoend/paper.pdf"
figure_extractor = pipeline.FigureExtractionPipeline()
with tempfile.TemporaryDirectory() as tmp_dir:
figure_extraction = figure_extractor.extract(
pdf_path, tmp_dir)
test.test_deepfigures_json(
self,
expected_json='/work/tests/data/endtoend/_work_tests_data_endtoend_paper.pdf-result.json',
actual_json=figure_extraction.deepfigures_json_path)
| deepfigures-open-master | deepfigures/extraction/tests/test_pipeline.py |
#!/usr/bin/env python
import sys
from setuptools import setup, Extension, find_packages
tf_include = '/'.join(sys.executable.split('/')[:-2]) + \
'/lib/python%d.%d/site-packages/tensorflow/include' % sys.version_info[:2]
import os
extra_defs = []
if os.uname().sysname == 'Darwin':
extra_defs.append('-D_GLIBCXX_USE_CXX11_ABI=0')
else:
os.environ['CC'] = 'g++'
os.environ['CXX'] = 'g++'
setup(
name='tensorboxresnet',
version='0.20',
packages=find_packages(),
setup_requires=['Cython'],
ext_modules=[
Extension(
'tensorboxresnet.utils.stitch_wrapper',
[
'./tensorboxresnet/utils/stitch_wrapper.pyx',
'./tensorboxresnet/utils/stitch_rects.cpp',
'./tensorboxresnet/utils/hungarian/hungarian.cpp'
],
language='c++',
extra_compile_args=[
'-std=c++11', '-Itensorbox/utils',
'-I%s' % tf_include
] + extra_defs,
)
]
)
| deepfigures-open-master | vendor/tensorboxresnet/setup.py |
deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/__init__.py |
|
#!/usr/bin/env python
import os
import json
import tensorflow.contrib.slim as slim
import datetime
import random
import time
import argparse
import threading
from scipy import misc
import tensorflow as tf
import numpy as np
from distutils.version import LooseVersion
if LooseVersion(tf.__version__) >= LooseVersion('1.0'):
rnn_cell = tf.contrib.rnn
else:
try:
from tensorflow.models.rnn import rnn_cell
except ImportError:
rnn_cell = tf.nn.rnn_cell
random.seed(0)
np.random.seed(0)
from tensorboxresnet.utils import train_utils, googlenet_load, tf_concat
def build_overfeat_inner(H, lstm_input):
'''
build simple overfeat decoder
'''
if H['rnn_len'] > 1:
raise ValueError('rnn_len > 1 only supported with use_lstm == True')
outputs = []
initializer = tf.random_uniform_initializer(-0.1, 0.1)
with tf.variable_scope('Overfeat', initializer=initializer):
w = tf.get_variable(
'ip', shape=[H['later_feat_channels'], H['lstm_size']]
)
outputs.append(tf.matmul(lstm_input, w))
return outputs
def deconv(x, output_shape, channels):
k_h = 2
k_w = 2
w = tf.get_variable(
'w_deconv',
initializer=tf.random_normal_initializer(stddev=0.01),
shape=[k_h, k_w, channels[1], channels[0]]
)
y = tf.nn.conv2d_transpose(
x, w, output_shape, strides=[1, k_h, k_w, 1], padding='VALID'
)
return y
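# Added note: with a 2 x 2 kernel, stride 2 and 'VALID' padding,
# conv2d_transpose upsamples its input to the explicitly supplied output_shape;
# build_forward() below uses this to bring the pooled coarse feature map back
# up to [batch_size, grid_height, grid_width, 256].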
def rezoom(
H, pred_boxes, early_feat, early_feat_channels, w_offsets, h_offsets
):
'''
Rezoom into a feature map at multiple interpolation points in a grid.
If the predicted object center is at X, len(w_offsets) == 3, and len(h_offsets) == 5,
the rezoom grid will look as follows:
[o o o]
[o o o]
[o X o]
[o o o]
[o o o]
Where each letter indexes into the feature map with bilinear interpolation
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
indices = []
for w_offset in w_offsets:
for h_offset in h_offsets:
indices.append(
train_utils.bilinear_select(
H, pred_boxes, early_feat, early_feat_channels, w_offset,
h_offset
)
)
interp_indices = tf_concat(0, indices)
rezoom_features = train_utils.interp(
early_feat, interp_indices, early_feat_channels
)
rezoom_features_r = tf.reshape(
rezoom_features, [
len(w_offsets) * len(h_offsets), outer_size, H['rnn_len'],
early_feat_channels
]
)
rezoom_features_t = tf.transpose(rezoom_features_r, [1, 2, 0, 3])
return tf.reshape(
rezoom_features_t, [
outer_size, H['rnn_len'],
len(w_offsets) * len(h_offsets) * early_feat_channels
]
)
def build_forward(H, x, phase, reuse):
'''
Construct the forward model
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
input_mean = 117.
x -= input_mean
cnn, early_feat = googlenet_load.model(x, H, reuse)
early_feat_channels = H['early_feat_channels']
early_feat = early_feat[:, :, :, :early_feat_channels]
if H['deconv']:
size = 3
stride = 2
pool_size = 5
with tf.variable_scope("deconv", reuse=reuse):
w = tf.get_variable(
'conv_pool_w',
shape=[
size, size, H['later_feat_channels'],
H['later_feat_channels']
],
initializer=tf.random_normal_initializer(stddev=0.01)
)
cnn_s = tf.nn.conv2d(
cnn, w, strides=[1, stride, stride, 1], padding='SAME'
)
cnn_s_pool = tf.nn.avg_pool(
cnn_s[:, :, :, :256],
ksize=[1, pool_size, pool_size, 1],
strides=[1, 1, 1, 1],
padding='SAME'
)
cnn_s_with_pool = tf_concat(3, [cnn_s_pool, cnn_s[:, :, :, 256:]])
cnn_deconv = deconv(
cnn_s_with_pool,
output_shape=[
H['batch_size'], H['grid_height'], H['grid_width'], 256
],
channels=[H['later_feat_channels'], 256]
)
cnn = tf_concat(3, (cnn_deconv, cnn[:, :, :, 256:]))
elif H['avg_pool_size'] > 1:
pool_size = H['avg_pool_size']
cnn1 = cnn[:, :, :, :700]
cnn2 = cnn[:, :, :, 700:]
cnn2 = tf.nn.avg_pool(
cnn2,
ksize=[1, pool_size, pool_size, 1],
strides=[1, 1, 1, 1],
padding='SAME'
)
cnn = tf_concat(3, [cnn1, cnn2])
cnn = tf.reshape(
cnn, [
H['batch_size'] * H['grid_width'] * H['grid_height'],
H['later_feat_channels']
]
)
initializer = tf.random_uniform_initializer(-0.1, 0.1)
with tf.variable_scope('decoder', reuse=reuse, initializer=initializer):
scale_down = 0.01
lstm_input = tf.reshape(
cnn * scale_down,
(H['batch_size'] * grid_size, H['later_feat_channels'])
)
if H['use_lstm']:
lstm_outputs = build_lstm_inner(H, lstm_input)
else:
lstm_outputs = build_overfeat_inner(H, lstm_input)
pred_boxes = []
pred_logits = []
for k in range(H['rnn_len']):
output = lstm_outputs[k]
if phase == 'train':
output = tf.nn.dropout(output, 0.5)
box_weights = tf.get_variable(
'box_ip%d' % k, shape=(H['lstm_size'], 4)
)
conf_weights = tf.get_variable(
'conf_ip%d' % k, shape=(H['lstm_size'], H['num_classes'])
)
pred_boxes_step = tf.reshape(
tf.matmul(output, box_weights) * 50, [outer_size, 1, 4]
)
pred_boxes.append(pred_boxes_step)
pred_logits.append(
tf.reshape(
tf.matmul(output, conf_weights),
[outer_size, 1, H['num_classes']]
)
)
pred_boxes = tf_concat(1, pred_boxes)
pred_logits = tf_concat(1, pred_logits)
pred_logits_squash = tf.reshape(
pred_logits, [outer_size * H['rnn_len'], H['num_classes']]
)
pred_confidences_squash = tf.nn.softmax(pred_logits_squash)
pred_confidences = tf.reshape(
pred_confidences_squash,
[outer_size, H['rnn_len'], H['num_classes']]
)
if H['use_rezoom']:
pred_confs_deltas = []
pred_boxes_deltas = []
w_offsets = H['rezoom_w_coords']
h_offsets = H['rezoom_h_coords']
num_offsets = len(w_offsets) * len(h_offsets)
rezoom_features = rezoom(
H, pred_boxes, early_feat, early_feat_channels, w_offsets,
h_offsets
)
if phase == 'train':
rezoom_features = tf.nn.dropout(rezoom_features, 0.5)
for k in range(H['rnn_len']):
delta_features = tf_concat(
1, [lstm_outputs[k], rezoom_features[:, k, :] / 1000.]
)
dim = 128
delta_weights1 = tf.get_variable(
'delta_ip1%d' % k,
shape=[
H['lstm_size'] + early_feat_channels * num_offsets, dim
]
)
ip1 = tf.nn.relu(tf.matmul(delta_features, delta_weights1))
if phase == 'train':
ip1 = tf.nn.dropout(ip1, 0.5)
delta_confs_weights = tf.get_variable(
'delta_ip2%d' % k, shape=[dim, H['num_classes']]
)
if H['reregress']:
delta_boxes_weights = tf.get_variable(
'delta_ip_boxes%d' % k, shape=[dim, 4]
)
pred_boxes_deltas.append(
tf.reshape(
tf.matmul(ip1, delta_boxes_weights) * 5,
[outer_size, 1, 4]
)
)
scale = H.get('rezoom_conf_scale', 50)
pred_confs_deltas.append(
tf.reshape(
tf.matmul(ip1, delta_confs_weights) * scale,
[outer_size, 1, H['num_classes']]
)
)
pred_confs_deltas = tf_concat(1, pred_confs_deltas)
if H['reregress']:
pred_boxes_deltas = tf_concat(1, pred_boxes_deltas)
return pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas
return pred_boxes, pred_logits, pred_confidences
def build_forward_backward(H, x, phase, boxes, flags):
'''
Call build_forward() and then setup the loss functions
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
reuse = {'train': None, 'test': True}[phase]
if H['use_rezoom']:
(
pred_boxes, pred_logits, pred_confidences, pred_confs_deltas,
pred_boxes_deltas
) = build_forward(H, x, phase, reuse)
else:
pred_boxes, pred_logits, pred_confidences = build_forward(
H, x, phase, reuse
)
with tf.variable_scope(
'decoder', reuse={'train': None,
'test': True}[phase]
):
outer_boxes = tf.reshape(boxes, [outer_size, H['rnn_len'], 4])
outer_flags = tf.cast(
tf.reshape(flags, [outer_size, H['rnn_len']]), 'int32'
)
if H['use_lstm']:
hungarian_module = tf.load_op_library(
'utils/hungarian/hungarian.so'
)
assignments, classes, perm_truth, pred_mask = (
hungarian_module.hungarian(
pred_boxes, outer_boxes, outer_flags,
H['solver']['hungarian_iou']
)
)
else:
classes = tf.reshape(flags, (outer_size, 1))
perm_truth = tf.reshape(outer_boxes, (outer_size, 1, 4))
pred_mask = tf.reshape(
tf.cast(tf.greater(classes, 0), 'float32'), (outer_size, 1, 1)
)
true_classes = tf.reshape(
tf.cast(tf.greater(classes, 0), 'int64'),
[outer_size * H['rnn_len']]
)
pred_logit_r = tf.reshape(
pred_logits, [outer_size * H['rnn_len'], H['num_classes']]
)
confidences_loss = (
tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pred_logit_r, labels=true_classes
)
)
) / outer_size * H['solver']['head_weights'][0]
residual = tf.reshape(
perm_truth - pred_boxes * pred_mask, [outer_size, H['rnn_len'], 4]
)
boxes_loss = tf.reduce_sum(
tf.abs(residual)
) / outer_size * H['solver']['head_weights'][1]
if H['use_rezoom']:
if H['rezoom_change_loss'] == 'center':
error = (perm_truth[:, :, 0:2] - pred_boxes[:, :, 0:2]
) / tf.maximum(perm_truth[:, :, 2:4], 1.)
square_error = tf.reduce_sum(tf.square(error), 2)
inside = tf.reshape(
tf.to_int64(
tf.logical_and(
tf.less(square_error, 0.2**2),
tf.greater(classes, 0)
)
), [-1]
)
elif H['rezoom_change_loss'] == 'iou':
iou = train_utils.iou(
train_utils.to_x1y1x2y2(tf.reshape(pred_boxes, [-1, 4])),
train_utils.to_x1y1x2y2(tf.reshape(perm_truth, [-1, 4]))
)
inside = tf.reshape(tf.to_int64(tf.greater(iou, 0.5)), [-1])
else:
assert H['rezoom_change_loss'] == False
inside = tf.reshape(
tf.to_int64((tf.greater(classes, 0))), [-1]
)
new_confs = tf.reshape(
pred_confs_deltas,
[outer_size * H['rnn_len'], H['num_classes']]
)
delta_confs_loss = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=new_confs, labels=inside
)
) / outer_size * H['solver']['head_weights'][0] * 0.1
pred_logits_squash = tf.reshape(
new_confs, [outer_size * H['rnn_len'], H['num_classes']]
)
pred_confidences_squash = tf.nn.softmax(pred_logits_squash)
pred_confidences = tf.reshape(
pred_confidences_squash,
[outer_size, H['rnn_len'], H['num_classes']]
)
loss = confidences_loss + boxes_loss + delta_confs_loss
if H['reregress']:
delta_residual = tf.reshape(
perm_truth - (pred_boxes + pred_boxes_deltas) * pred_mask,
[outer_size, H['rnn_len'], 4]
)
delta_boxes_loss = (
tf.reduce_sum(
tf.minimum(tf.square(delta_residual), 10.**2)
) / outer_size * H['solver']['head_weights'][1] * 0.03
)
boxes_loss = delta_boxes_loss
tf.summary.histogram(
phase + '/delta_hist0_x', pred_boxes_deltas[:, 0, 0]
)
tf.summary.histogram(
phase + '/delta_hist0_y', pred_boxes_deltas[:, 0, 1]
)
tf.summary.histogram(
phase + '/delta_hist0_w', pred_boxes_deltas[:, 0, 2]
)
tf.summary.histogram(
phase + '/delta_hist0_h', pred_boxes_deltas[:, 0, 3]
)
loss += delta_boxes_loss
else:
loss = confidences_loss + boxes_loss
return pred_boxes, pred_confidences, loss, confidences_loss, boxes_loss
def build(H, q):
'''
Build full model for training, including forward / backward passes,
optimizers, and summary statistics.
'''
arch = H
solver = H["solver"]
os.environ['CUDA_VISIBLE_DEVICES'] = str(solver.get('gpu', ''))
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options)
learning_rate = tf.placeholder(tf.float32)
if solver['opt'] == 'RMS':
opt = tf.train.RMSPropOptimizer(
learning_rate=learning_rate, decay=0.9, epsilon=solver['epsilon']
)
elif solver['opt'] == 'Adam':
opt = tf.train.AdamOptimizer(
learning_rate=learning_rate, epsilon=solver['epsilon']
)
elif solver['opt'] == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
else:
raise ValueError('Unrecognized opt type')
loss, accuracy, confidences_loss, boxes_loss = {}, {}, {}, {}
for phase in ['train', 'test']:
# generate predictions and losses from forward pass
x, confidences, boxes = q[phase].dequeue_many(arch['batch_size'])
flags = tf.argmax(confidences, 3)
grid_size = H['grid_width'] * H['grid_height']
(
pred_boxes, pred_confidences, loss[phase], confidences_loss[phase],
boxes_loss[phase]
) = build_forward_backward(H, x, phase, boxes, flags)
pred_confidences_r = tf.reshape(
pred_confidences,
[H['batch_size'], grid_size, H['rnn_len'], arch['num_classes']]
)
pred_boxes_r = tf.reshape(
pred_boxes, [H['batch_size'], grid_size, H['rnn_len'], 4]
)
# Set up summary operations for tensorboard
a = tf.equal(
tf.argmax(confidences[:, :, 0, :], 2),
tf.argmax(pred_confidences_r[:, :, 0, :], 2)
)
accuracy[phase] = tf.reduce_mean(
tf.cast(a, 'float32'), name=phase + '/accuracy'
)
if phase == 'train':
global_step = tf.Variable(0, trainable=False)
tvars = tf.trainable_variables()
if H['clip_norm'] <= 0:
grads = tf.gradients(loss['train'], tvars)
else:
grads, norm = tf.clip_by_global_norm(
tf.gradients(loss['train'], tvars), H['clip_norm']
)
train_op = opt.apply_gradients(
zip(grads, tvars), global_step=global_step
)
elif phase == 'test':
moving_avg = tf.train.ExponentialMovingAverage(0.95)
smooth_op = moving_avg.apply(
[
accuracy['train'],
accuracy['test'],
confidences_loss['train'],
boxes_loss['train'],
confidences_loss['test'],
boxes_loss['test'],
]
)
for p in ['train', 'test']:
tf.summary.scalar('%s/accuracy' % p, accuracy[p])
tf.summary.scalar(
'%s/accuracy/smooth' % p, moving_avg.average(accuracy[p])
)
tf.summary.scalar(
"%s/confidences_loss" % p, confidences_loss[p]
)
tf.summary.scalar(
"%s/confidences_loss/smooth" % p,
moving_avg.average(confidences_loss[p])
)
tf.summary.scalar("%s/regression_loss" % p, boxes_loss[p])
tf.summary.scalar(
"%s/regression_loss/smooth" % p,
moving_avg.average(boxes_loss[p])
)
if phase == 'test':
test_image = x
# show ground truth to verify labels are correct
test_true_confidences = confidences[0, :, :, :]
test_true_boxes = boxes[0, :, :, :]
# show predictions to visualize training progress
test_pred_confidences = pred_confidences_r[0, :, :, :]
test_pred_boxes = pred_boxes_r[0, :, :, :]
def log_image(
np_img, np_confidences, np_boxes, np_global_step, pred_or_true
):
if np_img.shape[2] == 4:
np_img = np_img[:, :, [0, 1, 3]]
merged = train_utils.add_rectangles(
H,
np_img,
np_confidences,
np_boxes,
use_stitching=True,
rnn_len=H['rnn_len']
)[0]
num_images = 5000
img_path = os.path.join(
H['save_dir'], '%s_%s.jpg' % (
(np_global_step / H['logging']['display_iter']
) % num_images, pred_or_true
)
)
misc.imsave(img_path, merged)
return merged
pred_log_img = tf.py_func(
log_image, [
test_image, test_pred_confidences, test_pred_boxes,
global_step, 'pred'
], [tf.float32]
)
true_log_img = tf.py_func(
log_image, [
test_image, test_true_confidences, test_true_boxes,
global_step, 'true'
], [tf.float32]
)
tf.summary.image(
phase + '/pred_boxes', pred_log_img, max_outputs=10
)
tf.summary.image(
phase + '/true_boxes', true_log_img, max_outputs=10
)
summary_op = tf.summary.merge_all()
return (
config, loss, accuracy, summary_op, train_op, smooth_op, global_step,
learning_rate
)
def train(H, test_images):
'''
Setup computation graph, run 2 prefetch data threads, and then run the main loop
'''
if not os.path.exists(H['save_dir']): os.makedirs(H['save_dir'])
ckpt_file = H['save_dir'] + '/save.ckpt'
with open(H['save_dir'] + '/hypes.json', 'w') as f:
json.dump(H, f, indent=4)
x_in = tf.placeholder(tf.float32)
confs_in = tf.placeholder(tf.float32)
boxes_in = tf.placeholder(tf.float32)
q = {}
enqueue_op = {}
for phase in ['train', 'test']:
dtypes = [tf.float32, tf.float32, tf.float32]
grid_size = H['grid_width'] * H['grid_height']
channels = H.get('image_channels', 3)
print('Image channels: %d' % channels)
shapes = (
[H['image_height'], H['image_width'],
channels], [grid_size, H['rnn_len'], H['num_classes']],
[grid_size, H['rnn_len'], 4],
)
q[phase] = tf.FIFOQueue(capacity=30, dtypes=dtypes, shapes=shapes)
enqueue_op[phase] = q[phase].enqueue((x_in, confs_in, boxes_in))
def make_feed(d):
return {
x_in: d['image'],
confs_in: d['confs'],
boxes_in: d['boxes'],
learning_rate: H['solver']['learning_rate']
}
def thread_loop(sess, enqueue_op, phase, gen):
for d in gen:
sess.run(enqueue_op[phase], feed_dict=make_feed(d))
(
config, loss, accuracy, summary_op, train_op, smooth_op, global_step,
learning_rate
) = build(H, q)
saver = tf.train.Saver(max_to_keep=None)
writer = tf.summary.FileWriter(logdir=H['save_dir'], flush_secs=10)
with tf.Session(config=config) as sess:
tf.train.start_queue_runners(sess=sess)
for phase in ['train', 'test']:
# enqueue once manually to avoid thread start delay
gen = train_utils.load_data_gen(
H, phase, jitter=H['solver']['use_jitter']
)
d = next(gen)
sess.run(enqueue_op[phase], feed_dict=make_feed(d))
t = threading.Thread(
target=thread_loop, args=(sess, enqueue_op, phase, gen)
)
t.daemon = True
t.start()
tf.set_random_seed(H['solver']['rnd_seed'])
sess.run(tf.global_variables_initializer())
writer.add_graph(sess.graph)
weights_str = H['solver']['weights']
if len(weights_str) > 0:
print('Restoring from: %s' % weights_str)
saver.restore(sess, weights_str)
elif H['slim_ckpt'] == '':
sess.run(
tf.variables_initializer(
[
x for x in tf.global_variables()
if x.name.startswith(H['slim_basename']) and
H['solver']['opt'] not in x.name
]
)
)
else:
init_fn = slim.assign_from_checkpoint_fn(
'%s/data/%s' %
(os.path.dirname(os.path.realpath(__file__)),
H['slim_ckpt']), [
x for x in tf.global_variables()
if x.name.startswith(H['slim_basename']) and
H['solver']['opt'] not in x.name
]
)
init_fn(sess)
# train model for N iterations
start = time.time()
max_iter = H['solver'].get('max_iter', 10000000)
for i in range(max_iter):
display_iter = H['logging']['display_iter']
adjusted_lr = (
H['solver']['learning_rate'] * 0.5**
max(0, (i / H['solver']['learning_rate_step']) - 2)
)
lr_feed = {learning_rate: adjusted_lr}
if i % display_iter != 0:
# train network
batch_loss_train, _ = sess.run(
[loss['train'], train_op], feed_dict=lr_feed
)
else:
# test network every N iterations; log additional info
if i > 0:
dt = (time.time() - start
) / (H['batch_size'] * display_iter)
start = time.time()
(train_loss, test_accuracy, summary_str, _, _) = sess.run(
[
loss['train'],
accuracy['test'],
summary_op,
train_op,
smooth_op,
],
feed_dict=lr_feed
)
writer.add_summary(summary_str, global_step=global_step.eval())
print_str = ', '.join(
[
'Step: %d',
'lr: %f',
'Train Loss: %.2f',
'Softmax Test Accuracy: %.1f%%',
'Time/image (ms): %.1f',
]
)
print(
print_str % (
i, adjusted_lr, train_loss, test_accuracy * 100,
dt * 1000 if i > 0 else 0
)
)
if global_step.eval() % H['logging'][
'save_iter'
] == 0 or global_step.eval() == max_iter - 1:
saver.save(sess, ckpt_file, global_step=global_step)
def main():
'''
Parse command line arguments and return the hyperparameter dictionary H.
H first loads the --hypes hypes.json file and is further updated with
additional arguments as needed.
'''
parser = argparse.ArgumentParser()
parser.add_argument('--weights', default=None, type=str)
parser.add_argument('--gpu', default=None, type=int)
parser.add_argument('--hypes', required=True, type=str)
parser.add_argument('--max_iter', required=False, type=int, default=None)
parser.add_argument('--logdir', default='output', type=str)
args = parser.parse_args()
with open(args.hypes, 'r') as f:
H = json.load(f)
if args.gpu is not None:
H['solver']['gpu'] = args.gpu
if args.max_iter is not None:
H['solver']['max_iter'] = args.max_iter
if len(H.get('exp_name', '')) == 0:
H['exp_name'] = args.hypes.split('/')[-1].replace('.json', '')
H['save_dir'] = args.logdir + '/%s_%s' % (
H['exp_name'], datetime.datetime.now().strftime('%Y_%m_%d_%H.%M')
)
if args.weights is not None:
H['solver']['weights'] = args.weights
train(H, test_images=[])
if __name__ == '__main__':
main()
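# Example invocation (a sketch; the hypes path and GPU index are assumptions,
# not files shipped at these exact locations):
#   python train.py --hypes hypes/overfeat_rezoom.json --gpu 0 \
#       --logdir output --max_iter 100000
# Checkpoints and TensorBoard summaries are written under
# <logdir>/<exp_name>_<timestamp>/ as constructed in main().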
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/train.py |
from tensorboxresnet.utils.slim_nets import inception_v1 as inception
from tensorboxresnet.utils.slim_nets import resnet_v1 as resnet
import tensorflow.contrib.slim as slim
def model(x, H, reuse, is_training=True):
if H['slim_basename'] == 'resnet_v1_101':
with slim.arg_scope(resnet.resnet_arg_scope()):
_, T = resnet.resnet_v1_101(
x, is_training=is_training, num_classes=1000, reuse=reuse
)
elif H['slim_basename'] == 'InceptionV1':
with slim.arg_scope(inception.inception_v1_arg_scope()):
_, T = inception.inception_v1(
x,
is_training=is_training,
num_classes=1001,
spatial_squeeze=False,
reuse=reuse
)
#print '\n'.join(map(str, [(k, v.op.outputs[0].get_shape()) for k, v in T.iteritems()]))
coarse_feat = T[H['slim_top_lname']][:, :, :, :H['later_feat_channels']]
assert coarse_feat.op.outputs[0].get_shape()[3] == H['later_feat_channels']
# fine feat can be used to reinspect input
attention_lname = H.get('slim_attention_lname', 'Mixed_3b')
early_feat = T[attention_lname]
return coarse_feat, early_feat
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/googlenet_load.py |
import numpy as np
import random
import os
import cv2
import itertools
import tensorflow as tf
import multiprocessing
import multiprocessing.pool
import queue
from tensorboxresnet.utils.data_utils import (
annotation_jitter, annotation_to_h5
)
from tensorboxresnet.utils.annolist import AnnotationLib as al
from tensorboxresnet.utils.rect import Rect
from tensorboxresnet.utils import tf_concat
from tensorboxresnet.utils.stitch_wrapper import stitch_rects
import functools
from deepfigures.utils import image_util
tensor_queue = multiprocessing.Queue(maxsize=8)
def rescale_boxes(current_shape, anno, target_height, target_width):
x_scale = target_width / float(current_shape[1])
y_scale = target_height / float(current_shape[0])
for r in anno.rects:
assert r.x1 < r.x2
r.x1 *= x_scale
r.x2 *= x_scale
assert r.y1 < r.y2
r.y1 *= y_scale
r.y2 *= y_scale
return anno
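# Worked example (added comment; sizes are illustrative assumptions): for an
# annotation on a 600 x 800 (height x width) page rescaled to a 480 x 640
# network input, x_scale = 640 / 800 = 0.8 and y_scale = 480 / 600 = 0.8, so a
# rect with x1=100, x2=300 becomes x1=80, x2=240.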
def load_idl_tf(idlfile, H, jitter):
"""Take the idlfile and net configuration and create a generator
that outputs a jittered version of a random image from the annolist
that is mean corrected."""
annolist = al.parse(idlfile)
annos = []
for anno in annolist:
anno.imageName = os.path.join(
os.path.dirname(os.path.realpath(idlfile)), anno.imageName
)
annos.append(anno)
random.seed(0)
if H['data']['truncate_data']:
annos = annos[:10]
for epoch in itertools.count():
print('Starting epoch %d' % epoch)
random.shuffle(annos)
partial_load = functools.partial(
load_page_ann, H=H, epoch=epoch, jitter=jitter
)
with multiprocessing.pool.ThreadPool(processes=4) as p:
map_result = p.map_async(partial_load, annos)
while not map_result.ready():
try:
yield tensor_queue.get(timeout=100)
except queue.Empty:
pass
while not tensor_queue.empty():
yield tensor_queue.get()
def load_page_ann(anno, H, epoch, jitter) -> None:
try:
I = image_util.read_tensor(anno.imageName, maxsize=1e8)
except image_util.FileTooLargeError:
print('ERROR: %s too large' % anno.imageName, flush=True)
return
if I is None:
print("ERROR: Failure reading %s" % anno.imageName, flush=True)
return
assert (len(I.shape) == 3)
if I.shape[0] != H["image_height"] or I.shape[1] != H["image_width"]:
if epoch == 0:
anno = rescale_boxes(
I.shape, anno, H["image_height"], H["image_width"]
)
I = image_util.imresize_multichannel(
I, (H["image_height"], H["image_width"]), interp='cubic'
)
if jitter:
jitter_scale_min = 0.9
jitter_scale_max = 1.1
jitter_offset = 16
I, anno = annotation_jitter(
I,
anno,
target_width=H["image_width"],
target_height=H["image_height"],
jitter_scale_min=jitter_scale_min,
jitter_scale_max=jitter_scale_max,
jitter_offset=jitter_offset
)
boxes, flags = annotation_to_h5(
H, anno, H["grid_width"], H["grid_height"], H["rnn_len"]
)
tensor_queue.put({"image": I, "boxes": boxes, "flags": flags})
def make_sparse(n, d):
v = np.zeros((d,), dtype=np.float32)
v[n] = 1.
return v
def load_data_gen(H, phase, jitter):
grid_size = H['grid_width'] * H['grid_height']
data = load_idl_tf(
H["data"]['%s_idl' % phase],
H,
jitter={'train': jitter,
'test': False}[phase]
)
for d in data:
output = {}
rnn_len = H["rnn_len"]
flags = d['flags'][0, :, 0, 0:rnn_len, 0]
boxes = np.transpose(d['boxes'][0, :, :, 0:rnn_len, 0], (0, 2, 1))
assert (flags.shape == (grid_size, rnn_len))
assert (boxes.shape == (grid_size, rnn_len, 4))
output['image'] = d['image']
output['confs'] = np.array(
[
[
make_sparse(int(detection), d=H['num_classes'])
for detection in cell
] for cell in flags
]
)
output['boxes'] = boxes
output['flags'] = flags
yield output
def add_rectangles(
H,
orig_image,
confidences,
boxes,
use_stitching=False,
rnn_len=1,
min_conf=0.1,
show_removed=True,
tau=0.25,
show_suppressed=True
):
image = np.copy(orig_image[0])
num_cells = H["grid_height"] * H["grid_width"]
boxes_r = np.reshape(
boxes, (-1, H["grid_height"], H["grid_width"], rnn_len, 4)
)
confidences_r = np.reshape(
confidences,
(-1, H["grid_height"], H["grid_width"], rnn_len, H['num_classes'])
)
cell_pix_size = H['region_size']
all_rects = [
[[] for _ in range(H["grid_width"])] for _ in range(H["grid_height"])
]
for n in range(rnn_len):
for y in range(H["grid_height"]):
for x in range(H["grid_width"]):
bbox = boxes_r[0, y, x, n, :]
abs_cx = int(bbox[0]) + cell_pix_size / 2 + cell_pix_size * x
abs_cy = int(bbox[1]) + cell_pix_size / 2 + cell_pix_size * y
w = bbox[2]
h = bbox[3]
conf = np.max(confidences_r[0, y, x, n, 1:])
all_rects[y][x].append(Rect(abs_cx, abs_cy, w, h, conf))
all_rects_r = [r for row in all_rects for cell in row for r in cell]
if use_stitching:
acc_rects = stitch_rects(all_rects, tau)
else:
acc_rects = all_rects_r
if show_suppressed:
pairs = [(all_rects_r, (255, 0, 0))]
else:
pairs = []
pairs.append((acc_rects, (0, 255, 0)))
for rect_set, color in pairs:
for rect in rect_set:
if rect.confidence > min_conf:
cv2.rectangle(
image, (
int(rect.cx - int(rect.width / 2)),
int(rect.cy - int(rect.height / 2))
), (
int(rect.cx + int(rect.width / 2)),
int(rect.cy + int(rect.height / 2))
), color, 2
)
rects = []
for rect in acc_rects:
r = al.AnnoRect()
r.x1 = rect.cx - rect.width / 2.
r.x2 = rect.cx + rect.width / 2.
r.y1 = rect.cy - rect.height / 2.
r.y2 = rect.cy + rect.height / 2.
r.score = rect.true_confidence
rects.append(r)
return image, rects
def to_x1y1x2y2(box):
w = tf.maximum(box[:, 2:3], 1)
h = tf.maximum(box[:, 3:4], 1)
x1 = box[:, 0:1] - w / 2
x2 = box[:, 0:1] + w / 2
y1 = box[:, 1:2] - h / 2
y2 = box[:, 1:2] + h / 2
return tf_concat(1, [x1, y1, x2, y2])
def intersection(box1, box2):
x1_max = tf.maximum(box1[:, 0], box2[:, 0])
y1_max = tf.maximum(box1[:, 1], box2[:, 1])
x2_min = tf.minimum(box1[:, 2], box2[:, 2])
y2_min = tf.minimum(box1[:, 3], box2[:, 3])
x_diff = tf.maximum(x2_min - x1_max, 0)
y_diff = tf.maximum(y2_min - y1_max, 0)
return x_diff * y_diff
def area(box):
x_diff = tf.maximum(box[:, 2] - box[:, 0], 0)
y_diff = tf.maximum(box[:, 3] - box[:, 1], 0)
return x_diff * y_diff
def union(box1, box2):
return area(box1) + area(box2) - intersection(box1, box2)
def iou(box1, box2):
return intersection(box1, box2) / union(box1, box2)
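# Worked example (added comment; boxes are [x1, y1, x2, y2] rows and the
# numbers are illustrative): for box1 = [0, 0, 4, 4] and box2 = [2, 2, 6, 6],
# intersection = 2 * 2 = 4, union = 16 + 16 - 4 = 28, so iou ~= 0.143.
# Predicted and ground-truth boxes are put into this corner format via
# to_x1y1x2y2() above before calling iou().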
def to_idx(vec, w_shape):
'''
vec = (idn, idh, idw)
w_shape = [n, h, w, c]
'''
return vec[:, 2] + w_shape[2] * (vec[:, 1] + w_shape[1] * vec[:, 0])
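# Worked example (added comment; shapes are illustrative assumptions): with
# w_shape = [2, 4, 5, 8] and vec = (idn=1, idh=2, idw=3), the flattened index
# is 3 + 5 * (2 + 4 * 1) = 33, i.e. the row of the [-1, c]-reshaped tensor that
# interp() below gathers with tf.gather.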
def interp(w, i, channel_dim):
'''
Input:
w: A 4D block tensor of shape (n, h, w, c)
i: A list of 3-tuples [(x_1, y_1, z_1), (x_2, y_2, z_2), ...],
each having type (int, float, float)
The 4D block represents a batch of 3D image feature volumes with c channels.
The input i is a list of points to index into w via interpolation. Direct
indexing is not possible due to y_1 and z_1 being float values.
Output:
A list of the values: [
w[x_1, y_1, z_1, :]
w[x_2, y_2, z_2, :]
...
w[x_k, y_k, z_k, :]
]
of the same length == len(i)
'''
w_as_vector = tf.reshape(w, [-1,
channel_dim]) # gather expects w to be 1-d
upper_l = tf.to_int32(
tf_concat(1, [i[:, 0:1],
tf.floor(i[:, 1:2]),
tf.floor(i[:, 2:3])])
)
upper_r = tf.to_int32(
tf_concat(1, [i[:, 0:1],
tf.floor(i[:, 1:2]),
tf.ceil(i[:, 2:3])])
)
lower_l = tf.to_int32(
tf_concat(1, [i[:, 0:1],
tf.ceil(i[:, 1:2]),
tf.floor(i[:, 2:3])])
)
lower_r = tf.to_int32(
tf_concat(1, [i[:, 0:1],
tf.ceil(i[:, 1:2]),
tf.ceil(i[:, 2:3])])
)
upper_l_idx = to_idx(upper_l, tf.shape(w))
upper_r_idx = to_idx(upper_r, tf.shape(w))
lower_l_idx = to_idx(lower_l, tf.shape(w))
lower_r_idx = to_idx(lower_r, tf.shape(w))
upper_l_value = tf.gather(w_as_vector, upper_l_idx)
upper_r_value = tf.gather(w_as_vector, upper_r_idx)
lower_l_value = tf.gather(w_as_vector, lower_l_idx)
lower_r_value = tf.gather(w_as_vector, lower_r_idx)
alpha_lr = tf.expand_dims(i[:, 2] - tf.floor(i[:, 2]), 1)
alpha_ud = tf.expand_dims(i[:, 1] - tf.floor(i[:, 1]), 1)
upper_value = (1 - alpha_lr) * upper_l_value + (alpha_lr) * upper_r_value
lower_value = (1 - alpha_lr) * lower_l_value + (alpha_lr) * lower_r_value
value = (1 - alpha_ud) * upper_value + (alpha_ud) * lower_value
return value
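# Added note: the four gathers above implement standard bilinear interpolation
# with alpha_lr = frac(i[:, 2]) and alpha_ud = frac(i[:, 1]). For an
# illustrative point i = (0, 1.25, 2.5), the corners (y, z) = (1, 2), (1, 3),
# (2, 2), (2, 3) receive weights 0.375, 0.375, 0.125 and 0.125 respectively,
# and a point exactly on a grid location reduces to a single gather.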
def bilinear_select(
H, pred_boxes, early_feat, early_feat_channels, w_offset, h_offset
):
'''
Function used for rezooming high level feature maps. Uses bilinear interpolation
to select all channels at index (x, y) for a high level feature map, where x and y are floats.
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
fine_stride = 8. # pixels per 60x80 grid cell in 480x640 image
coarse_stride = H['region_size'
] # pixels per 15x20 grid cell in 480x640 image
batch_ids = []
x_offsets = []
y_offsets = []
for n in range(H['batch_size']):
for i in range(H['grid_height']):
for j in range(H['grid_width']):
for k in range(H['rnn_len']):
batch_ids.append([n])
x_offsets.append([coarse_stride / 2. + coarse_stride * j])
y_offsets.append([coarse_stride / 2. + coarse_stride * i])
batch_ids = tf.constant(batch_ids)
x_offsets = tf.constant(x_offsets)
y_offsets = tf.constant(y_offsets)
pred_boxes_r = tf.reshape(pred_boxes, [outer_size * H['rnn_len'], 4])
scale_factor = coarse_stride / fine_stride # scale difference between 15x20 and 60x80 features
pred_x_center = (
pred_boxes_r[:, 0:1] + w_offset * pred_boxes_r[:, 2:3] + x_offsets
) / fine_stride
pred_x_center_clip = tf.clip_by_value(
pred_x_center, 0, scale_factor * H['grid_width'] - 1
)
pred_y_center = (
pred_boxes_r[:, 1:2] + h_offset * pred_boxes_r[:, 3:4] + y_offsets
) / fine_stride
pred_y_center_clip = tf.clip_by_value(
pred_y_center, 0, scale_factor * H['grid_height'] - 1
)
interp_indices = tf_concat(
1, [tf.to_float(batch_ids), pred_y_center_clip, pred_x_center_clip]
)
return interp_indices
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/train_utils.py |
import tensorflow as tf
from distutils.version import LooseVersion
TENSORFLOW_VERSION = LooseVersion(tf.__version__)
def tf_concat(axis, values, **kwargs):
if TENSORFLOW_VERSION >= LooseVersion('1.0'):
return tf.concat(values, axis, **kwargs)
else:
return tf.concat(axis, values, **kwargs)
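# Usage sketch (added comment): callers throughout this package write, e.g.,
#   cnn = tf_concat(3, (cnn_deconv, cnn[:, :, :, 256:]))
# and the shim forwards to tf.concat(values, axis) on TensorFlow >= 1.0 or to
# the legacy tf.concat(axis, values) signature on older releases.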
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/__init__.py |
class Rect(object):
def __init__(self, cx, cy, width, height, confidence):
self.cx = cx
self.cy = cy
self.width = width
self.height = height
self.confidence = confidence
self.true_confidence = confidence
def overlaps(self, other):
if abs(self.cx - other.cx) > (self.width + other.width) / 1.5:
return False
elif abs(self.cy - other.cy) > (self.height + other.height) / 2.0:
return False
else:
return True
def distance(self, other):
return sum(
map(
abs, [
self.cx - other.cx, self.cy - other.cy, self.width -
other.width, self.height - other.height
]
)
)
def intersection(self, other):
left = max(self.cx - self.width / 2., other.cx - other.width / 2.)
right = min(self.cx + self.width / 2., other.cx + other.width / 2.)
width = max(right - left, 0)
top = max(self.cy - self.height / 2., other.cy - other.height / 2.)
bottom = min(self.cy + self.height / 2., other.cy + other.height / 2.)
height = max(bottom - top, 0)
return width * height
def area(self):
return self.height * self.width
def union(self, other):
return self.area() + other.area() - self.intersection(other)
def iou(self, other):
return self.intersection(other) / self.union(other)
def __eq__(self, other):
return (
self.cx == other.cx and self.cy == other.cy and
self.width == other.width and self.height == other.height and
self.confidence == other.confidence
)
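# Minimal usage sketch (added for illustration; the numbers are arbitrary
# assumptions, not values produced by the detector):
#
#     a = Rect(cx=10, cy=10, width=8, height=8, confidence=0.9)
#     b = Rect(cx=12, cy=10, width=8, height=8, confidence=0.8)
#     a.intersection(b)  # 6 * 8 = 48
#     a.union(b)         # 64 + 64 - 48 = 80
#     a.iou(b)           # 48 / 80 = 0.6
#     a.overlaps(b)      # True: both centre offsets fall inside the thresholds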
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/rect.py |
import cv2
import numpy as np
import copy
import tensorboxresnet.utils.annolist.AnnotationLib as al
def annotation_to_h5(H, a, cell_width, cell_height, max_len):
region_size = H['region_size']
assert H['region_size'] == H['image_height'] / H['grid_height']
assert H['region_size'] == H['image_width'] / H['grid_width']
cell_regions = get_cell_grid(cell_width, cell_height, region_size)
cells_per_image = len(cell_regions)
box_list = [[] for idx in range(cells_per_image)]
for cidx, c in enumerate(cell_regions):
box_list[cidx] = [r for r in a.rects if all(r.intersection(c))]
boxes = np.zeros((1, cells_per_image, 4, max_len, 1), dtype=np.float)
box_flags = np.zeros((1, cells_per_image, 1, max_len, 1), dtype=np.float)
for cidx in range(cells_per_image):
#assert(cur_num_boxes <= max_len)
cell_ox = 0.5 * (cell_regions[cidx].x1 + cell_regions[cidx].x2)
cell_oy = 0.5 * (cell_regions[cidx].y1 + cell_regions[cidx].y2)
unsorted_boxes = []
for bidx in range(min(len(box_list[cidx]), max_len)):
# relative box position with respect to cell
ox = 0.5 * (box_list[cidx][bidx].x1 + box_list[cidx][bidx].x2
) - cell_ox
oy = 0.5 * (box_list[cidx][bidx].y1 + box_list[cidx][bidx].y2
) - cell_oy
width = abs(box_list[cidx][bidx].x2 - box_list[cidx][bidx].x1)
height = abs(box_list[cidx][bidx].y2 - box_list[cidx][bidx].y1)
if (abs(ox) < H['focus_size'] * region_size and abs(oy) < H['focus_size'] * region_size and
width < H['biggest_box_px'] and height < H['biggest_box_px']):
unsorted_boxes.append(
np.array([ox, oy, width, height], dtype=np.float)
)
for bidx, box in enumerate(
sorted(unsorted_boxes, key=lambda x: x[0]**2 + x[1]**2)
):
boxes[0, cidx, :, bidx, 0] = box
box_flags[0, cidx, 0, bidx, 0] = max(
box_list[cidx][bidx].silhouetteID, 1
)
return boxes, box_flags
def get_cell_grid(cell_width, cell_height, region_size):
cell_regions = []
for iy in range(cell_height):
for ix in range(cell_width):
cidx = iy * cell_width + ix
ox = (ix + 0.5) * region_size
oy = (iy + 0.5) * region_size
r = al.AnnoRect(
ox - 0.5 * region_size, oy - 0.5 * region_size,
ox + 0.5 * region_size, oy + 0.5 * region_size
)
r.track_id = cidx
cell_regions.append(r)
return cell_regions
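# Worked example (added comment; numbers are illustrative): with
# region_size = 32 and a 2 x 2 grid, the returned cells are 32 x 32 AnnoRects
# centred at (16, 16), (48, 16), (16, 48) and (48, 48), with track_id assigned
# in row-major order (cidx = iy * cell_width + ix).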
def annotation_jitter(
I,
a_in,
min_box_width=20,
jitter_scale_min=0.9,
jitter_scale_max=1.1,
jitter_offset=16,
target_width=640,
target_height=480
):
assert I.shape[
2
] == 3, 'Not implemented for images with more than 3 channels'
a = copy.deepcopy(a_in)
# MA: sanity check
new_rects = []
for i in range(len(a.rects)):
r = a.rects[i]
try:
assert (r.x1 < r.x2 and r.y1 < r.y2)
new_rects.append(r)
except:
print('bad rectangle')
a.rects = new_rects
if a.rects:
cur_min_box_width = min([r.width() for r in a.rects])
else:
cur_min_box_width = min_box_width / jitter_scale_min
# don't downscale below min_box_width
jitter_scale_min = max(
jitter_scale_min, float(min_box_width) / cur_min_box_width
)
# it's always ok to upscale
jitter_scale_min = min(jitter_scale_min, 1.0)
jitter_scale_max = jitter_scale_max
jitter_scale = np.random.uniform(jitter_scale_min, jitter_scale_max)
jitter_flip = np.random.random_integers(0, 1)
if jitter_flip == 1:
I = np.fliplr(I)
for r in a:
r.x1 = I.shape[1] - r.x1
r.x2 = I.shape[1] - r.x2
r.x1, r.x2 = r.x2, r.x1
for p in r.point:
p.x = I.shape[1] - p.x
I1 = cv2.resize(
I,
None,
fx=jitter_scale,
fy=jitter_scale,
interpolation=cv2.INTER_CUBIC
)
jitter_offset_x = np.random.random_integers(-jitter_offset, jitter_offset)
jitter_offset_y = np.random.random_integers(-jitter_offset, jitter_offset)
rescaled_width = I1.shape[1]
rescaled_height = I1.shape[0]
px = round(0.5 *
(target_width)) - round(0.5 *
(rescaled_width)) + jitter_offset_x
py = round(0.5 *
(target_height)) - round(0.5 *
(rescaled_height)) + jitter_offset_y
I2 = np.zeros((target_height, target_width, 3), dtype=I1.dtype)
x1 = max(0, px)
y1 = max(0, py)
x2 = min(rescaled_width, target_width - x1)
y2 = min(rescaled_height, target_height - y1)
I2[0:(y2 - y1), 0:(x2 - x1), :] = I1[y1:y2, x1:x2, :]
ox1 = round(0.5 * rescaled_width) + jitter_offset_x
oy1 = round(0.5 * rescaled_height) + jitter_offset_y
ox2 = round(0.5 * target_width)
oy2 = round(0.5 * target_height)
for r in a:
r.x1 = round(jitter_scale * r.x1 - x1)
r.x2 = round(jitter_scale * r.x2 - x1)
r.y1 = round(jitter_scale * r.y1 - y1)
r.y2 = round(jitter_scale * r.y2 - y1)
if r.x1 < 0:
r.x1 = 0
if r.y1 < 0:
r.y1 = 0
if r.x2 >= I2.shape[1]:
r.x2 = I2.shape[1] - 1
if r.y2 >= I2.shape[0]:
r.y2 = I2.shape[0] - 1
for p in r.point:
p.x = round(jitter_scale * p.x - x1)
p.y = round(jitter_scale * p.y - y1)
# MA: make sure all points are inside the image
r.point = [
p for p in r.point
if p.x >= 0 and p.y >= 0 and p.x < I2.shape[1] and
p.y < I2.shape[0]
]
new_rects = []
for r in a.rects:
if r.x1 <= r.x2 and r.y1 <= r.y2:
new_rects.append(r)
else:
pass
a.rects = new_rects
return I2, a
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/data_utils.py |