python_code | repo_name | file_path
---|---|---|
import nltk
import numpy as np
def _set_span(t, i):
if isinstance(t[0], str):
t.span = (i, i+len(t))
else:
first = True
for c in t:
cur_span = _set_span(c, i)
i = cur_span[1]
if first:
min_ = cur_span[0]
first = False
max_ = cur_span[1]
t.span = (min_, max_)
return t.span
def set_span(t):
assert isinstance(t, nltk.tree.Tree)
try:
return _set_span(t, 0)
except:
print(t)
exit()
def tree_contains_span(tree, span):
"""
Assumes that the tree's spans have been set with set_span.
Returns True if any subtree of `tree` has exactly the given span.
:param tree:
:param span:
:return bool:
"""
return span in set(t.span for t in tree.subtrees())
def span_len(span):
return span[1] - span[0]
def span_overlap(s1, s2):
start = max(s1[0], s2[0])
stop = min(s1[1], s2[1])
if stop > start:
return start, stop
return None
def span_prec(true_span, pred_span):
overlap = span_overlap(true_span, pred_span)
if overlap is None:
return 0
return span_len(overlap) / span_len(pred_span)
def span_recall(true_span, pred_span):
overlap = span_overlap(true_span, pred_span)
if overlap is None:
return 0
return span_len(overlap) / span_len(true_span)
def span_f1(true_span, pred_span):
p = span_prec(true_span, pred_span)
r = span_recall(true_span, pred_span)
if p == 0 or r == 0:
return 0.0
return 2 * p * r / (p + r)
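# Worked example (added for illustration, not in the original file): spans are
# half-open (start, stop) token-index pairs. With true_span = (0, 4) and
# pred_span = (2, 6), span_overlap gives (2, 4), so
#   span_prec   = 2 / 4 = 0.5   (overlap / predicted length)
#   span_recall = 2 / 4 = 0.5   (overlap / true length)
#   span_f1     = 2 * 0.5 * 0.5 / (0.5 + 0.5) = 0.5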
def find_max_f1_span(tree, span):
return find_max_f1_subtree(tree, span).span
def find_max_f1_subtree(tree, span):
return max(((t, span_f1(span, t.span)) for t in tree.subtrees()), key=lambda p: p[1])[0]
def tree2matrix(tree, node2num, row_size=None, col_size=None, dtype='int32'):
set_span(tree)
D = tree.height() - 1
B = len(tree.leaves())
row_size = row_size or D
col_size = col_size or B
matrix = np.zeros([row_size, col_size], dtype=dtype)
mask = np.zeros([row_size, col_size, col_size], dtype='bool')
for subtree in tree.subtrees():
row = subtree.height() - 2
col = subtree.span[0]
matrix[row, col] = node2num(subtree)
for subsub in subtree.subtrees():
if isinstance(subsub, nltk.tree.Tree):
mask[row, col, subsub.span[0]] = True
if not isinstance(subsub[0], nltk.tree.Tree):
c = subsub.span[0]
for r in range(row):
mask[r, c, c] = True
else:
mask[row, col, col] = True
return matrix, mask
def load_compressed_tree(s):
def compress_tree(tree):
assert not isinstance(tree, str)
if len(tree) == 1:
if isinstance(tree[0], nltk.tree.Tree):
return compress_tree(tree[0])
else:
return tree
else:
for i, t in enumerate(tree):
if isinstance(t, nltk.tree.Tree):
tree[i] = compress_tree(t)
else:
tree[i] = t
return tree
return compress_tree(nltk.tree.Tree.fromstring(s))
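# Usage sketch (illustrative only; the tree below is a made-up example):
#   t = nltk.tree.Tree.fromstring("(S (NP (DT the) (NN dog)) (VP (VBD ran)))")
#   set_span(t)                      # annotates every subtree; t.span == (0, 3)
#   tree_contains_span(t, (0, 2))    # True, because the NP subtree spans (0, 2)
#   find_max_f1_span(t, (0, 1))      # (0, 1), the DT preterminal matches exactly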
| bi-att-flow-master | my/nltk_utils.py |
from tensorflow.python.ops.rnn_cell import _linear
from tensorflow.python.util import nest
import tensorflow as tf
from my.tensorflow import flatten, reconstruct, add_wd, exp_mask
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
flat_args = [flatten(arg, 1) for arg in args]
if input_keep_prob < 1.0:
assert is_train is not None
flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
for arg in flat_args]
flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start, scope=scope)
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
if wd:
add_wd(wd)
return out
def dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):
with tf.name_scope(name or "dropout"):
if keep_prob < 1.0:
d = tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)
out = tf.cond(is_train, lambda: d, lambda: x)
return out
return x
def softmax(logits, mask=None, scope=None):
with tf.name_scope(scope or "Softmax"):
if mask is not None:
logits = exp_mask(logits, mask)
flat_logits = flatten(logits, 1)
flat_out = tf.nn.softmax(flat_logits)
out = reconstruct(flat_out, logits, 1)
return out
def softsel(target, logits, mask=None, scope=None):
"""
:param target: [ ..., J, d] dtype=float
:param logits: [ ..., J], dtype=float
:param mask: [ ..., J], dtype=bool
:param scope:
:return: [..., d], dtype=float
"""
with tf.name_scope(scope or "Softsel"):
a = softmax(logits, mask=mask)
target_rank = len(target.get_shape().as_list())
out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)
return out
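# Shape sketch (illustrative, not in the original file): softsel performs a masked
# attention read. For example, with target of shape [N, J, d], logits of shape
# [N, J] and a boolean mask of shape [N, J]:
#   a   = softmax(exp-masked logits)              # [N, J], masked positions ~ 0
#   out = sum_j a[:, j, None] * target[:, j, :]   # [N, d]
# The same code works for higher-rank inputs such as [N, M, J, d] / [N, M, J].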
def double_linear_logits(args, size, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope(scope or "Double_Linear_Logits"):
first = tf.tanh(linear(args, size, bias, bias_start=bias_start, scope='first',
wd=wd, input_keep_prob=input_keep_prob, is_train=is_train))
second = linear(first, 1, bias, bias_start=bias_start, squeeze=True, scope='second',
wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
if mask is not None:
second = exp_mask(second, mask)
return second
def linear_logits(args, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope(scope or "Linear_Logits"):
logits = linear(args, 1, bias, bias_start=bias_start, squeeze=True, scope='first',
wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
if mask is not None:
logits = exp_mask(logits, mask)
return logits
def sum_logits(args, mask=None, name=None):
with tf.name_scope(name or "sum_logits"):
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
rank = len(args[0].get_shape())
logits = sum(tf.reduce_sum(arg, rank-1) for arg in args)
if mask is not None:
logits = exp_mask(logits, mask)
return logits
def get_logits(args, size, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None, func=None):
if func is None:
func = "sum"
if func == 'sum':
return sum_logits(args, mask=mask, name=scope)
elif func == 'linear':
return linear_logits(args, bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
elif func == 'double':
return double_linear_logits(args, size, bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
elif func == 'dot':
assert len(args) == 2
arg = args[0] * args[1]
return sum_logits([arg], mask=mask, name=scope)
elif func == 'mul_linear':
assert len(args) == 2
arg = args[0] * args[1]
return linear_logits([arg], bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
elif func == 'proj':
assert len(args) == 2
d = args[1].get_shape()[-1]
proj = linear([args[0]], d, False, bias_start=bias_start, scope=scope, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
return sum_logits([proj * args[1]], mask=mask)
elif func == 'tri_linear':
assert len(args) == 2
new_arg = args[0] * args[1]
return linear_logits([args[0], args[1], new_arg], bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
else:
raise Exception()
def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope(scope or "highway_layer"):
d = arg.get_shape()[-1]
trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
trans = tf.nn.relu(trans)
gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * arg
return out
def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope(scope or "highway_network"):
prev = arg
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd,
input_keep_prob=input_keep_prob, is_train=is_train)
prev = cur
return cur
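# Note (added for clarity): each highway layer computes
#   trans = relu(W_t x + b_t),  gate = sigmoid(W_g x + b_g)
#   out   = gate * trans + (1 - gate) * x
# so a gate near 0 passes the input through unchanged; highway_network simply
# stacks num_layers of these layers under one variable scope.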
def conv1d(in_, filter_size, height, padding, is_train=None, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "conv1d"):
num_channels = in_.get_shape()[-1]
filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float')
bias = tf.get_variable("bias", shape=[filter_size], dtype='float')
strides = [1, 1, 1, 1]
if is_train is not None and keep_prob < 1.0:
in_ = dropout(in_, keep_prob, is_train)
xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d]
out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]
return out
def multi_conv1d(in_, filter_sizes, heights, padding, is_train=None, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "multi_conv1d"):
assert len(filter_sizes) == len(heights)
outs = []
for filter_size, height in zip(filter_sizes, heights):
if filter_size == 0:
continue
out = conv1d(in_, filter_size, height, padding, is_train=is_train, keep_prob=keep_prob, scope="conv1d_{}".format(height))
outs.append(out)
concat_out = tf.concat(2, outs)
return concat_out
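# Shape sketch (illustrative): conv1d/multi_conv1d implement the character-level CNN.
# Given in_ of shape [batch, seq_len, word_len, char_emb] and one (filter_size, height)
# pair, conv1d convolves over the word_len axis with a [1, height, char_emb, filter_size]
# kernel and max-pools over what remains of that axis, yielding [batch, seq_len, filter_size].
# multi_conv1d runs one such conv per (filter_size, height) pair and concatenates the
# results along the last axis, so the output width is the sum of the non-zero filter_sizes.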
| bi-att-flow-master | my/tensorflow/nn.py |
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import DropoutWrapper, RNNCell, LSTMStateTuple
from my.tensorflow import exp_mask, flatten
from my.tensorflow.nn import linear, softsel, double_linear_logits
class SwitchableDropoutWrapper(DropoutWrapper):
def __init__(self, cell, is_train, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
super(SwitchableDropoutWrapper, self).__init__(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob,
seed=seed)
self.is_train = is_train
def __call__(self, inputs, state, scope=None):
outputs_do, new_state_do = super(SwitchableDropoutWrapper, self).__call__(inputs, state, scope=scope)
tf.get_variable_scope().reuse_variables()
outputs, new_state = self._cell(inputs, state, scope)
outputs = tf.cond(self.is_train, lambda: outputs_do, lambda: outputs)
if isinstance(state, tuple):
new_state = state.__class__(*[tf.cond(self.is_train, lambda: new_state_do_i, lambda: new_state_i)
for new_state_do_i, new_state_i in zip(new_state_do, new_state)])
else:
new_state = tf.cond(self.is_train, lambda: new_state_do, lambda: new_state)
return outputs, new_state
class TreeRNNCell(RNNCell):
def __init__(self, cell, input_size, reduce_func):
self._cell = cell
self._input_size = input_size
self._reduce_func = reduce_func
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N*B, I + B]
:param state: [N*B, d]
:param scope:
:return: [N*B, d]
"""
with tf.variable_scope(scope or self.__class__.__name__):
d = self.state_size
x = tf.slice(inputs, [0, 0], [-1, self._input_size]) # [N*B, I]
mask = tf.slice(inputs, [0, self._input_size], [-1, -1]) # [N*B, B]
B = tf.shape(mask)[1]
prev_state = tf.expand_dims(tf.reshape(state, [-1, B, d]), 1) # [N, B, d] -> [N, 1, B, d]
mask = tf.tile(tf.expand_dims(tf.reshape(mask, [-1, B, B]), -1), [1, 1, 1, d]) # [N, B, B, d]
# prev_state = self._reduce_func(tf.tile(prev_state, [1, B, 1, 1]), 2)
prev_state = self._reduce_func(exp_mask(prev_state, mask), 2) # [N, B, d]
prev_state = tf.reshape(prev_state, [-1, d]) # [N*B, d]
return self._cell(x, prev_state)
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
class NoOpCell(RNNCell):
def __init__(self, num_units):
self._num_units = num_units
def __call__(self, inputs, state, scope=None):
return state, state
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
class MatchCell(RNNCell):
def __init__(self, cell, input_size, q_len):
self._cell = cell
self._input_size = input_size
# FIXME : This won't be needed with good shape guessing
self._q_len = q_len
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N, d + JQ + JQ * d]
:param state: [N, d]
:param scope:
:return:
"""
with tf.variable_scope(scope or self.__class__.__name__):
c_prev, h_prev = state
x = tf.slice(inputs, [0, 0], [-1, self._input_size])
q_mask = tf.slice(inputs, [0, self._input_size], [-1, self._q_len]) # [N, JQ]
qs = tf.slice(inputs, [0, self._input_size + self._q_len], [-1, -1])
qs = tf.reshape(qs, [-1, self._q_len, self._input_size]) # [N, JQ, d]
x_tiled = tf.tile(tf.expand_dims(x, 1), [1, self._q_len, 1]) # [N, JQ, d]
h_prev_tiled = tf.tile(tf.expand_dims(h_prev, 1), [1, self._q_len, 1]) # [N, JQ, d]
f = tf.tanh(linear([qs, x_tiled, h_prev_tiled], self._input_size, True, scope='f')) # [N, JQ, d]
a = tf.nn.softmax(exp_mask(linear(f, 1, True, squeeze=True, scope='a'), q_mask)) # [N, JQ]
q = tf.reduce_sum(qs * tf.expand_dims(a, -1), 1)
z = tf.concat(1, [x, q]) # [N, 2d]
return self._cell(z, state)
class AttentionCell(RNNCell):
def __init__(self, cell, memory, mask=None, controller=None, mapper=None, input_keep_prob=1.0, is_train=None):
"""
Early fusion attention cell: uses the (inputs, state) to control the current attention.
:param cell:
:param memory: [N, M, m]
:param mask:
:param controller: (inputs, prev_state, memory) -> memory_logits
"""
self._cell = cell
self._memory = memory
self._mask = mask
self._flat_memory = flatten(memory, 2)
self._flat_mask = flatten(mask, 1)
if controller is None:
controller = AttentionCell.get_linear_controller(True, is_train=is_train)
self._controller = controller
if mapper is None:
mapper = AttentionCell.get_concat_mapper()
elif mapper == 'sim':
mapper = AttentionCell.get_sim_mapper()
self._mapper = mapper
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or "AttentionCell"):
memory_logits = self._controller(inputs, state, self._flat_memory)
sel_mem = softsel(self._flat_memory, memory_logits, mask=self._flat_mask) # [N, m]
new_inputs, new_state = self._mapper(inputs, state, sel_mem)
return self._cell(new_inputs, state)
@staticmethod
def get_double_linear_controller(size, bias, input_keep_prob=1.0, is_train=None):
def double_linear_controller(inputs, state, memory):
"""
:param inputs: [N, i]
:param state: [N, d]
:param memory: [N, M, m]
:return: [N, M]
"""
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = double_linear_logits(in_, size, bias, input_keep_prob=input_keep_prob,
is_train=is_train)
return out
return double_linear_controller
@staticmethod
def get_linear_controller(bias, input_keep_prob=1.0, is_train=None):
def linear_controller(inputs, state, memory):
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = linear(in_, 1, bias, squeeze=True, input_keep_prob=input_keep_prob, is_train=is_train)
return out
return linear_controller
@staticmethod
def get_concat_mapper():
def concat_mapper(inputs, state, sel_mem):
"""
:param inputs: [N, i]
:param state: [N, d]
:param sel_mem: [N, m]
:return: (new_inputs, new_state) tuple
"""
return tf.concat(1, [inputs, sel_mem]), state
return concat_mapper
@staticmethod
def get_sim_mapper():
def sim_mapper(inputs, state, sel_mem):
"""
Assume that inputs and sel_mem are the same size
:param inputs: [N, i]
:param state: [N, d]
:param sel_mem: [N, i]
:return: (new_inputs, new_state) tuple
"""
return tf.concat(1, [inputs, sel_mem, inputs * sel_mem, tf.abs(inputs - sel_mem)]), state
return sim_mapper
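# Usage sketch (illustrative; the cell and memory names below are placeholders):
# AttentionCell wraps any RNNCell so that, at every step, it attends over a fixed
# memory and feeds the attended vector back into the wrapped cell.
#   base = BasicLSTMCell(d, state_is_tuple=True)
#   att_cell = AttentionCell(base, memory, mask=memory_mask, mapper='sim', is_train=is_train)
# With the default controller, attention logits come from a linear layer over the
# concatenation of the (tiled) inputs, state, and memory; the 'sim' mapper concatenates
# inputs, the attended memory, their elementwise product, and their absolute difference
# before calling the wrapped cell.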
| bi-att-flow-master | my/tensorflow/rnn_cell.py |
from my.tensorflow.general import * | bi-att-flow-master | my/tensorflow/__init__.py |
import tensorflow as tf
from tensorflow.python.ops.rnn import dynamic_rnn as _dynamic_rnn, \
bidirectional_dynamic_rnn as _bidirectional_dynamic_rnn
from tensorflow.python.ops.rnn import bidirectional_rnn as _bidirectional_rnn
from my.tensorflow import flatten, reconstruct
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
assert not time_major # TODO : to be implemented later!
flat_inputs = flatten(inputs, 2) # [-1, J, d]
flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
initial_state=initial_state, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=scope)
outputs = reconstruct(flat_outputs, inputs, 2)
return outputs, final_state
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
assert not time_major # TODO : to be implemented later!
flat_inputs = flatten(inputs, 2) # [-1, J, d]
flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
else tf.reverse_sequence(flat_inputs, sequence_length, 1)
flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
initial_state=initial_state, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=scope)
flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
else tf.reverse_sequence(flat_outputs, sequence_length, 1)
outputs = reconstruct(flat_outputs, inputs, 2)
return outputs, final_state
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
assert not time_major
flat_inputs = flatten(inputs, 2) # [-1, J, d]
flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
(flat_fw_outputs, flat_bw_outputs), final_state = \
_bidirectional_dynamic_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=scope)
fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
# FIXME : final state is not reshaped!
return (fw_outputs, bw_outputs), final_state
def bidirectional_rnn(cell_fw, cell_bw, inputs,
initial_state_fw=None, initial_state_bw=None,
dtype=None, sequence_length=None, scope=None):
flat_inputs = flatten(inputs, 2) # [-1, J, d]
flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
(flat_fw_outputs, flat_bw_outputs), final_state = \
_bidirectional_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
dtype=dtype, scope=scope)
fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
# FIXME : final state is not reshaped!
return (fw_outputs, bw_outputs), final_state
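# Note (added for clarity): these wrappers let the TF RNN ops accept inputs with extra
# leading batch-like dimensions. For example, inputs of shape [N, M, JX, d] are flattened
# to [N*M, JX, d] before calling the underlying (bidirectional_)dynamic_rnn, and the
# outputs are reshaped back to [N, M, JX, out_d] via reconstruct. As the FIXME above
# notes, the final states are still returned in the flattened [N*M, ...] layout.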
| bi-att-flow-master | my/tensorflow/rnn.py |
from itertools import zip_longest
import tensorflow as tf
from functools import reduce
from operator import mul
import numpy as np
VERY_BIG_NUMBER = 1e30
VERY_SMALL_NUMBER = 1e-30
VERY_POSITIVE_NUMBER = VERY_BIG_NUMBER
VERY_NEGATIVE_NUMBER = -VERY_BIG_NUMBER
def get_initializer(matrix):
def _initializer(shape, dtype=None, partition_info=None, **kwargs): return matrix
return _initializer
def variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, var in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
assert g is not None, var.name
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def mask(val, mask, name=None):
if name is None:
name = 'mask'
return tf.mul(val, tf.cast(mask, 'float'), name=name)
def exp_mask(val, mask, name=None):
"""Give very negative number to unmasked elements in val.
For example, [-3, -2, 10], [True, True, False] -> [-3, -2, -1e9].
Typically, this effectively masks in exponential space (e.g. softmax)
Args:
val: values to be masked
mask: masking boolean tensor, same shape as tensor
name: name for output tensor
Returns:
Same shape as val, where some elements are very small (exponentially zero)
"""
if name is None:
name = "exp_mask"
return tf.add(val, (1 - tf.cast(mask, 'float')) * VERY_NEGATIVE_NUMBER, name=name)
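# Worked example (illustrative): with val = [-3, -2, 10] and mask = [True, True, False],
# exp_mask adds (1 - mask) * -1e30, giving roughly [-3, -2, -1e30]; after a softmax the
# last position receives essentially zero probability. By contrast, mask() above zeroes
# the value itself (10 -> 0), which is not the same thing in softmax space.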
def flatten(tensor, keep):
fixed_shape = tensor.get_shape().as_list()
start = len(fixed_shape) - keep
left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start, len(fixed_shape))]
flat = tf.reshape(tensor, out_shape)
return flat
def reconstruct(tensor, ref, keep):
ref_shape = ref.get_shape().as_list()
tensor_shape = tensor.get_shape().as_list()
ref_stop = len(ref_shape) - keep
tensor_start = len(tensor_shape) - keep
pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)]
keep_shape = [tensor_shape[i] or tf.shape(tensor)[i] for i in range(tensor_start, len(tensor_shape))]
# pre_shape = [tf.shape(ref)[i] for i in range(len(ref.get_shape().as_list()[:-keep]))]
# keep_shape = tensor.get_shape().as_list()[-keep:]
target_shape = pre_shape + keep_shape
out = tf.reshape(tensor, target_shape)
return out
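# Shape sketch (illustrative): flatten(tensor, keep) collapses all but the last `keep`
# dimensions into one, and reconstruct(tensor, ref, keep) undoes it using ref's shape.
#   x = tf.zeros([N, M, JX, d])
#   flat = flatten(x, 2)            # [N*M, JX, d]
#   y = some_op(flat)               # e.g. an RNN over the JX axis (placeholder name)
#   out = reconstruct(y, x, 2)      # back to [N, M, JX, out_d]
# Unknown (None) dimensions fall back to tf.shape(...) at run time.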
def add_wd(wd, scope=None):
scope = scope or tf.get_variable_scope().name
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
with tf.name_scope("weight_decay"):
for var in variables:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name="{}/wd".format(var.op.name))
tf.add_to_collection('losses', weight_decay)
def grouper(iterable, n, fillvalue=None, shorten=False, num_groups=None):
args = [iter(iterable)] * n
out = zip_longest(*args, fillvalue=fillvalue)
out = list(out)
if num_groups is not None:
default = (fillvalue, ) * n
assert isinstance(num_groups, int)
out = list(each for each, _ in zip_longest(out, range(num_groups), fillvalue=default))
if shorten:
assert fillvalue is None
out = (tuple(e for e in each if e is not None) for each in out)
return out
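# Worked example (illustrative):
#   list(grouper([1, 2, 3, 4, 5], 2))                 # [(1, 2), (3, 4), (5, None)]
#   list(grouper([1, 2, 3, 4, 5], 2, shorten=True))   # [(1, 2), (3, 4), (5,)]
#   list(grouper([1, 2], 2, num_groups=3))            # [(1, 2), (None, None), (None, None)]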
def padded_reshape(tensor, shape, mode='CONSTANT', name=None):
paddings = [[0, shape[i] - tf.shape(tensor)[i]] for i in range(len(shape))]
return tf.pad(tensor, paddings, mode=mode, name=name) | bi-att-flow-master | my/tensorflow/general.py |
import gzip
import json
from json import encoder
import os
import tensorflow as tf
from basic_cnn.evaluator import Evaluation, F1Evaluation
from my.utils import short_floats
import pickle
class GraphHandler(object):
def __init__(self, config):
self.config = config
self.saver = tf.train.Saver(max_to_keep=config.max_to_keep)
self.writer = None
self.save_path = os.path.join(config.save_dir, config.model_name)
def initialize(self, sess):
if self.config.load:
self._load(sess)
else:
sess.run(tf.initialize_all_variables())
if self.config.mode == 'train':
self.writer = tf.train.SummaryWriter(self.config.log_dir, graph=tf.get_default_graph())
def save(self, sess, global_step=None):
self.saver.save(sess, self.save_path, global_step=global_step)
def _load(self, sess):
config = self.config
if config.load_path:
save_path = config.load_path
elif config.load_step > 0:
save_path = os.path.join(config.save_dir, "{}-{}".format(config.model_name, config.load_step))
else:
save_dir = config.save_dir
checkpoint = tf.train.get_checkpoint_state(save_dir)
assert checkpoint is not None, "cannot load checkpoint at {}".format(save_dir)
save_path = checkpoint.model_checkpoint_path
print("Loading saved model from {}".format(save_path))
self.saver.restore(sess, save_path)
def add_summary(self, summary, global_step):
self.writer.add_summary(summary, global_step)
def add_summaries(self, summaries, global_step):
for summary in summaries:
self.add_summary(summary, global_step)
def dump_eval(self, e, precision=2, path=None):
assert isinstance(e, Evaluation)
if self.config.dump_pickle:
path = path or os.path.join(self.config.eval_dir, "{}-{}.pklz".format(e.data_type, str(e.global_step).zfill(6)))
with gzip.open(path, 'wb', compresslevel=3) as fh:
pickle.dump(e.dict, fh)
else:
path = path or os.path.join(self.config.eval_dir, "{}-{}.json".format(e.data_type, str(e.global_step).zfill(6)))
with open(path, 'w') as fh:
json.dump(short_floats(e.dict, precision), fh)
def dump_answer(self, e, path=None):
assert isinstance(e, Evaluation)
path = path or os.path.join(self.config.answer_dir, "{}-{}.json".format(e.data_type, str(e.global_step).zfill(6)))
with open(path, 'w') as fh:
json.dump(e.id2answer_dict, fh)
| bi-att-flow-master | basic_cnn/graph_handler.py |
| bi-att-flow-master | basic_cnn/__init__.py |
import random
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import BasicLSTMCell, GRUCell
from basic_cnn.read_data import DataSet
from basic_cnn.superhighway import SHCell
from my.tensorflow import exp_mask, get_initializer, VERY_SMALL_NUMBER
from my.tensorflow.nn import linear, double_linear_logits, linear_logits, softsel, dropout, get_logits, softmax, \
highway_network, multi_conv1d
from my.tensorflow.rnn import bidirectional_dynamic_rnn, dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
def bi_attention(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
"""
h_a:
all of u attending on h, i.e. choosing the elements of h that best match u.
First a similarity matrix between h and u is built, then the attention weights
are max-pooled over the u (question) dimension, and finally a softmax over the
h positions produces the attended context vector (tiled over JX).
u_a:
each position of h attending on u.
:param h: [N, M, JX, d]
:param u: [N, JQ, d]
:param h_mask: [N, M, JX]
:param u_mask: [N, JQ]
:param scope:
:return: u_a with shape [N, M, JX, d], and h_a with shape [N, M, JX, d]
(h_a is None unless config.bi is set)
"""
with tf.variable_scope(scope or "bi_attention"):
N, M, JX, JQ, d = config.batch_size, config.max_num_sents, config.max_sent_size, config.max_ques_size, config.hidden_size
JX = tf.shape(h)[2]
h_aug = tf.tile(tf.expand_dims(h, 3), [1, 1, 1, JQ, 1])
u_aug = tf.tile(tf.expand_dims(tf.expand_dims(u, 1), 1), [1, M, JX, 1, 1])
if h_mask is None:
and_mask = None
else:
h_mask_aug = tf.tile(tf.expand_dims(h_mask, 3), [1, 1, 1, JQ])
u_mask_aug = tf.tile(tf.expand_dims(tf.expand_dims(u_mask, 1), 1), [1, M, JX, 1])
and_mask = h_mask_aug & u_mask_aug
u_logits = get_logits([h_aug, u_aug], None, True, wd=config.wd, mask=and_mask,
is_train=is_train, func=config.logit_func, scope='u_logits') # [N, M, JX, JQ]
u_a = softsel(u_aug, u_logits) # [N, M, JX, d]
if tensor_dict is not None:
# a_h = tf.nn.softmax(h_logits) # [N, M, JX]
a_u = tf.nn.softmax(u_logits) # [N, M, JX, JQ]
# tensor_dict['a_h'] = a_h
tensor_dict['a_u'] = a_u
if config.bi:
h_a = softsel(h, tf.reduce_max(u_logits, 3)) # [N, M, d]
h_a = tf.tile(tf.expand_dims(h_a, 2), [1, 1, JX, 1])
else:
h_a = None
return u_a, h_a
def attention_layer(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
with tf.variable_scope(scope or "attention_layer"):
u_a, h_a = bi_attention(config, is_train, h, u, h_mask=h_mask, u_mask=u_mask, tensor_dict=tensor_dict)
if config.bi:
p0 = tf.concat(3, [h , u_a, h * u_a, h * h_a])
else:
p0 = tf.concat(3, [h , u_a, h * u_a])
return p0
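# Shape sketch (added for clarity): with h of shape [N, M, JX, 2d] and u of shape
# [N, JQ, 2d], bi_attention computes a logit for every (context position, question
# position) pair, giving u_a (the question attended for each context position) and,
# when config.bi is set, h_a (the max-attended context vector tiled over JX).
# attention_layer then concatenates [h, u_a, h * u_a, h * h_a] along the last axis,
# so p0 has shape [N, M, JX, 8d] (or [N, M, JX, 6d] without the bi term).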
class Model(object):
def __init__(self, config, scope):
self.scope = scope
self.config = config
self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
# Define forward inputs here
N, M, JX, JQ, VW, VC, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
self.x = tf.placeholder('int32', [N, M, None], name='x')
self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
self.q = tf.placeholder('int32', [N, JQ], name='q')
self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
self.y = tf.placeholder('bool', [N, M, JX], name='y')
self.is_train = tf.placeholder('bool', [], name='is_train')
self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')
# Define misc
self.tensor_dict = {}
# Forward outputs / loss inputs
self.logits = None
self.yp = None
self.var_list = None
# Loss outputs
self.loss = None
self._build_forward()
self._build_loss()
if config.mode == 'train':
self._build_ema()
self.summary = tf.merge_all_summaries()
self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
def _build_forward(self):
config = self.config
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, \
config.max_word_size
JX = tf.shape(self.x)[2]
dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size
with tf.variable_scope("emb"):
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
with tf.variable_scope("char"):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, M, JX, W, dc]
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq) # [N, JQ, W, dc]
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco
with tf.variable_scope("conv"):
xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
else:
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="qq")
xx = tf.reshape(xx, [-1, M, JX, dco])
qq = tf.reshape(qq, [-1, JQ, dco])
if config.use_word_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(0, [word_emb_mat, self.new_emb_mat])
with tf.name_scope("word"):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, M, JX, d]
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q) # [N, JQ, d]
self.tensor_dict['x'] = Ax
self.tensor_dict['q'] = Aq
xx = tf.concat(3, [xx, Ax]) # [N, M, JX, di]
qq = tf.concat(2, [qq, Aq]) # [N, JQ, di]
# highway network
with tf.variable_scope("highway"):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
tf.get_variable_scope().reuse_variables()
qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
self.tensor_dict['xx'] = xx
self.tensor_dict['qq'] = qq
cell = BasicLSTMCell(d, state_is_tuple=True)
d_cell = SwitchableDropoutWrapper(cell, self.is_train, input_keep_prob=config.input_keep_prob)
x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2) # [N, M]
q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1) # [N]
with tf.variable_scope("prepro"):
(fw_u, bw_u), ((_, fw_u_f), (_, bw_u_f)) = bidirectional_dynamic_rnn(d_cell, d_cell, qq, q_len, dtype='float', scope='u1') # [N, J, d], [N, d]
u = tf.concat(2, [fw_u, bw_u])
if config.two_prepro_layers:
(fw_u, bw_u), ((_, fw_u_f), (_, bw_u_f)) = bidirectional_dynamic_rnn(d_cell, d_cell, u, q_len, dtype='float', scope='u2') # [N, J, d], [N, d]
u = tf.concat(2, [fw_u, bw_u])
if config.share_lstm_weights:
tf.get_variable_scope().reuse_variables()
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, xx, x_len, dtype='float', scope='u1') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
if config.two_prepro_layers:
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, h, x_len, dtype='float', scope='u2') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
else:
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, xx, x_len, dtype='float', scope='h1') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
if config.two_prepro_layers:
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, h, x_len, dtype='float', scope='h2') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
self.tensor_dict['u'] = u
self.tensor_dict['h'] = h
with tf.variable_scope("main"):
p0 = attention_layer(config, self.is_train, h, u, h_mask=self.x_mask, u_mask=self.q_mask, scope="p0", tensor_dict=self.tensor_dict)
(fw_g0, bw_g0), _ = bidirectional_dynamic_rnn(d_cell, d_cell, p0, x_len, dtype='float', scope='g0') # [N, M, JX, 2d]
g0 = tf.concat(3, [fw_g0, bw_g0])
# p1 = attention_layer(config, self.is_train, g0, u, h_mask=self.x_mask, u_mask=self.q_mask, scope="p1")
(fw_g1, bw_g1), _ = bidirectional_dynamic_rnn(d_cell, d_cell, g0, x_len, dtype='float', scope='g1') # [N, M, JX, 2d]
g1 = tf.concat(3, [fw_g1, bw_g1])
# logits = u_logits(config, self.is_train, g1, u, h_mask=self.x_mask, u_mask=self.q_mask, scope="logits")
# [N, M, JX]
logits = get_logits([g1, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=self.x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
a1i = softsel(tf.reshape(g1, [N, M*JX, 2*d]), tf.reshape(logits, [N, M*JX]))
if config.feed_gt:
logy = tf.log(tf.cast(self.y, 'float') + VERY_SMALL_NUMBER)
logits = tf.cond(self.is_train, lambda: logy, lambda: logits)
if config.feed_hard:
hard_yp = tf.argmax(tf.reshape(logits, [N, M*JX]), 1)
hard_logits = tf.reshape(tf.one_hot(hard_yp, M*JX), [N, M, JX]) # [N, M, JX]
logits = tf.cond(self.is_train, lambda: logits, lambda: hard_logits)
flat_logits = tf.reshape(logits, [-1, M * JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M*JX]
yp = tf.reshape(flat_yp, [-1, M, JX])
self.tensor_dict['g1'] = g1
self.logits = flat_logits
self.yp = yp
def _build_loss(self):
config = self.config
N, M, JX, JQ, VW, VC = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size
JX = tf.shape(self.x)[2]
loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
losses = -tf.log(tf.reduce_sum(self.yp * tf.cast(self.y, 'float'), [1, 2]) + VERY_SMALL_NUMBER)
ce_loss = tf.reduce_mean(loss_mask * losses)
tf.add_to_collection('losses', ce_loss)
self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
tf.scalar_summary(self.loss.op.name, self.loss)
tf.add_to_collection('ema/scalar', self.loss)
def _build_ema(self):
ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema_op = ema.apply(tf.get_collection("ema/scalar", scope=self.scope) + tf.get_collection("ema/histogram", scope=self.scope))
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = ema.average(var)
tf.scalar_summary(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/histogram", scope=self.scope):
ema_var = ema.average(var)
tf.histogram_summary(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self):
return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
assert isinstance(batch, DataSet)
config = self.config
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size
feed_dict = {}
if config.len_opt:
"""
Note that this optimization results in variable GPU RAM usage (i.e. it can cause OOM in the middle of training).
First train without len_opt to make sure there is no OOM, and only then enable len_opt.
"""
if sum(len(para) for para in batch.data['x']) == 0:
new_JX = 1
else:
new_JX = max(len(para) for para in batch.data['x'])
JX = min(JX, new_JX)
# print(JX)
x = np.zeros([N, M, JX], dtype='int32')
cx = np.zeros([N, M, JX, W], dtype='int32')
x_mask = np.zeros([N, M, JX], dtype='bool')
q = np.zeros([N, JQ], dtype='int32')
cq = np.zeros([N, JQ, W], dtype='int32')
q_mask = np.zeros([N, JQ], dtype='bool')
feed_dict[self.x] = x
feed_dict[self.x_mask] = x_mask
feed_dict[self.cx] = cx
feed_dict[self.q] = q
feed_dict[self.cq] = cq
feed_dict[self.q_mask] = q_mask
feed_dict[self.is_train] = is_train
if config.use_glove_for_unk:
feed_dict[self.new_emb_mat] = batch.shared['new_emb_mat']
X = batch.data['x']
CX = batch.data['cx']
def _get_word(word):
if word.startswith("@"):
return 2
d = batch.shared['word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d:
return d[each]
if config.use_glove_for_unk:
d2 = batch.shared['new_word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d2:
return d2[each] + len(d)
return 1
def _get_char(char):
d = batch.shared['char2idx']
if char in d:
return d[char]
return 1
if supervised:
y = np.zeros([N, M, JX], dtype='int32')
feed_dict[self.y] = y
for i, (xi, yi) in enumerate(zip(batch.data['x'], batch.data['y'])):
count = 0
for j, xij in enumerate(xi):
for k, xijk in enumerate(xij):
if xijk == yi:
y[i, j, k] = True
count += 1
assert count > 0
for i, xi in enumerate(X):
for j, xij in enumerate(xi):
for k, xijk in enumerate(xij):
each = _get_word(xijk)
x[i, j, k] = each
x_mask[i, j, k] = True
for i, cxi in enumerate(CX):
for j, cxij in enumerate(cxi):
for k, cxijk in enumerate(cxij):
for l, cxijkl in enumerate(cxijk):
cx[i, j, k, l] = _get_char(cxijkl)
if l + 1 == config.max_word_size:
break
for i, qi in enumerate(batch.data['q']):
for j, qij in enumerate(qi):
q[i, j] = _get_word(qij)
q_mask[i, j] = True
for i, cqi in enumerate(batch.data['cq']):
for j, cqij in enumerate(cqi):
for k, cqijk in enumerate(cqij):
cq[i, j, k] = _get_char(cqijk)
if k + 1 == config.max_word_size:
break
return feed_dict
def get_multi_gpu_models(config):
models = []
for gpu_idx in range(config.num_gpus):
with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device("/gpu:{}".format(gpu_idx)):
model = Model(config, scope)
tf.get_variable_scope().reuse_variables()
models.append(model)
return models
| bi-att-flow-master | basic_cnn/model.py |
import os
import tensorflow as tf
from basic_cnn.main import main as m
flags = tf.app.flags
flags.DEFINE_string("model_name", "basic_cnn", "Model name [basic]")
flags.DEFINE_string("data_dir", "data/cnn", "Data dir [data/cnn]")
flags.DEFINE_string("root_dir", "/Users/minjoons/data/cnn/questions", "root dir [~/data/cnn/questions]")
flags.DEFINE_string("run_id", "0", "Run ID [0]")
flags.DEFINE_string("out_base_dir", "out", "out base dir [out]")
flags.DEFINE_integer("batch_size", 60, "Batch size [60]")
flags.DEFINE_float("init_lr", 0.5, "Initial learning rate [0.5]")
flags.DEFINE_integer("num_epochs", 50, "Total number of epochs for training [50]")
flags.DEFINE_integer("num_steps", 20000, "Number of steps [20000]")
flags.DEFINE_integer("eval_num_batches", 100, "eval num batches [100]")
flags.DEFINE_integer("load_step", 0, "load step [0]")
flags.DEFINE_integer("early_stop", 4, "early stop [4]")
flags.DEFINE_string("mode", "test", "train | dev | test | forward [test]")
flags.DEFINE_boolean("load", True, "load saved data? [True]")
flags.DEFINE_boolean("progress", True, "Show progress? [True]")
flags.DEFINE_integer("log_period", 100, "Log period [100]")
flags.DEFINE_integer("eval_period", 1000, "Eval period [1000]")
flags.DEFINE_integer("save_period", 1000, "Save Period [1000]")
flags.DEFINE_float("decay", 0.9, "Exponential moving average decay [0.9]")
flags.DEFINE_boolean("draft", False, "Draft for quick testing? [False]")
flags.DEFINE_integer("hidden_size", 100, "Hidden size [100]")
flags.DEFINE_integer("char_out_size", 100, "Char out size [100]")
flags.DEFINE_float("input_keep_prob", 0.8, "Input keep prob [0.8]")
flags.DEFINE_integer("char_emb_size", 8, "Char emb size [8]")
flags.DEFINE_integer("char_filter_height", 5, "Char filter height [5]")
flags.DEFINE_float("wd", 0.0, "Weight decay [0.0]")
flags.DEFINE_bool("lower_word", True, "lower word [True]")
flags.DEFINE_bool("dump_eval", False, "dump eval? [True]")
flags.DEFINE_bool("dump_answer", True, "dump answer? [True]")
flags.DEFINE_string("model", "2", "config 1 |2 [2]")
flags.DEFINE_bool("squash", False, "squash the sentences into one? [False]")
flags.DEFINE_bool("single", False, "supervise only the answer sentence? [False]")
flags.DEFINE_integer("word_count_th", 10, "word count th [100]")
flags.DEFINE_integer("char_count_th", 50, "char count th [500]")
flags.DEFINE_integer("sent_size_th", 60, "sent size th [64]")
flags.DEFINE_integer("num_sents_th", 200, "num sents th [8]")
flags.DEFINE_integer("ques_size_th", 30, "ques size th [32]")
flags.DEFINE_integer("word_size_th", 16, "word size th [16]")
flags.DEFINE_integer("para_size_th", 256, "para size th [256]")
flags.DEFINE_bool("swap_memory", True, "swap memory? [True]")
flags.DEFINE_string("data_filter", "max", "max | valid | semi [max]")
flags.DEFINE_bool("finetune", False, "finetune? [False]")
flags.DEFINE_bool("feed_gt", False, "feed gt prev token during training [False]")
flags.DEFINE_bool("feed_hard", False, "feed hard argmax prev token during testing [False]")
flags.DEFINE_bool("use_glove_for_unk", True, "use glove for unk [False]")
flags.DEFINE_bool("known_if_glove", True, "consider as known if present in glove [False]")
flags.DEFINE_bool("eval", True, "eval? [True]")
flags.DEFINE_integer("highway_num_layers", 2, "highway num layers [2]")
flags.DEFINE_bool("use_word_emb", True, "use word embedding? [True]")
flags.DEFINE_string("forward_name", "single", "Forward name [single]")
flags.DEFINE_string("answer_path", "", "Answer path []")
flags.DEFINE_string("load_path", "", "Load path []")
flags.DEFINE_string("shared_path", "", "Shared path []")
flags.DEFINE_string("device", "/cpu:0", "default device [/cpu:0]")
flags.DEFINE_integer("num_gpus", 1, "num of gpus [1]")
flags.DEFINE_string("out_channel_dims", "100", "Out channel dims, separated by commas [100]")
flags.DEFINE_string("filter_heights", "5", "Filter heights, separated by commas [5]")
flags.DEFINE_bool("share_cnn_weights", True, "Share CNN weights [False]")
flags.DEFINE_bool("share_lstm_weights", True, "Share LSTM weights [True]")
flags.DEFINE_bool("two_prepro_layers", False, "Use two layers for preprocessing? [False]")
flags.DEFINE_bool("aug_att", False, "Augment attention layers with more features? [False]")
flags.DEFINE_integer("max_to_keep", 20, "Max recent saves to keep [20]")
flags.DEFINE_bool("vis", False, "output visualization numbers? [False]")
flags.DEFINE_bool("dump_pickle", True, "Dump pickle instead of json? [True]")
flags.DEFINE_float("keep_prob", 1.0, "keep prob [1.0]")
flags.DEFINE_string("prev_mode", "a", "prev mode gy | y | a [a]")
flags.DEFINE_string("logit_func", "tri_linear", "logit func [tri_linear]")
flags.DEFINE_bool("sh", False, "use superhighway [False]")
flags.DEFINE_string("answer_func", "linear", "answer logit func [linear]")
flags.DEFINE_bool("cluster", False, "Cluster data for faster training [False]")
flags.DEFINE_bool("len_opt", False, "Length optimization? [False]")
flags.DEFINE_string("sh_logit_func", "tri_linear", "sh logit func [tri_linear]")
flags.DEFINE_float("filter_ratio", 1.0, "filter ratio [1.0]")
flags.DEFINE_bool("bi", False, "bi-directional attention? [False]")
flags.DEFINE_integer("width", 5, "width around entity [5]")
def main(_):
config = flags.FLAGS
config.out_dir = os.path.join(config.out_base_dir, config.model_name, str(config.run_id).zfill(2))
m(config)
if __name__ == "__main__":
tf.app.run()
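# Example invocation (hypothetical flag values, shown for illustration only):
#   python -m basic_cnn.cli --mode train --data_dir data/cnn \
#       --root_dir /path/to/cnn/questions --run_id 0 --batch_size 60
# Any flag defined above can be overridden on the command line in the same way.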
| bi-att-flow-master | basic_cnn/cli.py |
import json
import os
import random
import itertools
import math
from collections import defaultdict
import numpy as np
from cnn_dm.prepro import para2sents
from my.tensorflow import grouper
from my.utils import index
class Data(object):
def get_size(self):
raise NotImplementedError()
def get_by_idxs(self, idxs):
"""
Efficient way to obtain a batch of items from the filesystem.
:param idxs:
:return: dict mapping each key to the list of values for the given idxs, e.g. {'X': [...], 'Y': [...]}
"""
data = defaultdict(list)
for idx in idxs:
each_data = self.get_one(idx)
for key, val in each_data.items():
data[key].append(val)
return data
def get_one(self, idx):
raise NotImplementedError()
def get_empty(self):
raise NotImplementedError()
def __add__(self, other):
raise NotImplementedError()
class MyData(Data):
def __init__(self, config, root_dir, file_names):
self.root_dir = root_dir
self.file_names = file_names
self.config = config
def get_one(self, idx):
file_name = self.file_names[idx]
with open(os.path.join(self.root_dir, file_name), 'r') as fh:
url = fh.readline().strip()
_ = fh.readline()
para = fh.readline().strip()
_ = fh.readline()
ques = fh.readline().strip()
_ = fh.readline()
answer = fh.readline().strip()
_ = fh.readline()
cands = list(line.strip() for line in fh)
cand_ents = list(cand.split(":")[0] for cand in cands)
wordss = para2sents(para, self.config.width)
ques_words = ques.split(" ")
x = wordss
cx = [[list(word) for word in words] for words in wordss]
q = ques_words
cq = [list(word) for word in ques_words]
y = answer
c = cand_ents
data = {'x': x, 'cx': cx, 'q': q, 'cq': cq, 'y': y, 'c': c, 'ids': file_name}
return data
def get_empty(self):
return MyData(self.config, self.root_dir, [])
def __add__(self, other):
file_names = self.file_names + other.file_names
return MyData(self.config, self.root_dir, file_names)
def get_size(self):
return len(self.file_names)
class DataSet(object):
def __init__(self, data, data_type, shared=None, valid_idxs=None):
self.data = data # e.g. {'X': [0, 1, 2], 'Y': [2, 3, 4]}
self.data_type = data_type
self.shared = shared
total_num_examples = self.get_data_size()
self.valid_idxs = range(total_num_examples) if valid_idxs is None else valid_idxs
self.num_examples = total_num_examples
def _sort_key(self, idx):
rx = self.data['*x'][idx]
x = self.shared['x'][rx[0]][rx[1]]
return max(map(len, x))
def get_data_size(self):
if isinstance(self.data, dict):
return len(next(iter(self.data.values())))
elif isinstance(self.data, Data):
return self.data.get_size()
raise Exception()
def get_by_idxs(self, idxs):
if isinstance(self.data, dict):
out = defaultdict(list)
for key, val in self.data.items():
out[key].extend(val[idx] for idx in idxs)
return out
elif isinstance(self.data, Data):
return self.data.get_by_idxs(idxs)
raise Exception()
def get_one(self, idx):
if isinstance(self.data, dict):
out = {key: [val[idx]] for key, val in self.data.items()}
return out
elif isinstance(self.data, Data):
return self.data.get_one(idx)
def get_batches(self, batch_size, num_batches=None, shuffle=False, cluster=False):
"""
:param batch_size:
:param num_batches:
:param shuffle:
:param cluster: cluster examples by their lengths; this might give performance boost (i.e. faster training).
:return:
"""
num_batches_per_epoch = int(math.ceil(self.num_examples / batch_size))
if num_batches is None:
num_batches = num_batches_per_epoch
num_epochs = int(math.ceil(num_batches / num_batches_per_epoch))
if shuffle:
random_idxs = random.sample(self.valid_idxs, len(self.valid_idxs))
if cluster:
sorted_idxs = sorted(random_idxs, key=self._sort_key)
sorted_grouped = lambda: list(grouper(sorted_idxs, batch_size))
grouped = lambda: random.sample(sorted_grouped(), num_batches_per_epoch)
else:
random_grouped = lambda: list(grouper(random_idxs, batch_size))
grouped = random_grouped
else:
raw_grouped = lambda: list(grouper(self.valid_idxs, batch_size))
grouped = raw_grouped
batch_idx_tuples = itertools.chain.from_iterable(grouped() for _ in range(num_epochs))
for _ in range(num_batches):
batch_idxs = tuple(i for i in next(batch_idx_tuples) if i is not None)
batch_data = self.get_by_idxs(batch_idxs)
shared_batch_data = {}
for key, val in batch_data.items():
if key.startswith('*'):
assert self.shared is not None
shared_key = key[1:]
shared_batch_data[shared_key] = [index(self.shared[shared_key], each) for each in val]
batch_data.update(shared_batch_data)
batch_ds = DataSet(batch_data, self.data_type, shared=self.shared)
yield batch_idxs, batch_ds
def get_multi_batches(self, batch_size, num_batches_per_step, num_steps=None, shuffle=False, cluster=False):
batch_size_per_step = batch_size * num_batches_per_step
batches = self.get_batches(batch_size_per_step, num_batches=num_steps, shuffle=shuffle, cluster=cluster)
multi_batches = (tuple(zip(grouper(idxs, batch_size, shorten=True, num_groups=num_batches_per_step),
data_set.divide(num_batches_per_step))) for idxs, data_set in batches)
return multi_batches
def get_empty(self):
if isinstance(self.data, dict):
data = {key: [] for key in self.data}
elif isinstance(self.data, Data):
data = self.data.get_empty()
else:
raise Exception()
return DataSet(data, self.data_type, shared=self.shared)
def __add__(self, other):
if isinstance(self.data, dict):
data = {key: val + other.data[key] for key, val in self.data.items()}
elif isinstance(self.data, Data):
data = self.data + other.data
else:
raise Exception()
valid_idxs = list(self.valid_idxs) + [valid_idx + self.num_examples for valid_idx in other.valid_idxs]
return DataSet(data, self.data_type, shared=self.shared, valid_idxs=valid_idxs)
def divide(self, integer):
batch_size = int(math.ceil(self.num_examples / integer))
idxs_gen = grouper(self.valid_idxs, batch_size, shorten=True, num_groups=integer)
data_gen = (self.get_by_idxs(idxs) for idxs in idxs_gen)
ds_tuple = tuple(DataSet(data, self.data_type, shared=self.shared) for data in data_gen)
return ds_tuple
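# Usage sketch (illustrative; `dev_data`, `model`, and `num_steps` are assumed to exist):
# get_batches yields (batch_idxs, DataSet) pairs, e.g.
#   for batch_idxs, batch_ds in dev_data.get_batches(config.batch_size,
#                                                    num_batches=num_steps, shuffle=True):
#       feed_dict = model.get_feed_dict(batch_ds, False)
# get_multi_batches wraps this to produce one (idxs, DataSet) pair per GPU per step.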
class MyDataSet(DataSet):
def __init__(self, data, data_type, shared=None, valid_idxs=None):
super(MyDataSet, self).__init__(data, data_type, shared=shared, valid_idxs=valid_idxs)
shared['max_num_sents'] = len(self.get_one(self.num_examples-1)['x'])
def _sort_key(self, idx):
return idx
def read_data(config, data_type, ref, data_filter=None):
shared_path = os.path.join(config.data_dir, "shared_{}.json".format(data_type))
with open(shared_path, 'r') as fh:
shared = json.load(fh)
paths = shared['sorted']
if config.filter_ratio < 1.0:
stop = int(round(len(paths) * config.filter_ratio))
paths = paths[:stop]
num_examples = len(paths)
valid_idxs = range(num_examples)
print("Loaded {}/{} examples from {}".format(len(valid_idxs), num_examples, data_type))
shared_path = config.shared_path or os.path.join(config.out_dir, "shared.json")
if not ref:
word2vec_dict = shared['lower_word2vec'] if config.lower_word else shared['word2vec']
word_counter = shared['lower_word_counter'] if config.lower_word else shared['word_counter']
char_counter = shared['char_counter']
if config.finetune:
shared['word2idx'] = {word: idx + 3 for idx, word in
enumerate(word for word, count in word_counter.items()
if count > config.word_count_th or (config.known_if_glove and word in word2vec_dict))}
else:
assert config.known_if_glove
assert config.use_glove_for_unk
shared['word2idx'] = {word: idx + 3 for idx, word in
enumerate(word for word, count in word_counter.items()
if count > config.word_count_th and word not in word2vec_dict)}
shared['char2idx'] = {char: idx + 2 for idx, char in
enumerate(char for char, count in char_counter.items()
if count > config.char_count_th)}
NULL = "-NULL-"
UNK = "-UNK-"
ENT = "-ENT-"
shared['word2idx'][NULL] = 0
shared['word2idx'][UNK] = 1
shared['word2idx'][ENT] = 2
shared['char2idx'][NULL] = 0
shared['char2idx'][UNK] = 1
json.dump({'word2idx': shared['word2idx'], 'char2idx': shared['char2idx']}, open(shared_path, 'w'))
else:
new_shared = json.load(open(shared_path, 'r'))
for key, val in new_shared.items():
shared[key] = val
if config.use_glove_for_unk:
# create new word2idx and word2vec
word2vec_dict = shared['lower_word2vec'] if config.lower_word else shared['word2vec']
new_word2idx_dict = {word: idx for idx, word in enumerate(word for word in word2vec_dict.keys() if word not in shared['word2idx'])}
shared['new_word2idx'] = new_word2idx_dict
offset = len(shared['word2idx'])
word2vec_dict = shared['lower_word2vec'] if config.lower_word else shared['word2vec']
new_word2idx_dict = shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for word, idx in new_word2idx_dict.items()}
# print("{}/{} unique words have corresponding glove vectors.".format(len(idx2vec_dict), len(word2idx_dict)))
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
shared['new_emb_mat'] = new_emb_mat
data = MyData(config, os.path.join(config.root_dir, data_type), paths)
data_set = MyDataSet(data, data_type, shared=shared, valid_idxs=valid_idxs)
return data_set
def get_cnn_data_filter(config):
return True
def update_config(config, data_sets):
config.max_num_sents = 0
config.max_sent_size = 0
config.max_ques_size = 0
config.max_word_size = 0
for data_set in data_sets:
shared = data_set.shared
config.max_sent_size = max(config.max_sent_size, shared['max_sent_size'])
config.max_ques_size = max(config.max_ques_size, shared['max_ques_size'])
config.max_word_size = max(config.max_word_size, shared['max_word_size'])
config.max_num_sents = max(config.max_num_sents, shared['max_num_sents'])
config.max_word_size = min(config.max_word_size, config.word_size_th)
config.char_vocab_size = len(data_sets[0].shared['char2idx'])
config.word_emb_size = len(next(iter(data_sets[0].shared['word2vec'].values())))
config.word_vocab_size = len(data_sets[0].shared['word2idx'])
| bi-att-flow-master | basic_cnn/read_data.py |
import tensorflow as tf
from basic_cnn.model import Model
from my.tensorflow import average_gradients
class Trainer(object):
def __init__(self, config, model):
assert isinstance(model, Model)
self.config = config
self.model = model
self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
self.loss = model.get_loss()
self.var_list = model.get_var_list()
self.global_step = model.get_global_step()
self.summary = model.summary
self.grads = self.opt.compute_gradients(self.loss, var_list=self.var_list)
self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
def get_train_op(self):
return self.train_op
def step(self, sess, batch, get_summary=False):
assert isinstance(sess, tf.Session)
_, ds = batch
feed_dict = self.model.get_feed_dict(ds, True)
if get_summary:
loss, summary, train_op = \
sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)
else:
loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
summary = None
return loss, summary, train_op
class MultiGPUTrainer(object):
def __init__(self, config, models):
model = models[0]
assert isinstance(model, Model)
self.config = config
self.model = model
self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
self.var_list = model.get_var_list()
self.global_step = model.get_global_step()
self.summary = model.summary
self.models = models
losses = []
grads_list = []
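        # Build one loss and one set of gradients per GPU tower; they are averaged below
        # before a single update is applied to the shared variables.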
for gpu_idx, model in enumerate(models):
with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/gpu:{}".format(gpu_idx)):
loss = model.get_loss()
grads = self.opt.compute_gradients(loss, var_list=self.var_list)
losses.append(loss)
grads_list.append(grads)
self.loss = tf.add_n(losses)/len(losses)
self.grads = average_gradients(grads_list)
self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
def step(self, sess, batches, get_summary=False):
assert isinstance(sess, tf.Session)
feed_dict = {}
for batch, model in zip(batches, self.models):
_, ds = batch
feed_dict.update(model.get_feed_dict(ds, True))
if get_summary:
loss, summary, train_op = \
sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)
else:
loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
summary = None
return loss, summary, train_op
| bi-att-flow-master | basic_cnn/trainer.py |
import argparse
import json
import math
import os
import shutil
from pprint import pprint
import tensorflow as tf
from tqdm import tqdm
import numpy as np
from basic_cnn.evaluator import F1Evaluator, Evaluator, ForwardEvaluator, MultiGPUF1Evaluator, CNNAccuracyEvaluator, \
MultiGPUCNNAccuracyEvaluator
from basic_cnn.graph_handler import GraphHandler
from basic_cnn.model import Model, get_multi_gpu_models
from basic_cnn.trainer import Trainer, MultiGPUTrainer
from basic_cnn.read_data import read_data, get_cnn_data_filter, update_config
def main(config):
set_dirs(config)
with tf.device(config.device):
if config.mode == 'train':
_train(config)
elif config.mode == 'test' or config.mode == 'dev':
_test(config)
elif config.mode == 'forward':
_forward(config)
else:
raise ValueError("invalid value for 'mode': {}".format(config.mode))
def _config_draft(config):
if config.draft:
config.num_steps = 2
config.eval_period = 1
config.log_period = 1
config.save_period = 1
config.eval_num_batches = 1
def _train(config):
# load_metadata(config, 'train') # this updates the config file according to metadata file
data_filter = get_cnn_data_filter(config)
train_data = read_data(config, 'train', config.load, data_filter=data_filter)
dev_data = read_data(config, 'dev', True, data_filter=data_filter)
# test_data = read_data(config, 'test', True, data_filter=data_filter)
update_config(config, [train_data, dev_data])
_config_draft(config)
word2vec_dict = train_data.shared['lower_word2vec'] if config.lower_word else train_data.shared['word2vec']
word2idx_dict = train_data.shared['word2idx']
idx2vec_dict = {word2idx_dict[word]: vec for word, vec in word2vec_dict.items() if word in word2idx_dict}
print("{}/{} unique words have corresponding glove vectors.".format(len(idx2vec_dict), len(word2idx_dict)))
emb_mat = np.array([idx2vec_dict[idx] if idx in idx2vec_dict
else np.random.multivariate_normal(np.zeros(config.word_emb_size), np.eye(config.word_emb_size))
for idx in range(config.word_vocab_size)])
config.emb_mat = emb_mat
# construct model graph and variables (using default graph)
pprint(config.__flags, indent=2)
# model = Model(config)
models = get_multi_gpu_models(config)
model = models[0]
trainer = MultiGPUTrainer(config, models)
evaluator = MultiGPUCNNAccuracyEvaluator(config, models, tensor_dict=model.tensor_dict if config.vis else None)
graph_handler = GraphHandler(config) # controls all tensors and variables in the graph, including loading /saving
# Variables
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
graph_handler.initialize(sess)
# begin training
print(train_data.num_examples)
num_steps = config.num_steps or int(math.ceil(train_data.num_examples / (config.batch_size * config.num_gpus))) * config.num_epochs
global_step = 0
for batches in tqdm(train_data.get_multi_batches(config.batch_size, config.num_gpus,
num_steps=num_steps, shuffle=True, cluster=config.cluster), total=num_steps):
global_step = sess.run(model.global_step) + 1 # +1 because all calculations are done after step
get_summary = global_step % config.log_period == 0
loss, summary, train_op = trainer.step(sess, batches, get_summary=get_summary)
if get_summary:
graph_handler.add_summary(summary, global_step)
# occasional saving
if global_step % config.save_period == 0:
graph_handler.save(sess, global_step=global_step)
if not config.eval:
continue
# Occasional evaluation
if global_step % config.eval_period == 0:
num_steps = math.ceil(dev_data.num_examples / (config.batch_size * config.num_gpus))
if 0 < config.eval_num_batches < num_steps:
num_steps = config.eval_num_batches
e_train = evaluator.get_evaluation_from_batches(
sess, tqdm(train_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps), total=num_steps)
)
graph_handler.add_summaries(e_train.summaries, global_step)
e_dev = evaluator.get_evaluation_from_batches(
sess, tqdm(dev_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps), total=num_steps))
graph_handler.add_summaries(e_dev.summaries, global_step)
if config.dump_eval:
graph_handler.dump_eval(e_dev)
if config.dump_answer:
graph_handler.dump_answer(e_dev)
if global_step % config.save_period != 0:
graph_handler.save(sess, global_step=global_step)
def _test(config):
assert config.load
test_data = read_data(config, config.mode, True)
update_config(config, [test_data])
_config_draft(config)
if config.use_glove_for_unk:
word2vec_dict = test_data.shared['lower_word2vec'] if config.lower_word else test_data.shared['word2vec']
new_word2idx_dict = test_data.shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for word, idx in new_word2idx_dict.items()}
# print("{}/{} unique words have corresponding glove vectors.".format(len(idx2vec_dict), len(word2idx_dict)))
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
config.new_emb_mat = new_emb_mat
pprint(config.__flags, indent=2)
models = get_multi_gpu_models(config)
evaluator = MultiGPUCNNAccuracyEvaluator(config, models, tensor_dict=models[0].tensor_dict if config.vis else None)
graph_handler = GraphHandler(config) # controls all tensors and variables in the graph, including loading /saving
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
graph_handler.initialize(sess)
num_steps = math.ceil(test_data.num_examples / (config.batch_size * config.num_gpus))
if 0 < config.eval_num_batches < num_steps:
num_steps = config.eval_num_batches
e = None
for multi_batch in tqdm(test_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps, cluster=config.cluster), total=num_steps):
ei = evaluator.get_evaluation(sess, multi_batch)
e = ei if e is None else e + ei
if config.vis:
eval_subdir = os.path.join(config.eval_dir, "{}-{}".format(ei.data_type, str(ei.global_step).zfill(6)))
if not os.path.exists(eval_subdir):
os.mkdir(eval_subdir)
path = os.path.join(eval_subdir, str(ei.idxs[0]).zfill(8))
graph_handler.dump_eval(ei, path=path)
print(e)
if config.dump_answer:
print("dumping answer ...")
graph_handler.dump_answer(e)
if config.dump_eval:
print("dumping eval ...")
graph_handler.dump_eval(e)
def _forward(config):
assert config.load
test_data = read_data(config, config.forward_name, True)
update_config(config, [test_data])
_config_draft(config)
if config.use_glove_for_unk:
word2vec_dict = test_data.shared['lower_word2vec'] if config.lower_word else test_data.shared['word2vec']
new_word2idx_dict = test_data.shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for word, idx in new_word2idx_dict.items()}
# print("{}/{} unique words have corresponding glove vectors.".format(len(idx2vec_dict), len(word2idx_dict)))
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
config.new_emb_mat = new_emb_mat
pprint(config.__flags, indent=2)
models = get_multi_gpu_models(config)
model = models[0]
evaluator = ForwardEvaluator(config, model)
graph_handler = GraphHandler(config) # controls all tensors and variables in the graph, including loading /saving
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
graph_handler.initialize(sess)
num_batches = math.ceil(test_data.num_examples / config.batch_size)
if 0 < config.eval_num_batches < num_batches:
num_batches = config.eval_num_batches
e = evaluator.get_evaluation_from_batches(sess, tqdm(test_data.get_batches(config.batch_size, num_batches=num_batches), total=num_batches))
print(e)
if config.dump_answer:
print("dumping answer ...")
graph_handler.dump_answer(e, path=config.answer_path)
if config.dump_eval:
print("dumping eval ...")
graph_handler.dump_eval(e)
def set_dirs(config):
# create directories
if not config.load and os.path.exists(config.out_dir):
shutil.rmtree(config.out_dir)
config.save_dir = os.path.join(config.out_dir, "save")
config.log_dir = os.path.join(config.out_dir, "log")
config.eval_dir = os.path.join(config.out_dir, "eval")
config.answer_dir = os.path.join(config.out_dir, "answer")
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
if not os.path.exists(config.save_dir):
os.mkdir(config.save_dir)
if not os.path.exists(config.log_dir):
os.mkdir(config.log_dir)
if not os.path.exists(config.answer_dir):
os.mkdir(config.answer_dir)
if not os.path.exists(config.eval_dir):
os.mkdir(config.eval_dir)
def _get_args():
parser = argparse.ArgumentParser()
parser.add_argument("config_path")
return parser.parse_args()
class Config(object):
def __init__(self, **entries):
self.__dict__.update(entries)
def _run():
args = _get_args()
with open(args.config_path, 'r') as fh:
config = Config(**json.load(fh))
main(config)
if __name__ == "__main__":
_run()
| bi-att-flow-master | basic_cnn/main.py |
import itertools
from collections import defaultdict
import numpy as np
import tensorflow as tf
import os
from basic_cnn.read_data import DataSet
from my.nltk_utils import span_f1
from my.tensorflow import padded_reshape
from my.utils import argmax
class Evaluation(object):
def __init__(self, data_type, global_step, idxs, yp, tensor_dict=None):
self.data_type = data_type
self.global_step = global_step
self.idxs = idxs
self.yp = yp
self.num_examples = len(yp)
self.tensor_dict = None
self.dict = {'data_type': data_type,
'global_step': global_step,
'yp': yp,
'idxs': idxs,
'num_examples': self.num_examples}
if tensor_dict is not None:
self.tensor_dict = {key: val.tolist() for key, val in tensor_dict.items()}
for key, val in self.tensor_dict.items():
self.dict[key] = val
self.summaries = None
def __repr__(self):
return "{} step {}".format(self.data_type, self.global_step)
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_yp = self.yp + other.yp
new_idxs = self.idxs + other.idxs
new_tensor_dict = None
if self.tensor_dict is not None:
new_tensor_dict = {key: val + other.tensor_dict[key] for key, val in self.tensor_dict.items()}
return Evaluation(self.data_type, self.global_step, new_idxs, new_yp, tensor_dict=new_tensor_dict)
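    # __radd__ lets evaluations be combined with the built-in sum(), whose implicit
    # start value is 0 (handled by the `other == 0` check in __add__).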
def __radd__(self, other):
return self.__add__(other)
class LabeledEvaluation(Evaluation):
def __init__(self, data_type, global_step, idxs, yp, y, id2answer_dict, tensor_dict=None):
super(LabeledEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
self.y = y
self.dict['y'] = y
self.id2answer_dict = id2answer_dict
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_yp = self.yp + other.yp
new_y = self.y + other.y
new_idxs = self.idxs + other.idxs
new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
new_id2answer_dict['scores'] = new_id2score_dict
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
return LabeledEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, new_id2answer_dict, tensor_dict=new_tensor_dict)
class AccuracyEvaluation(LabeledEvaluation):
def __init__(self, data_type, global_step, idxs, yp, y, id2answer_dict, correct, loss, tensor_dict=None):
super(AccuracyEvaluation, self).__init__(data_type, global_step, idxs, yp, y, id2answer_dict, tensor_dict=tensor_dict)
self.loss = loss
self.correct = correct
self.id2answer_dict = id2answer_dict
self.acc = sum(correct) / len(correct)
self.dict['loss'] = loss
self.dict['correct'] = correct
self.dict['acc'] = self.acc
loss_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/loss'.format(data_type), simple_value=self.loss)])
acc_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/acc'.format(data_type), simple_value=self.acc)])
self.summaries = [loss_summary, acc_summary]
def __repr__(self):
return "{} step {}: accuracy={}={}/{}, loss={}".format(self.data_type, self.global_step, self.acc,
sum(self.correct), self.num_examples, self.loss)
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_idxs = self.idxs + other.idxs
new_yp = self.yp + other.yp
new_y = self.y + other.y
new_correct = self.correct + other.correct
new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
new_id2answer_dict['scores'] = new_id2score_dict
new_tensor_dict = None
if self.tensor_dict is not None:
new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
return AccuracyEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, new_id2answer_dict, new_correct, new_loss, tensor_dict=new_tensor_dict)
class Evaluator(object):
def __init__(self, config, model, tensor_dict=None):
self.config = config
self.model = model
self.global_step = model.global_step
self.yp = model.yp
self.tensor_dict = {} if tensor_dict is None else tensor_dict
def get_evaluation(self, sess, batch):
idxs, data_set = batch
feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
global_step, yp, vals = sess.run([self.global_step, self.yp, list(self.tensor_dict.values())], feed_dict=feed_dict)
yp = yp[:data_set.num_examples]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), tensor_dict=tensor_dict)
return e
def get_evaluation_from_batches(self, sess, batches):
e = sum(self.get_evaluation(sess, batch) for batch in batches)
return e
class LabeledEvaluator(Evaluator):
def __init__(self, config, model, tensor_dict=None):
super(LabeledEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
self.y = model.y
def get_evaluation(self, sess, batch):
idxs, data_set = batch
feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
global_step, yp, vals = sess.run([self.global_step, self.yp, list(self.tensor_dict.values())], feed_dict=feed_dict)
yp = yp[:data_set.num_examples]
y = feed_dict[self.y]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = LabeledEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), y.tolist(), tensor_dict=tensor_dict)
return e
class AccuracyEvaluator(LabeledEvaluator):
def __init__(self, config, model, tensor_dict=None):
super(AccuracyEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
self.loss = model.loss
def get_evaluation(self, sess, batch):
idxs, data_set = self._split_batch(batch)
assert isinstance(data_set, DataSet)
feed_dict = self._get_feed_dict(batch)
y = data_set.data['y']
global_step, yp, loss, vals = sess.run([self.global_step, self.yp, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
yp = yp[:data_set.num_examples]
correct, probs, preds = zip(*[self.__class__.compare(data_set.get_one(idx), ypi) for idx, ypi in zip(data_set.valid_idxs, yp)])
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
ids = data_set.data['ids']
id2score_dict = {id_: prob for id_, prob in zip(ids, probs)}
id2answer_dict = {id_: pred for id_, pred in zip(ids, preds)}
id2answer_dict['scores'] = id2score_dict
e = AccuracyEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), y, id2answer_dict, correct, float(loss), tensor_dict=tensor_dict)
return e
@staticmethod
def compare(data, ypi):
prob = float(np.max(ypi))
yi = data['y']
for start, stop in yi:
if start == int(np.argmax(ypi)):
return True, prob, " "
return False, prob, " "
def _split_batch(self, batch):
return batch
def _get_feed_dict(self, batch):
return self.model.get_feed_dict(batch[1], False)
class CNNAccuracyEvaluator(AccuracyEvaluator):
@staticmethod
def compare(data, ypi):
# ypi: [N, M, JX] numbers
yi = data['y'][0] # entity
xi = data['x'][0] # [N, M, JX] words
dist = defaultdict(int)
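        # Sum the predicted probability mass over every occurrence of each "@entity"
        # token in the passage; the entity with the largest total is the prediction.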
for ypij, xij in zip(ypi, xi):
for ypijk, xijk in zip(ypij, xij):
if xijk.startswith("@"):
dist[xijk] += ypijk
pred, prob = max(dist.items(), key=lambda item: item[1])
assert pred.startswith("@")
assert yi.startswith("@")
return pred == yi, prob, pred
class AccuracyEvaluator2(AccuracyEvaluator):
@staticmethod
def compare(yi, ypi):
for start, stop in yi:
para_start = int(np.argmax(np.max(ypi, 1)))
sent_start = int(np.argmax(ypi[para_start]))
if tuple(start) == (para_start, sent_start):
return True
return False
class ForwardEvaluation(Evaluation):
def __init__(self, data_type, global_step, idxs, yp, yp2, loss, id2answer_dict, tensor_dict=None):
super(ForwardEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
self.yp2 = yp2
self.loss = loss
self.dict['loss'] = loss
self.dict['yp2'] = yp2
self.id2answer_dict = id2answer_dict
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_idxs = self.idxs + other.idxs
new_yp = self.yp + other.yp
new_yp2 = self.yp2 + other.yp2
new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_yp)
new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
return ForwardEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_loss, new_id2answer_dict, tensor_dict=new_tensor_dict)
def __repr__(self):
return "{} step {}: loss={:.4f}".format(self.data_type, self.global_step, self.loss)
class F1Evaluation(AccuracyEvaluation):
def __init__(self, data_type, global_step, idxs, yp, yp2, y, correct, loss, f1s, id2answer_dict, tensor_dict=None):
        super(F1Evaluation, self).__init__(data_type, global_step, idxs, yp, y, id2answer_dict, correct, loss, tensor_dict=tensor_dict)
self.yp2 = yp2
self.f1s = f1s
self.f1 = float(np.mean(f1s))
self.dict['yp2'] = yp2
self.dict['f1s'] = f1s
self.dict['f1'] = self.f1
self.id2answer_dict = id2answer_dict
f1_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/f1'.format(data_type), simple_value=self.f1)])
self.summaries.append(f1_summary)
def __add__(self, other):
if other == 0:
return self
assert self.data_type == other.data_type
assert self.global_step == other.global_step
new_idxs = self.idxs + other.idxs
new_yp = self.yp + other.yp
new_yp2 = self.yp2 + other.yp2
new_y = self.y + other.y
new_correct = self.correct + other.correct
new_f1s = self.f1s + other.f1s
new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
return F1Evaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_y, new_correct, new_loss, new_f1s, new_id2answer_dict)
def __repr__(self):
return "{} step {}: accuracy={:.4f}, f1={:.4f}, loss={:.4f}".format(self.data_type, self.global_step, self.acc, self.f1, self.loss)
class F1Evaluator(LabeledEvaluator):
def __init__(self, config, model, tensor_dict=None):
super(F1Evaluator, self).__init__(config, model, tensor_dict=tensor_dict)
self.yp2 = model.yp2
self.loss = model.loss
def get_evaluation(self, sess, batch):
idxs, data_set = self._split_batch(batch)
assert isinstance(data_set, DataSet)
feed_dict = self._get_feed_dict(batch)
global_step, yp, yp2, loss, vals = sess.run([self.global_step, self.yp, self.yp2, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
y = data_set.data['y']
if self.config.squash:
new_y = []
for xi, yi in zip(data_set.data['x'], y):
new_yi = []
for start, stop in yi:
start_offset = sum(map(len, xi[:start[0]]))
stop_offset = sum(map(len, xi[:stop[0]]))
new_start = 0, start_offset + start[1]
new_stop = 0, stop_offset + stop[1]
new_yi.append((new_start, new_stop))
new_y.append(new_yi)
y = new_y
if self.config.single:
new_y = []
for yi in y:
new_yi = []
for start, stop in yi:
new_start = 0, start[1]
new_stop = 0, stop[1]
new_yi.append((new_start, new_stop))
new_y.append(new_yi)
y = new_y
yp, yp2 = yp[:data_set.num_examples], yp2[:data_set.num_examples]
spans = [get_best_span(ypi, yp2i) for ypi, yp2i in zip(yp, yp2)]
def _get(xi, span):
if len(xi) <= span[0][0]:
return [""]
if len(xi[span[0][0]]) <= span[1][1]:
return [""]
return xi[span[0][0]][span[0][1]:span[1][1]]
id2answer_dict = {id_: " ".join(_get(xi, span))
for id_, xi, span in zip(data_set.data['ids'], data_set.data['x'], spans)}
correct = [self.__class__.compare2(yi, span) for yi, span in zip(y, spans)]
f1s = [self.__class__.span_f1(yi, span) for yi, span in zip(y, spans)]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = F1Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), y,
correct, float(loss), f1s, id2answer_dict, tensor_dict=tensor_dict)
return e
def _split_batch(self, batch):
return batch
def _get_feed_dict(self, batch):
return self.model.get_feed_dict(batch[1], False)
@staticmethod
def compare(yi, ypi, yp2i):
for start, stop in yi:
aypi = argmax(ypi)
mask = np.zeros(yp2i.shape)
mask[aypi[0], aypi[1]:] = np.ones([yp2i.shape[1] - aypi[1]])
if tuple(start) == aypi and (stop[0], stop[1]-1) == argmax(yp2i * mask):
return True
return False
@staticmethod
def compare2(yi, span):
for start, stop in yi:
if tuple(start) == span[0] and tuple(stop) == span[1]:
return True
return False
@staticmethod
def span_f1(yi, span):
max_f1 = 0
for start, stop in yi:
if start[0] == span[0][0]:
true_span = start[1], stop[1]
pred_span = span[0][1], span[1][1]
f1 = span_f1(true_span, pred_span)
max_f1 = max(f1, max_f1)
return max_f1
class MultiGPUF1Evaluator(F1Evaluator):
def __init__(self, config, models, tensor_dict=None):
super(MultiGPUF1Evaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
self.models = models
with tf.name_scope("eval_concat"):
N, M, JX = config.batch_size, config.max_num_sents, config.max_sent_size
self.yp = tf.concat(0, [padded_reshape(model.yp, [N, M, JX]) for model in models])
self.yp2 = tf.concat(0, [padded_reshape(model.yp2, [N, M, JX]) for model in models])
self.loss = tf.add_n([model.loss for model in models])/len(models)
def _split_batch(self, batches):
idxs_list, data_sets = zip(*batches)
idxs = sum(idxs_list, ())
data_set = sum(data_sets, data_sets[0].get_empty())
return idxs, data_set
def _get_feed_dict(self, batches):
feed_dict = {}
for model, (_, data_set) in zip(self.models, batches):
feed_dict.update(model.get_feed_dict(data_set, False))
return feed_dict
class MultiGPUCNNAccuracyEvaluator(CNNAccuracyEvaluator):
def __init__(self, config, models, tensor_dict=None):
super(MultiGPUCNNAccuracyEvaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
self.models = models
with tf.name_scope("eval_concat"):
N, M, JX = config.batch_size, config.max_num_sents, config.max_sent_size
self.yp = tf.concat(0, [padded_reshape(model.yp, [N, M, JX]) for model in models])
self.loss = tf.add_n([model.loss for model in models])/len(models)
def _split_batch(self, batches):
idxs_list, data_sets = zip(*batches)
idxs = sum(idxs_list, ())
data_set = sum(data_sets, data_sets[0].get_empty())
return idxs, data_set
def _get_feed_dict(self, batches):
feed_dict = {}
for model, (_, data_set) in zip(self.models, batches):
feed_dict.update(model.get_feed_dict(data_set, False))
return feed_dict
class ForwardEvaluator(Evaluator):
def __init__(self, config, model, tensor_dict=None):
super(ForwardEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
self.yp2 = model.yp2
self.loss = model.loss
def get_evaluation(self, sess, batch):
idxs, data_set = batch
assert isinstance(data_set, DataSet)
feed_dict = self.model.get_feed_dict(data_set, False)
global_step, yp, yp2, loss, vals = sess.run([self.global_step, self.yp, self.yp2, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
yp, yp2 = yp[:data_set.num_examples], yp2[:data_set.num_examples]
spans = [get_best_span(ypi, yp2i) for ypi, yp2i in zip(yp, yp2)]
def _get(xi, span):
if len(xi) <= span[0][0]:
return [""]
if len(xi[span[0][0]]) <= span[1][1]:
return [""]
return xi[span[0][0]][span[0][1]:span[1][1]]
id2answer_dict = {id_: " ".join(_get(xi, span))
for id_, xi, span in zip(data_set.data['ids'], data_set.data['x'], spans)}
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = ForwardEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), float(loss), id2answer_dict, tensor_dict=tensor_dict)
return e
@staticmethod
def compare(yi, ypi, yp2i):
for start, stop in yi:
aypi = argmax(ypi)
mask = np.zeros(yp2i.shape)
mask[aypi[0], aypi[1]:] = np.ones([yp2i.shape[1] - aypi[1]])
if tuple(start) == aypi and (stop[0], stop[1]-1) == argmax(yp2i * mask):
return True
return False
@staticmethod
def compare2(yi, span):
for start, stop in yi:
if tuple(start) == span[0] and tuple(stop) == span[1]:
return True
return False
@staticmethod
def span_f1(yi, span):
max_f1 = 0
for start, stop in yi:
if start[0] == span[0][0]:
true_span = start[1], stop[1]
pred_span = span[0][1], span[1][1]
f1 = span_f1(true_span, pred_span)
max_f1 = max(f1, max_f1)
return max_f1
def get_best_span(ypi, yp2i):
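    # Scan each sentence left to right, tracking the best start probability seen so far
    # (argmax_j1); at each position j the candidate score is that best start probability
    # times the end probability at j. Return the highest-scoring (sentence, start, end)
    # triple, with the end index made exclusive.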
max_val = 0
best_word_span = (0, 1)
best_sent_idx = 0
for f, (ypif, yp2if) in enumerate(zip(ypi, yp2i)):
argmax_j1 = 0
for j in range(len(ypif)):
val1 = ypif[argmax_j1]
if val1 < ypif[j]:
val1 = ypif[j]
argmax_j1 = j
val2 = yp2if[j]
if val1 * val2 > max_val:
best_word_span = (argmax_j1, j)
best_sent_idx = f
max_val = val1 * val2
return (best_sent_idx, best_word_span[0]), (best_sent_idx, best_word_span[1] + 1)
def get_span_score_pairs(ypi, yp2i):
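    # Enumerate every within-sentence span (j <= k) and score it as the product of the
    # start probability at j and the end probability at k; O(J^2) pairs per sentence.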
span_score_pairs = []
for f, (ypif, yp2if) in enumerate(zip(ypi, yp2i)):
for j in range(len(ypif)):
for k in range(j, len(yp2if)):
span = ((f, j), (f, k+1))
score = ypif[j] * yp2if[k]
span_score_pairs.append((span, score))
return span_score_pairs
| bi-att-flow-master | basic_cnn/evaluator.py |
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import RNNCell
from my.tensorflow.nn import linear
class SHCell(RNNCell):
"""
Super-Highway Cell
"""
def __init__(self, input_size, logit_func='tri_linear', scalar=False):
self._state_size = input_size
self._output_size = input_size
self._logit_func = logit_func
self._scalar = scalar
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or "SHCell"):
a_size = 1 if self._scalar else self._state_size
h, u = tf.split(1, 2, inputs)
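            # The input is split into two halves, h and u; the sigmoid gate `a` computed
            # below carries the previous state: new_state = a * state + (1 - a) * h.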
if self._logit_func == 'mul_linear':
args = [h * u, state * u]
a = tf.nn.sigmoid(linear(args, a_size, True))
elif self._logit_func == 'linear':
args = [h, u, state]
a = tf.nn.sigmoid(linear(args, a_size, True))
elif self._logit_func == 'tri_linear':
args = [h, u, state, h * u, state * u]
a = tf.nn.sigmoid(linear(args, a_size, True))
elif self._logit_func == 'double':
args = [h, u, state]
a = tf.nn.sigmoid(linear(tf.tanh(linear(args, a_size, True)), self._state_size, True))
else:
raise Exception()
new_state = a * state + (1 - a) * h
outputs = state
return outputs, new_state
| bi-att-flow-master | basic_cnn/superhighway.py |
import shutil
from collections import OrderedDict
import http.server
import socketserver
import argparse
import json
import os
import numpy as np
from tqdm import tqdm
from jinja2 import Environment, FileSystemLoader
from basic_cnn.evaluator import get_span_score_pairs, get_best_span
def bool_(string):
if string == 'True':
return True
elif string == 'False':
return False
else:
raise Exception()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default='basic')
parser.add_argument("--data_type", type=str, default='dev')
parser.add_argument("--step", type=int, default=5000)
parser.add_argument("--template_name", type=str, default="visualizer.html")
parser.add_argument("--num_per_page", type=int, default=100)
parser.add_argument("--data_dir", type=str, default="data/squad")
parser.add_argument("--port", type=int, default=8000)
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--open", type=str, default='False')
parser.add_argument("--run_id", type=str, default="0")
args = parser.parse_args()
return args
def _decode(decoder, sent):
return " ".join(decoder[idx] for idx in sent)
def accuracy2_visualizer(args):
model_name = args.model_name
data_type = args.data_type
num_per_page = args.num_per_page
data_dir = args.data_dir
run_id = args.run_id.zfill(2)
step = args.step
    eval_path = os.path.join("out", model_name, run_id, "eval", "{}-{}.json".format(data_type, str(step).zfill(6)))
print("loading {}".format(eval_path))
eval_ = json.load(open(eval_path, 'r'))
_id = 0
html_dir = "/tmp/list_results%d" % _id
while os.path.exists(html_dir):
_id += 1
html_dir = "/tmp/list_results%d" % _id
if os.path.exists(html_dir):
shutil.rmtree(html_dir)
os.mkdir(html_dir)
cur_dir = os.path.dirname(os.path.realpath(__file__))
templates_dir = os.path.join(cur_dir, 'templates')
env = Environment(loader=FileSystemLoader(templates_dir))
env.globals.update(zip=zip, reversed=reversed)
template = env.get_template(args.template_name)
data_path = os.path.join(data_dir, "data_{}.json".format(data_type))
shared_path = os.path.join(data_dir, "shared_{}.json".format(data_type))
print("loading {}".format(data_path))
data = json.load(open(data_path, 'r'))
print("loading {}".format(shared_path))
shared = json.load(open(shared_path, 'r'))
rows = []
for i, (idx, yi, ypi, yp2i) in tqdm(enumerate(zip(*[eval_[key] for key in ('idxs', 'y', 'yp', 'yp2')])), total=len(eval_['idxs'])):
id_, q, rx, answers = (data[key][idx] for key in ('ids', 'q', '*x', 'answerss'))
x = shared['x'][rx[0]][rx[1]]
ques = [" ".join(q)]
para = [[word for word in sent] for sent in x]
span = get_best_span(ypi, yp2i)
ap = get_segment(para, span)
score = "{:.3f}".format(ypi[span[0][0]][span[0][1]] * yp2i[span[1][0]][span[1][1]-1])
row = {
'id': id_,
'title': "Hello world!",
'ques': ques,
'para': para,
'y': yi[0][0],
'y2': yi[0][1],
'yp': ypi,
'yp2': yp2i,
'a': answers,
'ap': ap,
'score': score
}
rows.append(row)
if i % num_per_page == 0:
html_path = os.path.join(html_dir, "%s.html" % str(i).zfill(8))
if (i + 1) % num_per_page == 0 or (i + 1) == len(eval_['y']):
var_dict = {'title': "Accuracy Visualization",
'rows': rows
}
with open(html_path, "wb") as f:
f.write(template.render(**var_dict).encode('UTF-8'))
rows = []
os.chdir(html_dir)
port = args.port
host = args.host
# Overriding to suppress log message
class MyHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
handler = MyHandler
httpd = socketserver.TCPServer((host, port), handler)
if args.open == 'True':
os.system("open http://%s:%d" % (args.host, args.port))
print("serving at %s:%d" % (host, port))
httpd.serve_forever()
def get_segment(para, span):
return " ".join(para[span[0][0]][span[0][1]:span[1][1]])
if __name__ == "__main__":
ARGS = get_args()
accuracy2_visualizer(ARGS) | bi-att-flow-master | basic_cnn/visualizer.py |
import pytest
@pytest.fixture(autouse=True)
def doctest_fixtures(
doctest_namespace,
tmp_path,
):
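    # Expose pytest's tmp_path as `cache_dir` in the doctest namespace so doctest
    # examples can write to a throwaway cache directory.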
doctest_namespace["cache_dir"] = tmp_path
| cached_path-main | conftest.py |
from setuptools import find_packages, setup
def read_requirements(filename: str):
with open(filename) as requirements_file:
import re
def fix_url_dependencies(req: str) -> str:
"""Pip and setuptools disagree about how URL dependencies should be handled."""
m = re.match(
r"^(git\+)?(https|ssh)://(git@)?github\.com/([\w-]+)/(?P<name>[\w-]+)\.git", req
)
if m is None:
return req
else:
return f"{m.group('name')} @ {req}"
requirements = []
for line in requirements_file:
line = line.strip()
if line.startswith("#") or len(line) <= 0:
continue
requirements.append(fix_url_dependencies(line))
return requirements
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import cached_path whilst setting up.
VERSION = {} # type: ignore
with open("cached_path/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="cached_path",
version=VERSION["VERSION"],
description="",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp cached_path file utils",
url="https://github.com/allenai/cached_path",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests.*",
"tests",
"test_fixtures",
"test_fixtures.*",
"conftest",
"*.conftest",
]
),
install_requires=read_requirements("requirements.txt"),
extras_require={"dev": read_requirements("dev-requirements.txt")},
python_requires=">=3.7",
)
| cached_path-main | setup.py |
import os
import pytest
from cached_path.cache_file import CacheFile
from cached_path.testing import BaseTestClass
class TestCacheFile(BaseTestClass):
def test_temp_file_removed_on_error(self):
cache_filename = self.TEST_DIR / "cache_file"
with pytest.raises(IOError, match="I made this up"):
with CacheFile(cache_filename) as handle:
raise IOError("I made this up")
assert not os.path.exists(handle.name)
assert not os.path.exists(cache_filename)
| cached_path-main | tests/cache_file_test.py |
import os
import pytest
from filelock import Timeout
from cached_path.file_lock import FileLock
from cached_path.testing import BaseTestClass
class TestFileLock(BaseTestClass):
def setup_method(self):
super().setup_method()
# Set up a regular lock and a read-only lock.
open(self.TEST_DIR / "lock", "a").close()
open(self.TEST_DIR / "read_only_lock", "a").close()
os.chmod(self.TEST_DIR / "read_only_lock", 0o555)
# Also set up a read-only directory.
os.mkdir(self.TEST_DIR / "read_only_dir", 0o555)
def test_locking(self):
with FileLock(self.TEST_DIR / "lock"):
# Trying to acquire the lock again should fail.
with pytest.raises(Timeout):
with FileLock(self.TEST_DIR / "lock", timeout=0.1):
pass
# Trying to acquire a lock when lacking write permissions on the file should fail.
with pytest.raises(PermissionError):
with FileLock(self.TEST_DIR / "read_only_lock"):
pass
# But this should only issue a warning if we set the `read_only_ok` flag to `True`.
with pytest.warns(UserWarning, match="Lacking permissions"):
with FileLock(self.TEST_DIR / "read_only_lock", read_only_ok=True):
pass
# However this should always fail when we lack write permissions and the file lock
# doesn't exist yet.
with pytest.raises(PermissionError):
with FileLock(self.TEST_DIR / "read_only_dir" / "lock", read_only_ok=True):
pass
| cached_path-main | tests/file_lock_test.py |
from cached_path import common
from cached_path.testing import BaseTestClass
class TestSetCacheDir(BaseTestClass):
def setup_method(self):
super().setup_method()
self.initial_value = common.CACHE_DIRECTORY
def test_toggle_ffl(self):
common.set_cache_dir(self.TEST_DIR / "foo")
assert common.get_cache_dir() == self.TEST_DIR / "foo"
def teardown_method(self):
common.set_cache_dir(self.initial_value)
| cached_path-main | tests/common_test.py |
| cached_path-main | tests/__init__.py
import json
import os
import pathlib
import pytest
from cached_path.testing import BaseTestClass
from cached_path.util import filename_to_url, resource_to_filename
class TestUtils(BaseTestClass):
def test_resource_to_filename(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
"https://allennlp.s3.amazonaws.com" + "/long" * 20 + "/url",
]:
filename = resource_to_filename(url)
assert "http" not in filename
with pytest.raises(FileNotFoundError):
filename_to_url(filename, cache_dir=self.TEST_DIR)
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
with pytest.raises(FileNotFoundError):
filename_to_url(filename, cache_dir=self.TEST_DIR)
json.dump(
{"url": url, "etag": None},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag is None
def test_resource_to_filename_with_etags(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
]:
filename = resource_to_filename(url, etag="mytag")
assert "http" not in filename
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
json.dump(
{"url": url, "etag": "mytag"},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag == "mytag"
baseurl = "http://allenai.org/"
assert resource_to_filename(baseurl + "1") != resource_to_filename(baseurl, etag="1")
def test_resource_to_filename_with_etags_eliminates_quotes(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
]:
filename = resource_to_filename(url, etag='"mytag"')
assert "http" not in filename
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
json.dump(
{"url": url, "etag": "mytag"},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag == "mytag"
| cached_path-main | tests/util_test.py |
import shutil
import tempfile
import time
from collections import Counter
from pathlib import Path
import pytest
import responses
from flaky import flaky
from requests.exceptions import ConnectionError, HTTPError
from cached_path._cached_path import cached_path, get_from_cache
from cached_path.meta import Meta
from cached_path.schemes.http import HttpClient, RecoverableServerError
from cached_path.testing import BaseTestClass
from cached_path.util import _lock_file_path, _meta_file_path, resource_to_filename
class TestCachedPathHttp(BaseTestClass):
@staticmethod
def set_up_glove(url: str, byt: bytes, change_etag_every: int = 1000):
# Mock response for the datastore url that returns glove vectors
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/gzip",
headers={"Content-Length": str(len(byt))},
)
etags_left = change_etag_every
etag = "0"
def head_callback(_):
"""
Writing this as a callback allows different responses to different HEAD requests.
In our case, we're going to change the ETag header every `change_etag_every`
requests, which will allow us to simulate having a new version of the file.
"""
nonlocal etags_left, etag
headers = {"ETag": etag}
# countdown and change ETag
etags_left -= 1
if etags_left <= 0:
etags_left = change_etag_every
etag = str(int(etag) + 1)
return (200, headers, "")
responses.add_callback(responses.HEAD, url, callback=head_callback)
def setup_method(self):
super().setup_method()
self.glove_file = self.FIXTURES_ROOT / "embeddings/glove.6B.100d.sample.txt.gz"
with open(self.glove_file, "rb") as glove:
self.glove_bytes = glove.read()
def test_offline_mode_fallback(self, monkeypatch):
# Ensures `cached_path` just returns the path to the latest cached version
# of the resource when there's no internet connection, or another recoverable error
# occurs.
# First we mock the `get_etag` method so that it raises a `ConnectionError`,
# like it would if there was no internet connection.
def mocked_http_etag(self):
raise ConnectionError
monkeypatch.setattr(HttpClient, "get_etag", mocked_http_etag)
url = "https://github.com/allenai/allennlp/blob/master/some-fake-resource"
# We'll create two cached versions of this fake resource using two different etags.
etags = ['W/"3e5885bfcbf4c47bc4ee9e2f6e5ea916"', 'W/"3e5885bfcbf4c47bc4ee9e2f6e5ea918"']
filenames = [self.TEST_DIR / resource_to_filename(url, etag) for etag in etags]
for filename, etag in zip(filenames, etags):
meta = Meta(
resource=url,
cached_path=str(filename),
creation_time=time.time(),
etag=etag,
size=2341,
)
meta.to_file()
with open(filename, "w") as f:
f.write("some random data")
# os.path.getmtime is only accurate to the second.
time.sleep(1.1)
# Should know to ignore lock files and extraction directories.
with open(_lock_file_path(filenames[-1]), "w") as f:
f.write("")
(filenames[-1].parent / (filenames[-1].name + "-extracted")).mkdir()
# The version corresponding to the last etag should be returned, since
# that one has the latest "last modified" time.
assert get_from_cache(url)[0] == filenames[-1]
# We also want to make sure this works when the latest cached version doesn't
# have a corresponding etag.
filename = self.TEST_DIR / resource_to_filename(url)
meta = Meta(resource=url, cached_path=str(filename), creation_time=time.time(), size=2341)
meta.to_file()
with open(filename, "w") as f:
f.write("some random data")
assert get_from_cache(url)[0] == filename
@responses.activate
def test_get_from_cache(self):
url = "http://fake.datastore.com/glove.txt.gz"
self.set_up_glove(url, self.glove_bytes, change_etag_every=2)
filename, _ = get_from_cache(url)
assert filename == self.TEST_DIR / resource_to_filename(url, etag="0")
meta = Meta.from_path(_meta_file_path(filename))
assert meta.resource == url
# We should have made one HEAD request and one GET request.
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 1
assert method_counts["GET"] == 1
# And the cached file should have the correct contents
with open(filename, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# A second call to `get_from_cache` should make another HEAD call
# but not another GET call.
filename2, _ = get_from_cache(url)
assert filename2 == filename
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 2
assert method_counts["GET"] == 1
with open(filename2, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# A third call should have a different ETag and should force a new download,
# which means another HEAD call and another GET call.
filename3, _ = get_from_cache(url)
assert filename3 == self.TEST_DIR / resource_to_filename(url, etag="1")
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 3
assert method_counts["GET"] == 2
with open(filename3, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
@responses.activate
def test_http_200(self):
url = "http://fake.datastore.com/glove.txt.gz"
self.set_up_glove(url, self.glove_bytes)
# non-existent file
with pytest.raises(FileNotFoundError):
filename = cached_path(self.FIXTURES_ROOT / "does_not_exist" / "fake_file.tar.gz")
# unparsable URI
with pytest.raises(ValueError):
filename = cached_path("fakescheme://path/to/fake/file.tar.gz")
# existing file as path
assert cached_path(self.glove_file) == self.glove_file
# caches urls
filename = cached_path(url)
assert len(responses.calls) == 2
assert filename == self.TEST_DIR / resource_to_filename(url, etag="0")
with open(filename, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# archives
filename = cached_path(
self.FIXTURES_ROOT / "common" / "quote.tar.gz!quote.txt",
extract_archive=True,
)
with open(filename, "r") as f:
assert f.read().startswith("I mean, ")
@responses.activate
def test_http_404(self):
url_404 = "http://fake.datastore.com/does-not-exist"
byt = b"Does not exist"
for method in (responses.GET, responses.HEAD):
responses.add(
method,
url_404,
body=byt,
status=404,
headers={"Content-Length": str(len(byt))},
)
with pytest.raises(FileNotFoundError):
cached_path(url_404)
@responses.activate
def test_http_500(self):
url_500 = "http://fake.datastore.com/server-error"
byt = b"Server error"
for method in (responses.GET, responses.HEAD):
responses.add(
method,
url_500,
body=byt,
status=500,
headers={"Content-Length": str(len(byt))},
)
with pytest.raises(HTTPError):
cached_path(url_500)
@responses.activate
def test_http_502(self):
url_502 = "http://fake.datastore.com/server-error"
byt = b"Server error"
for method in (responses.GET, responses.HEAD):
responses.add(
method,
url_502,
body=byt,
status=502,
headers={"Content-Length": str(len(byt))},
)
with pytest.raises(RecoverableServerError):
cached_path(url_502)
class TestCachedPathLocalFiles(BaseTestClass):
def test_path_with_home_shortcut(self):
with tempfile.NamedTemporaryFile(dir=Path.home()) as tmp_file:
full_path = Path(tmp_file.name)
fname = full_path.name
short_path = f"~/{fname}"
assert cached_path(short_path) == full_path
assert cached_path(Path(short_path)) == full_path
assert cached_path(f"file://{short_path}") == full_path
class TestCachedPathWithArchive(BaseTestClass):
def setup_method(self):
super().setup_method()
self.tar_file = self.TEST_DIR / "utf-8.tar.gz"
shutil.copyfile(
self.FIXTURES_ROOT / "utf-8_sample" / "archives" / "utf-8.tar.gz", self.tar_file
)
self.zip_file = self.TEST_DIR / "utf-8.zip"
shutil.copyfile(
self.FIXTURES_ROOT / "utf-8_sample" / "archives" / "utf-8.zip", self.zip_file
)
def test_extract_with_external_symlink(self):
dangerous_file = self.FIXTURES_ROOT / "common" / "external_symlink.tar.gz"
with pytest.raises(ValueError):
cached_path(dangerous_file, extract_archive=True)
def check_extracted(self, extracted: Path):
assert extracted.is_dir()
assert extracted.parent == self.TEST_DIR
assert (extracted / "dummy.txt").is_file()
assert (extracted / "folder/utf-8_sample.txt").is_file()
assert _meta_file_path(extracted).is_file()
def test_cached_path_extract_local_tar(self):
extracted = cached_path(self.tar_file, extract_archive=True)
self.check_extracted(extracted)
def test_cached_path_extract_local_zip(self):
extracted = cached_path(self.zip_file, extract_archive=True)
self.check_extracted(extracted)
@responses.activate
def test_cached_path_extract_remote_tar(self):
url = "http://fake.datastore.com/utf-8.tar.gz"
byt = open(self.tar_file, "rb").read()
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/tar+gzip",
headers={"Content-Length": str(len(byt))},
)
responses.add(
responses.HEAD,
url,
status=200,
headers={"ETag": "fake-etag"},
)
extracted = cached_path(url, extract_archive=True)
assert extracted.name.endswith("-extracted")
self.check_extracted(extracted)
@responses.activate
def test_cached_path_extract_remote_zip(self):
url = "http://fake.datastore.com/utf-8.zip"
byt = open(self.zip_file, "rb").read()
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/zip",
headers={"Content-Length": str(len(byt))},
)
responses.add(
responses.HEAD,
url,
status=200,
headers={"ETag": "fake-etag"},
)
extracted = cached_path(url, extract_archive=True)
assert extracted.name.endswith("-extracted")
self.check_extracted(extracted)
class TestCachedPathGs(BaseTestClass):
@flaky
def test_cache_blob(self):
path = cached_path("gs://allennlp-public-models/bert-xsmall-dummy.tar.gz")
assert path.is_file()
meta = Meta.from_path(_meta_file_path(path))
assert meta.etag is not None
@flaky
def test_cache_and_extract_blob(self):
path = cached_path(
"gs://allennlp-public-models/bert-xsmall-dummy.tar.gz", extract_archive=True
)
assert path.is_dir()
meta = Meta.from_path(_meta_file_path(path))
assert meta.extraction_dir
assert meta.etag is not None
@flaky
def test_file_not_found(self):
with pytest.raises(FileNotFoundError):
cached_path("gs://allennlp-public-models/does-not-exist")
class TestCachedPathS3(BaseTestClass):
@flaky
def test_cache_object(self):
path = cached_path("s3://allennlp/datasets/squad/squad-dev-v1.1.json")
assert path.is_file()
meta = Meta.from_path(_meta_file_path(path))
assert meta.etag is not None
class TestCachedPathHf(BaseTestClass):
@flaky
def test_cached_download_no_user_or_org(self):
path = cached_path("hf://t5-small/config.json", cache_dir=self.TEST_DIR)
assert path.is_file()
assert self.TEST_DIR in path.parents
@flaky
def test_snapshot_download_no_user_or_org(self):
# This is the smallest snapshot I could find that is not associated with a user / org.
model_name = "distilbert-base-german-cased"
path = cached_path(f"hf://{model_name}", cache_dir=self.TEST_DIR)
assert path.is_dir()
assert self.TEST_DIR in path.parents
def test_snapshot_download_ambiguous_url(self):
# URLs like 'hf://xxxx/yyyy' are potentially ambiguous,
# because this could refer to either:
# 1. the file 'yyyy' in the 'xxxx' repository, or
# 2. the repo 'yyyy' under the user/org name 'xxxx'.
# We default to (1), but if we get a 404 error or 401 error then we try (2).
model_name = "lysandre/test-simple-tagger-tiny"
path = cached_path(
f"hf://{model_name}", cache_dir=self.TEST_DIR
) # should resolve to option 2.
assert path.is_dir()
assert self.TEST_DIR in path.parents
def beaker_available() -> bool:
try:
from beaker import Beaker, BeakerError # type: ignore
try:
beaker = Beaker.from_env()
beaker.account.whoami()
return True
except BeakerError:
return False
except (ImportError, ModuleNotFoundError):
return False
class TestCachedPathBeaker(BaseTestClass):
@flaky
@pytest.mark.skipif(not beaker_available(), reason="Beaker not configured")
def test_cache_object(self):
path = cached_path("beaker://petew/cached-path-readme/README.md")
assert path.is_file()
meta = Meta.from_path(_meta_file_path(path))
assert meta.etag is not None
| cached_path-main | tests/cached_path_test.py |
from cached_path.schemes import (
SchemeClient,
add_scheme_client,
get_scheme_client,
get_supported_schemes,
)
from cached_path.util import is_url_or_existing_file
def test_supported_schemes():
assert "hf" in get_supported_schemes()
class CustomSchemeClient(SchemeClient):
scheme = "foo"
def get_etag(self):
return "AAA"
def get_size(self):
return None
def get_resource(self, temp_file):
pass
def test_add_scheme():
assert "foo" not in get_supported_schemes()
assert not is_url_or_existing_file("foo://bar")
add_scheme_client(CustomSchemeClient)
assert "foo" in get_supported_schemes()
assert is_url_or_existing_file("foo://bar")
assert isinstance(get_scheme_client("foo://bar"), CustomSchemeClient)
| cached_path-main | tests/schemes_test.py |
import pytest
from cached_path.schemes.gs import GsClient
def test_split_gcs_path():
# Test splitting good urls.
assert GsClient.split_gcs_path("gs://my-bucket/subdir/file.txt") == (
"my-bucket",
"subdir/file.txt",
)
assert GsClient.split_gcs_path("gs://my-bucket/file.txt") == ("my-bucket", "file.txt")
# Test splitting bad urls.
with pytest.raises(ValueError):
GsClient.split_gcs_path("gs://")
GsClient.split_gcs_path("gs://myfile.txt")
GsClient.split_gcs_path("myfile.txt")
| cached_path-main | tests/schemes/gs_test.py |
| cached_path-main | tests/schemes/__init__.py
import pytest
from cached_path.schemes.s3 import S3Client
def test_split_s3_path():
# Test splitting good urls.
assert S3Client.split_s3_path("s3://my-bucket/subdir/file.txt") == (
"my-bucket",
"subdir/file.txt",
)
assert S3Client.split_s3_path("s3://my-bucket/file.txt") == ("my-bucket", "file.txt")
# Test splitting bad urls.
with pytest.raises(ValueError):
S3Client.split_s3_path("s3://")
S3Client.split_s3_path("s3://myfile.txt")
S3Client.split_s3_path("myfile.txt")
| cached_path-main | tests/schemes/s3_test.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import os
import sys
from datetime import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath("../../"))
from cached_path.version import VERSION, VERSION_SHORT # noqa: E402
# -- Project information -----------------------------------------------------
project = "cached-path"
copyright = f"{datetime.today().year}, Allen Institute for Artificial Intelligence"
author = "Allen Institute for Artificial Intelligence"
version = VERSION_SHORT
release = VERSION
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"myst_parser",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.doctest",
"sphinx_copybutton",
"sphinx_autodoc_typehints",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build"]
source_suffix = [".rst", ".md"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"rich": ("https://rich.readthedocs.io/en/latest/", None),
}
# Tell myst-parser to assign header anchors for h1-h4.
myst_heading_anchors = 4
typehints_defaults = "comma"
copybutton_prompt_text = r">>> |\.\.\. "
copybutton_prompt_is_regexp = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = f"cached-path v{VERSION}"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
html_favicon = "_static/favicon.ico"
| cached_path-main | docs/source/conf.py |
from datetime import datetime
from pathlib import Path
from cached_path.version import VERSION
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int
for i in range(len(lines)):
line = lines[i]
if line.startswith("## Unreleased"):
insert_index = i + 1
elif line.startswith(f"## [v{VERSION}]"):
print("CHANGELOG already up-to-date")
return
elif line.startswith("## [v"):
break
else:
raise RuntimeError("Couldn't find 'Unreleased' section")
lines.insert(insert_index, "\n")
lines.insert(
insert_index + 1,
f"## [v{VERSION}](https://github.com/allenai/cached_path/releases/tag/v{VERSION}) - "
f"{datetime.now().strftime('%Y-%m-%d')}\n",
)
with changelog.open("w") as f:
f.writelines(lines)
if __name__ == "__main__":
main()
| cached_path-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List
import packaging.version
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change_log_notes() -> str:
in_current_section = False
current_section_notes: List[str] = []
with open("CHANGELOG.md") as changelog:
for line in changelog:
if line.startswith("## "):
if line.startswith("## Unreleased"):
continue
if line.startswith(f"## [{TAG}]"):
in_current_section = True
continue
break
if in_current_section:
if line.startswith("### Added"):
line = ADDED_HEADER + "\n"
elif line.startswith("### Changed"):
line = CHANGED_HEADER + "\n"
elif line.startswith("### Fixed"):
line = FIXED_HEADER + "\n"
elif line.startswith("### Removed"):
line = REMOVED_HEADER + "\n"
current_section_notes.append(line)
assert current_section_notes
return "## What's new\n\n" + "".join(current_section_notes).strip() + "\n"
def get_commit_history() -> str:
new_version = packaging.version.parse(TAG)
# Get all tags sorted by version, latest first.
all_tags = os.popen("git tag -l --sort=-version:refname 'v*'").read().split("\n")
# Out of `all_tags`, find the latest previous version so that we can collect all
# commits between that version and the new version we're about to publish.
# Note that we ignore pre-releases unless the new version is also a pre-release.
last_tag: str
for tag in all_tags:
if not tag.strip(): # could be blank line
continue
version = packaging.version.parse(tag)
if new_version.pre is None and version.pre is not None:
continue
if version < new_version:
last_tag = tag
break
commits = os.popen(f"git log {last_tag}..{TAG}^ --oneline --first-parent").read()
return "## Commits\n\n" + commits
def main():
print(get_change_log_notes())
print(get_commit_history())
if __name__ == "__main__":
main()
| cached_path-main | scripts/release_notes.py |
import os
_MAJOR = "1"
_MINOR = "4"
# On main and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "0"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = os.environ.get("CACHED_PATH_VERSION_SUFFIX", "")
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| cached_path-main | cached_path/version.py |
import os
import tarfile
from hashlib import sha256
from pathlib import Path
from typing import List, Optional, Tuple
from urllib.parse import urlparse
from .common import PathOrStr, get_cache_dir
from .meta import Meta
def resource_to_filename(resource: PathOrStr, etag: Optional[str] = None) -> str:
"""
Convert a ``resource`` into a hashed filename in a repeatable way.
If ``etag`` is specified, append its hash to the resource's hash, delimited
by a period.
This is essentially the inverse of :func:`filename_to_url()`.
"""
resource_bytes = str(resource).encode("utf-8")
resource_hash = sha256(resource_bytes)
filename = resource_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
def filename_to_url(
filename: str, cache_dir: Optional[PathOrStr] = None
) -> Tuple[str, Optional[str]]:
"""
Return the URL and etag (which may be ``None``) stored for ``filename``.
Raises :exc:`FileNotFoundError` if ``filename`` or its stored metadata do not exist.
This is essentially the inverse of :func:`resource_to_filename()`.
"""
cache_dir = cache_dir if cache_dir else get_cache_dir()
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
metadata = Meta.from_path(meta_path)
return metadata.resource, metadata.etag
def find_latest_cached(url: str, cache_dir: Optional[PathOrStr] = None) -> Optional[Path]:
"""
Get the path to the latest cached version of a given resource.
"""
cache_dir = Path(cache_dir if cache_dir else get_cache_dir())
filename = resource_to_filename(url)
candidates: List[Tuple[Path, float]] = []
for path in cache_dir.glob(f"{filename}*"):
if path.suffix in {".json", ".lock"} or path.name.endswith("-extracted"):
continue
mtime = path.stat().st_mtime
candidates.append((path, mtime))
# Sort candidates by modification time, newest first.
candidates.sort(key=lambda x: x[1], reverse=True)
if candidates:
return candidates[0][0]
return None
def check_tarfile(tar_file: tarfile.TarFile):
"""Tar files can contain files outside of the extraction directory, or symlinks that point
outside the extraction directory. We also don't want any block devices, FIFOs, or other
weird file types extracted. This checks for those issues and throws an exception if there
is a problem."""
base_path = os.path.join("tmp", "pathtest")
base_path = os.path.normpath(base_path)
def normalize_path(path: str) -> str:
path = path.rstrip("/")
path = path.replace("/", os.sep)
path = os.path.join(base_path, path)
path = os.path.normpath(path)
return path
for tarinfo in tar_file:
if not (
tarinfo.isreg()
or tarinfo.isdir()
or tarinfo.isfile()
or tarinfo.islnk()
or tarinfo.issym()
):
raise ValueError(
f"Tar file {str(tar_file.name)} contains invalid member {tarinfo.name}."
)
target_path = normalize_path(tarinfo.name)
if os.path.commonprefix([base_path, target_path]) != base_path:
raise ValueError(
f"Tar file {str(tar_file.name)} is trying to create a file outside of its extraction directory."
)
if tarinfo.islnk() or tarinfo.issym():
target_path = normalize_path(tarinfo.linkname)
if os.path.commonprefix([base_path, target_path]) != base_path:
raise ValueError(
f"Tar file {str(tar_file.name)} is trying to link to a file "
"outside of its extraction directory."
)
def is_url_or_existing_file(url_or_filename: PathOrStr) -> bool:
"""
Given something that might be a URL or local path,
determine if it's actually a url or the path to an existing file.
"""
if url_or_filename is None:
return False
from .schemes import get_supported_schemes
url_or_filename = os.path.expanduser(str(url_or_filename))
parsed = urlparse(url_or_filename)
return parsed.scheme in get_supported_schemes() or os.path.exists(url_or_filename)
def _lock_file_path(cache_path: Path) -> Path:
return cache_path.parent / (cache_path.name + ".lock")
def _meta_file_path(cache_path: Path) -> Path:
return cache_path.parent / (cache_path.name + ".json")
| cached_path-main | cached_path/util.py |
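A minimal usage sketch of the cache-naming helpers above (not part of the repo; the URL and etag are hypothetical):

from cached_path.util import find_latest_cached, resource_to_filename

url = "https://example.com/data.tar.gz"  # hypothetical resource
# sha256 of the URL, plus "." and the sha256 of the etag when one is given.
filename = resource_to_filename(url, etag="abc123")
print(filename)
# filename_to_url(filename) would recover the URL and etag, but only once the
# cached file and its ".json" meta file actually exist in the cache directory.
# find_latest_cached() returns the newest cached copy of the URL, or None.
print(find_latest_cached(url))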
import logging
import os
import shutil
import tarfile
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Tuple
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
from .cache_file import CacheFile
from .common import PathOrStr, get_cache_dir
from .file_lock import FileLock
from .meta import Meta
from .schemes import get_scheme_client, get_supported_schemes, hf_get_from_cache
from .util import (
_lock_file_path,
_meta_file_path,
check_tarfile,
find_latest_cached,
resource_to_filename,
)
if TYPE_CHECKING:
from rich.progress import Progress
logger = logging.getLogger("cached_path")
def cached_path(
url_or_filename: PathOrStr,
cache_dir: Optional[PathOrStr] = None,
extract_archive: bool = False,
force_extract: bool = False,
quiet: bool = False,
progress: Optional["Progress"] = None,
) -> Path:
"""
Given something that might be a URL or local path, determine which.
If it's a remote resource, download the file and cache it, and
then return the path to the cached file. If it's already a local path,
make sure the file exists and return the path.
For URLs, the following schemes are all supported out-of-the-box:
* ``http`` and ``https``,
* ``s3`` for objects on `AWS S3`_,
* ``gs`` for objects on `Google Cloud Storage (GCS)`_, and
* ``hf`` for objects or repositories on `HuggingFace Hub`_.
If you have `Beaker-py`_ installed you can also use URLs of the form:
``beaker://{user_name}/{dataset_name}/{file_path}``.
You can also extend ``cached_path()`` to handle more schemes with :func:`add_scheme_client()`.
.. _AWS S3: https://aws.amazon.com/s3/
.. _Google Cloud Storage (GCS): https://cloud.google.com/storage
.. _HuggingFace Hub: https://huggingface.co/
.. _Beaker-py: https://github.com/allenai/beaker-py
Examples
--------
To download a file over ``https``::
cached_path("https://github.com/allenai/cached_path/blob/main/README.md")
To download an object on GCS::
cached_path("gs://allennlp-public-models/lerc-2020-11-18.tar.gz")
To download the PyTorch weights for the model `epwalsh/bert-xsmall-dummy`_
on HuggingFace, you could do::
cached_path("hf://epwalsh/bert-xsmall-dummy/pytorch_model.bin")
For paths or URLs that point to a tarfile or zipfile, you can append the path
to a specific file within the archive to the ``url_or_filename``, preceded by a "!".
The archive will be automatically extracted (provided you set ``extract_archive`` to ``True``),
returning the local path to the specific file. For example::
cached_path("model.tar.gz!weights.th", extract_archive=True)
.. _epwalsh/bert-xsmall-dummy: https://huggingface.co/epwalsh/bert-xsmall-dummy
Parameters
----------
url_or_filename :
A URL or path to parse and possibly download.
cache_dir :
The directory to cache downloads. If not specified, the global default cache directory
will be used (``~/.cache/cached_path``). This can be set to something else with
:func:`set_cache_dir()`.
extract_archive :
If ``True``, then zip or tar.gz archives will be automatically extracted,
in which case the path to the extracted directory is returned.
force_extract :
If ``True`` and the file is an archive file, it will be extracted regardless
of whether or not the extracted directory already exists.
.. caution::
Use this flag with caution! This can lead to race conditions if used
from multiple processes on the same file.
quiet :
If ``True``, progress displays won't be printed.
progress :
A custom progress display to use. If not set and ``quiet=False``, a default display
from :func:`~cached_path.get_download_progress()` will be used.
Returns
-------
:class:`pathlib.Path`
The local path to the (potentially cached) resource.
Raises
------
``FileNotFoundError``
If the resource cannot be found locally or remotely.
``ValueError``
When the URL is invalid.
``Other errors``
Other error types are possible as well depending on the client used to fetch
the resource.
"""
if not isinstance(url_or_filename, str):
url_or_filename = str(url_or_filename)
file_path: Path
extraction_path: Optional[Path] = None
etag: Optional[str] = None
# If we're using the /a/b/foo.zip!c/d/file.txt syntax, handle it here.
exclamation_index = url_or_filename.find("!")
if extract_archive and exclamation_index >= 0:
archive_path = url_or_filename[:exclamation_index]
file_name = url_or_filename[exclamation_index + 1 :]
# Call 'cached_path' recursively now to get the local path to the archive itself.
cached_archive_path = cached_path(
archive_path,
cache_dir=cache_dir,
extract_archive=True,
force_extract=force_extract,
quiet=quiet,
progress=progress,
)
if not cached_archive_path.is_dir():
raise ValueError(
f"{url_or_filename} uses the ! syntax, but does not specify an archive file."
)
# Now return the full path to the desired file within the extracted archive,
# provided it exists.
file_path = cached_archive_path / file_name
if not file_path.exists():
raise FileNotFoundError(f"'{file_name}' not found within '{archive_path}'")
return file_path
parsed = urlparse(url_or_filename)
if parsed.scheme in get_supported_schemes():
# URL, so get it from the cache (downloading if necessary)
file_path, etag = get_from_cache(url_or_filename, cache_dir, quiet=quiet, progress=progress)
if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)):
# This is the path the file should be extracted to.
# For example ~/.cache/cached_path/234234.21341 -> ~/.cache/cached_path/234234.21341-extracted
extraction_path = file_path.parent / (file_path.name + "-extracted")
elif parsed.scheme == "file":
return cached_path(url_or_filename.replace("file://", "", 1))
else:
orig_url_or_filename = url_or_filename
url_or_filename = Path(url_or_filename).expanduser()
cache_dir = Path(cache_dir if cache_dir else get_cache_dir()).expanduser()
cache_dir.mkdir(parents=True, exist_ok=True)
if url_or_filename.exists():
# File, and it exists.
file_path = url_or_filename
# Normalize the path.
url_or_filename = url_or_filename.resolve()
if (
extract_archive
and file_path.is_file()
and (is_zipfile(file_path) or tarfile.is_tarfile(file_path))
):
# We'll use a unique directory within the cache root to extract the archive to.
# The name of the directory is a hash of the resource file path and its modification
# time. That way, if the file changes, we'll know when to extract it again.
extraction_name = (
resource_to_filename(url_or_filename, str(os.path.getmtime(file_path)))
+ "-extracted"
)
extraction_path = cache_dir / extraction_name
elif parsed.scheme == "":
# File, but it doesn't exist.
raise FileNotFoundError(f"file {url_or_filename} not found")
else:
# Something unknown
raise ValueError(f"unable to parse {orig_url_or_filename} as a URL or as a local path")
if extraction_path is not None:
# If the extracted directory already exists (and is non-empty), then no
# need to create a lock file and extract again unless `force_extract=True`.
if os.path.isdir(extraction_path) and os.listdir(extraction_path) and not force_extract:
return extraction_path
# Extract it.
with FileLock(_lock_file_path(extraction_path)):
# Check again if the directory exists now that we've acquired the lock.
if os.path.isdir(extraction_path) and os.listdir(extraction_path):
if force_extract:
logger.warning(
"Extraction directory for %s (%s) already exists, "
"overwriting it since 'force_extract' is 'True'",
url_or_filename,
extraction_path,
)
else:
return extraction_path
logger.info("Extracting %s to %s", url_or_filename, extraction_path)
shutil.rmtree(extraction_path, ignore_errors=True)
# We extract first to a temporary directory in case something goes wrong
# during the extraction process so we don't end up with a corrupted cache.
tmp_extraction_dir = tempfile.mkdtemp(dir=os.path.split(extraction_path)[0])
try:
if is_zipfile(file_path):
with ZipFile(file_path, "r") as zip_file:
zip_file.extractall(tmp_extraction_dir)
else:
with tarfile.open(file_path) as tar_file:
check_tarfile(tar_file)
tar_file.extractall(tmp_extraction_dir)
# Extraction was successful, rename temp directory to final
# cache directory and dump the meta data.
os.replace(tmp_extraction_dir, extraction_path)
meta = Meta.new(
url_or_filename,
extraction_path,
etag=etag,
extraction_dir=True,
)
meta.to_file()
finally:
shutil.rmtree(tmp_extraction_dir, ignore_errors=True)
return extraction_path
return file_path
def get_from_cache(
url: str,
cache_dir: Optional[PathOrStr] = None,
quiet: bool = False,
progress: Optional["Progress"] = None,
) -> Tuple[Path, Optional[str]]:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file and the ETag.
"""
if url.startswith("hf://"):
return hf_get_from_cache(url, cache_dir), None
cache_dir = Path(cache_dir if cache_dir else get_cache_dir()).expanduser()
cache_dir.mkdir(parents=True, exist_ok=True)
client = get_scheme_client(url)
# Get eTag to add to filename, if it exists.
try:
etag = client.get_etag()
except client.recoverable_errors: # type: ignore
# We might be offline, in which case we don't want to throw an error
# just yet. Instead, we'll try to use the latest cached version of the
# target resource, if it exists. We'll only throw an exception if we
# haven't cached the resource at all yet.
logger.warning(
"Connection error occurred while trying to fetch ETag for %s. "
"Will attempt to use latest cached version of resource",
url,
)
latest_cached = find_latest_cached(url, cache_dir)
if latest_cached:
logger.info(
"ETag request failed with recoverable error, using latest cached "
"version of %s: %s",
url,
latest_cached,
)
meta = Meta.from_path(_meta_file_path(latest_cached))
return latest_cached, meta.etag
else:
logger.error(
"ETag request failed with recoverable error, "
"but no cached version of %s could be found",
url,
)
raise
filename = resource_to_filename(url, etag)
# Get cache path to put the file.
cache_path = cache_dir / filename
# Multiple processes may be trying to cache the same file at once, so we need
# to be a little careful to avoid race conditions. We do this using a lock file.
# Only one process can own this lock file at a time, and a process will block
# on the call to `lock.acquire()` until the process currently holding the lock
# releases it.
logger.debug("waiting to acquire lock on %s", cache_path)
with FileLock(_lock_file_path(cache_path), read_only_ok=True):
if os.path.exists(cache_path):
logger.info("cache of %s is up-to-date", url)
else:
size = client.get_size()
with CacheFile(cache_path) as cache_file:
logger.info("%s not found in cache, downloading to %s", url, cache_path)
from .progress import BufferedWriterWithProgress, get_download_progress
start_and_cleanup = progress is None
progress = progress or get_download_progress(quiet=quiet)
if start_and_cleanup:
progress.start()
try:
display_url = url if len(url) <= 30 else f"\N{horizontal ellipsis}{url[-30:]}"
task_id = progress.add_task(f"Downloading [cyan i]{display_url}[/]", total=size)
writer_with_progress = BufferedWriterWithProgress(cache_file, progress, task_id)
client.get_resource(writer_with_progress)
progress.update(
task_id,
total=writer_with_progress.total_written,
completed=writer_with_progress.total_written,
)
finally:
if start_and_cleanup:
progress.stop()
logger.debug("creating metadata file for %s", cache_path)
meta = Meta.new(
url,
cache_path,
etag=etag,
)
meta.to_file()
return cache_path, etag
| cached_path-main | cached_path/_cached_path.py |
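A short usage sketch of cached_path() as defined above (illustrative only; the archive URL and member name are hypothetical):

from cached_path import cached_path

# Download (or reuse the cached copy of) a file over HTTPS, without progress output.
readme = cached_path("https://github.com/allenai/cached_path/blob/main/README.md", quiet=True)
print(readme)

# Download an archive, extract it, and resolve one member via the "!" syntax:
# weights = cached_path("https://example.com/model.tar.gz!weights.th", extract_archive=True)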
"""
The idea behind **cached-path** is to provide a unified, simple, extendable interface for accessing
both local and remote files.
This can be used behind other APIs that need to access files agnostic to where they are located.
For remote files, **cached-path** supports several different schemes out-of-the-box in addition
``http`` and ``https``, including ``s3`` for AWS S3, ``gs`` for Google Cloud Storage,
and ``hf`` for HuggingFace Hub. See :func:`cached_path.cached_path()` for more details.
You can also extend **cached-path** to support other schemes with :func:`add_scheme_client()`.
"""
from ._cached_path import cached_path
from .common import get_cache_dir, set_cache_dir
from .progress import get_download_progress
from .schemes import SchemeClient, add_scheme_client
from .util import (
check_tarfile,
filename_to_url,
find_latest_cached,
is_url_or_existing_file,
resource_to_filename,
)
__all__ = [
"cached_path",
"get_cache_dir",
"set_cache_dir",
"get_download_progress",
"SchemeClient",
"add_scheme_client",
"check_tarfile",
"filename_to_url",
"find_latest_cached",
"is_url_or_existing_file",
"resource_to_filename",
]
| cached_path-main | cached_path/__init__.py |
import os
import warnings
from typing import Optional
from filelock import AcquireReturnProxy
from filelock import FileLock as _FileLock
from .common import PathOrStr
class FileLock(_FileLock):
"""
This is just a subclass of the `FileLock` class from the `filelock` library, except that
it adds an additional argument to the `__init__` method: `read_only_ok`.
By default this flag is `False`, which means an exception will be thrown when a lock
can't be acquired due to lack of write permissions.
But if this flag is set to `True`, a warning will be emitted instead of an error when
the lock already exists but the lock can't be acquired because write access is blocked.
"""
def __init__(self, lock_file: PathOrStr, timeout=-1, read_only_ok: bool = False) -> None:
super().__init__(str(lock_file), timeout=timeout)
self._read_only_ok = read_only_ok
def acquire( # type: ignore[override]
self,
timeout=None,
poll_interval=0.05,
**kwargs,
) -> Optional[AcquireReturnProxy]:
try:
return super().acquire(timeout=timeout, poll_interval=poll_interval, **kwargs)
except OSError as err:
# OSError could be a lot of different things, but what we're looking
# for in particular are permission errors, such as:
# - errno 1 - EPERM - "Operation not permitted"
# - errno 13 - EACCES - "Permission denied"
# - errno 30 - EROFS - "Read-only file system"
if err.errno not in (1, 13, 30):
raise
if os.path.isfile(self.lock_file) and self._read_only_ok:
warnings.warn(
f"Lacking permissions required to obtain lock '{self.lock_file}'. "
"Race conditions are possible if other processes are writing to the same resource.",
UserWarning,
)
return None
else:
raise
| cached_path-main | cached_path/file_lock.py |
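A small sketch of the FileLock subclass above with read_only_ok=True, so a lock that cannot be acquired for lack of write permissions only emits a warning (the lock path is hypothetical):

from cached_path.file_lock import FileLock

with FileLock("/tmp/some-resource.lock", read_only_ok=True):
    pass  # read the cached resource here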
import os
from os import PathLike
from pathlib import Path
from typing import Tuple, Union
from urllib.parse import urlparse
PathOrStr = Union[str, PathLike]
CACHE_DIRECTORY: PathOrStr = Path(
os.getenv("CACHED_PATH_CACHE_ROOT", Path.home() / ".cache" / "cached_path")
)
"""
The default global cache directory.
"""
def _split_cloud_path(url: str, provider: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad {} path {}".format(provider, url))
bucket_name = parsed.netloc
provider_path = parsed.path
# Remove '/' at beginning of path.
if provider_path.startswith("/"):
provider_path = provider_path[1:]
return bucket_name, provider_path
def set_cache_dir(cache_dir: PathOrStr) -> None:
"""
Set the global default cache directory.
"""
global CACHE_DIRECTORY
CACHE_DIRECTORY = Path(cache_dir)
def get_cache_dir() -> Path:
"""
Get the global default cache directory.
"""
return Path(CACHE_DIRECTORY)
| cached_path-main | cached_path/common.py |
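A sketch of switching the global cache directory at runtime (the override directory is hypothetical):

from cached_path import get_cache_dir, set_cache_dir

previous = get_cache_dir()      # ~/.cache/cached_path unless CACHED_PATH_CACHE_ROOT is set
set_cache_dir("/tmp/my-cache")  # subsequent cached_path() calls use this directory
set_cache_dir(previous)         # restore the earlier setting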
import io
from typing import List, Optional
from rich.progress import BarColumn, DownloadColumn, Progress, TaskID, TimeElapsedColumn
class QuietProgress:
"""
A mock `Progress` class that does absolutely nothing.
We use this when users pass `quiet=True` since rich's `Progress` still
prints empty lines with `quiet=True`.
"""
def start(self, *args, **kwargs):
del args, kwargs
def stop(self, *args, **kwargs):
del args, kwargs
def update(self, *args, **kwargs):
del args, kwargs
def add_task(self, *args, **kwargs):
del args, kwargs
def advance(self, *args, **kwargs):
del args, kwargs
def stop_task(self, *args, **kwargs):
del args, kwargs
def __enter__(self):
return self
def __exit__(self, *args, **kwargs): # type: ignore
del args, kwargs
class BufferedWriterWithProgress(io.BufferedWriter):
def __init__(self, handle: io.BufferedWriter, progress: Progress, task_id: TaskID):
self.handle = handle
self.progress = progress
self.task_id = task_id
self.total_written = 0
def __enter__(self) -> "BufferedWriterWithProgress":
self.handle.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def closed(self) -> bool:
return self.handle.closed
def close(self):
self.handle.close()
def fileno(self):
return self.handle.fileno()
def flush(self):
self.handle.flush()
def isatty(self) -> bool:
return self.handle.isatty()
def readable(self) -> bool:
return self.handle.readable()
def seekable(self) -> bool:
return self.handle.seekable()
def writable(self) -> bool:
return True
def read(self, size: Optional[int] = -1) -> bytes:
return self.handle.read(size)
def read1(self, size: Optional[int] = -1) -> bytes:
return self.handle.read1()
def readinto(self, b):
return self.handle.readinto(b)
def readinto1(self, b):
return self.handle.readinto1(b)
def readline(self, size: Optional[int] = -1) -> bytes:
return self.handle.readline(size)
def readlines(self, hint: int = -1) -> List[bytes]:
return self.handle.readlines(hint)
def write(self, b) -> int:
n = self.handle.write(b)
self.total_written += n
self.progress.advance(self.task_id, n)
return n
def writelines(self, lines):
return self.handle.writelines(lines)
def seek(self, offset: int, whence: int = 0) -> int:
pos = self.handle.seek(offset, whence)
# self.progress.update(self.task_id, completed=pos)
return pos
def tell(self) -> int:
return self.handle.tell()
@property
def raw(self):
return self.handle.raw
def detach(self):
return self.handle.detach()
def get_download_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
TimeElapsedColumn(),
DownloadColumn(),
# disable=quiet,
)
| cached_path-main | cached_path/progress.py |
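A sketch of sharing one progress display across several downloads by passing it through the progress argument of cached_path() (the URLs are hypothetical):

from cached_path import cached_path, get_download_progress

urls = ["https://example.com/a.bin", "https://example.com/b.bin"]
progress = get_download_progress()
with progress:
    for url in urls:
        cached_path(url, progress=progress)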
import logging
import os
import shutil
import tempfile
from pathlib import Path
from .common import get_cache_dir, set_cache_dir
class BaseTestClass:
"""
A custom testing class that disables some of the more verbose
logging and that creates and destroys a temp directory as a test fixture.
"""
PROJECT_ROOT = (Path(__file__).parent / "..").resolve()
MODULE_ROOT = PROJECT_ROOT / "cached_path"
TOOLS_ROOT = MODULE_ROOT / "tools"
TESTS_ROOT = PROJECT_ROOT / "tests"
FIXTURES_ROOT = PROJECT_ROOT / "test_fixtures"
def setup_method(self):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.DEBUG
)
# Disabling some of the more verbose logging statements that typically aren't very helpful
# in tests.
logging.getLogger("urllib3.connectionpool").disabled = True
self.TEST_DIR = Path(tempfile.mkdtemp(prefix="cached_path_tests"))
os.makedirs(self.TEST_DIR, exist_ok=True)
self._initial_cache_dir = get_cache_dir()
set_cache_dir(self.TEST_DIR)
def teardown_method(self):
set_cache_dir(self._initial_cache_dir)
shutil.rmtree(self.TEST_DIR)
| cached_path-main | cached_path/testing.py |
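A sketch of a pytest-style test that inherits the fixture class above, so each test method runs against an isolated temporary cache directory:

from cached_path import get_cache_dir
from cached_path.testing import BaseTestClass

class TestWithTempCache(BaseTestClass):
    def test_cache_dir_is_isolated(self):
        assert get_cache_dir() == self.TEST_DIR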
import logging
import os
import tempfile
from pathlib import Path
from .common import PathOrStr
logger = logging.getLogger(__name__)
class CacheFile:
"""
This is a context manager that makes robust caching easier.
On `__enter__`, an IO handle to a temporary file is returned, which can
be treated as if it's the actual cache file.
On `__exit__`, the temporary file is renamed to the cache file. If anything
goes wrong while writing to the temporary file, it will be removed.
"""
def __init__(self, cache_filename: PathOrStr, mode: str = "w+b", suffix: str = ".tmp") -> None:
self.cache_filename = Path(cache_filename)
self.cache_directory = os.path.dirname(self.cache_filename)
self.mode = mode
self.temp_file = tempfile.NamedTemporaryFile(
self.mode, dir=self.cache_directory, delete=False, suffix=suffix
)
def __enter__(self):
return self.temp_file
def __exit__(self, exc_type, exc_value, traceback):
self.temp_file.close()
if exc_value is None:
# Success.
logger.debug(
"Renaming temp file %s to cache at %s", self.temp_file.name, self.cache_filename
)
# Rename the temp file to the actual cache filename.
os.replace(self.temp_file.name, self.cache_filename)
return True
# Something went wrong, remove the temp file.
logger.debug("removing temp file %s", self.temp_file.name)
os.remove(self.temp_file.name)
return False
| cached_path-main | cached_path/cache_file.py |
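A sketch of CacheFile's write-then-rename behaviour: the temporary file is only promoted to the cache path on a clean exit (the cache path is hypothetical):

from cached_path.cache_file import CacheFile

with CacheFile("/tmp/example-cache-entry") as tmp:
    tmp.write(b"downloaded bytes would go here")
# /tmp/example-cache-entry now exists; had the block raised, it would not.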
import json
import os
import time
from dataclasses import asdict, dataclass
from typing import Optional, Set
from .common import PathOrStr
@dataclass
class Meta:
"""
Any resource that is downloaded to - or extracted in - the cache directory will
have a meta JSON file written next to it, which corresponds to an instance
of this class.
In older versions of AllenNLP, this meta document just had two fields: 'url' and
'etag'. The 'url' field is now the more general 'resource' field, but these old
meta files are still compatible when a `Meta` is instantiated with the `.from_path()`
class method.
"""
resource: str
"""
URL or normalized path to the resource.
"""
cached_path: str
"""
Path to the corresponding cached version of the resource.
"""
creation_time: float
"""
The unix timestamp of when the corresponding resource was cached or extracted.
"""
size: int = 0
"""
The size of the corresponding resource, in bytes.
"""
etag: Optional[str] = None
"""
Optional ETag associated with the current cached version of the resource.
"""
extraction_dir: bool = False
"""
Does this meta correspond to an extraction directory?
"""
@classmethod
def new(
cls,
resource: PathOrStr,
cached_path: PathOrStr,
*,
etag: Optional[str] = None,
extraction_dir: bool = False
) -> "Meta":
return cls( # type: ignore
resource=str(resource),
cached_path=str(cached_path),
creation_time=time.time(),
size=cls.get_resource_size(cached_path),
etag=etag,
extraction_dir=extraction_dir,
)
def to_file(self) -> None:
with open(self.cached_path + ".json", "w") as meta_file:
json.dump(asdict(self), meta_file)
@classmethod
def from_path(cls, path: PathOrStr) -> "Meta":
path = str(path)
with open(path) as meta_file:
data = json.load(meta_file)
# For backwards compat:
if "resource" not in data:
data["resource"] = data.pop("url")
if "creation_time" not in data:
data["creation_time"] = os.path.getmtime(path[:-5])
if "extraction_dir" not in data and path.endswith("-extracted.json"):
data["extraction_dir"] = True
if "cached_path" not in data:
data["cached_path"] = path[:-5]
if "size" not in data:
data["size"] = cls.get_resource_size(data["cached_path"])
return cls(**data) # type: ignore
@staticmethod
def get_resource_size(path: PathOrStr) -> int:
"""
Get the size of a file or directory.
"""
if os.path.isfile(path):
return os.path.getsize(path)
inodes: Set[int] = set()
total_size = 0
for dirpath, dirnames, filenames in os.walk(str(path)):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is a symbolic link or the same as a file we've already accounted
# for (this could happen with hard links).
inode = os.stat(fp).st_ino
if not os.path.islink(fp) and inode not in inodes:
inodes.add(inode)
total_size += os.path.getsize(fp)
return total_size
| cached_path-main | cached_path/meta.py |
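A sketch of writing and reading the sidecar meta file for a cache entry (the paths and etag are hypothetical):

import os

from cached_path.meta import Meta

os.makedirs("/tmp/cache-sketch", exist_ok=True)
meta = Meta.new("https://example.com/data.bin", "/tmp/cache-sketch/abc123", etag="xyz")
meta.to_file()  # writes /tmp/cache-sketch/abc123.json next to the (would-be) cached file
roundtrip = Meta.from_path("/tmp/cache-sketch/abc123.json")
print(roundtrip.resource, roundtrip.etag, roundtrip.size)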
from typing import Set, Type
from .gs import GsClient
from .hf import hf_get_from_cache
from .http import HttpClient
from .s3 import S3Client
from .scheme_client import SchemeClient
__all__ = ["GsClient", "HttpClient", "S3Client", "SchemeClient", "hf_get_from_cache"]
try:
from .beaker import BeakerClient
__all__.append("BeakerClient")
except (ImportError, ModuleNotFoundError):
BeakerClient = None # type: ignore
_SCHEME_TO_CLIENT = {}
def add_scheme_client(client: Type[SchemeClient]) -> None:
"""
Add a new :class:`SchemeClient`.
This can be used to extend :func:`cached_path.cached_path()` to handle custom schemes, or handle
existing schemes differently.
"""
global _SCHEME_TO_CLIENT
if isinstance(client.scheme, tuple):
for scheme in client.scheme:
_SCHEME_TO_CLIENT[scheme] = client
elif isinstance(client.scheme, str):
_SCHEME_TO_CLIENT[client.scheme] = client
else:
raise ValueError(f"Unexpected type for {client} scheme: {client.scheme}")
for client in (HttpClient, S3Client, GsClient):
add_scheme_client(client) # type: ignore
if BeakerClient is not None:
add_scheme_client(BeakerClient)
def get_scheme_client(resource: str) -> SchemeClient:
"""
Get the right client for the given resource.
"""
maybe_scheme = resource.split("://")[0]
return _SCHEME_TO_CLIENT.get(maybe_scheme, HttpClient)(resource)
def get_supported_schemes() -> Set[str]:
"""
Return all supported URL schemes.
"""
return set(_SCHEME_TO_CLIENT.keys()) | {"hf"}
| cached_path-main | cached_path/schemes/__init__.py |
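A sketch of inspecting the registered scheme clients and resolving one for a URL:

from cached_path.schemes import get_scheme_client, get_supported_schemes

print(get_supported_schemes())                          # e.g. {"http", "https", "s3", "gs", "hf", ...}
client = get_scheme_client("https://example.com/file")  # unknown schemes fall back to HttpClient
print(type(client).__name__)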
import io
from pathlib import Path
from typing import Optional
from beaker import Beaker, ChecksumFailedError, DatasetNotFound, DatasetReadError
from .scheme_client import SchemeClient
class BeakerClient(SchemeClient):
scheme = ("beaker",)
recoverable_errors = SchemeClient.recoverable_errors + (DatasetReadError, ChecksumFailedError)
def __init__(self, resource: str) -> None:
super().__init__(resource)
self.beaker = Beaker.from_env()
# Beaker resources should be in the form "{user}/{dataset_name}/{path}/{to}/{file}"
path = Path(resource.split("://")[1])
if len(path.parts) < 2:
raise ValueError(
f"Invalid beaker resource URL '{resource}'. "
"Resources should be in the form 'beaker://{user_name}/{dataset_name}/{path_to_file}' "
"or beaker://{dataset_id}/{path_to_file}."
)
try:
user, dataset_name, *filepath_parts = path.parts
self.dataset = self.beaker.dataset.get(f"{user}/{dataset_name}")
except DatasetNotFound:
dataset_id, *filepath_parts = path.parts
self.dataset = self.beaker.dataset.get(dataset_id)
self.filepath = "/".join(filepath_parts)
self.file_info = self.beaker.dataset.file_info(self.dataset, self.filepath)
def get_etag(self) -> Optional[str]:
return None if self.file_info.digest is None else str(self.file_info.digest)
def get_size(self) -> Optional[int]:
return self.file_info.size
def get_resource(self, temp_file: io.BufferedWriter) -> None:
for chunk in self.beaker.dataset.stream_file(self.dataset, self.filepath, quiet=True):
if chunk:
temp_file.write(chunk)
| cached_path-main | cached_path/schemes/beaker.py |
import io
from typing import Optional
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from urllib3.exceptions import MaxRetryError
from .scheme_client import SchemeClient
RECOVERABLE_SERVER_ERROR_CODES = (502, 503, 504)
class RecoverableServerError(requests.exceptions.HTTPError):
"""
Server returned one of `RECOVERABLE_SERVER_ERROR_CODES`.
"""
def session_with_backoff() -> requests.Session:
"""
We ran into an issue where http requests to s3 were timing out,
possibly because we were making too many requests too quickly.
This helper function returns a requests session that has retry-with-backoff
built in. See
<https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library>.
"""
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=RECOVERABLE_SERVER_ERROR_CODES)
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
return session
class HttpClient(SchemeClient):
scheme = ("http", "https")
recoverable_errors = SchemeClient.recoverable_errors + (RecoverableServerError,)
def __init__(self, resource: str) -> None:
super().__init__(resource)
self._head_response = None
@property
def head_response(self):
if self._head_response is None:
try:
with session_with_backoff() as session:
response = session.head(self.resource, allow_redirects=True)
except MaxRetryError as e:
raise RecoverableServerError(e.reason)
self.validate_response(response)
self._head_response = response
return self._head_response
else:
return self._head_response
def get_etag(self) -> Optional[str]:
return self.head_response.headers.get("ETag")
def get_size(self) -> Optional[int]:
content_length = self.head_response.headers.get("Content-Length")
return None if content_length is None else int(content_length)
def get_resource(self, temp_file: io.BufferedWriter) -> None:
with session_with_backoff() as session:
try:
response = session.get(self.resource, stream=True)
except MaxRetryError as e:
raise RecoverableServerError(e.reason)
self.validate_response(response)
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
temp_file.write(chunk)
def validate_response(self, response):
if response.status_code == 404:
raise FileNotFoundError(self.resource)
if response.status_code in RECOVERABLE_SERVER_ERROR_CODES:
raise RecoverableServerError(response=response)
response.raise_for_status()
| cached_path-main | cached_path/schemes/http.py |
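A sketch of using HttpClient directly to read the headers that cached_path() relies on (normally you would go through get_scheme_client() instead):

from cached_path.schemes.http import HttpClient

client = HttpClient("https://github.com/allenai/cached_path/blob/main/README.md")
print(client.get_etag(), client.get_size())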
import io
from abc import abstractmethod
from typing import ClassVar, Optional, Tuple, Type, Union
import requests
class SchemeClient:
"""
A client used for caching remote resources corresponding to URLs with a particular scheme.
Subclasses must define the :attr:`scheme` class variable and implement
:meth:`get_etag()` and :meth:`get_resource()`.
.. important::
Take care when implementing subclasses to raise the right error types
from :meth:`get_etag()` and :meth:`get_resource()`.
"""
recoverable_errors: ClassVar[Tuple[Type[BaseException], ...]] = (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
)
"""
Subclasses can override this to define error types that will be treated as recoverable.
If ``cached_path()`` catches one of these errors while calling :meth:`get_etag()`, it
will log a warning and return the latest cached version if there is one, otherwise
it will propagate the error.
"""
scheme: ClassVar[Union[str, Tuple[str, ...]]] = tuple()
"""
The scheme or schemes that the client will be used for (e.g. "http").
"""
def __init__(self, resource: str) -> None:
self.resource = resource
@abstractmethod
def get_etag(self) -> Optional[str]:
"""
Get the Etag or an equivalent version identifier associated with the resource.
Returns
-------
``Optional[str]``
The ETag as a ``str`` or ``None`` if there is no ETag associated with
the resource.
Raises
------
``FileNotFoundError``
If the resource doesn't exist.
``Recoverable error``
Any error type defined in ``SchemeClient.recoverable_errors`` will
be treated as a recoverable error.
This means that when one of these is caught by ``cached_path()``,
it will look for cached versions of the given resource and return the
latest version if there are any.
Otherwise the error is propagated.
``Other errors``
Any other error type can be raised. These errors will be treated as non-recoverable
and will be propagated immediately by ``cached_path()``.
"""
raise NotImplementedError
@abstractmethod
def get_size(self) -> Optional[int]:
"""
Get the size of the resource in bytes (if known).
Returns
-------
``Optional[int]``
The size (in bytes).
Raises
------
``FileNotFoundError``
If the resource doesn't exist.
``Recoverable error``
Any error type defined in ``SchemeClient.recoverable_errors`` will
be treated as a recoverable error.
This means that when one of these is caught by ``cached_path()``, the size
will be ignored.
``Other errors``
Any other error type can be raised. These errors will be treated as non-recoverable
and will be propagated immediately by ``cached_path()``.
"""
raise NotImplementedError
@abstractmethod
def get_resource(self, temp_file: io.BufferedWriter) -> None:
"""
Download the resource to the given temporary file.
Raises
------
``FileNotFoundError``
If the resource doesn't exist.
``Other errors``
Any other error type can be raised. These errors will be treated as non-recoverable
and will be propagated immediately by ``cached_path()``.
"""
raise NotImplementedError
| cached_path-main | cached_path/schemes/scheme_client.py |
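A minimal sketch of a custom SchemeClient for a hypothetical "mem://" scheme, registered with add_scheme_client() so that cached_path() can resolve such URLs:

import io
from typing import Optional

from cached_path import SchemeClient, add_scheme_client

_FAKE_STORE = {"mem://greeting": b"hello from an in-memory resource"}  # hypothetical data

class MemClient(SchemeClient):
    scheme = "mem"

    def get_etag(self) -> Optional[str]:
        if self.resource not in _FAKE_STORE:
            raise FileNotFoundError(self.resource)
        return None  # no version identifier for this toy scheme

    def get_size(self) -> Optional[int]:
        return len(_FAKE_STORE[self.resource])

    def get_resource(self, temp_file: io.BufferedWriter) -> None:
        temp_file.write(_FAKE_STORE[self.resource])

add_scheme_client(MemClient)
# cached_path("mem://greeting") would now be resolved through MemClient.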
"""
Google Cloud Storage.
"""
import io
from typing import Optional, Tuple
from google.api_core.exceptions import NotFound
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import storage
from google.cloud.storage.retry import DEFAULT_RETRY
from ..common import _split_cloud_path
from .scheme_client import SchemeClient
class GsClient(SchemeClient):
scheme = "gs"
def __init__(self, resource: str) -> None:
super().__init__(resource)
self.blob = GsClient.get_gcs_blob(resource)
self._loaded = False
def load(self):
if not self._loaded:
try:
self.blob.reload()
self._loaded = True
except NotFound:
raise FileNotFoundError(self.resource)
def get_etag(self) -> Optional[str]:
self.load()
return self.blob.etag or self.blob.md5_hash
def get_size(self) -> Optional[int]:
self.load()
return self.blob.size
def get_resource(self, temp_file: io.BufferedWriter) -> None:
self.load()
self.blob.download_to_file(temp_file, checksum="md5", retry=DEFAULT_RETRY)
@staticmethod
def split_gcs_path(resource: str) -> Tuple[str, str]:
return _split_cloud_path(resource, "gs")
@staticmethod
def get_gcs_blob(resource: str) -> storage.blob.Blob:
try:
gcs_resource = storage.Client()
except DefaultCredentialsError:
gcs_resource = storage.Client.create_anonymous_client()
bucket_name, gcs_path = GsClient.split_gcs_path(resource)
bucket = gcs_resource.bucket(bucket_name)
blob = bucket.blob(gcs_path)
return blob
| cached_path-main | cached_path/schemes/gs.py |
"""
AWS S3.
"""
import io
from typing import Optional, Tuple
import boto3
import botocore
from ..common import _split_cloud_path
from .scheme_client import SchemeClient
class S3Client(SchemeClient):
recoverable_errors = SchemeClient.recoverable_errors + (
botocore.exceptions.EndpointConnectionError,
)
scheme = "s3"
def __init__(self, resource: str) -> None:
super().__init__(resource)
bucket_name, s3_path = S3Client.split_s3_path(resource)
session = boto3.session.Session()
if session.get_credentials() is None:
# Use unsigned requests.
s3_resource = session.resource(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
else:
s3_resource = session.resource("s3")
self.s3_object = s3_resource.Object(bucket_name, s3_path)
self._loaded = False
def load(self):
if not self._loaded:
try:
self.s3_object.load()
self._loaded = True
except botocore.exceptions.ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(self.resource))
else:
raise
def get_etag(self) -> Optional[str]:
self.load()
return self.s3_object.e_tag
def get_size(self) -> Optional[int]:
self.load()
return self.s3_object.content_length
def get_resource(self, temp_file: io.BufferedWriter) -> None:
self.load()
self.s3_object.download_fileobj(temp_file)
@staticmethod
def split_s3_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "s3")
| cached_path-main | cached_path/schemes/s3.py |
"""
HuggingFace Hub.
Unlike the other schemes, we don't implement a `SchemeClient` subclass here because
`huggingface_hub` handles the caching logic internally in essentially the same way.
"""
from pathlib import Path
from typing import Optional
import huggingface_hub as hf_hub
import requests
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
)
from ..common import PathOrStr
from ..version import VERSION
def hf_hub_download(
model_identifier: str, filename: Optional[str], cache_dir: Optional[PathOrStr] = None
) -> Path:
revision: Optional[str]
if "@" in model_identifier:
repo_id = model_identifier.split("@")[0]
revision = model_identifier.split("@")[1]
else:
repo_id = model_identifier
revision = None
if filename is not None:
return Path(
hf_hub.hf_hub_download(
repo_id=repo_id,
filename=filename,
revision=revision,
library_name="cached_path",
library_version=VERSION,
cache_dir=cache_dir,
)
)
else:
return Path(hf_hub.snapshot_download(repo_id, revision=revision, cache_dir=cache_dir))
def hf_get_from_cache(url: str, cache_dir: Optional[PathOrStr] = None) -> Path:
if cache_dir is not None:
cache_dir = Path(cache_dir).expanduser()
cache_dir.mkdir(parents=True, exist_ok=True)
# Remove the 'hf://' prefix
identifier = url[5:]
if identifier.count("/") > 1:
filename = "/".join(identifier.split("/")[2:])
model_identifier = "/".join(identifier.split("/")[:2])
return hf_hub_download(model_identifier, filename, cache_dir)
elif identifier.count("/") == 1:
# 'hf://' URLs like 'hf://xxxx/yyyy' are potentially ambiguous,
# because this could refer to either:
# 1. the file 'yyyy' in the 'xxxx' repository, or
# 2. the repo 'yyyy' under the user/org name 'xxxx'.
# We default to (1), but if we get a 404 error or 401 error then we try (2)
try:
model_identifier, filename = identifier.split("/")
return hf_hub_download(model_identifier, filename, cache_dir)
except (RepositoryNotFoundError, RevisionNotFoundError, EntryNotFoundError):
return hf_hub_download(identifier, None, cache_dir)
except requests.exceptions.HTTPError as exc:
if exc.response is not None and exc.response.status_code in {401, 404}:
return hf_hub_download(identifier, None, cache_dir)
else:
raise
except ValueError:
return hf_hub_download(identifier, None, cache_dir)
else:
return hf_hub_download(identifier, None, cache_dir)
| cached_path-main | cached_path/schemes/hf.py |
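A sketch of the 'hf://' URL shapes handled above, using the dummy repo already referenced in this repo's docstrings:

from cached_path import cached_path

# A specific file in a repo: hf://{user}/{repo}/{path}
cached_path("hf://epwalsh/bert-xsmall-dummy/pytorch_model.bin")

# A whole repo snapshot, optionally pinned to a revision with '@':
# cached_path("hf://epwalsh/bert-xsmall-dummy")
# cached_path("hf://epwalsh/bert-xsmall-dummy@main")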
from concurrent import futures
import random
import sys
import time
import grpc
import numpy
from pyhocon import ConfigFactory
# These have to be before we do any import from keras. It would be nice to be able to pass in a
# value for this, but that makes argument passing a whole lot more complicated. If/when we change
# how arguments work (e.g., loading a file), then we can think about setting this as a parameter.
random.seed(13370)
numpy.random.seed(1337) # pylint: disable=no-member
# pylint: disable=wrong-import-position
from keras import backend as K
from deep_qa.common.checks import ensure_pythonhashseed_set
from deep_qa.common.params import get_choice
from deep_qa.models import concrete_models
from deep_qa.models.reading_comprehension.bidirectional_attention import BidirectionalAttentionFlow
from deep_qa.data.instances.character_span_instance import CharacterSpanInstance
from deep_qa.data.instances.true_false_instance import TrueFalseInstance
from deep_qa.data.instances.multiple_true_false_instance import MultipleTrueFalseInstance
from deep_qa.data.instances.question_answer_instance import QuestionAnswerInstance
from deep_qa.data.instances.background_instance import BackgroundInstance
from proto import message_pb2
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class SolverServer(message_pb2.SolverServiceServicer):
def __init__(self, solver):
self.solver = solver
self.answers_multiple_choice_questions = True
if isinstance(self.solver, BidirectionalAttentionFlow):
self.answers_multiple_choice_questions = False
if K.backend() == "tensorflow":
import tensorflow
self.graph = tensorflow.get_default_graph()
with self.graph.as_default():
self.solver.load_model()
else:
self.solver.load_model()
# The name of this method is specified in message.proto.
def AnswerQuestion(self, request, context):
instance = self.read_instance_message(request.question)
try:
if K.backend() == "tensorflow":
with self.graph.as_default():
scores = self.solver.score_instance(instance)
else:
scores = self.solver.score_instance(instance)
except:
print("Instance was: " + str(instance))
raise
response = message_pb2.QuestionResponse()
if self.answers_multiple_choice_questions:
response.type = message_pb2.MULTIPLE_CHOICE
for score in scores.tolist():
response.scores.extend(score)
else:
response.type = message_pb2.DIRECT_ANSWER
begin_span_idx, end_span_idx = scores
string_response = instance.passage_text[begin_span_idx:end_span_idx]
response.answer = string_response
return response
def read_instance_message(self, instance_message):
# pylint: disable=redefined-variable-type
instance_type = instance_message.type
if instance_type == message_pb2.TRUE_FALSE:
text = instance_message.question
instance = TrueFalseInstance(text, None, None)
elif instance_type == message_pb2.MULTIPLE_TRUE_FALSE:
options = []
for instance in instance_message.contained_instances:
options.append(self.read_instance_message(instance))
instance = MultipleTrueFalseInstance(options)
elif instance_type == message_pb2.QUESTION_ANSWER:
question = instance_message.question
options = instance_message.answer_options
instance = QuestionAnswerInstance(question, options, None, None)
elif instance_type == message_pb2.CHARACTER_SPAN:
question = instance_message.question
passage = instance_message.passage
instance = CharacterSpanInstance(question, passage, None, None)
else:
raise RuntimeError("Unrecognized instance type: " + str(instance_type))
if instance_message.background_instances:
background = instance_message.background_instances
background_instances = [self.read_instance_message(instance) for instance in background]
instance = BackgroundInstance(instance, background_instances)
return instance
def serve(port: int, config_file: str):
# read in the Typesafe-style config file and lookup the port to run on.
solver_params = ConfigFactory.parse_file(config_file)
# create the server and add our RPC "servicer" to it
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
model_type = get_choice(solver_params, 'model_class', concrete_models.keys())
solver_class = concrete_models[model_type]
solver = solver_class(solver_params)
message_pb2.add_SolverServiceServicer_to_server(SolverServer(solver), server)
# start the server on the specified port
server.add_insecure_port('[::]:{0}'.format(port))
print("starting server")
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
def main():
ensure_pythonhashseed_set()
if len(sys.argv) != 3:
print('USAGE: server.py [port] [config_file]')
print('RECEIVED: ' + ' '.join(sys.argv))
sys.exit(-1)
port = int(sys.argv[1])
config_file = sys.argv[2]
serve(port, config_file)
if __name__ == '__main__':
main()
| deep_qa_experiments-master | src/main/python/server.py |
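A rough client-side sketch for the gRPC service above; the host and port are hypothetical and assume a server already started via server.py:

import grpc

from proto import message_pb2

channel = grpc.insecure_channel("localhost:50051")
stub = message_pb2.SolverServiceStub(channel)

request = message_pb2.QuestionRequest()
request.question.type = message_pb2.CHARACTER_SPAN
request.question.question = "Who wrote the passage?"
request.question.passage = "The passage was written by a hypothetical author."

response = stub.AnswerQuestion(request)
print(response.type, response.answer, list(response.scores))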
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='message.proto',
package='deep_qa',
syntax='proto3',
serialized_pb=_b('\n\rmessage.proto\x12\x07\x64\x65\x65p_qa\"\xcb\x01\n\x08Instance\x12#\n\x04type\x18\x01 \x01(\x0e\x32\x15.deep_qa.InstanceType\x12\x10\n\x08question\x18\x02 \x01(\t\x12\x16\n\x0e\x61nswer_options\x18\x03 \x03(\t\x12/\n\x14\x62\x61\x63kground_instances\x18\x04 \x03(\x0b\x32\x11.deep_qa.Instance\x12.\n\x13\x63ontained_instances\x18\x05 \x03(\x0b\x32\x11.deep_qa.Instance\x12\x0f\n\x07passage\x18\x06 \x01(\t\"6\n\x0fQuestionRequest\x12#\n\x08question\x18\x01 \x01(\x0b\x32\x11.deep_qa.Instance\"W\n\x10QuestionResponse\x12#\n\x04type\x18\x01 \x01(\x0e\x32\x15.deep_qa.QuestionType\x12\x0e\n\x06scores\x18\x02 \x03(\x01\x12\x0e\n\x06\x61nswer\x18\x03 \x01(\t*o\n\x0cInstanceType\x12\r\n\tUNDEFINED\x10\x00\x12\x0e\n\nTRUE_FALSE\x10\x01\x12\x17\n\x13MULTIPLE_TRUE_FALSE\x10\x02\x12\x13\n\x0fQUESTION_ANSWER\x10\x03\x12\x12\n\x0e\x43HARACTER_SPAN\x10\x04*S\n\x0cQuestionType\x12\x1b\n\x17UNDEFINED_QUESTION_TYPE\x10\x00\x12\x13\n\x0fMULTIPLE_CHOICE\x10\x01\x12\x11\n\rDIRECT_ANSWER\x10\x02\x32X\n\rSolverService\x12G\n\x0e\x41nswerQuestion\x12\x18.deep_qa.QuestionRequest\x1a\x19.deep_qa.QuestionResponse\"\x00\x42\x15\n\x13org.allenai.deep_qab\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_INSTANCETYPE = _descriptor.EnumDescriptor(
name='InstanceType',
full_name='deep_qa.InstanceType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRUE_FALSE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MULTIPLE_TRUE_FALSE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUESTION_ANSWER', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHARACTER_SPAN', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=377,
serialized_end=488,
)
_sym_db.RegisterEnumDescriptor(_INSTANCETYPE)
InstanceType = enum_type_wrapper.EnumTypeWrapper(_INSTANCETYPE)
_QUESTIONTYPE = _descriptor.EnumDescriptor(
name='QuestionType',
full_name='deep_qa.QuestionType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNDEFINED_QUESTION_TYPE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MULTIPLE_CHOICE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DIRECT_ANSWER', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=490,
serialized_end=573,
)
_sym_db.RegisterEnumDescriptor(_QUESTIONTYPE)
QuestionType = enum_type_wrapper.EnumTypeWrapper(_QUESTIONTYPE)
UNDEFINED = 0
TRUE_FALSE = 1
MULTIPLE_TRUE_FALSE = 2
QUESTION_ANSWER = 3
CHARACTER_SPAN = 4
UNDEFINED_QUESTION_TYPE = 0
MULTIPLE_CHOICE = 1
DIRECT_ANSWER = 2
_INSTANCE = _descriptor.Descriptor(
name='Instance',
full_name='deep_qa.Instance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='deep_qa.Instance.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='question', full_name='deep_qa.Instance.question', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='answer_options', full_name='deep_qa.Instance.answer_options', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='background_instances', full_name='deep_qa.Instance.background_instances', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='contained_instances', full_name='deep_qa.Instance.contained_instances', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='passage', full_name='deep_qa.Instance.passage', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=27,
serialized_end=230,
)
_QUESTIONREQUEST = _descriptor.Descriptor(
name='QuestionRequest',
full_name='deep_qa.QuestionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='question', full_name='deep_qa.QuestionRequest.question', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=232,
serialized_end=286,
)
_QUESTIONRESPONSE = _descriptor.Descriptor(
name='QuestionResponse',
full_name='deep_qa.QuestionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='deep_qa.QuestionResponse.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scores', full_name='deep_qa.QuestionResponse.scores', index=1,
number=2, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='answer', full_name='deep_qa.QuestionResponse.answer', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=288,
serialized_end=375,
)
_INSTANCE.fields_by_name['type'].enum_type = _INSTANCETYPE
_INSTANCE.fields_by_name['background_instances'].message_type = _INSTANCE
_INSTANCE.fields_by_name['contained_instances'].message_type = _INSTANCE
_QUESTIONREQUEST.fields_by_name['question'].message_type = _INSTANCE
_QUESTIONRESPONSE.fields_by_name['type'].enum_type = _QUESTIONTYPE
DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE
DESCRIPTOR.message_types_by_name['QuestionRequest'] = _QUESTIONREQUEST
DESCRIPTOR.message_types_by_name['QuestionResponse'] = _QUESTIONRESPONSE
DESCRIPTOR.enum_types_by_name['InstanceType'] = _INSTANCETYPE
DESCRIPTOR.enum_types_by_name['QuestionType'] = _QUESTIONTYPE
Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict(
DESCRIPTOR = _INSTANCE,
__module__ = 'message_pb2'
# @@protoc_insertion_point(class_scope:deep_qa.Instance)
))
_sym_db.RegisterMessage(Instance)
QuestionRequest = _reflection.GeneratedProtocolMessageType('QuestionRequest', (_message.Message,), dict(
DESCRIPTOR = _QUESTIONREQUEST,
__module__ = 'message_pb2'
# @@protoc_insertion_point(class_scope:deep_qa.QuestionRequest)
))
_sym_db.RegisterMessage(QuestionRequest)
QuestionResponse = _reflection.GeneratedProtocolMessageType('QuestionResponse', (_message.Message,), dict(
DESCRIPTOR = _QUESTIONRESPONSE,
__module__ = 'message_pb2'
# @@protoc_insertion_point(class_scope:deep_qa.QuestionResponse)
))
_sym_db.RegisterMessage(QuestionResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023org.allenai.deep_qa'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class SolverServiceStub(object):
"""The service definition
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AnswerQuestion = channel.unary_unary(
'/deep_qa.SolverService/AnswerQuestion',
request_serializer=QuestionRequest.SerializeToString,
response_deserializer=QuestionResponse.FromString,
)
class SolverServiceServicer(object):
"""The service definition
"""
def AnswerQuestion(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SolverServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'AnswerQuestion': grpc.unary_unary_rpc_method_handler(
servicer.AnswerQuestion,
request_deserializer=QuestionRequest.FromString,
response_serializer=QuestionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'deep_qa.SolverService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaSolverServiceServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""The service definition
"""
def AnswerQuestion(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaSolverServiceStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""The service definition
"""
def AnswerQuestion(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
AnswerQuestion.future = None
def beta_create_SolverService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('deep_qa.SolverService', 'AnswerQuestion'): QuestionRequest.FromString,
}
response_serializers = {
('deep_qa.SolverService', 'AnswerQuestion'): QuestionResponse.SerializeToString,
}
method_implementations = {
('deep_qa.SolverService', 'AnswerQuestion'): face_utilities.unary_unary_inline(servicer.AnswerQuestion),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_SolverService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('deep_qa.SolverService', 'AnswerQuestion'): QuestionRequest.SerializeToString,
}
response_deserializers = {
('deep_qa.SolverService', 'AnswerQuestion'): QuestionResponse.FromString,
}
cardinalities = {
'AnswerQuestion': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'deep_qa.SolverService', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
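# Usage sketch (not emitted by protoc; the server address below is an assumption): a client
# could call the SolverService defined above through the GA gRPC API roughly like this:
#
#   channel = grpc.insecure_channel("localhost:50051")
#   stub = SolverServiceStub(channel)
#   response = stub.AnswerQuestion(QuestionRequest(question=Instance(passage="...")))
#   print(response.type, list(response.scores), response.answer)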
| deep_qa_experiments-master | src/main/python/proto/message_pb2.py |
deep_qa_experiments-master | src/main/python/proto/__init__.py |
|
# There are lots of ways to set up this training script. We're putting the bulk of the code inside
# the my_project module, with a simple run script in the base directory. If you prefer, you could
# just take train.py and move it to the top-level directory and use that as your run.py. Do
# whatever you're most comfortable with.
from my_project.train import run_training_loop
run_training_loop(serialization_dir="results/")
| allennlp-template-python-script-master | run.py |
import tempfile
from allennlp.common.testing import ModelTestCase
from my_project.train import (
build_dataset_reader,
build_vocab,
build_model,
build_data_loaders,
build_trainer,
)
class TestSimpleClassifier(ModelTestCase):
def test_model_can_train(self):
with tempfile.TemporaryDirectory() as serialization_dir:
reader = build_dataset_reader()
train_loader, dev_loader = build_data_loaders(
reader, "tests/fixtures/toy_data.tsv", "tests/fixtures/toy_data.tsv"
)
vocab = build_vocab(train_loader, dev_loader)
train_loader.index_with(vocab)
dev_loader.index_with(vocab)
# Ideally you'd want to build a tiny toy model here, instead of calling the full
# build_model function, like we do with the data above.
model = build_model(vocab)
trainer = build_trainer(
model, serialization_dir, train_loader, dev_loader
)
# This built-in test makes sure that your data can load, that it gets passed to the
# model correctly, that your model computes a loss in a way that we can get gradients
# from it, and that all of your parameters get non-zero gradient updates.
self.ensure_model_can_train(trainer)
| allennlp-template-python-script-master | tests/test_model.py |
allennlp-template-python-script-master | tests/__init__.py |
|
from allennlp.common.testing import AllenNlpTestCase
from my_project.dataset_reader import ClassificationTsvReader
class TestTextClassificationJsonReader(AllenNlpTestCase):
def test_read_from_file_ag_news_corpus_and_truncates_properly(self):
reader = ClassificationTsvReader()
data_path = "tests/fixtures/toy_data.tsv"
instances = list(reader.read(data_path))
assert len(instances) == 2
fields = instances[0].fields
expected_tokens = ["it", "is", "movies", "like", "these"]
assert [t.text for t in fields["text"].tokens][:5] == expected_tokens
assert fields["label"].label == "neg"
fields = instances[1].fields
expected_tokens = ["the", "music", "is", "well-chosen", "and"]
assert [t.text for t in fields["text"].tokens][:5] == expected_tokens
assert fields["label"].label == "pos"
| allennlp-template-python-script-master | tests/test_dataset_reader.py |
# These imports are important for making the configuration files find the classes that you wrote.
# If you don't have these, you'll get errors about allennlp not being able to find
# "simple_classifier", or whatever name you registered your model with. These imports and the
# contents of .allennlp_plugins make it so you can just use `allennlp train`, and we will find your
# classes and use them. If you change the name of `my_project`, you'll also need to change it in
# the same way in the .allennlp_plugins file.
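# A minimal .allennlp_plugins file for this template would contain a single line naming the
# package to import, i.e. "my_project".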
from my_project.model import *
from my_project.dataset_reader import *
| allennlp-template-python-script-master | my_project/__init__.py |
from typing import Dict
import torch
from allennlp.data import Vocabulary, TextFieldTensors
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("simple_classifier")
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward( # type: ignore
self, text: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
output = {"probs": probs}
if label is not None:
self.accuracy(logits, label)
output["loss"] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
| allennlp-template-python-script-master | my_project/model.py |
# This file contains a bunch of build_* methods that configure objects however you want, and a
# run_training_loop method that calls these methods and runs the trainer.
from itertools import chain
from typing import Iterable, Tuple
import allennlp
import torch
from allennlp.data import DataLoader, DatasetReader, Instance, Vocabulary
from allennlp.data.data_loaders import MultiProcessDataLoader
from allennlp.models import Model
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.training.trainer import GradientDescentTrainer, Trainer
from allennlp.training.optimizers import AdamOptimizer
from my_project.dataset_reader import ClassificationTsvReader
from my_project.model import SimpleClassifier
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def build_vocab(train_loader, dev_loader) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(
chain(train_loader.iter_instances(), dev_loader.iter_instances())
)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)}
)
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def build_data_loaders(
reader,
train_data_path: str,
validation_data_path: str,
) -> Tuple[DataLoader, DataLoader]:
train_loader = MultiProcessDataLoader(
reader, train_data_path, batch_size=8, shuffle=True
)
dev_loader = MultiProcessDataLoader(
reader, validation_data_path, batch_size=8, shuffle=False
)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader,
) -> Trainer:
parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
optimizer = AdamOptimizer(parameters) # type: ignore
# There are a *lot* of other things you could configure with the trainer. See
# http://docs.allennlp.org/master/api/training/trainer/#gradientdescenttrainer-objects for more
# information.
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
validation_metric="+accuracy",
)
return trainer
def run_training_loop(serialization_dir: str):
reader = build_dataset_reader()
train_loader, dev_loader = build_data_loaders(
reader, "/path/to/your/training/data", "/path/to/your/validation/data"
)
vocab = build_vocab(train_loader, dev_loader)
model = build_model(vocab)
# This is the allennlp-specific functionality in the Dataset object;
# we need to be able to convert strings in the data to integers, and this
# is how we do it.
train_loader.index_with(vocab)
dev_loader.index_with(vocab)
trainer = build_trainer(model, serialization_dir, train_loader, dev_loader)
# NOTE: Training using multiple GPUs is hard in this setting. If you want multi-GPU training,
# we recommend using our config file template instead, which handles this case better, as well
# as saving the model in a way that it can be easily loaded later. If you really want to use
# your own python script with distributed training, have a look at the code for the allennlp
# train command (https://github.com/allenai/allennlp/blob/master/allennlp/commands/train.py),
# which is where we handle distributed training. Also, let us know on github that you want
# this; we could refactor things to make this usage much easier, if there's enough interest.
print("Starting training")
trainer.train()
print("Finished training")
| allennlp-template-python-script-master | my_project/train.py |
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance, Field
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance: # type: ignore
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields: Dict[str, Field] = {"text": text_field}
if label:
fields["label"] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
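# Each line of the input file is expected to be "<text>\t<label>"; one instance is yielded per line.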
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
yield self.text_to_instance(text, sentiment)
| allennlp-template-python-script-master | my_project/dataset_reader.py |
import pandas as pd
import ai2thor.controller
ENV_ARGS = dict(
gridSize=0.25,
width=224,
height=224,
visibilityDistance=1.0,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
useMassThreshold=True,
massThreshold=10,
autoSimulation=False,
autoSyncTransforms=True,
renderInstanceSegmentation=True,
)
commit_id = "a84dd29471ec2201f583de00257d84fac1a03de2"
ENV_ARGS["commit_id"] = commit_id
controller = ai2thor.controller.Controller(**ENV_ARGS)
kitchens = [f"FloorPlan{i}" for i in range(1, 31)]
living_rooms = [f"FloorPlan{200 + i}" for i in range(1, 31)]
bedrooms = [f"FloorPlan{300 + i}" for i in range(1, 31)]
bathrooms = [f"FloorPlan{400 + i}" for i in range(1, 31)]
scenes = kitchens
# + living_rooms + bedrooms + bathrooms
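# For each scene below, the script records every object's position, advances the physics
# simulation for 200 steps without any agent action, and measures how far each object drifts.
# The resulting per-object "vibration" distances, written to vibrations.csv, serve as a
# baseline when detecting agent-caused disturbances.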
def make_dict_from_object(object_list):
result = {}
for obj in object_list:
result[obj["objectId"]] = dict(position=obj["position"])
return result
def position_distance(s1, s2):
position1 = s1["position"]
position2 = s2["position"]
dist = (
(position1["x"] - position2["x"]) ** 2
+ (position1["y"] - position2["y"]) ** 2
+ (position1["z"] - position2["z"]) ** 2
) ** 0.5
return dist
def object_vibration_list(d1, d2):
vib = {"object": [], "dist": []}
for object_id in d1.keys():
vib["object"].append(object_id)
vib["dist"].append(position_distance(d1[object_id], d2[object_id]))
return vib
results = []
for scene in scenes:
print(scene)
controller.reset(scene)
total = 200
initial_objects = make_dict_from_object(controller.last_event.metadata["objects"])
for i in range(total):
controller.step("AdvancePhysicsStep")
final_objects = make_dict_from_object(controller.last_event.metadata["objects"])
vib = object_vibration_list(initial_objects, final_objects)
df = pd.DataFrame.from_dict(vib)
df["scene"] = scene
results.append(df)
results = pd.concat(results)
results.to_csv(
"projects/manipulathor_disturb_free/manipulathor_plugin/vibrations.csv", index=False
)
| disturb-free-main | manipulathor_plugin/disturb_dist_dict.py |
"""Task Samplers for the task of ArmPointNav"""
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
ArmPointNavTaskSampler as RawArmPointNavTaskSampler,
)
from projects.manipulathor_disturb_free.manipulathor_plugin.manipulathor_task import (
ArmPointNavTask,
RotateArmPointNavTask,
CamRotateArmPointNavTask,
)
class ArmPointNavTaskSampler(RawArmPointNavTaskSampler):
_TASK_TYPE = ArmPointNavTask
class RotateArmPointNavTaskSampler(ArmPointNavTaskSampler):
_TASK_TYPE = RotateArmPointNavTask
class CamRotateArmPointNavTaskSampler(ArmPointNavTaskSampler):
_TASK_TYPE = CamRotateArmPointNavTask
| disturb-free-main | manipulathor_plugin/manipulathor_task_samplers.py |
from allenact_plugins.manipulathor_plugin.manipulathor_tasks import (
ArmPointNavTask as RawArmPointNavTask,
RotateArmPointNavTask as RawRotateArmPointNavTask,
CamRotateArmPointNavTask as RawCamRotateArmPointNavTask,
)
import pandas as pd
DF = pd.read_csv(
"projects/manipulathor_disturb_free/manipulathor_plugin/vibrations.csv"
)
# using a dict is much faster to query than a DataFrame
VIBRATION_DISTANCES = {}
for i in range(DF.shape[0]):
VIBRATION_DISTANCES[DF.at[i, "scene"] + "-" + DF.at[i, "object"]] = DF.at[i, "dist"]
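# VIBRATION_DISTANCES maps "<scene>-<objectId>" to that object's baseline drift distance from vibrations.csv.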
class ArmPointNavTask(RawArmPointNavTask):
_vibration_dist_dict = VIBRATION_DISTANCES
class RotateArmPointNavTask(RawRotateArmPointNavTask):
_vibration_dist_dict = VIBRATION_DISTANCES
class CamRotateArmPointNavTask(RawCamRotateArmPointNavTask):
_vibration_dist_dict = VIBRATION_DISTANCES
| disturb-free-main | manipulathor_plugin/manipulathor_task.py |
"""Utility classes and functions for sensory inputs used by the models."""
from typing import Any, Union, Optional
import gym
import numpy as np
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.manipulathor_plugin.manipulathor_environment import (
ManipulaTHOREnvironment,
)
from projects.manipulathor_disturb_free.manipulathor_plugin.manipulathor_task import DF
class DisturbanceSensor(Sensor):
def __init__(self, uuid: str = "disturbance_binary", **kwargs: Any):
observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=np.bool_)  # np.bool_ avoids the deprecated np.bool alias
super().__init__(**prepare_locals_for_super(locals()))
raw = (
DF.groupby("scene").sum() / 200
) # average per-step vibration, summed over all objects in the scene
raw = raw.clip(lower=0.001)
self.vibration_distances_scene = raw.to_dict()["dist"]
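# The observation is True when the task's current disturbance distance meets or exceeds the
# scene-specific baseline threshold computed above.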
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
scene_id = env.scene_name.split("_")[0]
thres = self.vibration_distances_scene[scene_id]
disturbance_distance = task.current_penalized_distance
result = disturbance_distance >= thres # bool
return result
| disturb-free-main | manipulathor_plugin/disturb_sensor.py |
disturb-free-main | armpointnav_baselines/__init__.py |
|
import platform
from abc import ABC
from math import ceil
from typing import Dict, Any, List, Optional, Sequence
import os
import gym
import numpy as np
import torch
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
SimpleArmPointNavGeneralSampler,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_base import (
ArmPointNavBaseConfig,
)
from allenact.utils.system import get_logger
class ArmPointNavThorBaseConfig(ArmPointNavBaseConfig, ABC):
"""The base config for all iTHOR PointNav experiments."""
TASK_SAMPLER = SimpleArmPointNavGeneralSampler
VISUALIZERS = []
THOR_COMMIT_ID: Optional[str] = None
NUM_PROCESSES: Optional[int] = None
TRAIN_GPU_IDS = list(range(torch.cuda.device_count()))
SAMPLER_GPU_IDS = TRAIN_GPU_IDS
VALID_GPU_IDS = [torch.cuda.device_count() - 1]
TEST_GPU_IDS = [torch.cuda.device_count() - 1]
TRAIN_DATASET_DIR: Optional[str] = None
VAL_DATASET_DIR: Optional[str] = None
CAP_TRAINING = None
TRAIN_SCENES: str = None
VAL_SCENES: str = None
TEST_SCENES: str = None
OBJECT_TYPES: Optional[Sequence[str]] = None
VALID_SAMPLES_IN_SCENE = 1
TEST_SAMPLES_IN_SCENE = 1
NUMBER_OF_TEST_PROCESS = 10
def __init__(self):
super().__init__()
assert (
self.SCREEN_SIZE == 224
and self.VISIBILITY_DISTANCE == 1
and self.STEP_SIZE == 0.25
)
self.ENV_ARGS = ENV_ARGS
def machine_params(self, mode="train", **kwargs):
sampler_devices: Sequence[int] = []
if mode == "train":
workers_per_device = 1
gpu_ids = (
[]
if not torch.cuda.is_available()
else self.TRAIN_GPU_IDS * workers_per_device
)
nprocesses = (
1
if not torch.cuda.is_available()
else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))
)
sampler_devices = self.SAMPLER_GPU_IDS
elif mode == "valid":
nprocesses = 1
gpu_ids = [] if not torch.cuda.is_available() else self.VALID_GPU_IDS
elif mode == "test":
nprocesses = self.NUMBER_OF_TEST_PROCESS if torch.cuda.is_available() else 1
gpu_ids = [] if not torch.cuda.is_available() else self.TEST_GPU_IDS
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
sensors = [*self.SENSORS]
if mode != "train":
sensors = [s for s in sensors if not isinstance(s, ExpertActionSensor)]
sensor_preprocessor_graph = (
SensorPreprocessorGraph(
source_observation_spaces=SensorSuite(sensors).observation_spaces,
preprocessors=self.preprocessors(),
)
if mode == "train"
or (
(isinstance(nprocesses, int) and nprocesses > 0)
or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
)
else None
)
return MachineParams(
nprocesses=nprocesses,
devices=gpu_ids,
sampler_devices=sampler_devices
if mode == "train"
else gpu_ids, # ignored with > 1 gpu_ids
sensor_preprocessor_graph=sensor_preprocessor_graph,
)
# @classmethod
def make_sampler_fn(self, **kwargs) -> TaskSampler:
from datetime import datetime
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
if len(self.VISUALIZERS) > 0:
assert self.test_ckpt is not None
# print("sampler_fn", self.test_ckpt)
exp_folder = os.path.join(
*self.test_ckpt.split("/")[:-2], # experiment folder
"vis",
self.test_ckpt.split("_")[-1], # checkpoint step
self.tag(),
now,
)
kwargs["visualizers"] = [
visualizer(exp_name=exp_folder) for visualizer in self.VISUALIZERS
]
kwargs["objects"] = self.OBJECT_TYPES
return self.TASK_SAMPLER(**kwargs)
@staticmethod
def _partition_inds(n: int, num_parts: int):
return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
np.int32
)
def _get_sampler_args_for_scene_split(
self,
scenes: List[str],
process_ind: int,
total_processes: int,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
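# Split the scene list across processes: oversample scenes when there are more processes than
# scenes, otherwise partition the scenes evenly.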
if total_processes > len(scenes): # oversample some scenes -> bias
if total_processes % len(scenes) != 0:
print(
"Warning: oversampling some of the scenes to feed all processes."
" You can avoid this by setting a number of workers divisible by the number of scenes"
)
scenes = scenes * int(ceil(total_processes / len(scenes)))
scenes = scenes[: total_processes * (len(scenes) // total_processes)]
else:
if len(scenes) % total_processes != 0:
print(
"Warning: subsampling some of the scenes to feed all processes."
" You can avoid this by setting a number of workers divisor of the number of scenes"
)
inds = self._partition_inds(len(scenes), total_processes)
return {
"scenes": scenes[inds[process_ind] : inds[process_ind + 1]],
"env_args": self.ENV_ARGS,
"max_steps": self.MAX_STEPS,
"sensors": self.SENSORS,
"action_space": gym.spaces.Discrete(
len(self.TASK_SAMPLER._TASK_TYPE.class_action_names())
),
"seed": seeds[process_ind] if seeds is not None else None,
"deterministic_cudnn": deterministic_cudnn,
"rewards_config": self.REWARD_CONFIG,
}
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
self.TRAIN_SCENES,
process_ind,
total_processes,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
res["scene_period"] = "manual"
res["sampler_mode"] = "train"
res["cap_training"] = self.CAP_TRAINING
res["env_args"] = {}
res["env_args"].update(self.ENV_ARGS)
res["env_args"]["x_display"] = (
("0.%d" % devices[process_ind % len(devices)]) if len(devices) > 0 else None
)
return res
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]],
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
self.VALID_SCENES,
process_ind,
total_processes,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
res["scene_period"] = self.VALID_SAMPLES_IN_SCENE
res["sampler_mode"] = "val"
res["cap_training"] = self.CAP_TRAINING
res["max_tasks"] = self.VALID_SAMPLES_IN_SCENE * len(res["scenes"])
res["env_args"] = {}
res["env_args"].update(self.ENV_ARGS)
res["env_args"]["x_display"] = (
("0.%d" % devices[process_ind % len(devices)]) if len(devices) > 0 else None
)
return res
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]],
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
self.TEST_SCENES,
process_ind,
total_processes,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
res["scene_period"] = self.TEST_SAMPLES_IN_SCENE
res["sampler_mode"] = "test"
res["num_task_per_scene"] = self.NUM_TASK_PER_SCENE
res["env_args"] = {}
res["cap_training"] = self.CAP_TRAINING
res["env_args"].update(self.ENV_ARGS)
res["env_args"]["x_display"] = (
("0.%d" % devices[process_ind % len(devices)]) if len(devices) > 0 else None
)
return res
| disturb-free-main | armpointnav_baselines/experiments/armpointnav_thor_base.py |
from abc import ABC
from typing import Optional, Sequence, Union
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.experiment_utils import Builder
class ArmPointNavBaseConfig(ExperimentConfig, ABC):
"""The base object navigation configuration file."""
ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
SENSORS: Optional[Sequence[Sensor]] = None
STEP_SIZE = 0.25
ROTATION_DEGREES = 45.0
VISIBILITY_DISTANCE = 1.0
STOCHASTIC = False
CAMERA_WIDTH = 224
CAMERA_HEIGHT = 224
SCREEN_SIZE = 224
MAX_STEPS = 200
DISTURB_PEN = 0.0
DISTURB_VIS = True
def __init__(self):
self.REWARD_CONFIG = {
"step_penalty": -0.01,
"goal_success_reward": 10.0,
"pickup_success_reward": 5.0,
"failed_stop_reward": 0.0,
"shaping_weight": 1.0, # we are not using this
"failed_action_penalty": -0.03,
"disturb_penalty": self.DISTURB_PEN, # <=0, negative
"disturb_visible": self.DISTURB_VIS, # if consider the visible objects only
}
@classmethod
def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return tuple()
| disturb-free-main | armpointnav_baselines/experiments/armpointnav_base.py |
from typing import Dict, Tuple
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.embodiedai.aux_losses.losses import (
InverseDynamicsLoss,
CPCA16Loss,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.models.disturb_pred_loss import (
DisturbPredictionLoss,
)
# noinspection PyUnresolvedReferences
from allenact.embodiedai.models.fusion_models import AverageFusion
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_base import (
ArmPointNavBaseConfig,
)
class ArmPointNavMixInPPOConfig(ArmPointNavBaseConfig):
NORMALIZE_ADVANTAGE = (
# True
False
)
ADD_PREV_ACTIONS = (
True
# False
)
# selected auxiliary uuids
## if comment all the keys, then it's vanilla DD-PPO
AUXILIARY_UUIDS = [
# InverseDynamicsLoss.UUID,
# CPCA16Loss.UUID,
DisturbPredictionLoss.UUID,
]
MULTIPLE_BELIEFS = False
BELIEF_FUSION = None
def training_pipeline(self, **kwargs):
ppo_steps = int(30000000) # 30M
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = self.MAX_STEPS
save_interval = 1000000 # 1M
log_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
PPOConfig["normalize_advantage"] = self.NORMALIZE_ADVANTAGE
# Total losses
named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
named_losses = self._update_with_auxiliary_losses(named_losses)
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={key: val[0] for key, val in named_losses.items()},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=list(named_losses.keys()),
max_stage_steps=ppo_steps,
loss_weights=[val[1] for val in named_losses.values()],
)
],
lr_scheduler_builder=Builder(  # LambdaLR multiplies the base lr by lr_lambda(step)
LambdaLR,
{"lr_lambda": LinearDecay(steps=ppo_steps, startp=1.0, endp=1.0 / 3)},
),
)
@classmethod
def _update_with_auxiliary_losses(cls, named_losses):
# auxiliary losses
aux_loss_total_weight = 2.0
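# Each selected auxiliary loss below therefore contributes with weight 0.05 * 2.0 = 0.1 relative to the PPO loss.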
# Total losses
total_aux_losses: Dict[str, Tuple[AbstractActorCriticLoss, float]] = {
InverseDynamicsLoss.UUID: (
InverseDynamicsLoss(subsample_rate=0.2, subsample_min_num=10,),
0.05 * aux_loss_total_weight,
),
CPCA16Loss.UUID: (
CPCA16Loss(subsample_rate=0.2,),
0.05 * aux_loss_total_weight,
),
DisturbPredictionLoss.UUID: (
DisturbPredictionLoss(gamma=cls.DISTURB_FOCAL_GAMMA),
0.05 * aux_loss_total_weight,
),
}
named_losses.update(
{uuid: total_aux_losses[uuid] for uuid in cls.AUXILIARY_UUIDS}
)
return named_losses
| disturb-free-main | armpointnav_baselines/experiments/armpointnav_mixin_ddppo.py |
disturb-free-main | armpointnav_baselines/experiments/__init__.py |
|
from typing import Sequence, Union
import gym
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
RelativeAgentArmToObjectSensor,
RelativeObjectToGoalSensor,
PickedUpObjSensor,
)
from projects.manipulathor_disturb_free.manipulathor_plugin.disturb_sensor import (
DisturbanceSensor,
)
from allenact.utils.experiment_utils import Builder
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_base import (
ArmPointNavBaseConfig,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.models.arm_pointnav_models import (
ArmPointNavBaselineActorCritic,
)
class ArmPointNavAdvancedACConfig(ArmPointNavBaseConfig):
@classmethod
def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
preprocessors = []
return preprocessors
BACKBONE = "gnresnet18"
INFERENCE_COEF = 0.0
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
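# Look up each sensor's uuid by type so the model can address the corresponding observations;
# a missing sensor resolves to None.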
rgb_uuid = next((s.uuid for s in cls.SENSORS if isinstance(s, RGBSensor)), None)
depth_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, DepthSensor)), None
)
arm2obj_uuid = next(
(
s.uuid
for s in cls.SENSORS
if isinstance(s, RelativeAgentArmToObjectSensor)
),
None,
)
obj2goal_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, RelativeObjectToGoalSensor)),
None,
)
pickedup_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, PickedUpObjSensor)), None,
)
disturbance_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, DisturbanceSensor)), None,
)
return ArmPointNavBaselineActorCritic(
# Env and Task
action_space=gym.spaces.Discrete(
len(cls.TASK_SAMPLER._TASK_TYPE.class_action_names())
),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
arm2obj_uuid=arm2obj_uuid,
obj2goal_uuid=obj2goal_uuid,
pickedup_uuid=pickedup_uuid,
disturbance_uuid=disturbance_uuid,
# RNN
hidden_size=512
if cls.MULTIPLE_BELIEFS == False or len(cls.AUXILIARY_UUIDS) <= 1
else 256,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=cls.ADD_PREV_ACTIONS,
action_embed_size=16,
# CNN
backbone=cls.BACKBONE,
resnet_baseplanes=32,
# goal sensor
goal_embedding_size=32,  # changed to a smaller value than the hidden size
goal_space_mode=cls.GOAL_SPACE_MODE,
# Aux
auxiliary_uuids=cls.AUXILIARY_UUIDS,
multiple_beliefs=cls.MULTIPLE_BELIEFS,
beliefs_fusion=cls.BELIEF_FUSION,
inference_coef=cls.INFERENCE_COEF,
)
| disturb-free-main | armpointnav_baselines/experiments/armpointnav_mixin_actorcritic.py |
from abc import ABC
import torch
from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
TRAIN_OBJECTS,
TEST_OBJECTS,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_thor_base import (
ArmPointNavThorBaseConfig,
)
class ArmPointNaviThorBaseConfig(ArmPointNavThorBaseConfig, ABC):
"""The base config for all iTHOR ObjectNav experiments."""
THOR_COMMIT_ID = "a84dd29471ec2201f583de00257d84fac1a03de2"
NUM_PROCESSES = 19
TRAIN_GPU_IDS = list(range(torch.cuda.device_count()))
SAMPLER_GPU_IDS = TRAIN_GPU_IDS
VALID_GPU_IDS = [torch.cuda.device_count() - 1]
TEST_GPU_IDS = [torch.cuda.device_count() - 1]
# add all the arguments here
TOTAL_NUMBER_SCENES = 30
TRAIN_SCENES = [
"FloorPlan{}_physics".format(str(i))
for i in range(1, TOTAL_NUMBER_SCENES + 1)
if (i % 3 == 1 or i % 3 == 0) and i != 28
] # scene 28 is excluded because of poor quality, so there are actually 19 training scenes
TEST_SCENES = [
"FloorPlan{}_physics".format(str(i))
for i in range(1, TOTAL_NUMBER_SCENES + 1)
if i % 3 == 2 and i % 6 == 2
] # 5 scenes
VALID_SCENES = [
"FloorPlan{}_physics".format(str(i))
for i in range(1, TOTAL_NUMBER_SCENES + 1)
if i % 3 == 2 and i % 6 == 5
] # 5 scenes
ALL_SCENES = TRAIN_SCENES + TEST_SCENES + VALID_SCENES
assert (
len(ALL_SCENES) == TOTAL_NUMBER_SCENES - 1
and len(set(ALL_SCENES)) == TOTAL_NUMBER_SCENES - 1
)
OBJECT_TYPES = tuple(sorted(TRAIN_OBJECTS))
UNSEEN_OBJECT_TYPES = tuple(sorted(TEST_OBJECTS))
| disturb-free-main | armpointnav_baselines/experiments/ithor/armpointnav_ithor_base.py |
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
DepthSensorThor,
RelativeAgentArmToObjectSensor,
RelativeObjectToGoalSensor,
PickedUpObjSensor,
)
from projects.manipulathor_disturb_free.manipulathor_plugin.disturb_sensor import (
DisturbanceSensor,
)
from projects.manipulathor_disturb_free.manipulathor_plugin.manipulathor_task_samplers import (
ArmPointNavTaskSampler,
CamRotateArmPointNavTaskSampler,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_mixin_ddppo import (
ArmPointNavMixInPPOConfig,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_mixin_actorcritic import (
ArmPointNavAdvancedACConfig,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.ithor.armpointnav_ithor_base import (
ArmPointNaviThorBaseConfig,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.models.disturb_pred_loss import (
DisturbPredictionLoss,
)
class ArmPointNavDepth(
ArmPointNaviThorBaseConfig, ArmPointNavMixInPPOConfig, ArmPointNavAdvancedACConfig,
):
"""An Object Navigation experiment configuration in iThor with Depth
input."""
ACTION_SPACE = (
# "original"
"cam_rotate"
)
if ACTION_SPACE == "original":
TASK_SAMPLER = ArmPointNavTaskSampler
else:
TASK_SAMPLER = CamRotateArmPointNavTaskSampler
DISTURB_PEN = (
# -25.0
# -20.0
# -15.0
# -10.0
# -5.0
# -1.0
0.0
)
DISTURB_VIS = False
DISTURB_FOCAL_GAMMA = 2.0
BACKBONE = (
# "simple_cnn"
"gnresnet18"
)
LOAD_PRETRAINED_WEIGHTS = (
# True
False
)
COORD_SYSTEM = (
# "xyz_unsigned"
"polar_radian"
)
GOAL_SPACE_MODE = "man_sel"
SENSORS = [
DepthSensorThor(
height=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
width=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
RelativeAgentArmToObjectSensor(coord_system=COORD_SYSTEM,),
RelativeObjectToGoalSensor(coord_system=COORD_SYSTEM,),
PickedUpObjSensor(),
DisturbanceSensor(),
]
MAX_STEPS = 200
def __init__(self):
super().__init__()
assert (
self.CAMERA_WIDTH == 224
and self.CAMERA_HEIGHT == 224
and self.VISIBILITY_DISTANCE == 1
and self.STEP_SIZE == 0.25
)
self.ENV_ARGS = {**ENV_ARGS, "renderDepthImage": True}
if self.THOR_COMMIT_ID is not None:
self.ENV_ARGS["commit_id"] = self.THOR_COMMIT_ID
@classmethod
def tag(cls):
# some basic assumptions
assert cls.NORMALIZE_ADVANTAGE == False
assert cls.ADD_PREV_ACTIONS == True
assert cls.BACKBONE == "gnresnet18"
# assert cls.LOAD_PRETRAINED_WEIGHTS == False
assert cls.COORD_SYSTEM == "polar_radian"
# assert cls.ACTION_SPACE == "cam_rotate"
assert cls.INFERENCE_COEF == 0.0
aux_tag = cls.BACKBONE
if cls.NORMALIZE_ADVANTAGE:
aux_tag += "-NormAdv"
else:
aux_tag += "-woNormAdv"
if cls.ADD_PREV_ACTIONS:
aux_tag += "-wact"
else:
aux_tag += "-woact"
aux_tag += "-" + cls.GOAL_SPACE_MODE
aux_tag += "-" + cls.COORD_SYSTEM
if cls.LOAD_PRETRAINED_WEIGHTS:
aux_tag += "-finetune"
else:
aux_tag += "-scratch"
aux_tag += f"-disturb_pen{abs(cls.DISTURB_PEN)}"
if cls.DISTURB_VIS:
aux_tag += "_vis"
else:
aux_tag += "_all"
if cls.AUXILIARY_UUIDS is None or (
isinstance(cls.AUXILIARY_UUIDS, list) and len(cls.AUXILIARY_UUIDS) == 0
):
aux_tag += "-no_aux"
else:
aux_tag += "-" + "-".join(cls.AUXILIARY_UUIDS)
if DisturbPredictionLoss.UUID in cls.AUXILIARY_UUIDS:
aux_tag += "-gamma" + str(cls.DISTURB_FOCAL_GAMMA)
if len(cls.AUXILIARY_UUIDS) > 1 and cls.MULTIPLE_BELIEFS:
aux_tag += "-mulbelief-" + cls.BELIEF_FUSION
return aux_tag
| disturb-free-main | armpointnav_baselines/experiments/ithor/armpointnav_depth.py |
disturb-free-main | armpointnav_baselines/experiments/ithor/__init__.py |
|
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
DepthSensorThor,
RelativeAgentArmToObjectSensor,
RelativeObjectToGoalSensor,
PickedUpObjSensor,
)
from projects.manipulathor_disturb_free.manipulathor_plugin.disturb_sensor import (
DisturbanceSensor,
)
# noinspection PyUnresolvedReferences
from allenact.embodiedai.aux_losses.losses import (
InverseDynamicsLoss,
CPCA16Loss,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.models.disturb_pred_loss import (
DisturbPredictionLoss,
)
# noinspection PyUnresolvedReferences
from allenact.embodiedai.models.fusion_models import AverageFusion
from projects.manipulathor_disturb_free.manipulathor_plugin.manipulathor_task_samplers import (
ArmPointNavTaskSampler,
CamRotateArmPointNavTaskSampler,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_mixin_ddppo import (
ArmPointNavMixInPPOConfig,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_mixin_actorcritic import (
ArmPointNavAdvancedACConfig,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.ithor.armpointnav_ithor_base import (
ArmPointNaviThorBaseConfig,
)
# noinspection PyUnresolvedReferences
from allenact_plugins.manipulathor_plugin.manipulathor_viz import (
ImageVisualizer,
TestMetricLogger,
)
from typing import Optional
class TestScene(
ArmPointNaviThorBaseConfig, ArmPointNavMixInPPOConfig, ArmPointNavAdvancedACConfig,
):
VISUALIZERS = [
# lambda exp_name: ImageVisualizer(exp_name,
# add_top_down_view=True,
# add_depth_map=True,
# ),
# lambda exp_name: TestMetricLogger(exp_name),
]
CAMERA_WIDTH = (
224
# 224 * 2
)
CAMERA_HEIGHT = (
224
# 224 * 2
)
NUM_TASK_PER_SCENE = (
None
# 6
)
NUMBER_OF_TEST_PROCESS = 5
TEST_GPU_IDS = [0] # has to be one gpu
TEST_SCENES_DICT = {
"ValidScene": ArmPointNaviThorBaseConfig.VALID_SCENES,
"TestScene": ArmPointNaviThorBaseConfig.TEST_SCENES,
}
OBJECT_TYPES_DICT = {
"novel": ArmPointNaviThorBaseConfig.UNSEEN_OBJECT_TYPES,
"seen": ArmPointNaviThorBaseConfig.OBJECT_TYPES,
"all": ArmPointNaviThorBaseConfig.OBJECT_TYPES
+ ArmPointNaviThorBaseConfig.UNSEEN_OBJECT_TYPES,
}
TEST_SCENES_NAME = (
# "ValidScene"
"TestScene"
)
OBJECT_TYPES_NAME = (
"novel"
# "seen"
# "all"
)
TEST_SCENES = TEST_SCENES_DICT[TEST_SCENES_NAME]
OBJECT_TYPES = OBJECT_TYPES_DICT[OBJECT_TYPES_NAME]
ACTION_SPACE = (
# "original"
"cam_rotate"
)
if ACTION_SPACE == "original":
TASK_SAMPLER = ArmPointNavTaskSampler
else:
TASK_SAMPLER = CamRotateArmPointNavTaskSampler
DISTURB_PEN = (
# -25.0
# -20.0
-15.0
# -10.0
# -5.0
# -1.0
# 0.0
)
DISTURB_VIS = False
INFERENCE_COEF = 0.0
# selected auxiliary uuids
AUXILIARY_UUIDS = [
# InverseDynamicsLoss.UUID,
# CPCA16Loss.UUID,
DisturbPredictionLoss.UUID,
]
MULTIPLE_BELIEFS = False # True #
BELIEF_FUSION = None
MAX_STEPS = 200
BACKBONE = (
# "simple_cnn"
"gnresnet18"
)
ADD_PREV_ACTIONS = (
True
# False
)
COORD_SYSTEM = (
# "xyz_unsigned" # used in CVPR 2021 paper
"polar_radian" # used in our method
)
GOAL_SPACE_MODE = "man_sel"
SENSORS = [
DepthSensorThor(
height=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
width=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
RelativeAgentArmToObjectSensor(coord_system=COORD_SYSTEM,),
RelativeObjectToGoalSensor(coord_system=COORD_SYSTEM,),
PickedUpObjSensor(),
DisturbanceSensor(),
]
def __init__(self, test_ckpt: Optional[str] = None):
super().__init__()
self.test_ckpt = test_ckpt
assert (
self.SCREEN_SIZE == 224
and self.VISIBILITY_DISTANCE == 1
and self.STEP_SIZE == 0.25
)
self.ENV_ARGS = ENV_ARGS
self.ENV_ARGS["width"] = self.CAMERA_WIDTH
self.ENV_ARGS["height"] = self.CAMERA_HEIGHT
depth_uuid = next(
(s.uuid for s in self.SENSORS if isinstance(s, DepthSensorThor)), None
)
if depth_uuid is not None:
self.ENV_ARGS["renderDepthImage"] = True
if self.THOR_COMMIT_ID is not None:
self.ENV_ARGS["commit_id"] = self.THOR_COMMIT_ID
@classmethod
def tag(cls):
assert cls.NUM_TASK_PER_SCENE == None
tag_name = cls.TEST_SCENES_NAME + "-objects_" + str(cls.OBJECT_TYPES_NAME)
if cls.INFERENCE_COEF > 0.0:
tag_name += "-safety" + str(cls.INFERENCE_COEF)
return tag_name
| disturb-free-main | armpointnav_baselines/experiments/eval/TestScene.py |
"""Baseline models for use in the Arm Point Navigation task.
Arm Point Navigation is currently available as a Task in ManipulaTHOR.
"""
from typing import Tuple, Dict, Optional, cast, List
from collections import OrderedDict
from allenact.utils.system import get_logger
import gym
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ObservationType,
DistributionType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact.embodiedai.models.basic_models import SimpleCNN
import allenact.embodiedai.models.resnet as resnet
from allenact.embodiedai.models.visual_nav_models import (
VisualNavActorCritic,
FusionType,
)
from allenact.embodiedai.models.aux_models import AuxiliaryModel
from projects.manipulathor_disturb_free.armpointnav_baselines.models.disturb_pred_loss import (
DisturbPredictionLoss,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.models.aux_model import (
AuxiliaryModel as DisturbAuxiliaryModel,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.models.manipulathor_net_utils import (
input_embedding_net,
)
class ArmPointNavBaselineActorCritic(VisualNavActorCritic):
"""Baseline recurrent actor critic model for armpointnav task.
# Attributes
action_space : The space of actions available to the agent. Currently only discrete
actions are allowed (so this space will always be of type `gym.spaces.Discrete`).
observation_space : The observation space expected by the agent. This observation space
should include (optionally) 'rgb' images and 'depth' images.
hidden_size : The hidden size of the GRU RNN.
goal_embedding_size : The dimensionality of the embedding of the goal coordinate sensors
(relative arm-to-object and object-to-goal distances).
"""
def __init__(
self,
# Env and Task
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
arm2obj_uuid: str,
obj2goal_uuid: str,
pickedup_uuid: str,
disturbance_uuid: str,
# RNN
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=False,
action_embed_size=16,
# Aux loss
multiple_beliefs=False,
beliefs_fusion: Optional[FusionType] = None,
auxiliary_uuids: Optional[List[str]] = None,
# safety inference with the disturbance prediction task
inference_coef: float = 0.0,
# below are custom params
rgb_uuid: Optional[str] = None,
depth_uuid: Optional[str] = None,
goal_embedding_size=32,
goal_space_mode=None,
trainable_masked_hidden_state: bool = False,
# perception backbone params,
backbone="gnresnet18",
resnet_baseplanes=32,
):
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
action_space=action_space,
observation_space=observation_space,
hidden_size=hidden_size,
multiple_beliefs=multiple_beliefs,
beliefs_fusion=beliefs_fusion,
auxiliary_uuids=auxiliary_uuids,
)
self.goal_embedding_size = goal_embedding_size
self.goal_space_mode = goal_space_mode
self.backbone = backbone
self.rgb_uuid = rgb_uuid
self.depth_uuid = depth_uuid
self.arm2obj_uuid = arm2obj_uuid
self.obj2goal_uuid = obj2goal_uuid
self.pickedup_uuid = pickedup_uuid
self.disturbance_uuid = disturbance_uuid
assert inference_coef >= 0.0
self.inference_coef = inference_coef
if backbone == "simple_cnn":
self.visual_encoder = SimpleCNN(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
)
else: # resnet family
self.visual_encoder = resnet.GroupNormResNetEncoder(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
)
self.create_state_encoders(
obs_embed_size=self.goal_visual_encoder_output_dims,
num_rnn_layers=num_rnn_layers,
rnn_type=rnn_type,
add_prev_actions=add_prev_actions,
prev_action_embed_size=action_embed_size,
trainable_masked_hidden_state=trainable_masked_hidden_state,
)
self.create_actorcritic_head()
self.create_aux_models(
obs_embed_size=self.goal_visual_encoder_output_dims,
action_embed_size=action_embed_size,
)
self.create_goal_sensor_model()
self.train()
get_logger().debug(self)
def create_goal_sensor_model(self):
assert self.goal_space_mode in ["man_sel", "pickup_obs", "coords_only"]
goal_sensor_dim = self.observation_space[self.arm2obj_uuid].shape[0]
assert goal_sensor_dim == self.observation_space[self.obj2goal_uuid].shape[0]
if (
self.goal_space_mode == "man_sel"
): # manually select which coordinate to use based on the boolean pickup flag
goal_embedding_sizes = torch.Tensor(
[goal_sensor_dim, 100, self.goal_embedding_size]
)
elif (
self.goal_space_mode == "pickup_obs"
): # observe the boolean selector to learn selection
goal_embedding_sizes = torch.Tensor(
[goal_sensor_dim * 2 + 1, 100, self.goal_embedding_size]
)
else: # only observe two coords
goal_embedding_sizes = torch.Tensor(
[goal_sensor_dim * 2, 100, self.goal_embedding_size]
)
self.goal_embedder = input_embedding_net(
goal_embedding_sizes.long().tolist(), dropout=0
)
def create_aux_models(self, obs_embed_size: int, action_embed_size: int):
if self.auxiliary_uuids is None:
return
aux_models = OrderedDict()
for aux_uuid in self.auxiliary_uuids:
if aux_uuid == DisturbPredictionLoss.UUID:
model_class = DisturbAuxiliaryModel
else:
model_class = AuxiliaryModel
aux_models[aux_uuid] = model_class(
aux_uuid=aux_uuid,
action_dim=self.action_space.n,
obs_embed_dim=obs_embed_size,
belief_dim=self._hidden_size,
action_embed_size=action_embed_size,
)
self.aux_models = nn.ModuleDict(aux_models)
@property
def is_blind(self) -> bool:
"""True if the model is blind (e.g. neither 'depth' or 'rgb' is an
input observation type)."""
return self.visual_encoder.is_blind
@property
def goal_visual_encoder_output_dims(self):
dims = self.goal_embedding_size
if self.is_blind:
return dims
if self.backbone == "simple_cnn":
input_visual_feature_num = int(self.rgb_uuid is not None) + int(
self.depth_uuid is not None
)
else: # resnet
input_visual_feature_num = 1
return dims + self.recurrent_hidden_state_size * input_visual_feature_num
def forward_encoder(self, observations: ObservationType) -> torch.FloatTensor:
arm2obj_dist_raw = observations[self.arm2obj_uuid]
obj2goal_dist_raw = observations[self.obj2goal_uuid]
pickup_bool_raw = observations[self.pickedup_uuid]
if self.goal_space_mode == "man_sel":
arm2obj_dist_embed = self.goal_embedder(arm2obj_dist_raw)
obj2goal_dist_embed = self.goal_embedder(obj2goal_dist_raw)
# use partial obj state space
after_pickup = pickup_bool_raw == 1
distances = arm2obj_dist_embed
distances[after_pickup] = obj2goal_dist_embed[after_pickup]
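# Before pickup the goal embedding is the arm-to-object distance; after pickup it switches to
# the object-to-goal distance. Note that `distances` aliases `arm2obj_dist_embed`, so that
# tensor is modified in place.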
elif self.goal_space_mode == "pickup_obs":
inputs_raw = torch.cat(
[
pickup_bool_raw.unsqueeze(-1), # (T, N, 1)
arm2obj_dist_raw,
obj2goal_dist_raw,
],
dim=-1,
)
distances = self.goal_embedder(inputs_raw)
else: # coords_only
inputs_raw = torch.cat([arm2obj_dist_raw, obj2goal_dist_raw], dim=-1)
distances = self.goal_embedder(inputs_raw)
obs_embeds = [distances]
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
obs_embeds = [perception_embed] + obs_embeds
obs_embeds = torch.cat(obs_embeds, dim=-1)
return obs_embeds
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
actor_critic_output, memory = super().forward(
observations, memory, prev_actions, masks,
)
if (
self.auxiliary_uuids is not None
and DisturbPredictionLoss.UUID in self.auxiliary_uuids
and self.inference_coef > 0.0
):
actor_critic_output.distributions = DisturbPredictionLoss.inference(
actor_critic_output, self.inference_coef,
)
return actor_critic_output, memory
| disturb-free-main | armpointnav_baselines/models/arm_pointnav_models.py |
import torch
import torch.nn as nn
class LinearActorHeadNoCategory(nn.Module):
def __init__(self, num_inputs: int, num_outputs: int):
super().__init__()
self.linear = nn.Linear(num_inputs, num_outputs)
nn.init.orthogonal_(self.linear.weight, gain=0.01)
nn.init.constant_(self.linear.bias, 0)
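# forward returns raw (unnormalized) logits of shape (T, B, num_outputs) without wrapping them in a distribution.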
def forward(self, x: torch.FloatTensor): # type: ignore
x = self.linear(x) # type:ignore
assert len(x.shape) == 3
return x
| disturb-free-main | armpointnav_baselines/models/base_models.py |
disturb-free-main | armpointnav_baselines/models/__init__.py |
|
"""Defining the auxiliary loss for actor critic type models."""
from typing import Dict, cast, Tuple, List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
from allenact.embodiedai.aux_losses.losses import AuxiliaryLoss
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import ObservationType
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
from projects.manipulathor_disturb_free.manipulathor_plugin.disturb_sensor import (
DisturbanceSensor,
)
class DisturbPredictionLoss(AuxiliaryLoss):
UUID = "Disturb_Pred"
def __init__(self, gamma=2.0, *args, **kwargs):
super().__init__(auxiliary_uuid=self.UUID, *args, **kwargs)
self.gamma = gamma
def get_aux_loss(
self,
aux_model: nn.Module,
observations: ObservationType,
obs_embeds: torch.FloatTensor,
actions: torch.FloatTensor,
beliefs: torch.FloatTensor,
masks: torch.FloatTensor,
*args,
**kwargs
):
# num_steps, num_sampler = actions.shape # T, B
# NOTE: alignment:
# bt = RNN(ot, a(t-1))
# d(t+1) <- M(bt, at)
actions = cast(torch.LongTensor, actions)
actions = actions.unsqueeze(-1) # (T, B, 1) for gather
## get disturbance prediction logits
raw_logits = aux_model(beliefs) # (T, B, dim) -> (T, B, A)
logits = torch.gather(input=raw_logits, dim=-1, index=actions) # (T, B, 1)
logits = logits.squeeze(-1)[
:-1
] # (T, B, 1) -> (T-1, B) final action does not have label
raw_disturb = observations[DisturbanceSensor().uuid].float() # (T, B)
next_disturb = raw_disturb[1:] # (T-1, B) next-step disturbance signal
# raw BCE loss -> focal loss
# https://discuss.pytorch.org/t/is-this-a-correct-implementation-for-focal-loss-in-pytorch/43327/5
raw_loss = F.binary_cross_entropy_with_logits(
logits, next_disturb, reduction="none"
) # (T-1, B), -log(pt)
probs = torch.exp(-raw_loss)
raw_focal_loss = (1.0 - probs) ** self.gamma * raw_loss
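# Focal loss: FL(p_t) = (1 - p_t) ** gamma * (-log p_t), which down-weights well-classified
# (mostly no-disturbance) steps.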
# NOTE: mask = 0.0 <-> the start of one episode (m1 = 0)
# a1, a2, ..., aN-1, aN, a1, a2, ...
# d2, d3, ..., dN, d1, d2, d3, ...
# m2, m3, ..., mN, m1, m2, m3, ...
masks = masks.squeeze(-1) # (T, B)
loss_masks = masks[1:] # (T-1, B)
num_valid_losses = torch.count_nonzero(loss_masks)
avg_loss = (raw_focal_loss * loss_masks).sum() / torch.clamp(
num_valid_losses, min=1.0
)
# report accuracy metrics
with torch.no_grad():
loss_masks = loss_masks.bool().flatten() # (T-1 * B)
disturb_preds = (torch.sigmoid(logits) > 0.5).int().flatten() # (T-1 * B)
disturb_preds = disturb_preds[loss_masks].cpu().numpy()
disturb_targets = next_disturb.int().flatten()[loss_masks].cpu().numpy()
matrix = confusion_matrix(
y_true=disturb_targets,
y_pred=disturb_preds,
labels=(0, 1), # in case of NaN
)
# real neg: TN | FP
# real pos: FN | TP
no_disturb_recall = matrix[0, 0] / max(matrix[0, 0] + matrix[0, 1], 1.0)
has_disturb_recall = matrix[1, 1] / max(matrix[1, 0] + matrix[1, 1], 1.0)
has_disturb_precision = matrix[1, 1] / max(matrix[0, 1] + matrix[1, 1], 1.0)
overall_acc = (matrix[0, 0] + matrix[1, 1]) / matrix.sum()
disturb_gt_ratio = (matrix[1, 0] + matrix[1, 1]) / matrix.sum()
disturb_pred_ratio = (matrix[0, 1] + matrix[1, 1]) / matrix.sum()
# from fpdb import ForkedPdb; ForkedPdb().set_trace()
return (
avg_loss,
{
"focal_loss": cast(torch.Tensor, avg_loss).item(),
"no_disturb_recall": cast(torch.Tensor, no_disturb_recall).item(),
"has_disturb_recall": cast(torch.Tensor, has_disturb_recall).item(),
"has_disturb_precision": cast(
torch.Tensor, has_disturb_precision
).item(),
"overall_acc": cast(torch.Tensor, overall_acc).item(),
"disturb_gt_ratio": cast(torch.Tensor, disturb_gt_ratio).item(),
"disturb_pred_ratio": cast(torch.Tensor, disturb_pred_ratio).item(),
},
)
@classmethod
def inference(
cls, actor_critic_output: ActorCriticOutput[CategoricalDistr], inference_coef,
):
# one-step inference
beliefs = actor_critic_output.extras[cls.UUID]["beliefs"] # (1, B, -1)
aux_model = actor_critic_output.extras[cls.UUID][
"aux_model"
] # given the trained model
raw_action_logits = actor_critic_output.distributions.logits # (1, B, A)
# NOTE: we don't need masks, because belief has reset if mask = 0.0
# the larger the logit, the higher the predicted probability of causing a disturbance
logits = aux_model(beliefs) # (1, B, A)
assert inference_coef > 0.0
new_logits = raw_action_logits - inference_coef * logits
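# actions with a high predicted disturbance logit get their policy logit reduced,
# so they are sampled less often at inference time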
# ignore the negative prediction logits
# new_logits = raw_action_logits - inference_coef * torch.clamp(logits, min=0.0)
return CategoricalDistr(logits=new_logits)
| disturb-free-main | armpointnav_baselines/models/disturb_pred_loss.py |
from typing import Tuple, Dict, Optional, Union, List, cast
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from projects.manipulathor_disturb_free.armpointnav_baselines.models.disturb_pred_loss import (
DisturbPredictionLoss,
)
class AuxiliaryModel(nn.Module):
def __init__(
self,
aux_uuid: str,
action_dim: int,
obs_embed_dim: int,
belief_dim: int,
disturb_hidden_dim: int = 128,
**kwargs
):
super().__init__()
self.aux_uuid = aux_uuid
self.action_dim = action_dim
self.obs_embed_dim = obs_embed_dim
self.belief_dim = belief_dim
assert self.aux_uuid == DisturbPredictionLoss.UUID
self.classifier = nn.ModuleList(
[
nn.Linear(self.belief_dim, disturb_hidden_dim),
nn.ReLU(),
nn.Linear(disturb_hidden_dim, self.action_dim),
]
)
# follow the focal loss trick: initialize the bias to a large negative value so the
# initial sigmoid output is ~0.01, i.e. the classifier starts out predicting the
# majority (no-disturbance) class correctly
torch.nn.init.constant_(self.classifier[-1].bias, -4.5)
def forward(self, features: torch.FloatTensor):
x = features
for m in self.classifier:
x = m(x)
return x
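# minimal usage sketch (dimensions below are illustrative assumptions, not repo defaults):
# aux = AuxiliaryModel(aux_uuid=DisturbPredictionLoss.UUID, action_dim=num_actions,
# obs_embed_dim=512, belief_dim=512)
# disturb_logits = aux(beliefs) # (T, B, belief_dim) -> (T, B, num_actions)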
| disturb-free-main | armpointnav_baselines/models/aux_model.py |
import pdb
import torch.nn as nn
import torch.nn.functional as F
def upshuffle(
in_planes, out_planes, upscale_factor, kernel_size=3, stride=1, padding=1
):
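# the Conv2d emits out_planes * upscale_factor^2 channels; PixelShuffle then rearranges
# (N, C*r^2, H, W) -> (N, C, H*r, W*r), i.e. sub-pixel convolution upsampling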
return nn.Sequential(
nn.Conv2d(
in_planes,
out_planes * upscale_factor ** 2,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
nn.PixelShuffle(upscale_factor),
nn.LeakyReLU(),
)
def upshufflenorelu(
in_planes, out_planes, upscale_factor, kernel_size=3, stride=1, padding=1
):
return nn.Sequential(
nn.Conv2d(
in_planes,
out_planes * upscale_factor ** 2,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
nn.PixelShuffle(upscale_factor),
)
def combine_block_w_bn(in_planes, out_planes):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, 1, 1),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(),
)
def conv2d_block(in_planes, out_planes, kernel_size, stride=1, padding=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(),
nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(out_planes),
)
def combine_block_w_do(in_planes, out_planes, dropout=0.0):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, 1, 1), nn.LeakyReLU(), nn.Dropout(dropout),
)
def combine_block_no_do(in_planes, out_planes):
return nn.Sequential(nn.Conv2d(in_planes, out_planes, 1, 1), nn.LeakyReLU(),)
def linear_block(in_features, out_features, dropout=0.0):
return nn.Sequential(
nn.Linear(in_features, out_features), nn.LeakyReLU(), nn.Dropout(dropout),
)
def linear_block_norelu(in_features, out_features):
return nn.Sequential(nn.Linear(in_features, out_features),)
def input_embedding_net(list_of_feature_sizes, dropout=0.0):
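# builds an MLP from a list of layer sizes, e.g. [64, 32, 10] -> Linear(64,32) + LeakyReLU
# + Dropout, then Linear(32,10) with no activation on the final layer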
modules = []
for i in range(len(list_of_feature_sizes) - 1):
input_size, output_size = list_of_feature_sizes[i : i + 2]
if i + 2 == len(list_of_feature_sizes):
modules.append(linear_block_norelu(input_size, output_size))
else:
modules.append(linear_block(input_size, output_size, dropout=dropout))
return nn.Sequential(*modules)
def _upsample_add(x, y):
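# bilinearly upsample x to y's spatial size and add them (FPN-style lateral merge);
# note F.upsample is deprecated in newer PyTorch in favor of F.interpolate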
_, _, H, W = y.size()
return F.upsample(x, size=(H, W), mode="bilinear") + y
def replace_all_relu_w_leakyrelu(model):
pdb.set_trace()
print("Not sure if using this is a good idea")
modules = model._modules
for m in modules.keys():
module = modules[m]
if isinstance(module, nn.ReLU):
model._modules[m] = nn.LeakyReLU()
elif isinstance(module, nn.Module):
model._modules[m] = replace_all_relu_w_leakyrelu(module)
return model
def replace_all_leakyrelu_w_relu(model):
modules = model._modules
for m in modules.keys():
module = modules[m]
if isinstance(module, nn.LeakyReLU):
model._modules[m] = nn.ReLU()
elif isinstance(module, nn.Module):
model._modules[m] = replace_all_leakyrelu_w_relu(module)
return model
def replace_all_bn_w_groupnorm(model):
pdb.set_trace()
print("Not sure if using this is a good idea")
modules = model._modules
for m in modules.keys():
module = modules[m]
if isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm1d):
feature_number = module.num_features
model._modules[m] = nn.GroupNorm(32, feature_number)
elif isinstance(module, nn.BatchNorm3d):
raise Exception("Not implemented")
elif isinstance(module, nn.Module):
model._modules[m] = replace_all_bn_w_groupnorm(module)
return model
def flat_temporal(tensor, batch_size, sequence_length):
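# collapse (batch, time, ...) into (batch*time, ...); unflat_temporal below reverses it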
tensor_shape = [s for s in tensor.shape]
assert tensor_shape[0] == batch_size and tensor_shape[1] == sequence_length
result_shape = [batch_size * sequence_length] + tensor_shape[2:]
return tensor.contiguous().view(result_shape)
def unflat_temporal(tensor, batch_size, sequence_length):
tensor_shape = [s for s in tensor.shape]
assert tensor_shape[0] == batch_size * sequence_length
result_shape = [batch_size, sequence_length] + tensor_shape[1:]
return tensor.contiguous().view(result_shape)
| disturb-free-main | armpointnav_baselines/models/manipulathor_net_utils.py |
coleridge-rich-context-ai2-master | rich-context-competition/project/__init__.py |
|
# introduce myself
print( "Publication parse example:" )
# imports
import codecs
import json
import shutil
# declare variables
publications_json_path = None
json_publication_file = None
publication_list = None
publication_counter = -1
publication_info = None
pub_date = None
unique_identifier = None
text_file_name = None
pdf_file_name = None
title = None
publication_id = None
citation_file_from = None
citation_file_to = None
# set path to publications.json
publications_json_path = "/data/input/publications.json"
# open the publications.json file
with open( publications_json_path ) as json_publication_file:
# parse it as JSON
publication_list = json.load( json_publication_file )
# loop over the elements in the list
publication_counter = 0
for publication_info in publication_list:
# increment counter
publication_counter += 1
# get information on publication:
pub_date = publication_info.get( "pub_date", None )
unique_identifier = publication_info.get( "unique_identifier", None )
text_file_name = publication_info.get( "text_file_name", None )
pdf_file_name = publication_info.get( "pdf_file_name", None )
title = publication_info.get( "title", None )
publication_id = publication_info.get( "publication_id", None )
# print.
print( "\n" )
print( "publication {}".format( publication_counter ) )
print( "- pub_date: {}".format( pub_date ) )
print( "- unique_identifier: {}".format( unique_identifier ) )
print( "- text_file_name: {}".format( text_file_name ) )
print( "- pdf_file_name: {}".format( pdf_file_name ) )
print( "- title: {}".format( codecs.encode( title, "ascii", "xmlcharrefreplace" ) ) )
print( "- publication_id: {}".format( publication_id ) )
#-- END loop over publications --#
#-- END with...as --#
# and, finally, test ability to write to "/data/output"
citation_file_from = "/rich-context-competition/evaluate/data_set_citations.json"
citation_file_to = "/data/output/data_set_citations.json"
shutil.copyfile( citation_file_from, citation_file_to )
print( "Copied from {} to {}.".format( citation_file_from, citation_file_to ) ) | coleridge-rich-context-ai2-master | rich-context-competition/project/project.py |
coleridge-rich-context-ai2-master | rich-context-competition/evaluate/__init__.py |
|
# coding: utf-8
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# Setup (imports, functions, output) - class CitationCodingEvaluation - Load JSON files - Process JSON - Evaluate (precision/recall/accuracy, precision and recall at n graph, output results to file)
# # Setup
#
# - Back to [Table of Contents](#Table-of-Contents)
# ## Setup - imports
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
# imports
import datetime
import json
import matplotlib
import matplotlib.pyplot
import numpy
import pandas as pd
import six
# scikit-learn
import sklearn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve, auc
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
GradientBoostingClassifier,
AdaBoostClassifier)
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
# ## Setup - Functions
#
# - Back to [Table of Contents](#Table-of-Contents)
# ### function plot_precision_recall_n
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
def plot_precision_recall_n(y_true, y_prob, model_name, output_path_IN = None ):
"""
y_true: list
list of ground truth labels
y_prob: list
list of predicted probabilities from the model
model_name: str
name of the model (e.g., LR_123)
"""
# imports
from sklearn.metrics import precision_recall_curve
# return reference
details_OUT = {}
# declare variables
y_score = None
precision_curve = None
recall_curve = None
pr_thresholds = None
num_above_thresh = None
pct_above_thresh = None
pct_above_per_thresh = None
current_score = None
above_threshold_list = None
above_threshold_count = -1
fig = None
ax1 = None
ax2 = None
# store the raw scores in y_score
y_score = y_prob
# calculate precision-recall curve
# http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
# Returns:
# - precision_curve - Precision values such that element i is the precision of predictions where cutoff is score >= thresholds[ i ] and the last element is 1.
# - recall_curve - Recall values such that element i is the recall of predictions where cutoff is score >= thresholds[ i ] and the last element is 0.
# - pr_thresholds - Increasing thresholds on the decision function used to decide 1 or 0, used to calculate precision and recall (looks like it is the set of unique values in the predicted value set).
precision_curve, recall_curve, pr_thresholds = precision_recall_curve( y_true, y_score )
# get all but the last precision score (1).
precision_curve = precision_curve[ : -1 ]
# print( "precision_curve: {}".format( precision_curve ) )
# get all but the last recall score (0).
recall_curve = recall_curve[ : -1 ]
# print( "recall_curve: {}".format( recall_curve ) )
# store details
details_OUT[ "precision" ] = precision_curve
details_OUT[ "recall" ] = recall_curve
details_OUT[ "threshold" ] = pr_thresholds
# init loop over thresholds
pct_above_per_thresh = []
number_scored = len(y_score)
# loop over thresholds
for value in pr_thresholds:
# at each threshold, calculate the percent of rows above the threshold.
above_threshold_list = []
above_threshold_count = -1
for current_score in y_score:
# is it at or above threshold?
if ( current_score >= value ):
# it is either at or above threshold - add to list.
above_threshold_list.append( current_score )
#-- END check to see if at or above threshold? --#
#-- END loop over scores. --#
# how many above threshold?
#num_above_thresh = len(y_score[y_score>=value])
above_threshold_count = len( above_threshold_list )
num_above_thresh = above_threshold_count
# percent above threshold
pct_above_thresh = num_above_thresh / float( number_scored )
# add to list.
pct_above_per_thresh.append( pct_above_thresh )
#-- END loop over thresholds --#
details_OUT[ "percent_above" ] = pct_above_per_thresh
# convert to numpy array
pct_above_per_thresh = numpy.array(pct_above_per_thresh)
# init matplotlib
matplotlib.pyplot.clf()
fig, ax1 = matplotlib.pyplot.subplots()
# plot precision line
ax1.plot(pct_above_per_thresh, precision_curve, 'b')
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color='b')
ax1.set_ylim(0,1.05)
# plot recall line
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, 'r')
ax2.set_ylabel('recall', color='r')
ax2.set_ylim(0,1.05)
# finish off graph
name = model_name
matplotlib.pyplot.title(name)
# is there an output path?
if ( ( output_path_IN is not None ) and ( output_path_IN != "" ) ):
# save the figure to file.
matplotlib.pyplot.savefig( output_path_IN )
#-- END check to see if we output to disk. --#
matplotlib.pyplot.show()
# clear plot.
matplotlib.pyplot.clf()
return details_OUT
#-- END function plot_precision_recall_n() --#
print( "function plot_precision_recall_n() defined at {}".format( datetime.datetime.now() ) )
# ### function threshold_at_k
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
def threshold_at_k( y_scores, k ):
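# e.g. y_scores = [0.9, 0.2, 0.7, 0.4] and k = 0.5: sorted descending is [0.9, 0.7, 0.4, 0.2]
# and the returned threshold is the value at index int(0.5 * 4) = 2, i.e. 0.4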
# return reference
value_OUT = None
# declare variables
value_list = None
threshold_index = -1
# sort values
value_list = numpy.sort( y_scores )
# reverse order of list
value_list = value_list[ : : -1 ]
# calculate index of value that is k% of the way through the sorted distribution of scores
threshold_index = int( k * len( y_scores ) )
# get value that is k% of the way through the sorted distribution of scores
value_OUT = value_list[ threshold_index ]
print( "Threshold: {}".format( value_OUT ) )
return value_OUT
#-- END function threshold_at_k() --#
print( "function threshold_at_k() defined at {}".format( datetime.datetime.now() ) )
# ### function precision_at_k
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
def precision_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate precision
value_OUT = precision_score( y_true, y_pred )
return value_OUT
#-- END function precision_at_k() --#
print( "function precision_at_k() defined at {}".format( datetime.datetime.now() ) )
# ### function recall_at_k
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
def recall_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate recall
value_OUT = recall_score( y_true, y_pred )
return value_OUT
#-- END function recall_at_k() --#
print( "function recall_at_k() defined at {}".format( datetime.datetime.now() ) )
# ### function accuracy_at_k
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
def accuracy_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate accuracy
value_OUT = accuracy_score( y_true, y_pred )
return value_OUT
#-- END function accuracy_at_k() --#
print( "function accuracy_at_k() defined at {}".format( datetime.datetime.now() ) )
# ## Setup - output
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
# DEBUG
debug_flag = False
# output_to_file flag
output_to_file = True
line_list = None
output_string = None
results_file_path = "/data/output/evaluation_results.txt"
precision_recall_graph_path = "/data/output/precision_recall_graph.pdf"
# if we are outputting to file, start line list.
if ( output_to_file == True ):
# put a list in line_list
line_list = []
#-- END init line list --#
# # class CitationCodingEvaluation
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
from citation_coding_evaluation import CitationCodingEvaluation
# # Load JSON files
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
# file paths
baseline_json_path = "./data_set_citations.json"
derived_json_path = "/data/output/data_set_citations.json"
# In[ ]:
# load the baseline JSON
baseline_json_file = None
baseline_json = None
# if output...
output_string = "Reading baseline/ground_truth file: {}".format( baseline_json_path )
print( output_string )
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output to file... --#
# baseline
with open( baseline_json_path ) as baseline_json_file:
# load the JSON from the file.
baseline_json = json.load( baseline_json_file )
#-- END with...as --#
# In[ ]:
# load the derived JSON
derived_json_file = None
derived_json = None
# if output...
output_string = "Reading derived/predicted file: {}".format( derived_json_path )
print( output_string )
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output to file... --#
# baseline
with open( derived_json_path ) as derived_json_file:
# load the JSON from the file.
derived_json = json.load( derived_json_file )
#-- END with...as --#
# In[ ]:
baseline_json
# In[ ]:
derived_json
# # Process JSON
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
# init class to handle evaluation
coding_evaluator = CitationCodingEvaluation()
coding_evaluator.debug_flag = debug_flag
# In[ ]:
# process baseline JSON
result_type = CitationCodingEvaluation.RESULT_TYPE_BASELINE
citation_json = baseline_json
status = coding_evaluator.process_citation_json( citation_json, result_type )
# output
output_string = "Processing status for {} (None = Success!): \"{}\"".format( result_type, status )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# In[ ]:
# process derived JSON
result_type = CitationCodingEvaluation.RESULT_TYPE_DERIVED
citation_json = derived_json
status = coding_evaluator.process_citation_json( citation_json, result_type )
# output
output_string = "Processing status for {} (None = Success!): \"{}\"".format( result_type, status )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# # Evaluate
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
# create lists
status = coding_evaluator.create_evaluation_lists()
baseline_list = coding_evaluator.get_baseline_list()
derived_raw_list = coding_evaluator.get_derived_raw_list()
derived_binary_list = coding_evaluator.get_derived_binary_list()
# ## precision, recall, and accuracy
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
# calculation methods
CALCULATION_METHOD_DEFAULT = "default"
CALCULATION_METHOD_BINARY = "binary"
CALCULATION_METHOD_MACRO = "macro"
CALCULATION_METHOD_MICRO = "micro"
CALCULATION_METHOD_WEIGHTED = "weighted"
# calculation methods to include
calculation_methods = []
calculation_methods.append( CALCULATION_METHOD_DEFAULT )
calculation_methods.append( CALCULATION_METHOD_BINARY )
#calculation_methods.append( CALCULATION_METHOD_MACRO )
#calculation_methods.append( CALCULATION_METHOD_MICRO )
#calculation_methods.append( CALCULATION_METHOD_WEIGHTED )
# confusion matrix
cm = metrics.confusion_matrix( baseline_list, derived_binary_list )
# output
output_string = "\nConfusion matrix:\n{}\n\nBinary Key:\n[[ TN, FP ]\n [ FN, TP ]]".format( cm )
print( output_string )
# if output to file...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# loop over calculation methods
method_to_result_map = {}
for calculation_method in calculation_methods:
# output
output_string = "\n==> {}".format( calculation_method )
print( output_string )
# if output to file...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# binary? If so, do basic calculations as sanity check.
if ( calculation_method == CALCULATION_METHOD_BINARY ):
# calculate precision, recall, accuracy...
# ==> precision
precision = metrics.precision_score( baseline_list, derived_binary_list )
# output
output_string = "\n- {} metrics.precision_score = {}".format( calculation_method, precision )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# ==> recall
recall = metrics.recall_score( baseline_list, derived_binary_list )
# output
output_string = "- {} metrics.recall_score = {}".format( calculation_method, recall )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# ==> accuracy
accuracy = metrics.accuracy_score( baseline_list, derived_binary_list )
# output
output_string = "- {} metrics.accuracy_score = {}".format( calculation_method, accuracy )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
#-- END check to see if CALCULATION_METHOD_BINARY --#
# calculate based on calculation method.
# default?
if ( calculation_method == CALCULATION_METHOD_DEFAULT ):
# default metrics and F-Score - default returns a list for each of
# the scores per label, so get list and output, don't pick one or
# another value.
default_evaluation = metrics.precision_recall_fscore_support( baseline_list, derived_binary_list )
default_precision_list = default_evaluation[ 0 ]
default_recall_list = default_evaluation[ 1 ]
default_F1_list = default_evaluation[ 2 ]
default_support_list = default_evaluation[ 3 ]
# output lists
output_string = "\ndefault lists:"
output_string += "\n- precision list = {}".format( default_precision_list )
output_string += "\n- recall list = {}".format( default_recall_list )
output_string += "\n- F1 list = {}".format( default_F1_list )
output_string += "\n- support list = {}".format( default_support_list )
# add to results map
method_to_result_map[ calculation_method ] = default_evaluation
# look at length of lists (should all be the same).
precision_list_length = len( default_precision_list )
recall_list_length = len( default_recall_list )
F1_list_length = len( default_F1_list )
output_string += "\n\nlist lengths: {}".format( precision_list_length )
if ( precision_list_length > 2 ):
# binary, but list is greater than 2, not binary - output message.
output_string += "\n- NOTE: default output lists have more than two entries - your data is not binary."
#-- END check to see if list length greater than 2 --#
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# all others are just argument to "average" parameter, result in one number per
# derived score. For now, implement them the same.
else:
# F-Score
evaluation_tuple = metrics.precision_recall_fscore_support( baseline_list, derived_binary_list, average = calculation_method )
precision = evaluation_tuple[ 0 ]
recall = evaluation_tuple[ 1 ]
F1 = evaluation_tuple[ 2 ]
support = evaluation_tuple[ 3 ]
# add to results map
method_to_result_map[ calculation_method ] = evaluation_tuple
# output
output_string = "\n{}: precision = {}, recall = {}, F1 = {}, support = {}".format( calculation_method, precision, recall, F1, support )
print( output_string )
# if output to file...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
#-- END default F-Score --#
#-- END loop over calculation_methods --#
# ## output results to file
#
# - Back to [Table of Contents](#Table-of-Contents)
# In[ ]:
# declare variables
results_file = None
line_list_string = None
# do we output to file?
if ( output_to_file == True ):
# yes. open output file.
with open( results_file_path, mode = "w" ) as results_file:
# join line list with "/n", then write.
line_list_string = "\n".join( line_list )
results_file.write( line_list_string )
#-- END with...as --#
#-- END check to see if we output to file --#
| coleridge-rich-context-ai2-master | rich-context-competition/evaluate/evaluate_data_set_coding.py |
# imports
import json
import numpy
import six
import sklearn
from sklearn import metrics
class CitationCodingEvaluation( object ):
#============================================================================
# CONSTANTS-ish
#============================================================================
# result types
RESULT_TYPE_BASELINE = "baseline"
RESULT_TYPE_DERIVED = "derived"
VALID_RESULT_TYPE_LIST = []
VALID_RESULT_TYPE_LIST.append( RESULT_TYPE_BASELINE )
VALID_RESULT_TYPE_LIST.append( RESULT_TYPE_DERIVED )
# JSON property names
JSON_NAME_PUBLICATION_ID = "publication_id"
JSON_NAME_DATA_SET_ID = "data_set_id"
JSON_NAME_SCORE = "score"
JSON_NAME_DATA_SET_MAP = "data_set_map"
#============================================================================
# ! ==> Built-in Instance methods
#============================================================================
def __init__( self, *args, **kwargs ):
# initialize variables
self.debug_flag = False
self.m_citation_map = {}
self.m_baseline_list = []
self.m_derived_binary_list = []
self.m_derived_raw_list = []
self.m_cutoff = 0.0
#-- END method __init__() --#
def __str__( self, fancy_print_IN = True, *args, **kwargs ):
# return reference
string_OUT = ""
# note the class
string_OUT = "CitationCodingEvaluation"
return string_OUT
#-- END method __str__() --#
#============================================================================
# instance methods
#============================================================================
def get_baseline_list( self ):
# return reference
value_OUT = None
# declare variables
instance = None
# get instance
value_OUT = self.m_baseline_list
# got anything?
if ( value_OUT is None ):
# make list instance.
instance = []
# store the instance.
self.set_baseline_list( instance )
# get the instance.
value_OUT = self.get_baseline_list()
#-- END check to see if instance initialized. --#
return value_OUT
#-- END method get_baseline_list --#
def get_citation_map( self ):
# return reference
value_OUT = None
# declare variables
dict_instance = None
# get m_dictionary
value_OUT = self.m_citation_map
# got anything?
if ( value_OUT is None ):
# make dictionary instance.
dict_instance = {}
# store the instance.
self.set_citation_map( dict_instance )
# get the instance.
value_OUT = self.get_citation_map()
#-- END check to see if dictionary initialized. --#
return value_OUT
#-- END method get_citation_map --#
def get_cutoff( self ):
# return reference
value_OUT = None
# get m_dictionary
value_OUT = self.m_cutoff
return value_OUT
#-- END method get_cutoff --#
def get_derived_binary_list( self ):
# return reference
value_OUT = None
# declare variables
instance = None
# get instance
value_OUT = self.m_derived_binary_list
# got anything?
if ( value_OUT is None ):
# make list instance.
instance = []
# store the instance.
self.set_derived_binary_list( instance )
# get the instance.
value_OUT = self.get_derived_binary_list()
#-- END check to see if instance initialized. --#
return value_OUT
#-- END method get_derived_binary_list --#
def get_derived_raw_list( self ):
# return reference
value_OUT = None
# declare variables
instance = None
# get instance
value_OUT = self.m_derived_raw_list
# got anything?
if ( value_OUT is None ):
# make list instance.
instance = []
# store the instance.
self.set_derived_raw_list( instance )
# get the instance.
value_OUT = self.get_derived_raw_list()
#-- END check to see if instance initialized. --#
return value_OUT
#-- END method get_derived_raw_list --#
def create_evaluation_lists( self ):
# return reference
status_OUT = None
# declare variables
citation_map = None
baseline_list = None
derived_binary_list = None
derived_raw_list = None
cutoff_value = None
publication_id_list = None
publication_id = None
publication_dict = None
data_set_map = None
data_set_id_list = None
data_set_id = None
data_set_found_map = None
baseline_score = -1
derived_score = -1
# get citation_map
citation_map = self.get_citation_map()
# init lists
baseline_list = self.set_baseline_list( [] )
derived_binary_list = self.set_derived_binary_list( [] )
derived_raw_list = self.set_derived_raw_list( [] )
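# baseline_list holds the gold 0/1 labels, derived_raw_list the predicted scores,
# and derived_binary_list the scores thresholded at the cutoff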
# cutoffs
cutoff_value = self.get_cutoff()
# so we can get publication ID list
publication_id_list = list( six.viewkeys( citation_map ) )
publication_id_list.sort()
# loop over publications, and then data sets within.
for publication_id in publication_id_list:
# DEBUG
if ( self.debug_flag == True ):
print( "Publication ID: {}".format( publication_id ) )
#-- END DEBUG --#
# get publication map
publication_dict = citation_map.get( publication_id, None )
# get the data set map and ID list.
data_set_map = publication_dict.get( self.JSON_NAME_DATA_SET_MAP, None )
data_set_id_list = list( six.viewkeys( data_set_map ) )
data_set_id_list.sort()
# loop over data set ID list.
for data_set_id in data_set_id_list:
# DEBUG
if ( self.debug_flag == True ):
print( "==> Data Set ID: {}".format( data_set_id ) )
#-- END DEBUG --#
# get the data_set_found_map
data_set_found_map = data_set_map.get( data_set_id, None )
# get the scores.
baseline_score = data_set_found_map.get( self.RESULT_TYPE_BASELINE, 0.0 )
derived_score = data_set_found_map.get( self.RESULT_TYPE_DERIVED, 0.0 )
# DEBUG
if ( self.debug_flag == True ):
print( " baseline: {}".format( baseline_score ) )
print( " derived.: {}".format( derived_score ) )
#-- END DEBUG --#
# add them to the lists
baseline_list.append( baseline_score )
derived_raw_list.append( derived_score )
if derived_score > cutoff_value:
derived_binary_list.append( 1.0 )
else:
derived_binary_list.append( 0.0 )
#-- END binary value assignment --#
#-- END loop over data set IDs. --#
#-- END loop over publication IDs. --#
return status_OUT
#-- END method create_evaluation_lists() --#
def process_citation_json( self, citation_list_json_IN, result_type_IN ):
# return reference
status_OUT = None
# declare variables
status_string = None
citation_map = None
publication_id = None
data_set_id = None
raw_score = None
citation_json = None
raw_score = None
publication_dict = None
# make sure we have output map.
citation_map = self.get_citation_map()
if ( citation_map is not None ):
# make sure we have JSON
if ( citation_list_json_IN is not None ):
# valid result type?
if ( ( result_type_IN is not None )
and ( result_type_IN != "" )
and ( result_type_IN in self.VALID_RESULT_TYPE_LIST ) ):
# loop over citation items in
for citation_json in citation_list_json_IN:
# get the publication ID, data set ID, and score.
publication_id = citation_json.get( self.JSON_NAME_PUBLICATION_ID )
data_set_id = citation_json.get( self.JSON_NAME_DATA_SET_ID )
raw_score = citation_json.get( self.JSON_NAME_SCORE, None )
# look up the publication in the publication to data set map.
if publication_id not in citation_map:
# init publication dictionary.
publication_dict = {}
publication_dict[ self.JSON_NAME_PUBLICATION_ID ] = publication_id
publication_dict[ self.JSON_NAME_DATA_SET_MAP ] = {}
# store it in the map
citation_map[ publication_id ] = publication_dict
else:
# get the dictionary.
publication_dict = citation_map.get( publication_id, None )
#-- END check to see if publication is in list. --#
# retrieve citation map and id list.
data_set_map = publication_dict.get( self.JSON_NAME_DATA_SET_MAP, None )
# check to see if data set ID is in the map.
if ( data_set_id not in data_set_map ):
# no - make a dictionary and add it.
data_set_found_map = {}
data_set_found_map[ self.JSON_NAME_DATA_SET_ID ] = data_set_id
data_set_found_map[ self.RESULT_TYPE_BASELINE ] = 0.0
data_set_found_map[ self.RESULT_TYPE_DERIVED ] = 0.0
data_set_map[ data_set_id ] = data_set_found_map
else:
# get it.
data_set_found_map = data_set_map.get( data_set_id, None )
#-- END check to see if in map --#
# update the found map.
if ( raw_score is not None ):
# yes - store it.
data_set_found_map[ result_type_IN ] = raw_score
else:
# no - binary - FOUND! (1.0)
data_set_found_map[ result_type_IN ] = 1.0
#-- END check to see if raw score or not. --#
#-- END loop over citations in JSON --#
else:
status_OUT = "ERROR - result type of {} is not valid. Should be one of: {}".format( result_type_IN, VALID_RESULT_TYPE_LIST )
#-- END check to see if valid result_type_IN --#
else:
status_OUT = "WARNING - no JSON passed in, nothing to do."
#-- END check to see if JSON passed in --#
else:
status_OUT = "ERROR - no citation map, returning None."
#-- END check to see if citation map passed in. --#
if ( ( self.debug_flag == True ) and ( status_OUT is not None ) and ( status_OUT != "" ) ):
print( status_OUT )
#-- END check to see if status set --#
return status_OUT
#-- END method process_citation_json() --#
def set_baseline_list( self, instance_IN ):
'''
Accepts list. Stores it and returns it.
'''
# return reference
value_OUT = None
# use store dictionary.
self.m_baseline_list = instance_IN
# return it.
value_OUT = self.m_baseline_list
return value_OUT
#-- END method set_baseline_list() --#
def set_citation_map( self, instance_IN ):
'''
Accepts dictionary. Stores it and returns it.
'''
# return reference
value_OUT = None
# use store dictionary.
self.m_citation_map = instance_IN
# return it.
value_OUT = self.m_citation_map
return value_OUT
#-- END method set_citation_map() --#
def set_cutoff( self, value_IN ):
'''
Accepts value. Stores it and returns it.
'''
# return reference
value_OUT = None
# use store dictionary.
self.m_cutoff = value_IN
# return it.
value_OUT = self.m_cutoff
return value_OUT
#-- END method set_cutoff() --#
def set_derived_binary_list( self, instance_IN ):
'''
Accepts list. Stores it and returns it.
'''
# return reference
value_OUT = None
# use store dictionary.
self.m_derived_binary_list = instance_IN
# return it.
value_OUT = self.m_derived_binary_list
return value_OUT
#-- END method set_derived_binary_list() --#
def set_derived_raw_list( self, instance_IN ):
'''
Accepts list. Stores it and returns it.
'''
# return reference
value_OUT = None
# use store dictionary.
self.m_derived_raw_list = instance_IN
# return it.
value_OUT = self.m_derived_raw_list
return value_OUT
#-- END method set_derived_raw_list() --#
#-- END class CitationCodingEvaluation --#
| coleridge-rich-context-ai2-master | rich-context-competition/evaluate/citation_coding_evaluation.py |
from typing import List
from s2base.scispacy_util import SciSpaCyParser
import textacy
import spacy
import os
import json
from collections import defaultdict
from collections import Counter as mset
import numpy as np
import Levenshtein
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.externals import joblib
from tqdm import tqdm
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import cosine_similarity
import re
DECADE_REGEX = re.compile(r' (\d+)s ')
def identity_function(x):
"""Identity function, used for the tfdif vectorizer"""
return x
def get_tfidf_vectorizer():
"""Get or create tfidf vectorizer"""
vectorizer_path = os.path.abspath(os.path.join("project", "tfidf_titles.pkl"))
if os.path.isfile(vectorizer_path):
vectorizer = joblib.load(vectorizer_path)
else:
vectorizer = build_tfidf_vectorizer()
joblib.dump(vectorizer, vectorizer_path)
return vectorizer
def build_tfidf_vectorizer():
"""Build the tfidf vectorizer based on dataset titles"""
kb_path = os.path.abspath(os.path.join("project", "data", "data_sets.json"))
with open(kb_path) as kb_file_:
kb = json.load(kb_file_)
nlp = spacy.load("en_scispacy_core_web_sm")
token_lists = []
for dataset in tqdm(kb, desc="Tokenizing kb titles for tfidf"):
title_text = text_preprocess(dataset["title"])
title_doc = nlp(title_text)
title_tokens = [t.text.lower() for t in title_doc]
token_lists.append(title_tokens)
tfidf = TfidfVectorizer(analyzer='word', tokenizer=identity_function, preprocessor=identity_function, token_pattern=None, norm='l2')
tfidf = tfidf.fit(token_lists)
return tfidf
def text_preprocess(text: str):
"""Preprocess text to remove punctuation and lower case"""
text = textacy.preprocess.remove_punct(text)
text = text.replace("\n", " ").replace("\t", " ").replace(",", " ").replace("|", " ")
text = text.replace(":", " ").replace(".", " ").replace("\xad", " ").replace("\\", " ")
text = DECADE_REGEX.sub(r' \1 ', text)
text = text.lower().rstrip()
text = ' '.join(text.split())
return text
def strip_numbers(text: str):
"""Strip numbers from a piece of text"""
text = text.replace("0", "").replace("1", "").replace("2", "").replace("3", "").replace("4", "").replace("5", "")
text = text.replace("6", "").replace("7", "").replace("8", "").replace("9", "")
return text
def get_substring_candidates(all_ids: List[int],
all_titles: List[str],
all_titles_tokenized: List[List[str]],
mention: str,
sentence: List[str],
stopwords: set,
scispacy_parser: SciSpaCyParser,
tfidf_vectorizer: TfidfVectorizer):
"""Get candidate datasets for a given mention, based on tfidf weighted token overlap with dataset titles
@param all_ids: list of all dataset ids
@param all_titles: list of all dataset titles preprocessed
@param all_titles_tokenized: list of all titles tokenized
@param mention: the mention text
@param sentence: the sentence that the mention text came from
@param stopwords: set of stopwords to filter out
@param scispacy_parser: instance of a scispacy parser
@param tfidf_vectorizer: an already fit tfidf build_tfidf_vectorizer
"""
tokens = [t.text for t in scispacy_parser.scispacy_create_doc(mention)]
candidate_ids = []
candidate_set = set()
dataset_id_to_scores = defaultdict(list)
sentence = text_preprocess(' '.join(sentence)).split()
tfidf_sentence = tfidf_vectorizer.transform([sentence])
tfidf_mention = tfidf_vectorizer.transform([tokens])
sentence_candidate_scores = [(id, sum([tfidf_sentence[0, tfidf_vectorizer.vocabulary_[token]] for token in sentence
if token not in stopwords and token in tfidf_vectorizer.vocabulary_ and token in title]))
for id, title in zip(all_ids, all_titles_tokenized)]
mention_candidate_scores = [(id, sum([tfidf_mention[0, tfidf_vectorizer.vocabulary_[token]] for token in tokens
if token not in stopwords and token in tfidf_vectorizer.vocabulary_ and token in title]))
for id, title in zip(all_ids, all_titles_tokenized)]
candidate_scores = [(mention_score[0], sentence_score[1]*mention_score[1])
for sentence_score, mention_score in zip(sentence_candidate_scores, mention_candidate_scores)]
ids = [candidate[0] for candidate in candidate_scores]
scores = [candidate[1] for candidate in candidate_scores]
return ids, scores | coleridge-rich-context-ai2-master | project/text_utils.py |
"""This file contains a class for the rule based model for generating dataset extraction candidates
"""
import os
import json
from fuzzywuzzy import fuzz
import nltk
from nltk.corpus import stopwords
import re
from s2base import scispacy_util
from tqdm import tqdm
from sklearn.externals import joblib
from spacy.lang import char_classes
import codecs
# Results on test set (organizers' dev fold)
# v1: simple exact string matching on uid, name, mentions with all dev fold mentions excluded: precision = 0.0556640625, recall = 0.4634146341463415, F1 = 0.09938971229293812
# v2: simple exact string matching on uid, name, mentions with all mentions in train set or not in dev fold (accounts for overlap between mentions in train and dev fold): precision = 0.06348314606741573, recall = 0.9186991869918699, F1 = 0.1187598528638991
# v3: simple exact string matching on uid, name, mentions with all mentions in train set (excludes mentions that are not present in either train or dev fold): precision = 0.06848484848484848, recall = 0.9186991869918699, F1 = 0.12746756909193455
# v4: exact string matching like v3, plus filter datasets with a year mentioned that is not present in the text: precision = 0.11098527746319366, recall = 0.7967479674796748, F1 = 0.194831013916501
# v5: exact string matching like v3, plus filter datasets with a year mentioned that is not present in the text, and no years mentioned that are present in the text: precision = 0.09403862300587741, recall = 0.9105691056910569, F1 = 0.17047184170471838
# v6: v5 + filter out mentions that are single, common english words: precision = 0.10526315789473684, recall = 0.9105691056910569, F1 = 0.18871103622577926
# v7: v6 + ignore case: Precision: 0.027991082486995295, Recall: 0.9186991869918699, Accuracy: 0.027921917469730665, F1: 0.05432692307692308
# v8 (best recall): v6 + extra modifications to the mentions based on to_conll work: Precision: 0.10079575596816977, Recall: 0.926829268292683, Accuracy: 0.1, F1: 0.18181818181818185
# v9: v8 + excluding shorter, overlapping (0.8 or 1.0 threshold) mention lists: Precision: 0.12280701754385964, Recall: 0.5691056910569106, Accuracy: 0.11235955056179775, F1: 0.20202020202020204
# v10 (best precision, best f1): v8 + try to exclude references section from text to search: Precision: 0.13820078226857888, Recall: 0.8617886178861789, Accuracy: 0.13520408163265307, F1: 0.2382022471910112
# Results on dev set
# v8: Precision: 0.06193330259720301, Recall: 0.8915929203539823, Accuracy: 0.061470408785845025, F1: 0.11582123868371894
# v10: Precision: 0.13820078226857888, Recall: 0.8617886178861789, Accuracy: 0.13520408163265307, F1: 0.2382022471910112
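# matches a standalone 4-digit year: the lookarounds reject digits that are part of
# longer numbers or decimals (e.g. "12010" and "2010.5" do not yield a match for 2010)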
YEAR_PATTERN = r"((?<![.|\d])\d\d\d\d(?![.|\d]))"
class RuleBasedModel():
"""This class can be used to predict dataset extractions based on string search for mentions from the training set
"""
def __init__(self, train_path, dev_path, kb_path, test_path = None):
# path to the data folder for the train set
self.train_path = train_path
# path to the data folder for the dev set
self.dev_path = dev_path
# path to the json kb file
self.kb_path = kb_path
# optional path to the data folder for the test set
# if this argument is passed in, the model will use mentions from the dev
# and train set to make predictions on the test set.
# Otherwise it will use mentions from the train set to make predictions on the
# test set
self.test_path = test_path
# set of unique mentions in the dev set
self._dev_set_mentions = set()
self._build_dev_set_mentions()
# set of unique mentions in the train set
self._train_set_mentions = set()
self._build_train_set_mentions()
# set of unique mentions in the entire kb
self._all_mentions = set()
self._build_all_mentions()
# dictionary mapping dataset id to a set of mentions of that dataset
self._id_to_mentions = {}
self._build_id_to_mentions()
# set of english stopwords
self._stopwords = set(stopwords.words('english'))
# an instance of a scispacy parser
self._scispacy_parser = scispacy_util.SciSpaCyParser()
# dictionary mapping mention to the number of datasets it is a mention for
self._mention_dataset_count = {}
# the total number of datasets
self._dataset_count = 0
self._build_mention_dataset_count()
# precompile mention regexes
self._dataset_id_to_regexes = {}
for dataset_id in self._id_to_mentions:
compiled_res = []
for mention in self._id_to_mentions[dataset_id]:
mention_patterns = self._build_mention_patterns(mention)
for pattern in mention_patterns:
compiled_re = re.compile(pattern)
compiled_res.append(compiled_re)
self._dataset_id_to_regexes[dataset_id] = compiled_res
def compare_mention_lists(self):
"""Function to print out counts of unique mentions in train and
dev sets.
"""
print("Train:", len(self._train_set_mentions))
print("Dev:", len(self._dev_set_mentions))
print("Intersection:", len(self._train_set_mentions.intersection(self._dev_set_mentions)))
print("Train - Dev:", len(self._train_set_mentions - self._dev_set_mentions))
print("Dev - Train:", len(self._dev_set_mentions - self._train_set_mentions))
total_set = set()
for key in self._id_to_mentions:
for mention in self._id_to_mentions[key]:
total_set.add(mention)
print("Total:", len(total_set))
def _build_id_to_mentions(self):
"""Helper function to build the dictionary mapping dataset id
to sets of mentions of that dataset
"""
with open(self.kb_path) as kb_file:
json_kb = json.load(kb_file)
for dataset in json_kb:
dataset_id = dataset["data_set_id"]
name = dataset["name"]
uid = dataset["unique_identifier"]
mention_list = dataset["mention_list"]
self._id_to_mentions[dataset_id] = set()
# add uid as a "mention"
self._id_to_mentions[dataset_id].add(uid)
# add dataset name as a "mention"
self._id_to_mentions[dataset_id].add(name)
# add all actual mentions as mentions
for mention in mention_list:
if (mention in self._train_set_mentions) or (self.test_path != None and mention in self._dev_set_mentions):
self._id_to_mentions[dataset_id].add(mention)
def _build_dev_set_mentions(self):
"""Helper function to build the set of dev set mentions
"""
dev_labels_path = os.path.join(self.dev_path, "data_set_citations.json")
with open(dev_labels_path) as dev_labels_file:
json_dev_labels = json.load(dev_labels_file)
for pair in json_dev_labels:
for mention in pair["mention_list"]:
self._dev_set_mentions.add(mention)
def _build_train_set_mentions(self):
"""Helper funciton to build the set of train set mentions
"""
train_labels_path = os.path.join(self.train_path, "data_set_citations.json")
with open(train_labels_path) as train_labels_file:
json_train_labels = json.load(train_labels_file)
for pair in json_train_labels:
for mention in pair["mention_list"]:
self._train_set_mentions.add(mention)
def _build_all_mentions(self):
"""Helper funciton to build the set of all mentions in the kb
"""
with open(self.kb_path) as kb_file:
json_kb = json.load(kb_file)
for dataset in json_kb:
for mention in dataset["mention_list"]:
self._all_mentions.add(mention)
def _make_citation_dict(self, pub_id, dataset_id, mention_list, score):
"""Helper function to create the dictionary format that evaluation script expects
for one prediction
@param pub_id: the publication id for this publication - dataset pair
@param dataset_id: the dataset id for this publication - dataset pair
@param mention_list: a list of mention texts supporting this pair
@param score: the prediction score given to this prediction
"""
return {"publication_id": int(pub_id), "data_set_id": int(dataset_id), "mention_list": mention_list, "score": float(score)}
def _get_years_from_text(self, text):
"""Parses a set of candidate years included in text
@param text: the text to search for years in
"""
matches_year = re.findall(YEAR_PATTERN, text)
hyphens = r'(?:\-+|\—+|\-|\–|\—|\~)'
matches_range = re.findall(r"(?<!/)\d\d\d\d{h}\d\d\d\d(?!/)".format(h=hyphens), text)
years_found = set([int(match) for match in matches_year])
# also include all years in any range of years found
for year_range in matches_range:
try:
start, end = re.split(hyphens, year_range)
except:
print("Failed to split:", year_range)
continue
for year in range(int(start), int(end)+1):
years_found.add(year)
# filter candidates years to be between 1000 and 2019
filtered_years_found = set()
for year in years_found:
if not (year < 1000 or year > 2019):
filtered_years_found.add(year)
return filtered_years_found
def _build_mention_patterns(self, mention):
"""Builds patterns to search for in a text based on one mention
@param mention: the raw text of the mention to search for
"""
replaced_mention = mention
replaced_mention = self._scispacy_parser.preprocess_text(replaced_mention)
# replace tokens in the mention text with what they show up as in the actual text
replaced_mention = replaced_mention.replace("\\", "")
replaced_mention = replaced_mention.replace("\u2afd", " ")
replaced_mention = replaced_mention.replace("\u2013", "\xad")
replaced_mention = replaced_mention.replace("\ufb01", "fi")
replaced_mention = re.sub(r"(?<=\S)-\s", "", replaced_mention)
# commented out because it does not change performance on the test set, and makes the model 6x slower
# but I still think it might be correct and the test set just isn't representative of every scenario
# xad_1 = replaced_mention.replace("\xad-", "\xad")
# xad_2 = replaced_mention.replace("-\xad", "\xad")
# plural_mention = replaced_mention + "s"
# plural_xad_1 = xad_1 + "s"
# plural_xad_2 = xad_2 + "s"
# patterns_without_new_lines = [replaced_mention, xad_1, xad_2, plural_mention, plural_xad_1, plural_xad_2]
# build a regex pattern with an optional new line/space between each character to allow for mentions to be
# split over lines
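# e.g. the mention "ADD Health" yields the pattern ADD[\n|\s]*Health, so it still
# matches when the mention is wrapped across a line break in the pdf-extracted text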
patterns_without_new_lines = [replaced_mention]
patterns_with_new_lines = []
for pattern_without_new_line in patterns_without_new_lines:
pattern = r""
for c in pattern_without_new_line:
if c == " ":
pattern += r"[\n|\s]*"
else:
pattern += re.escape(c)
patterns_with_new_lines.append(pattern)
return patterns_with_new_lines
def _build_dataset_id_to_mention_list_in_text_v8(self, text):
"""Builds a dictionary mapping dataset id to a list of mentions of that dataset in a text
@param text: the text to search for mentions in
"""
dataset_to_mention = {}
self._document_count = 0
for dataset_id in self._dataset_id_to_regexes:
for regex in self._dataset_id_to_regexes[dataset_id]:
match = regex.search(text)
# string matching is significantly faster, but regex is required to allow for new
# lines between characters/words in a mention
# The commented out code below is the string matching version of this search
# if mention_pattern in text:
if match:
if dataset_id in dataset_to_mention:
dataset_to_mention[dataset_id].append(match.group(0))
# dataset_to_mention[dataset_id].append(mention_pattern)
else:
dataset_to_mention[dataset_id] = [match.group(0)]
# dataset_to_mention[dataset_id] = [mention_pattern]
return dataset_to_mention
def _build_dataset_id_to_mention_list_in_text(self, text):
"""Builds a dictionary mapping dataset id to a list of mentions of that dataset in a text
Note: this function just looks for the actual mention text without any augmentation,
as opposed to the above function that augments the raw mention text
@param text: the text to search for mentions in
"""
dataset_to_mention = {}
self._document_count = 0
for dataset_id in self._id_to_mentions:
for mention in self._id_to_mentions[dataset_id]:
mention = self._scispacy_parser.preprocess_text(mention)
if mention in text:
if dataset_id in dataset_to_mention:
dataset_to_mention[dataset_id].append(mention)
else:
dataset_to_mention[dataset_id] = [mention]
return dataset_to_mention
def _build_mention_dataset_count(self):
"""Builds a dictionary mapping mention text to the number of datasets it is a mention for
"""
with open(self.kb_path) as kb_file:
json_kb = json.load(kb_file)
for dataset in json_kb:
self._dataset_count += 1
dataset_id = dataset["data_set_id"]
name = dataset["name"]
uid = dataset["unique_identifier"]
mention_list = dataset["mention_list"]
for mention in mention_list:
if (mention in self._train_set_mentions) or (self.test_path != None and mention in self._dev_set_mentions):
if mention in self._mention_dataset_count:
self._mention_dataset_count[mention] += 1
else:
self._mention_dataset_count[mention] = 1
def filter_common_words_keep(self, mentions):
"""Returns True if the list of mentions has at least one mention that is not
a single, common, English word
@param mentions: the list of mentions to search over
"""
for mention in mentions:
# uses the scispacy parser vocab as proxy for "single, common, English word"
if not (mention in self._scispacy_parser.nlp.vocab):
return True
return False
def dataset_has_year_not_in_text(self, years_found, dataset):
"""Returns True if the input dataset's mentions have a year in them that is
not found in the text
@param years_found: set of years found in the text
@param dataset: dataset id of the dataset of interest
"""
for mention in self._id_to_mentions[dataset]:
years_in_mention = re.findall(YEAR_PATTERN, mention)
years_in_mention = [int(year) for year in years_in_mention]
for year in years_in_mention:
if year not in years_found:
return True
return False
def dataset_year_filter_keep(self, years_found, dataset):
"""More conservative version of the above function. Returns True if the input
dataset's mentions have a year in them that is not found in the text, and do
not have any years in them that are found in the text
@param years_found: set of years found in the text
@param dataset: dataset id of the dataset of interest
"""
bad_year_found = False
for mention in self._id_to_mentions[dataset]:
years_in_mention = re.findall(YEAR_PATTERN, mention)
years_in_mention = [int(year) for year in years_in_mention]
for year in years_in_mention:
if year in years_found:
return True
else:
bad_year_found = True
return not bad_year_found
def _filter_references_section(self, text):
"""Helper function to return the text with the references section stripped out.
It is probably not perfect, but it looks for the last instance of 'reference'
and removes all text after it
@param text: the text to filter the references section from
"""
references_pattern = r"(REFERENCE)|(reference)|(Reference)"
references_found = [i.start() for i in re.finditer(references_pattern, text)]
if references_found != []:
last_reference_index = references_found[-1]
return text[:last_reference_index]
else:
return text
def predict_v3(self, text_file_path):
"""Model v3: this version of the model does exact string matching for dataset names,
dataset uids, and mentions from the training set
@param text_file_path: path to the text file to predict for
"""
pub_id = text_file_path.split(".")[-2].split("/")[-1]
citation_list = []
with open(text_file_path) as text_file:
text = text_file.read()
for dataset_id in self._id_to_mentions:
for mention in self._id_to_mentions[dataset_id]:
if mention in text:
citation = self._make_citation_dict(pub_id, dataset_id, [mention], 1.0)
citation_list.append(citation)
break
return citation_list
def predict_v6(self, text_file_path):
"""Model v6: This version of the model does exact string matching like v3, plus
filters out datasets that do not mention any years from the text and do mention
a year not in the text, plus filters out datasets that are only supported by single,
common word mentions
@param text_file_path: path to the text file to predict for
"""
pub_id = text_file_path.split(".")[-2].split("/")[-1]
citation_list = []
with open(text_file_path) as text_file:
text = text_file.read()
text = self._scispacy_parser.preprocess_text(text)
dataset_to_mention = self._build_dataset_id_to_mention_list_in_text(text)
filtered_years_found = self._get_years_from_text(text)
for dataset in dataset_to_mention:
if self.dataset_year_filter_keep(filtered_years_found, dataset):
if self.filter_common_words_keep(dataset_to_mention[dataset]):
citation = self._make_citation_dict(pub_id, dataset, dataset_to_mention[dataset], 1.0)
citation_list.append(citation)
return citation_list
def predict_v8(self, text_file_path):
"""Model v8: This version of the model is v6, with some extra augmentations to the mentions
that are searched for in the text. These augmentations are based on findings when creating
the conll files
@param text_file_path: path to the text file to predict for
"""
pub_id = text_file_path.split(".")[-2].split("/")[-1]
citation_list = []
with open(text_file_path) as text_file:
text = text_file.read()
text = self._scispacy_parser.preprocess_text(text)
# the difference from v6 is here
dataset_to_mention = self._build_dataset_id_to_mention_list_in_text_v8(text)
filtered_years_found = self._get_years_from_text(text)
for dataset in dataset_to_mention:
if self.dataset_year_filter_keep(filtered_years_found, dataset):
if self.filter_common_words_keep(dataset_to_mention[dataset]):
citation = self._make_citation_dict(pub_id, dataset, dataset_to_mention[dataset], 1.0)
citation_list.append(citation)
return citation_list
def predict_v9(self, text_file_path):
"""Model v9: This version of the model is v8, pluse excluding datasets whose found mentions
are a subset of another dataset's found mentions
@param text_file_path: path to the text file to predict for
"""
pub_id = text_file_path.split(".")[-2].split("/")[-1]
citation_list = []
with open(text_file_path) as text_file:
text = text_file.read()
text = self._scispacy_parser.preprocess_text(text)
dataset_to_mention = self._build_dataset_id_to_mention_list_in_text_v8(text)
filtered_years_found = self._get_years_from_text(text)
filtered_dataset_to_mention = {}
for dataset in dataset_to_mention:
if self.dataset_year_filter_keep(filtered_years_found, dataset):
if self.filter_common_words_keep(dataset_to_mention[dataset]):
filtered_dataset_to_mention[dataset] = dataset_to_mention[dataset]
datasets_to_output = {}
for dataset_1 in filtered_dataset_to_mention:
better_dataset_found = False
for dataset_2 in filtered_dataset_to_mention:
if dataset_1 == dataset_2:
continue
else:
mention_list_1 = set(filtered_dataset_to_mention[dataset_1])
mention_list_2 = set(filtered_dataset_to_mention[dataset_2])
overlap_percent = len(mention_list_1.intersection(mention_list_2)) / float(len(mention_list_1))
if overlap_percent == 1.0 and len(mention_list_2) > len(mention_list_1):
better_dataset_found = True
if not better_dataset_found:
citation = self._make_citation_dict(pub_id, dataset_1, filtered_dataset_to_mention[dataset_1], 1.0)
citation_list.append(citation)
return citation_list
def predict_v10(self, text_file_path):
"""Model v10: This version of the model is v8 plus an attempt to not search for mentions
in the references section
@param text_file_path: path to the text file to predict for
"""
pub_id = text_file_path.split(".")[-2].split("/")[-1]
citation_list = []
with open(text_file_path) as text_file:
text = text_file.read()
text = self._scispacy_parser.preprocess_text(text)
text = self._filter_references_section(text)
dataset_to_mention = self._build_dataset_id_to_mention_list_in_text_v8(text)
filtered_years_found = self._get_years_from_text(text)
for dataset in dataset_to_mention:
if self.dataset_year_filter_keep(filtered_years_found, dataset):
if self.filter_common_words_keep(dataset_to_mention[dataset]):
citation = self._make_citation_dict(pub_id, dataset, dataset_to_mention[dataset], 1.0)
citation_list.append(citation)
return citation_list
def predict(self, text_file_path):
"""Predict datasets for one text file. Returns a list of citation
dictionaries.
@param text_file_path: path to the text file to predict for
"""
return self.predict_v10(text_file_path)
def predict_from_publications_list(self, publication_list, predict_path):
"""Predict datasets for a list of publications, with each publication
in the format provided in publications.json file.
@param publication_list: the result of json.load('publications.json')
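@param predict_path: path to the data folder containing input/files/text/<publication_id>.txt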
"""
citation_list = []
for publication in tqdm(publication_list, desc='extract regex-based dataset candidates'):
predictions = self.predict(os.path.join(predict_path, "input", "files", "text", str(publication["publication_id"]) + ".txt"))
citation_list += predictions
return citation_list
def evaluate(self, predicted_citations_list, labels_path):
"""Run evaluation on the input predictions and labels. This is the same evaluation setup
as the organizers' evaluate script.
@param predicted_citations_list: a list of dictionaries in the correct output format for a citation
@param labels_path: the path to the json file of labels for the predictions being input
"""
if not os.path.isfile(labels_path):
return
with open(labels_path) as labels_json_file:
labels_json = json.load(labels_json_file)
predicted_set = set()
for prediction in predicted_citations_list:
predicted_set.add(str(prediction["publication_id"]) + ":" + str(prediction["data_set_id"]))
actual_set = set()
for actual in labels_json:
actual_set.add(str(actual["publication_id"]) + ":" + str(actual["data_set_id"]))
tp = len(actual_set.intersection(predicted_set))
fp = len(predicted_set - actual_set)
tn = 0
fn = len(actual_set - predicted_set)
accuracy = (tp+tn)/(tp+fp+tn+fn)
precision = tp/(fp+tp) if (tp+fp) != 0 else 1
recall = tp/(tp+fn)
f1 = (2 * precision * recall)/(precision + recall) if (precision + recall) != 0 else 0
# We expect these metrics to give the same results on the test set as ./rcc.sh evaluate
print("Precision:", precision)
print("Recall:", recall)
print("Accuracy:", accuracy)
print("F1:", f1)
def error_analysis(self, output_path, labels_path, pre_linking_candidates):
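"""Prints a per-publication comparison of the pre-linking candidates, the post-linking
predictions, and the gold labels (using dataset titles from the knowledge base), to make
it easier to inspect where the linking step helps or hurts.
@param output_path: path to a json file of predicted citations (post linking)
@param labels_path: path to the json file of gold labels
@param pre_linking_candidates: list of citation dictionaries produced before linking
"""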
with open(labels_path) as labels_json_file:
labels_json = json.load(labels_json_file)
with open(output_path) as predictions_json_file:
predicted_citations_list = json.load(predictions_json_file)
kb_path = os.path.abspath(os.path.join("project", "data", "data_sets.json"))
with open(kb_path) as kb_file:
kb_json = json.load(kb_file)
dataset_id_to_kb_entry = {}
for dataset in kb_json:
dataset_id_to_kb_entry[dataset["data_set_id"]] = dataset
pub_id_to_pre_linking = {}
pub_id_to_predicted = {}
pub_id_to_actual = {}
pub_dataset_to_mention_list = {}
all_pub_ids = set()
for prediction in predicted_citations_list:
pub_id = prediction["publication_id"]
dataset_id = prediction["data_set_id"]
all_pub_ids.add(pub_id)
if pub_id in pub_id_to_predicted:
pub_id_to_predicted[pub_id].add(dataset_id)
else:
pub_id_to_predicted[pub_id] = set([dataset_id])
for actual in labels_json:
pub_id = actual["publication_id"]
dataset_id = actual["data_set_id"]
all_pub_ids.add(pub_id)
if pub_id in pub_id_to_actual:
pub_id_to_actual[pub_id].add(dataset_id)
else:
pub_id_to_actual[pub_id] = set([dataset_id])
for pre_linking in pre_linking_candidates:
pub_id = pre_linking["publication_id"]
dataset_id = pre_linking["data_set_id"]
all_pub_ids.add(pub_id)
pub_dataset_to_mention_list[str(pub_id) + "_" + str(dataset_id)] = pre_linking["mention_list"]
if pub_id in pub_id_to_pre_linking:
pub_id_to_pre_linking[pub_id].add(dataset_id)
else:
pub_id_to_pre_linking[pub_id] = set([dataset_id])
for pub_id in all_pub_ids:
if pub_id in pub_id_to_predicted:
predicted = pub_id_to_predicted[pub_id]
else:
predicted = set()
if pub_id in pub_id_to_actual:
actual = pub_id_to_actual[pub_id]
else:
actual = set()
if pub_id in pub_id_to_pre_linking:
pre_linking = pub_id_to_pre_linking[pub_id]
else:
pre_linking = set()
pre_linking_titles = [(dataset_id_to_kb_entry[dataset_id]["title"], dataset_id, pub_dataset_to_mention_list[str(pub_id) + "_" + str(dataset_id)]) for dataset_id in pre_linking]
predicted_titles = [(dataset_id_to_kb_entry[dataset_id]["title"], dataset_id, pub_dataset_to_mention_list[str(pub_id) + "_" + str(dataset_id)]) for dataset_id in predicted]
actual_titles = [(dataset_id_to_kb_entry[dataset_id]["title"], dataset_id) for dataset_id in actual]
print("Publication id:", pub_id)
print()
print("Pre linking:", pre_linking_titles)
print()
print("Post linking:", predicted_titles)
print()
print("Actual:", actual_titles)
print()
def main():
# keeping main function around to use for debugging
train_path = os.path.abspath(os.path.join("project", "data", "train"))
dev_path = os.path.abspath(os.path.join("project", "data", "dev"))
kb_path = os.path.abspath(os.path.join("project", "data", "data_sets.json"))
test_path = os.path.abspath(os.path.join("data"))
model = RuleBasedModel(train_path, dev_path, kb_path, test_path)
model.compare_mention_lists()
ex_file_path = os.path.join(os.getcwd(), "project/data/test/input/files/text/143.txt")
predictions = model.predict(ex_file_path)
print(len(predictions))
print(predictions)
# publications_path = os.path.abspath(os.path.join("data", "input", "publications.json"))
# with open(publications_path) as publications_file:
# json_publications = json.load(publications_file)
# citation_list = model.predict_from_publications_list(json_publications)
# labels_path = os.path.abspath(os.path.join("rich-context-competition", "evaluate", "data_set_citations.json"))
# model.evaluate(citation_list, labels_path)
if __name__ == "__main__":
main()
| coleridge-rich-context-ai2-master | project/rule_based_model.py |
"""This file can be run to create the dataset linking dataset in the format expected by the
Structured Gradient Tree Boosting model. It assumes that there are train/dev/test folders
in project/data. We use this dataset for other downstream models as well, to minimize the
number of times this dataset is regenerated. The create_dataset_input function can be used
with any list of candidates.
"""
import json
import os
import numpy as np
from sklearn.externals import joblib
from s2base import scispacy_util
from spacy.matcher import Matcher
from spacy.vocab import Vocab
from spacy.tokens import Doc
from tqdm import tqdm
import re
import scipy
import math
from nltk.corpus import stopwords
import argparse
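# matches a standalone four-digit year: not preceded by a period, pipe, or digit, and not followed by a digit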
YEAR_PATTERN = r"((?<![.|\d])\d\d\d\d(?![\d]))"
SECTION_STRINGS = ["background", "methods", "results", "abstract", "intro", "introduction",
"keywords", "objectives", "conclusion", "measures", "discussion", "method",
"references", "contribution", "data"]
CONTEXT_WORDS = ["data", "information", "respondents", "survey"]
NLTK_STOPWORDS = stopwords.words("english")
def get_scispacy_doc(data_folder_path, publication_id, scispacy_parser):
"""Get spacy doc if cached, otherwise create it
@param data_folder_path: path to the data folder
@param publication_id: the publication id of the doc to load/create
@param scispacy_parser: the SciSpaCyParser instance to use
"""
spacy_doc_dir = os.path.join(data_folder_path, "spacy_tokenized_texts")
# disabled caching because it wasn't working
#if not os.path.exists(spacy_doc_dir):
# os.makedirs(spacy_doc_dir)
spacy_doc_path = os.path.join(spacy_doc_dir, str(publication_id) + ".spacy")
# try to get the spacy document from the cached location, otherwise create it
# this is useful because processing a document with spacy is slow
doc = None
if os.path.exists(spacy_doc_dir):
try:
doc = Doc(scispacy_parser.nlp.vocab).from_disk(spacy_doc_path)
except:
pass
if not doc:
with open(os.path.join(data_folder_path, "input", "files", "text", str(publication_id) + ".txt"), mode='rt') as txt_fp:
pub_text = txt_fp.read()
doc = scispacy_parser.scispacy_create_doc(pub_text)
# disabled caching because it wasn't working
#doc.to_disk(spacy_doc_path)
return doc
def get_years_from_text(text):
"""Parses a set of candidate years included in text
@param text: the text to search for years in
"""
matches_year = re.findall(YEAR_PATTERN, text)
hyphens = r'(?:\-+|\—+|\-|\–|\—|\~)'
matches_range = re.findall(r"(?<!/)\d\d\d\d{h}\d\d\d\d(?!/)".format(h=hyphens), text)
years_found = set([int(match) for match in matches_year])
# also include all years in any range of years found
for year_range in matches_range:
try:
start, end = re.split(hyphens, year_range)
except:
print("Failed to split:", year_range)
continue
for year in range(int(start), int(end)+1):
years_found.add(year)
# filter candidates years to be between 1000 and 2019
filtered_years_found = set()
for year in years_found:
if not (year < 1000 or year > 2019):
filtered_years_found.add(year)
return filtered_years_found
def compute_entity_probabilities():
"""Computes p(dataset) based on the training set
"""
train_path = os.path.abspath(os.path.join("project", "data", "train"))
train_labels_path = os.path.join(train_path, "data_set_citations.json")
with open(train_labels_path) as train_labels_file:
train_labels_json = json.load(train_labels_file)
dataset_id_to_count = {}
set_of_docs = set()
for label in train_labels_json:
dataset_id = label["data_set_id"]
publication_id = label["publication_id"]
if dataset_id in dataset_id_to_count:
dataset_id_to_count[dataset_id] += 1
else:
dataset_id_to_count[dataset_id] = 1
set_of_docs.add(publication_id)
total_docs = len(set_of_docs)
# normalize at log scale to highlight differences between datasets that appear once or twice, and datasets that appear zero times
normalized_dataset_id_to_count = {dataset_id: -1*np.log(dataset_id_to_count[dataset_id]/total_docs) for dataset_id in dataset_id_to_count}
return normalized_dataset_id_to_count
def compute_entity_given_mention_probs():
"""Computes p(dataset|mention) based on the training set
"""
train_path = os.path.abspath(os.path.join("project", "data", "train"))
train_labels_path = os.path.join(train_path, "data_set_citations.json")
with open(train_labels_path) as train_labels_file:
train_labels_json = json.load(train_labels_file)
mention_to_entity_to_count = {}
for label in train_labels_json:
dataset_id = label["data_set_id"]
publication_id = label["publication_id"]
mention_list = label["mention_list"]
for mention in mention_list:
if mention in mention_to_entity_to_count:
if dataset_id in mention_to_entity_to_count[mention]:
mention_to_entity_to_count[mention][dataset_id] += 1
else:
mention_to_entity_to_count[mention][dataset_id] = 1
else:
mention_to_entity_to_count[mention] = {dataset_id: 1}
normalized_mention_to_entity_to_count = {}
for mention in mention_to_entity_to_count:
# normalize entity probabilities for each mention text
entity_to_count = mention_to_entity_to_count[mention]
total_count = sum([entity_to_count[entity] for entity in entity_to_count])
normalized_entity_to_count = {entity: entity_to_count[entity]/total_count for entity in entity_to_count}
normalized_mention_to_entity_to_count[mention] = normalized_entity_to_count
return normalized_mention_to_entity_to_count
def compute_mention_given_entity_probs():
"""Computes p(mention|dataset) based on the training set
"""
train_path = os.path.abspath(os.path.join("project", "data", "train"))
train_labels_path = os.path.join(train_path, "data_set_citations.json")
with open(train_labels_path) as train_labels_file:
train_labels_json = json.load(train_labels_file)
entity_to_mention_to_count = {}
for label in train_labels_json:
dataset_id = label["data_set_id"]
publication_id = label["publication_id"]
mention_list = label["mention_list"]
for mention in mention_list:
if dataset_id in entity_to_mention_to_count:
if mention in entity_to_mention_to_count[dataset_id]:
entity_to_mention_to_count[dataset_id][mention] += 1
else:
entity_to_mention_to_count[dataset_id][mention] = 1
else:
entity_to_mention_to_count[dataset_id] = {mention: 1}
normalized_entity_to_mention_to_count = {}
for entity in entity_to_mention_to_count:
# normalize mention probabilities for each dataset id
mention_to_count = entity_to_mention_to_count[entity]
total_count = sum([mention_to_count[mention] for mention in mention_to_count])
normalized_mention_to_count = {mention: mention_to_count[mention]/total_count for mention in mention_to_count}
normalized_entity_to_mention_to_count[entity] = normalized_mention_to_count
return normalized_entity_to_mention_to_count
def year_match_nearby(mention_contexts, kb_entry):
"""Searches the mention contexts for a year-like string in the title of dataset.
Returns 0 if there are no years in the dataset title, otherwise 1 for a match
and -1 for no match.
@param mention_contexts: list of mention contexts
@param kb_entry: the dataset entry from the knowledge base
"""
feature_value = None
years_in_dataset_name = get_years_from_text(kb_entry["title"])
if len(years_in_dataset_name) == 0:
return 0
for context in mention_contexts:
years_in_context = set()
for sentence in context[0]:
years_in_sentence = get_years_from_text(sentence.text)
years_in_context = years_in_context.union(years_in_sentence)
if len(years_in_context.intersection(years_in_dataset_name)) > 0:
return 1
return -1
def get_contextual_similarity(candidate_dataset_id,
kb_entry,
mention_contexts,
scispacy_parser,
glove):
"""Computes contextual similarity scores between the candidate dataset description and
the mention contexts using glove embeddings and cosine similarity.
@param candidate_dataset_id: the id of the candidate dataset
@param kb_entry: the knowledge base entry for the candidate dataset
@param mention_contexts: a list of mention contexts to compute similarity over
@param scispacy_parser: a scispacy parser
@param glove: a dictionary of glove word embeddings
"""
glove_dim = 50
bins = np.linspace(0, 1, 11)
num_bins = bins.shape[0]
description = kb_entry["description"]
if description == "":
return [0]*num_bins, [0]*num_bins
description = scispacy_parser.scispacy_create_doc(description)
# try both max pooling and average pooling of word embeddings to get sentence representation
embedded_description_max = []
embedded_description_avg = []
for sentence in description.sents:
tokens = [t.text.lower() for t in sentence]
glove_tokens = [t for t in tokens if t in glove]
embedded_sentence = [np.linalg.norm(glove[t], ord=2) for t in glove_tokens if t not in NLTK_STOPWORDS]
# embedded_sentence = [embedding*idf_dict[t] if t in idf_dict else embedding*idf_dict["<MAX_VALUE>"] for embedding, t in zip(embedded_sentence, glove_token)]
last_embedding_layer = embedded_sentence
if last_embedding_layer == []:
continue
embedded_description_max.append(np.max(last_embedding_layer, axis=0))
embedded_description_avg.append(np.mean(last_embedding_layer, axis=0))
# try both max pooling and average pooling of word embeddings to get sentence representation
embedded_contexts_max = []
embedded_contexts_avg = []
for context in mention_contexts:
embedded_context_max = []
embedded_context_avg = []
for sentence in context[0]:
tokens = [t.text.lower() for t in sentence]
glove_tokens = [t for t in tokens if t in glove]
embedded_sentence = [np.linalg.norm(glove[t], ord=2) for t in glove_tokens if t not in NLTK_STOPWORDS]
# embedded_sentence = [embedding*idf_dict[t] if t in idf_dict else embedding*idf_dict["<MAX_VALUE>"] for embedding, t in zip(embedded_sentence, glove_token)]
last_embedding_layer = embedded_sentence
if last_embedding_layer == []:
continue
embedded_context_max.append(np.max(last_embedding_layer, axis=0))
embedded_context_avg.append(np.mean(last_embedding_layer, axis=0))
embedded_contexts_max.append(embedded_context_max)
embedded_contexts_avg.append(embedded_context_avg)
cosine_distances_max = []
cosine_distances_avg = []
for context_max, context_avg in zip(embedded_contexts_max, embedded_contexts_avg):
for sentence_max, sentence_avg in zip(context_max, context_avg):
for description_max, description_avg in zip(embedded_description_max, embedded_description_avg):
max_cosine = scipy.spatial.distance.cosine(sentence_max, description_max)
avg_cosine = scipy.spatial.distance.cosine(sentence_avg, description_avg)
if not math.isnan(max_cosine):
cosine_distances_max.append(max_cosine)
if not math.isnan(avg_cosine):
cosine_distances_avg.append(avg_cosine)
# bin the similarity scores of description sentence and context sentence pairs
digitized_max = np.digitize(cosine_distances_max, bins)
digitized_avg = np.digitize(cosine_distances_avg, bins)
binned_max = [0]*num_bins
binned_avg = [0]*num_bins
# use a one hot representation with a one for the largest similarity bin that has a pair in it
binned_max[max(digitized_max)-1] = 1
binned_avg[max(digitized_avg)-1] = 1
return binned_max, binned_avg
def max_min_sentence_length(mention_contexts):
"""Get the max and min lengths of the sentence in which the mention text was found
@param mention_contexts: a list of mention contexts
"""
max_len = 0
min_len = float('inf')
for context in mention_contexts:
# select the sentence in the context that contains the mention text
sentence_idx = context[3]
sentence = context[0][sentence_idx]
if len(sentence) > max_len:
max_len = len(sentence)
if len(sentence) < min_len:
min_len = len(sentence)
return max_len, min_len
def get_section_features(spacy_doc, mention_context, section_matcher):
"""Get a one hot representation of which of a set of predefined section headers is
closest before the first instance of the mention text. Each feature maps to a particular
section header, except the last feature, which indicates that no section header was found
prior to the mention text.
@param spacy_doc: a spacy doc representation of the publication
@param mention_context: a tuple representing the mention context
@param section_matcher: a spacy matcher to match section headers followed by
new lines or colons
"""
start_token_idx = mention_context[2][0]
if start_token_idx == 0:
return [0]*len(SECTION_STRINGS) + [1]
doc_to_search = spacy_doc[:start_token_idx]
matches = list(section_matcher(doc_to_search.as_doc()))
if matches == []:
return [0]*len(SECTION_STRINGS) + [1]
matches = sorted(matches, key=lambda match: match[1], reverse=True)
closest_match = matches[0]
closest_section = section_matcher.vocab.strings[closest_match[0]]
features = [0]*len(SECTION_STRINGS) + [0]
features[SECTION_STRINGS.index(closest_section)] = 1
return features
def context_word_overlap_count(mention_contexts, kb_entry):
"""Returns the count of overlapping words between mention contexts and the subjects
field of the dataset's knowledge base entry
@param mention_contexts: a list of mention contexts
@param kb_entry: the knowledge base entry for the candidate dataset
"""
subjects = re.split(",| ", kb_entry["subjects"])
subjects = set([subject.lower() for subject in subjects])
subjects.update(CONTEXT_WORDS)
total_count = 0
for context in mention_contexts:
for sentence in context[0]:
tokens = set([t.text.lower() for t in sentence])
total_count += len(subjects.intersection(tokens))
return total_count
def featurize_candidate_datasets(is_test,
row,
gold_dataset_id,
prior_entity_probs,
entity_to_prob,
prior_mention_given_entity_probs,
mention_text,
dataset_id_to_kb_entry,
mention_contexts,
scispacy_parser,
glove,
spacy_doc,
section_matcher):
"""Featurizes the list of dataset candidates, adding a null candidate, and outputs
the expected format for sgtb
@param is_test: if test data is being processed
@param row: the row from the rule based candidates
@param gold_dataset_id: the id of the gold dataset for this mention
@param prior_entity_probs: dictionary mapping dataset id to empirical probability
@param entity_to_prob: dictionary containing dataset id to p(dataset id | mention)
@param prior_mention_given_entity_probs: dictionary containing p(mention|dataset)
@param mention_text: the text of the mention
@param dataset_id_to_kb_entry: dictionary mapping dataset id to its entry in the knowledge base
@param mention_contexts: list of mention contexts for this mention
@param scispacy_parser: a scispacy parser
@param glove: a dictionary of glove embeddings
@param spacy_doc: a spacy tokenized doc
@param section_matcher: a spacy matcher to match section headers followed by
new lines or colons
"""
candidate_datasets = []
for candidate_dataset_id in row["candidate_dataset_ids"]:
candidate_dataset_id = str(candidate_dataset_id)
label = int(candidate_dataset_id == gold_dataset_id)
if int(candidate_dataset_id) in prior_entity_probs:
prior_entity_probability = prior_entity_probs[int(candidate_dataset_id)]
else:
prior_entity_probability = 0
if int(candidate_dataset_id) in entity_to_prob:
prior_entity_given_mention_prob = entity_to_prob[int(candidate_dataset_id)]
else:
prior_entity_given_mention_prob = 0
if int(candidate_dataset_id) in prior_mention_given_entity_probs:
if mention_text in prior_mention_given_entity_probs[int(candidate_dataset_id)]:
prior_mention_given_entity_prob = prior_mention_given_entity_probs[int(candidate_dataset_id)][mention_text]
else:
prior_mention_given_entity_prob = 0
else:
prior_mention_given_entity_prob = 0
year_match = year_match_nearby(mention_contexts, dataset_id_to_kb_entry[int(candidate_dataset_id)])
# the contextual similarity features were not found to improve performance and are slow to compute
# contextual_similarity = get_contextual_similarity(candidate_dataset_id,
# dataset_id_to_kb_entry[int(candidate_dataset_id)],
# mention_contexts,
# scispacy_parser,
# glove)
is_null_candidate = 0
mention_length_chars = len(mention_text)
mention_length_tokens = len(scispacy_parser.scispacy_create_doc(mention_text))
max_len, min_len = max_min_sentence_length(mention_contexts)
is_acronym = int(mention_text.isupper() and len(mention_text.split(' ')) == 1)
section_features = get_section_features(spacy_doc, mention_contexts[0], section_matcher)
context_word_overlap = context_word_overlap_count(mention_contexts, dataset_id_to_kb_entry[int(candidate_dataset_id)])
# avoid log(0)
context_word_overlap += 1
context_word_overlap = np.log(context_word_overlap)
# tfidf score for the ner produced candidates
if "score" in row:
score = row["score"]
else:
score = -1
feats = [prior_entity_probability,
prior_entity_given_mention_prob,
prior_mention_given_entity_prob,
year_match,
mention_length_chars,
mention_length_tokens,
max_len,
min_len,
is_acronym] + \
section_features + \
[context_word_overlap,
score] + \
[is_null_candidate]
# contextual_similarity[0] + \
# contextual_similarity[1]
candidate_datasets.append([(str(candidate_dataset_id), label), feats])
# add a null candidate to every list of entity candidates for SGTB training
null_is_null_candidate = 1
if is_test:
null_label = 0
else:
null_label = int(gold_dataset_id == "NULL")
null_feats = [0]*(len(feats)-1) + [null_is_null_candidate]
null_candidate = [("NULL", null_label), null_feats]
candidate_datasets.append(null_candidate)
return candidate_datasets
def create_output_mention(is_test,
row,
prior_entity_probs,
prior_entity_given_mention_probs,
mention_text,
prior_mention_given_entity_probs,
dataset_id_to_kb_entry,
mention_contexts,
scispacy_parser,
glove,
spacy_doc,
section_matcher):
"""Creates the output format and features for a single mention and its candidates
@param is_test: if test data is being processed
@param row: the row from the rule based candidates
@param prior_entity_probs: dictionary mapping dataset id to empirical probability
@param prior_entity_given_mention_probs: dictionary containing p(dataset|mention)
@param mention_text: the text of the mention
@param prior_mention_given_entity_probs: dictionary containing p(mention|dataset)
@param dataset_id_to_kb_entry: dictionary mapping dataset id to its entry in the knowledge base
@param mention_contexts: list of mention contexts for this mention
@param scispacy_parser: a scispacy parser
@param glove: a dictionary of glove embeddings
@param spacy_doc: a spacy tokenized doc
@param section_matcher: a spacy matcher to match section headers followed by
new lines or colons
"""
if not is_test:
gold_dataset_id = str(row["dataset_id"])
else:
gold_dataset_id = "NULL"
# Note: dummy values to match SGTB expected format, but it is not actually used anywhere
offset = [1, 2]
if mention_text in prior_entity_given_mention_probs:
entity_to_prob = prior_entity_given_mention_probs[mention_text]
else:
entity_to_prob = {}
candidate_datasets = featurize_candidate_datasets(is_test,
row,
gold_dataset_id,
prior_entity_probs,
entity_to_prob,
prior_mention_given_entity_probs,
mention_text,
dataset_id_to_kb_entry,
mention_contexts,
scispacy_parser,
glove,
spacy_doc,
section_matcher)
output_mention = [(mention_text, offset, gold_dataset_id), candidate_datasets]
return output_mention
def create_idf_weighting(scispacy_parser):
"""Build a dictionary of inverse document frequency weights for all tokens in training set
@param scispacy_parser: a scispacy parser
"""
data_folder_path = os.path.abspath(os.path.join("project", "data", "train"))
train_path = os.path.abspath(os.path.join("project", "data", "train", "input", "files", "text"))
token_dict = {}
num_docs = 0
for txt_file_name in tqdm(os.listdir(train_path), desc='create idf weights in create_sgtb_dataset.py'):
pub_id = txt_file_name.split(".")[0]
spacy_doc = get_scispacy_doc(data_folder_path, pub_id, scispacy_parser)
tokens_set = set([t.text for t in spacy_doc])
for t in tokens_set:
if t in token_dict:
token_dict[t] += 1
else:
token_dict[t] = 1
num_docs += 1
idf_dict = {t: np.log(num_docs/(token_dict[t])) for t in token_dict}
idf_dict["<MAX_VALUE>"] = max(idf_dict.values())
return idf_dict
def create_dataset_input(rule_based_candidates, mention_context_cache_path, data_folder_path, overall_output_path=None, is_test=False, output_path=None, overwrite_dataset=False):
"""Function to take in the rule based candidates and create
the input format for the SGTB model. This function is intended
to be used for processing test data, as the main function in
this file will convert and save train, dev, and test output.
@param rule_based_candidates: a list of candidates from the rule based model
@param mention_context_cache_path: path to a dictionary mapping <pub_id>:<mention_text> pairs to all contexts
@param data_folder_path: path to the data folder
@param overall_output_path: path to the overall output file (optional, used for SGTB training)
@param is_test: parameter indicating whether or not the data being processed is test data
@param output_path: the path to write the output to (if not processing test data)
@param overwrite_dataset: whether or not to overwrite the existing dataset (will be true for train
and false for dev and test)
"""
scispacy_parser = scispacy_util.SciSpaCyParser()
prior_entity_probs = compute_entity_probabilities()
prior_entity_given_mention_probs = compute_entity_given_mention_probs()
prior_mention_given_entity_probs = compute_mention_given_entity_probs()
glove_path = os.path.abspath(os.path.join("project", "data", "glove.6B.50d.txt"))
with open(glove_path, "r") as lines:
glove = {line.split()[0]: np.array([float(value) for value in line.split()[1:]])
for line in lines}
# I haven't run the experiments to tell if having a cache actually helps or not, it takes a while to load
# the cache when it is used
# if is_test:
# mention_context_cache = {}
# else:
# try:
# print("Loading cache...")
# mention_context_cache = joblib.load(mention_context_cache_path)["cache"]
# print("Cache loaded...")
# except:
# mention_context_cache = {}
mention_context_cache = {}
kb_path = os.path.abspath(os.path.join("project", "data", "data_sets.json"))
with open(kb_path) as kb_file:
kb_json = json.load(kb_file)
dataset_id_to_kb_entry = {}
for dataset in kb_json:
dataset_id_to_kb_entry[dataset["data_set_id"]] = dataset
matcher = Matcher(scispacy_parser.nlp.vocab)
section_matcher = Matcher(scispacy_parser.nlp.vocab)
for section_name in SECTION_STRINGS:
section_matcher.add(section_name, None, [{"LOWER": section_name}, {"ORTH": "\n"}],
[{"LOWER": section_name}, {"ORTH": ":"}],
[{"ORTH": "\n"}, {"LOWER": section_name}, {"ORTH": "."}])
output_docs = []
pub_ids = []
# we will write a new file on the first document, and amend to it afterwards
first_doc = True
cache_changed = False
for pub_id in tqdm(rule_based_candidates, desc='create dataset in create_sgtb_dataset.py'):
spacy_doc = get_scispacy_doc(data_folder_path, pub_id, scispacy_parser)
pub_ids.append(pub_id)
doc_candidates = rule_based_candidates[pub_id]
output_doc = []
dataset_id_to_longest_mention_text = {}
for row in doc_candidates:
mention_text = row["mention"]
dataset_id = row["candidate_dataset_ids"][0]
if dataset_id in dataset_id_to_longest_mention_text:
if len(mention_text) > len(dataset_id_to_longest_mention_text[dataset_id]):
dataset_id_to_longest_mention_text[dataset_id] = mention_text
else:
dataset_id_to_longest_mention_text[dataset_id] = mention_text
for row in doc_candidates:
mention_text = row["mention"]
dataset_id = row["candidate_dataset_ids"][0]
# if mention_text != dataset_id_to_longest_mention_text[dataset_id]:
# continue
mention_context_cache_key = str(pub_id) + "_" + mention_text
if mention_context_cache_key in mention_context_cache:
mention_contexts = mention_context_cache[mention_context_cache_key]
else:
# search for the mention text in the doc
spacy_mention_text = scispacy_parser.scispacy_create_doc(mention_text)
pattern = []
for token in spacy_mention_text:
pattern.append({"ORTH": token.text})
matcher.add("MENTION", None, pattern)
matches = list(matcher(spacy_doc))
# build and save a mapping of <pub_id>_<mention_text> to all contexts the mention
# is found in
cache_changed = True
mention_contexts = []
token_idx_to_sent_idx = {}
sentences_list = list(spacy_doc.sents)
context_size = 3
for sent_idx, sent in enumerate(sentences_list):
for token in sent:
token_idx = token.i
token_idx_to_sent_idx[token_idx] = sent_idx
for match_id, start, end in matches:
sentence_idx = token_idx_to_sent_idx[start]
start_context_sent_idx = max(0, sentence_idx-context_size)
if start_context_sent_idx == 0:
match_sentence_idx = sentence_idx
else:
match_sentence_idx = context_size
end_context_sent_idx = min(len(sentences_list), sentence_idx+context_size)
mention_context = sentences_list[start_context_sent_idx:end_context_sent_idx+1]
sentences_as_docs = []
for sentence in mention_context:
sentences_as_docs.append(sentence.as_doc())
start_context_token_idx = sentences_list[start_context_sent_idx].start
end_context_token_idx = sentences_list[end_context_sent_idx-1].end
context_with_offsets = (sentences_as_docs, (start_context_token_idx, end_context_token_idx), (start, end), match_sentence_idx)
mention_contexts.append(context_with_offsets)
# limit featurizing to first 3 contexts in order of appearance
mention_contexts = mention_contexts[:3]
mention_context_cache[mention_context_cache_key] = mention_contexts
matcher.remove("MENTION")
if mention_contexts != []:
output_mention = create_output_mention(is_test,
row,
prior_entity_probs,
prior_entity_given_mention_probs,
mention_text,
prior_mention_given_entity_probs,
dataset_id_to_kb_entry,
mention_contexts,
scispacy_parser,
glove,
spacy_doc,
section_matcher)
output_doc.append(output_mention)
# only write output to file if not processing test data
if not is_test:
if first_doc:
with open(output_path, "w") as output_file:
json.dump(output_doc, output_file)
output_file.write("\n")
first_doc = False
if overwrite_dataset:
with open(overall_output_path, "w") as overall_output_file:
json.dump(output_doc, overall_output_file)
overall_output_file.write("\n")
else:
with open(output_path, "a") as output_file:
json.dump(output_doc, output_file)
output_file.write("\n")
with open(overall_output_path, "a") as overall_output_file:
json.dump(output_doc, overall_output_file)
overall_output_file.write("\n")
output_docs.append(json.loads(json.dumps(output_doc)))
# if cache_changed and not is_test:
# joblib.dump({"cache": mention_context_cache}, mention_context_cache_path)
return output_docs, pub_ids
def create_dataset(rule_candidate_path, data_folder_path, overall_output_path, output_path, overwrite_dataset=False):
"""Function to take in the rule based candidates and write the input format
for SGTB to a file
@param rule_candidate_path: a path to the rule based candidates json file
@param data_folder_path: path to the data folder
@param overall_output_path: path to the overall output file
@param output_path: the path to write the output to (if not processing test data)
@param overwrite_dataset: whether or not to overwrite the existing dataset (will be true for train
and false for dev and test)
"""
mention_context_cache_path = os.path.join(data_folder_path, "mention_context_cache.pkl")
with open(rule_candidate_path) as candidate_file:
rule_candidate_json = json.load(candidate_file)
candidates = rule_candidate_json
create_dataset_input(candidates, mention_context_cache_path, data_folder_path, overall_output_path, is_test=False, output_path=output_path, overwrite_dataset=overwrite_dataset)
def main(dataset_root: str, output_root: str, overall_output_path: str):
train_data_folder_path = os.path.join(dataset_root, "train")
train_candidate_path = os.path.join(dataset_root, "train", "all_candidates_scores.json")
train_output_path = os.path.join(output_root, "sgtb_train_scores.json")
dev_data_folder_path = os.path.join(dataset_root, "dev")
dev_candidate_path = os.path.join(dataset_root, "dev", "all_candidates_scores.json")
dev_output_path = os.path.join(output_root, "sgtb_dev_scores.json")
test_data_folder_path = os.path.join(dataset_root, "test")
test_candidate_path = os.path.join(dataset_root, "test", "all_candidates_scores.json")
test_output_path = os.path.join(output_root, "sgtb_test_scores.json")
create_dataset(train_candidate_path, train_data_folder_path, overall_output_path, output_path=train_output_path, overwrite_dataset=True)
create_dataset(dev_candidate_path, dev_data_folder_path, overall_output_path, output_path=dev_output_path)
create_dataset(test_candidate_path, test_data_folder_path, overall_output_path, output_path=test_output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset_root',
)
parser.add_argument(
'--output_root'
)
parser.add_argument(
'--overall_output_path'
)
args = parser.parse_args()
main(args.dataset_root, args.output_root, args.overall_output_path)
| coleridge-rich-context-ai2-master | project/create_sgtb_dataset.py |
"""This file contains a class that can be used to predict dataset mentions using a trained
named entity recognition (NER) model
"""
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from ner_rcc.rcc_ner import RccNerDatasetReader
from allennlp.common.params import Params
import torch.nn.functional as F
import torch
from tqdm import tqdm
import os
import numpy as np
class NerModel():
"""Class for making predictions of dataset mentions using the NER model from AllenNLP
"""
def __init__(self, data_path, model_path):
# the path to the ner-conll folder of publications to predict on, this should generated
# before using the ner model. This can be done by calling to_conll_test.py
self.test_path = data_path
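# the path to the trained AllenNLP model archive (model.tar.gz) used for prediction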
self.model_path = model_path
def predict_from_publication_list(self):
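"""Runs the NER model over every *_extraction.conll file in self.test_path and returns
a list of candidate mentions, one dictionary per unique (publication, mention) pair with
the publication id, mention text, the model's confidence score, and the tokens of the
instance the mention was found in.
"""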
citation_list = []
filenum_mention_set = set()
archive = load_archive(
self.model_path
)
dataset_reader = RccNerDatasetReader.from_params(Params({
"coding_scheme": "BIOUL",
"cutoff_sentence_length": 50,
"filter_sections": False ,
"percent_negatives": 100,
"tag_label": "ner",
"token_indexers": {
"token_characters": {
"type": "characters",
"character_tokenizer": {
"end_tokens": [
"@@PADDING@@",
"@@PADDING@@",
"@@PADDING@@"
]
}
},
"tokens": {
"type": "single_id",
"lowercase_tokens": 'false'
}
}
}))
predictor = Predictor.from_archive(archive)
for filename in tqdm(os.listdir(self.test_path), desc='extract dataset candidates with NER'):
filenum = filename.replace("_extraction.conll", "")
# using the AllenNLP command line tool to predict
instances = dataset_reader.read(f'{self.test_path}/{filename}')
mention = ""
prob = -1
for batch in range(0, len(instances), 16):
instance_batch = instances[batch:min(batch+16, len(instances))]
predicted_batch = predictor.predict_batch_instance(instance_batch)
for instance, prediction in zip(instance_batch, predicted_batch):
for tag, word, logit in zip(prediction['tags'], prediction['words'], prediction['logits']):
# build up a mention based on BILOU tags and add to citation list
# when another O tag is found
if tag == 'O' and mention:
set_key = filenum + "_" + mention.rstrip()
if set_key not in filenum_mention_set:
citation_list.append({
'publication_id': int(filenum),
'mention': mention.rstrip(),
'score': prob,
'instance': [t.text for t in instance["tokens"].tokens]
})
filenum_mention_set.add(set_key)
mention = ""
prob = -1
elif tag != 'O':
if prob == -1:
probs = F.softmax(torch.tensor(logit), dim=-1)
prob_tensor, _ = torch.max(probs, 0)
prob = prob_tensor.data.item()
mention += str(word)
mention += " "
return citation_list
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def main():
# main function for debugging purposes
model = NerModel("/data/ner-conll", "/path/to/ner_model.tar.gz")  # the model path here is a placeholder for a trained model archive
model.predict_from_publication_list()
if __name__ == "__main__":
main()
| coleridge-rich-context-ai2-master | project/ner_model.py |
"""This file can be run to create the rule based dataset candidates. It assumes that
there are train/dev/test folders in project/data. The create_rule_based_input function
can be used to convert a citation list (the competition output format) to the format expected
by the next step of the system. The generate_rule_based_dataset function can be used to produce
the rule based candidates for any publications list.
"""
from s2base.scispacy_util import SciSpaCyParser
import os
import json
import rule_based_model
import argparse
import project
def create_rule_based_input(citation_list, gold_labels_set=None):
"""Creates the expected input format for the rule based candidates from a citation list
output of the rule based model. This function is intended to be used for processing
test data, as the main function in this file will convert and save train, dev, and test
output
@param citation_list: a citation list formatted in the contest output format
@param gold_labels_set: (optional) the set of <pub_id>_<dataset_id> pairs that are correct,
should not be passed in at test time
"""
output = {}
for citation in citation_list:
pub_id = citation["publication_id"]
dataset_id = citation["data_set_id"]
mention_list = citation["mention_list"]
if "score" in citation:
score = citation["score"]
else:
score = -1
for mention in mention_list:
if gold_labels_set != None:
gold_entity = "NULL"
entity_key = str(pub_id) + "_" + str(dataset_id)
if entity_key in gold_labels_set:
gold_entity = dataset_id
row_to_add = {"mention": mention, "dataset_id": gold_entity, "candidate_dataset_ids": [dataset_id], "score": score}
else:
# if processing test data, do not include the gold label
row_to_add = {"mention": mention, "candidate_dataset_ids": [dataset_id], "score": score}
if pub_id in output:
output[pub_id].append(row_to_add)
else:
output[pub_id] = [row_to_add]
return output
def generate_labels_based_dataset(data_folder_path, gold_labels_path):
"""Create and save the linking dataset for the labels file. This dataset
is a dictionary mapping publication id to a list of mentions and the dataset
they are labeled as.
Note: The context for all the occurrences of each mention will be generated
(and cached) as needed in a later step
@param: data_folder_path: path to the data folder to run on (train, dev, or test)
@param: gold_labels_path: path to the labels file to use in generating this dataset
"""
with open(gold_labels_path) as gold_labels_file:
gold_labels_json = json.load(gold_labels_file)
output = {}
for gold_label in gold_labels_json:
pub_id = gold_label["publication_id"]
dataset_id = gold_label["data_set_id"]
mention_list = gold_label["mention_list"]
for mention in mention_list:
row_to_add = {"mention": mention, "dataset_id": dataset_id}
if pub_id in output:
output[pub_id].append(row_to_add)
else:
output[pub_id] = [row_to_add]
out_path = os.path.join(data_folder_path, "linking_labels.json")
with open(out_path, "w") as out_file:
json.dump(output, out_file)
def generate_rule_based_dataset(output_path, data_folder_path, gold_labels_path, ner_output_path: str, is_dev=False):
"""Create and save the linking dataset for the rule based candidates. This dataset
is a dictionary mapping publication id to a list of mentions, the dataset they
are predicted as, and the dataset that is labeled for the publication if the prediction
is correct, or NULL if the prediction is wrong. See the note in the code for more details
Note: The context for all the occurrences of each mention will be generated
(and cached) as needed at train time
@param: output_path: path to the file to write candidates to
@param: data_folder_path: path to the data folder to run on (train, dev, or test)
@param: gold_labels_path: path to the labels file to use in generating this dataset
@param: ner_output_path: path to the candidate mentions from the NER model
@param: is_dev: whether or not the data being processed is dev data. this impacts
which examples the rule based model is allowed to use
"""
train_path = os.path.abspath(os.path.join("project", "dataset_split_data", "train"))
dev_path = os.path.abspath(os.path.join("project", "dataset_split_data", "dev"))
kb_path = os.path.abspath(os.path.join("project", "data", "data_sets.json"))
test_path = os.path.abspath(os.path.join("data"))
# without the test_path parameter, the rule based model will not use dev examples for training
if is_dev:
model = rule_based_model.RuleBasedModel(train_path, dev_path, kb_path)
else:
model = rule_based_model.RuleBasedModel(train_path, dev_path, kb_path, test_path)
publications_path = os.path.join(data_folder_path, "publications.json")
with open(publications_path) as publications_file:
json_publications = json.load(publications_file)
citation_list = model.predict_from_publications_list(json_publications, data_folder_path)
with open(ner_output_path) as ner_file_:
ner_citation_list = json.load(ner_file_)
ner_citation_list, ner_mention_list = project.generate_citations_from_ner_mentions(ner_citation_list, kb_path)
citation_list += ner_citation_list
with open(gold_labels_path) as gold_labels_file:
gold_labels_json = json.load(gold_labels_file)
output = {}
gold_labels_set = set()
for gold_label in gold_labels_json:
pub_id = gold_label["publication_id"]
dataset_id = gold_label["data_set_id"]
mention_list = gold_label["mention_list"]
for mention in mention_list:
# Note: we make the assumption here that if a publication-dataset pair
# is correct, all mentions that the rule based model produces that support
# that pair are correct. This is needed because the rule based model
# produces mention-dataset pairs that match a mention-dataset pair in the knowledge
# base, but does not necessarily match a mention-dataset pair in the provided labels.
# Additionally, the rule based model finds some slight perturbations of the mentions listed
# in the knowledge base/labels, and we want to count these as correct. We also already don't
# know which occurrence of a mention in a text actually links to the dataset labeled, so this
# type of noise is aleady present in the dataset
gold_labels_set.add(str(pub_id) + "_" + str(dataset_id))
output = create_rule_based_input(citation_list, gold_labels_set)
with open(output_path, "w") as out_file:
json.dump(output, out_file)
def main(dataset_root: str, output_root: str):
train_folder_path = os.path.join(dataset_root, "train")
dev_folder_path = os.path.join(dataset_root, "dev")
test_folder_path = os.path.join(dataset_root, "test")
kb_path = os.path.abspath(os.path.join("project", "data", "data_sets.json"))
train_ner_output_path = os.path.join(train_folder_path, "ner_output_split_train.json")
dev_ner_output_path = os.path.join(dev_folder_path, "ner_output.json")
test_ner_output_path = os.path.join(test_folder_path, "ner_output.json")
train_output_path = os.path.join(output_root, "train_candidates.json")
dev_output_path = os.path.join(output_root, "dev_candidates.json")
test_output_path = os.path.join(output_root, "test_candidates.json")
# generate_labels_based_dataset(test_folder_path, os.path.join(test_folder_path, "data_set_citations.json"))
generate_rule_based_dataset(test_output_path, test_folder_path, os.path.join(test_folder_path, "data_set_citations.json"), test_ner_output_path)
# generate_labels_based_dataset(dev_folder_path, os.path.join(dev_folder_path, "data_set_citations.json"))
generate_rule_based_dataset(dev_output_path, dev_folder_path, os.path.join(dev_folder_path, "data_set_citations.json"), dev_ner_output_path, is_dev=True)
# generate_labels_based_dataset(train_folder_path, os.path.join(train_folder_path, "data_set_citations.json"))
generate_rule_based_dataset(train_output_path, train_folder_path, os.path.join(train_folder_path, "data_set_citations.json"), train_ner_output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset_root',
)
parser.add_argument(
'--output_root'
)
args = parser.parse_args()
main(args.dataset_root, args.output_root) | coleridge-rich-context-ai2-master | project/create_linking_dataset.py |
coleridge-rich-context-ai2-master | project/__init__.py |
|
"""Script to sample some publications from the phase 1 holdout set"""
import os
from collections import defaultdict
import numpy as np
from tqdm import tqdm
import json
def main():
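# samples SAMPLE_COUNT publications from the phase 1 holdout set and copies their text, pdf,
# and ner-conll files (plus the matching publication and citation entries) into project/holdout_sampled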
SAMPLE_COUNT = 400
holdout_path = os.path.abspath(os.path.join("project", "holdout", "data"))
holdout_publications_path = os.path.abspath(os.path.join(holdout_path, "publications.json"))
holdout_citations_path = os.path.abspath(os.path.join(holdout_path, "data_set_citations.json"))
holdout_ner_path = os.path.abspath(os.path.join(holdout_path, "ner-conll"))
holdout_text_path = os.path.abspath(os.path.join(holdout_path, "input", "files", "text"))
holdout_pdf_path = os.path.abspath(os.path.join(holdout_path, "input", "files", "pdf"))
with open(holdout_publications_path) as fp:
holdout_publications = json.load(fp)
with open(holdout_citations_path) as fp:
holdout_citations = json.load(fp)
pub_id_to_pub = {}
for publication in holdout_publications:
publication_id = publication["publication_id"]
pub_id_to_pub[publication_id] = publication
pub_id_to_citation_entry = defaultdict(list)
for citation in holdout_citations:
publication_id = citation["publication_id"]
pub_id_to_citation_entry[publication_id].append(citation)
holdout_paper_ids = list(pub_id_to_pub.keys())
sampled_ids = np.random.choice(holdout_paper_ids, size=SAMPLE_COUNT, replace=False)
new_folder_path = os.path.abspath(os.path.join("project", "holdout_sampled"))
os.system("mkdir {}".format(new_folder_path))
linking_conll_path = os.path.join(new_folder_path, "linking-conll")
ner_conll_path = os.path.join(new_folder_path, "ner-conll")
input_path = os.path.join(new_folder_path, "input")
files_path = os.path.join(input_path, "files")
pdf_path = os.path.join(files_path, "pdf")
text_path = os.path.join(files_path, "text")
os.system("mkdir {}".format(linking_conll_path))
os.system("mkdir {}".format(ner_conll_path))
os.system("mkdir {}".format(input_path))
os.system("mkdir {}".format(files_path))
os.system("mkdir {}".format(pdf_path))
os.system("mkdir {}".format(text_path))
new_publications = []
new_citations = []
for paper_id in tqdm(sampled_ids):
text_file_name = str(paper_id) + ".txt"
pdf_file_name = str(paper_id) + ".pdf"
ner_file_name = str(paper_id) + "_extraction.conll"
linking_file_name = str(paper_id) + "_linking.conll"
publication_entry = pub_id_to_pub[paper_id]
citation_entries = pub_id_to_citation_entry[paper_id]
new_publications.append(publication_entry)
new_citations += citation_entries
folder_path = os.path.abspath(os.path.join("project", "holdout", "data"))
os.system("cp {} {}".format(os.path.join(folder_path, "input", "files", "text", text_file_name),
os.path.join(text_path, text_file_name)))
os.system("cp {} {}".format(os.path.join(folder_path, "input", "files", "pdf", pdf_file_name),
os.path.join(pdf_path, pdf_file_name)))
os.system("cp {} {}".format(os.path.join(folder_path, "ner-conll", ner_file_name),
os.path.join(ner_conll_path, ner_file_name)))
with open(os.path.join(new_folder_path, "publications.json"), "w") as fp:
json.dump(new_publications, fp, indent=4)
with open(os.path.join(new_folder_path, "data_set_citations.json"), "w") as fp:
json.dump(new_citations, fp, indent=4)
if __name__ == '__main__':
main() | coleridge-rich-context-ai2-master | project/sample_holdout.py |
# Adapted from https://github.com/bloomberg/sgtb
# Copyright 2018 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Structured Gradient Tree Boosting
This module contains methods for fitting gradient boosted regression trees for
structured prediction problems.
"""
# Author: Yi Yang
# Email: [email protected]
# [email protected]
from __future__ import print_function
from __future__ import division
from abc import abstractmethod
from sklearn.ensemble._gradient_boosting import predict_stages
from sklearn.tree.tree import DecisionTreeRegressor
import numbers, pickle
import numpy as np
from time import time
from multiprocessing import Process, Queue
class StructuredGradientBoosting(object):
"""Structured Gradient Boosting. (S-MART) """
def __init__(self, n_estimators, beam_width, learning_rate, # SGTB params
min_samples_split, min_samples_leaf, max_depth, # Tree params
ent_ent_feat_dict, num_thread, random_state=1234):
# sklearn tree related
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_depth = max_depth
self.random_state = random_state
self.estimators = np.empty((0, 0), dtype=np.object)
self.n_estimated = 0
# structured learning
self.ent_ent_feat_dict = ent_ent_feat_dict
# self.num_ent_ent_feat = len(list(ent_ent_feat_dict.values())[0])
self.num_ent_ent_feat = 0
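# entity-entity (global) features are currently disabled, so only local features are used (see get_score)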
self.beam_width = beam_width # width of beam for global training and testing
self.num_thread = num_thread
def fit_stage(self, i, X, y):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion='friedman_mse',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=0.,
max_features=None,
max_leaf_nodes=None,
random_state=self.random_state,
presort=False)
tree.fit(X, y, check_input=False, X_idx_sorted=None)
# add tree to ensemble
self.estimators[i, 0] = tree
self.n_estimated = i + 1
def fit(self, train_set, dev_set):
"""Fit the gradient boosting model.
"""
# init state
self.estimators = np.empty((self.n_estimators, 1),
dtype=np.object)
# fit the boosting stages
n_stages = self.fit_stages(train_set, dev_set)
return self
# MAIN METHODs
def fit_stages(self, train_set, dev_set):
"""Iteratively fits the stages.
"""
X, y, indices, ent_ids = train_set
# perform boosting iterations
for i in range(self.n_estimators):
print(i)
start_time = time()
batches = self.split(indices, self.num_thread, True)
q = Queue()
procs = []
for batch in batches:
proc = Process(target=self.get_func_grad, args=(batch, X, y,
ent_ids, q))
procs.append(proc)
proc.start()
result_list = []
for _ in range(len(batches)):
result_list.append(q.get())
for proc in procs:
proc.join()
X_aug, y_aug, sum_match_cnt, sum_gold_cnt = self.merge(result_list)
train_acc = float(sum_match_cnt*100) / sum_gold_cnt
X_aug, y_aug = np.array(X_aug, dtype='float32'), np.array(y_aug, dtype='float32')
end_time = time()
# report dev accuracy every 25 iterations
if (i+1) % 25 == 0:
dev_X, dev_y, dev_indices, dev_ent_ids = dev_set
dev_acc = self.get_acc(dev_X, dev_y, dev_indices, dev_ent_ids)
print ("Iter %d Takes %.4f sec. Train acc %.2f Dev acc %.2f" %(i+1, end_time-start_time, train_acc, dev_acc))
# fit next stage of trees
self.fit_stage(i, X_aug, y_aug)
return i + 1
def beam_search(self, doc, X, ent_ids, gold_seq=None):
""" Beam search, used for training if 'gold_seq' is given, otherwise testing.
'aggregated_score_dict' is used to save previously computed scores of
sequence prefices.
"""
feat_seq_logprobs = []
aggregated_score_dict={}
prev_beam=[[]]
for i, ent_list in enumerate(doc): # ent_list is the ent indices for a mention
if gold_seq:
inner_gold_seq = gold_seq[:i+1]
next_beam = [[]]
sorted_list = []
for prev_seq in prev_beam:
for ent_idx in ent_list:
local_feats = X[ent_idx]
score, feats = self.get_score(local_feats, ent_idx,
prev_seq, ent_ids)
if len(prev_seq) == 0:
aggregated_score = score
else:
aggregated_score = aggregated_score_dict[tuple(prev_seq)] + score
curr_seq = prev_seq + [ent_idx]
aggregated_score_dict[tuple(curr_seq)] = aggregated_score
max_score = -float('inf')
max_tup = None
for next_seq in next_beam:
additional_score = 0.
if len(next_seq) > 0:
additional_score = aggregated_score_dict[tuple(next_seq + [ent_idx])]
if aggregated_score + additional_score > max_score:
max_score = aggregated_score + additional_score
max_tup = (feats, curr_seq, aggregated_score, max_score)
sorted_list.append(max_tup)
if gold_seq and tuple(max_tup[1]) == tuple(inner_gold_seq):
inner_gold_tuple = sorted_list[-1]
sorted_list = sorted(sorted_list, key=lambda p : p[3], reverse=True)
gold_in = False
final_beam = []
for tup in sorted_list[:self.beam_width]:
final_beam.append(tup)
if gold_seq and tuple(tup[1]) == tuple(inner_gold_seq):
gold_in = True
if gold_seq and not gold_in:
final_beam[-1] = inner_gold_tuple
inner_feat_seq_logprobs = []
prev_beam = []
for tup in final_beam:
prev_beam.append(tup[1])
inner_feat_seq_logprobs.append((tup[0], tup[1], tup[2]))
feat_seq_logprobs.append(inner_feat_seq_logprobs)
return feat_seq_logprobs
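    # beam_search returns one inner list per mention; each inner list holds up to `beam_width`
    # tuples of (feature_vector, partial_entity_sequence, aggregated_score), best-first, and the
    # aggregated score is treated as a log-probability by compute_func_grad.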
def compute_func_grad(self, feat_seq_logprobs, y):
""" Compute functional gradients and evaluation statistics """
new_X, func_grads = [], []
final_prob = 0.
for inner_feat_seq_logprobs in feat_seq_logprobs:
temp_X, temp_grads = [], []
z_score = 0.
gold_in = False
for _, _, logprob in inner_feat_seq_logprobs:
z_score += np.exp(logprob)
if z_score > 0:
final_prob = np.exp(inner_feat_seq_logprobs[0][-1]) / z_score
for feats, seq, logprob in inner_feat_seq_logprobs:
prob = 0.
if z_score > 0.: prob = np.exp(logprob) / z_score
label = np.prod(y[np.array(seq, dtype='int32')])
if label == 1: gold_in = True
temp_X.append(feats)
temp_grads.append(label - prob)
if not gold_in:
break
new_X += temp_X
func_grads += temp_grads
pred_seq = feat_seq_logprobs[-1][0][1]
return new_X, func_grads, pred_seq, final_prob
def get_score(self, local_feats, ent_idx, prev_seq, ent_ids):
ent_id = ent_ids[ent_idx]
# if len(prev_seq) == 0:
# global_feats = np.zeros(2 * self.num_ent_ent_feat, dtype='float32') # max pool + avg pool
# else:
# global_feat_list = []
# for prev_ent_idx in prev_seq:
# prev_ent_id = ent_ids[prev_ent_idx]
# feat_val = [0.] * self.num_ent_ent_feat
# if (prev_ent_id, ent_id) in self.ent_ent_feat_dict:
# feat_val = self.ent_ent_feat_dict[(prev_ent_id, ent_id)]
# elif (ent_id, prev_ent_id) in self.ent_ent_feat_dict:
# feat_val = self.ent_ent_feat_dict[(ent_id, prev_ent_id)]
# global_feat_list.append(feat_val)
# global_feat_mat = np.array(global_feat_list, dtype='float32')
# avg_pooled = np.mean(global_feat_mat, 0)
# max_pooled = np.max(global_feat_mat, 0)
# global_feats = np.concatenate([avg_pooled, max_pooled])
# feats = np.concatenate([local_feats, global_feats])
        # the entity-entity (global) features above are currently disabled, so only local features are used
        feats = np.concatenate([local_feats])
refeats = feats.reshape((1, len(feats)))
score = self.decision_function(refeats)
score = score[0, 0]
return score, feats
def decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = np.zeros((X.shape[0], 1), dtype=np.float64)
if self.n_estimated > 0:
predict_stages(self.estimators[:self.n_estimated], X, self.learning_rate, score)
return score
# MULTI-THREAD
def split(self, indices, part=8, permutate=False):
if permutate:
permuted_idx = np.random.permutation(len(indices))
indices = [indices[idx] for idx in permuted_idx]
result = []
batch_size = (len(indices) + part - 1) / part
batch_size = int(batch_size)
for j in range(part):
docs = indices[j*batch_size:(j+1)*batch_size]
result.append(docs)
return result
def merge(self, result_list):
X_aug, y_aug = [], []
sum_match_cnt, sum_gold_cnt = 0, 0
for result in result_list:
for p1, p2, match_cnt, gold_cnt in result:
X_aug += p1
y_aug += p2
sum_match_cnt += match_cnt
sum_gold_cnt += gold_cnt
return X_aug, y_aug, sum_match_cnt, sum_gold_cnt
def get_func_grad(self, batch_data, X, y, ent_ids, q, train_flag=True):
result = []
for doc, gold_ids in batch_data:
if train_flag:
gold_seq = []
for ent_list in doc:
gold_idx = -1
for ent_idx in ent_list:
if y[ent_idx] == 1:
gold_idx = ent_idx
break
gold_seq.append(gold_idx)
else:
gold_seq = None
feat_seq_logprobs = self.beam_search(doc, X, ent_ids, gold_seq=gold_seq)
new_X, func_grad, pred_seq, _ = self.compute_func_grad(feat_seq_logprobs, y)
# in KB acc
match_cnt = np.sum(y[np.array(pred_seq, dtype='int32')])
gold_cnt = len(gold_ids)
result.append((new_X, func_grad, match_cnt, gold_cnt))
q.put(result)
def predict(self, X, indices, ent_ids):
"""
Function to predict an output sequence for one document
Inputs
-------
X: a local feature matrix for one document
indices: a list of pair '(a list of mention indices, a list of gold entity ids)'
for one document (Note: the list of gold entity ids will all be NULL, but this is kept
for compatibility with the make_idx_data function output)
ent_ids: wikiID for entities
Outputs
-------
pred_seq_translated: a list of predicted entity ids corresponding to each mention
"""
if indices == []:
return [], []
doc, _ = indices[0]
feat_seq_logprobs = self.beam_search(doc, X, ent_ids, gold_seq=None)
pred_seq = feat_seq_logprobs[-1][0][1]
pred_seq_translated = [ent_ids[idx] for idx in pred_seq]
return pred_seq, pred_seq_translated
# EVALUATION
def get_acc(self, X, y, indices, ent_ids):
batches = self.split(indices, self.num_thread)
q = Queue()
procs = []
for batch in batches:
proc = Process(target=self.get_func_grad, args=(batch,
X, y, ent_ids, q, False))
procs.append(proc)
proc.start()
result_list = []
for _ in range(len(batches)):
result_list.append(q.get())
for proc in procs:
proc.join()
_, _, sum_match_cnt, sum_gold_cnt = self.merge(result_list)
acc = float(sum_match_cnt*100)/sum_gold_cnt
return acc
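# Minimal usage sketch; the keyword values below mirror the defaults wired up in
# structured_learner.py, and train_set / dev_set / the prediction inputs are assumed
# to come from structured_learner.make_idx_data (applied to a single document for predict):
#
#   clf = StructuredGradientBoosting(n_estimators=50, beam_width=4, learning_rate=1.0,
#                                    min_samples_split=2, min_samples_leaf=1, max_depth=3,
#                                    ent_ent_feat_dict={}, num_thread=8)
#   clf = clf.fit(train_set, dev_set)
#   pred_seq, pred_dataset_ids = clf.predict(X, indices, ent_ids)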
| coleridge-rich-context-ai2-master | project/structured_gradient_boosting.py |
"""This file can be run to convert all of the publications in train/dev/test to conll format,
both for NER and linking. The output will be in folder called ner-conll and linking-conll.
to_conll_test.py is used to produce conll formatted files for the test publications.
"""
import argparse
import json
import os
from s2base import scispacy_util
from spacy.matcher import Matcher
from sklearn.externals import joblib
from tqdm import tqdm
import re
DATASET_ENTITY_TAG = "DATA"
class ConllParser():
"""Class for parsing the rich context documents into conll 2003 format using scispacy
"""
def __init__(self, data_folder_path, scispacy_parser):
        # debugging output: show which data folder is being processed and its contents
        print(data_folder_path)
        os.system(f'ls {data_folder_path}/test')
# dictionary mapping publication id to a list of (dataset id, mention list) tuples
self.publication_to_datasets_and_mentions = None
# path to the train_test folder, formatted in the same way as the dev fold data folder
self.data_folder_path = data_folder_path
# path to the publications.json file containing the list of publications to process
self.pub_path = os.path.join(self.data_folder_path, "publications.json")
# path to the data_set_citations.json file containing the publication dataset pairs
self.citations_path = os.path.join(self.data_folder_path, "data_set_citations.json")
# path to the data_sets.json file containing the dataset kb
self.datasets_path = os.path.join(self.data_folder_path, os.pardir, "data_sets.json")
# path to the folder containing the publication text files
self.text_files_path = os.path.join(self.data_folder_path, "input", "files", "text")
# instance of the scispacy parser class
self.scispacy_parser = scispacy_parser
def build_publication_to_datasets_and_mentions(self):
"""Builds the dictionary mapping publication to datasets and mentions
"""
publication_to_datasets_and_mentions = {}
with open(self.citations_path) as json_citations_file:
citation_pairs = json.load(json_citations_file)
for pair in citation_pairs:
pub_id = pair["publication_id"]
if pub_id in publication_to_datasets_and_mentions:
publication_to_datasets_and_mentions[pub_id].append((pair["data_set_id"],
pair["mention_list"]))
else:
publication_to_datasets_and_mentions[pub_id] = [(pair["data_set_id"],
pair["mention_list"])]
self.publication_to_datasets_and_mentions = publication_to_datasets_and_mentions
def build_match_index_to_tag(self, doc, datasets_and_mentions, pub_id):
"""Builds the dictionary mapping the index of a match to the appropriate entity tag
@param doc: a spacy doc to process matches on
@param datasets_and_mentions: a list of (dataset id, mention list) tuples for this doc
"""
matcher = Matcher(self.scispacy_parser.nlp.vocab)
# add each pattern to match on to the matcher with id DATASET_<dataset_id>
for dataset_id, mention_list in datasets_and_mentions:
patterns = []
for mention in mention_list:
replaced_mention = mention
# some mentions had extra escaping
replaced_mention = replaced_mention.replace("\\", "")
# this was a special character that was not in the actual text
replaced_mention = replaced_mention.replace("\u2afd", "")
# -\n in the actual text ends up as -<space> in the mention text
replaced_mention = re.sub(r"(?<=\S)-\s", "", replaced_mention)
# this unicode hyphen ends up as \xad in the actual text
replaced_mention = replaced_mention.replace("\u2013", "\xad")
# this unicode character is actually fi in the text
replaced_mention = replaced_mention.replace("\ufb01", "fi")
# we try replacing different variants of \xad and a hyphen
xad_1 = replaced_mention.replace("\xad-", "\xad")
xad_2 = replaced_mention.replace("-\xad", "\xad")
# we try adding an s at the end of mentions
plural_1 = replaced_mention + "s"
plural_2 = xad_1 + "s"
plural_3 = xad_2 + "s"
mentions_to_try = [replaced_mention, xad_1, xad_2, plural_1, plural_2, plural_3]
for mention_to_try in mentions_to_try:
mention_doc = self.scispacy_parser.nlp(mention_to_try)
pattern = []
for t in mention_doc:
pattern.append({"ORTH": t.text})
# allow new lines to be between tokens
pattern.append({"ORTH": "\n", "OP": "*"})
patterns.append(pattern)
matcher.add("DATASET_" + str(dataset_id), None, *patterns)
matches = matcher(doc)
length_matches = []
for match in matches:
length_matches.append((match[0], match[1], match[2], match[2] - match[1]))
        # sort matches by length (ascending) so the longest matches are processed last and overwrite shorter ones
length_matches = sorted(length_matches, key=lambda tup: tup[3])
# loop over matches to create a dictionary mapping index in the document to the entity tag
match_index_to_tag = {}
indices_matched = set()
for match_id, start, end, _ in length_matches:
for i in range(start, end):
dataset_id = self.scispacy_parser.nlp.vocab.strings[match_id]
if (i == start) and (i-1) in indices_matched and (not i in indices_matched):
match_index_to_tag[i] = "B-" + DATASET_ENTITY_TAG + ":" + dataset_id
else:
match_index_to_tag[i] = "I-" + DATASET_ENTITY_TAG + ":" + dataset_id
indices_matched.add(i)
return match_index_to_tag
def create_conll_line(self, token, match_index_to_tag):
"""Create one line of the output conll file
@param token: the token for the line being created
@param match_index_to_tag: the dictionary mapping token index to entity tag
"""
word = token.text
pos = token.pos_
tag = "O"
linking_tag = "_"
if token.i in match_index_to_tag:
entity_tag = match_index_to_tag[token.i].split(":")[0]
linking_tag = match_index_to_tag[token.i].split(":")[1]
else:
entity_tag = "O"
output_line = word + " " + pos + " " + tag + " " + entity_tag
extraction_line = output_line
linking_line = output_line + " " + linking_tag
return extraction_line, linking_line
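    # Illustrative output for a token inside a mention of dataset 42 (the POS tag is an example):
    #   extraction line: "NLSY PROPN O I-DATA"
    #   linking line:    "NLSY PROPN O I-DATA DATASET_42"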
def create_conll_sentence(self, sentence, match_index_to_tag):
"""Creates one sentence of the output conll file
@param sentence: the spacy sentence for the sentence being created
@param match_index_to_tag: the dictionary mapping token index to entity tag
"""
extraction_sentence = ""
linking_sentence = ""
for token in sentence:
# spacy includes space tokens, which we can safely ignore
if token.pos_ == "SPACE":
continue
extraction_line, linking_line = self.create_conll_line(token, match_index_to_tag)
extraction_sentence += extraction_line + "\n"
linking_sentence += linking_line + "\n"
return extraction_sentence, linking_sentence
def create_conll_text(self, doc, match_index_to_tag):
"""Creates one document of conll output
@param doc: the spacy doc to process
@param match_index_to_tag: the dictionary mapping token index to entity tag
"""
extraction_text = ""
linking_text = ""
prev_sent = None
for sent in doc.sents:
extraction_sentence, linking_sentence = self.create_conll_sentence(sent, match_index_to_tag)
# conll format includes an extra new line between each sentence
# we will omit the line (merge sentences) if an entity spans sentences due to a spacy
# sentence splitting error
strip_new_line = False
if prev_sent and prev_sent.endswith("-DATA\n"):
# if previous sentence ends with -DATA, search for the end of the first token in
# the next sentence and see if it ends with -DATA
for i in range(len(extraction_sentence)):
if extraction_sentence[i] == "\n" and extraction_sentence[i-5:i] == "-DATA":
strip_new_line = True
break
if strip_new_line:
extraction_text = extraction_text[:-1]
extraction_text += extraction_sentence + "\n"
linking_text += linking_sentence + "\n"
prev_sent = extraction_sentence
return extraction_text, linking_text
def parse_publication(self, publication):
"""Parses one raw text file into conll format and writes to
        ../ner-conll/<publication_id>_extraction.conll and ../linking-conll/<publication_id>_linking.conll
@param publication: the json publication being processed
"""
try:
publication_id = publication["publication_id"]
datasets_and_mentions = []
if publication_id in self.publication_to_datasets_and_mentions:
datasets_and_mentions = self.publication_to_datasets_and_mentions[publication_id]
publication_text_path = os.path.join(self.text_files_path, str(publication_id) + ".txt")
with open(publication_text_path) as publication_text_file:
full_text = publication_text_file.read()
doc = self.scispacy_parser.scispacy_create_doc(full_text)
match_index_to_tag = self.build_match_index_to_tag(doc, datasets_and_mentions, publication_id)
extraction_file_path = os.path.join(self.text_files_path,
os.pardir,
os.pardir,
os.pardir,
"ner-conll",
str(publication_id) +
"_" +
"extraction" +
".conll")
linking_file_path = os.path.join(self.text_files_path,
os.pardir,
os.pardir,
os.pardir,
"linking-conll",
str(publication_id) +
"_" +
"linking" +
".conll")
extraction_text, linking_text = self.create_conll_text(doc, match_index_to_tag)
with open(extraction_file_path, "w") as publication_conll_file:
publication_conll_file.write(extraction_text)
with open(linking_file_path, "w") as publication_conll_file:
publication_conll_file.write(linking_text)
        except Exception:
print("-------------------Publication", publication["publication_id"], "failed-------------------")
def parse_text_files_to_conll_format(self):
"""Parses all the input text files into conll format and writes them to ../conll
"""
self.build_publication_to_datasets_and_mentions()
# parse each text file into a conll file
with open(self.pub_path) as json_publications_file:
publications = json.load(json_publications_file)
# publications = [publication for publication in publications if publication['publication_id'] == 3152]
with joblib.Parallel(n_jobs=os.cpu_count() - 1) as pool:
pool(joblib.delayed(self.parse_publication)(publications[i])
for i in tqdm(range(len(publications)), desc='convert text files to conll format in to_conll.py'))
def main(data_folder_path):
scispacy_parser = scispacy_util.SciSpaCyParser()
train_path = os.path.join(data_folder_path, "train")
dev_path = os.path.join(data_folder_path, "dev")
test_path = os.path.join(data_folder_path, "test")
# parse train set
conll_parser = ConllParser(train_path, scispacy_parser)
conll_parser.parse_text_files_to_conll_format()
# parse dev set
conll_parser = ConllParser(dev_path, scispacy_parser)
conll_parser.parse_text_files_to_conll_format()
# parse test set
conll_parser = ConllParser(test_path, scispacy_parser)
conll_parser.parse_text_files_to_conll_format()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_folder_path',
type=str,
help='The path to the data folder, which should contain train, dev, and test'
)
parser.set_defaults()
args = parser.parse_args()
main(args.data_folder_path)
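# Example invocation (illustrative; expects train/, dev/ and test/ subfolders and a data_sets.json alongside them):
#   python to_conll.py --data_folder_path project/dataset_split_data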
| coleridge-rich-context-ai2-master | project/to_conll.py |
# Adapted from https://github.com/bloomberg/sgtb
# Copyright 2018 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Named entity disambiguation by Structured Gradient Tree Boosting.
"""
# Author: Yi Yang
# Email: [email protected]
# [email protected]
import argparse, json, time, gzip
import numpy as np
import os
from sklearn.externals import joblib
from structured_gradient_boosting import StructuredGradientBoosting
def train_model(train_docs, dev_docs, test_docs, ent_ent_feat_dict, params):
print ("PARAM SETTINGS")
print (params)
train_set = make_idx_data(train_docs, params['num_candidate'], skip=True)
dev_set = make_idx_data(dev_docs, params['num_candidate'])
test_set = make_idx_data(test_docs, params['num_candidate'])
clf = StructuredGradientBoosting(max_depth=params['max_depth'],
learning_rate=params['learning_rate'],
n_estimators=params['n_estimators'],
min_samples_split=params['min_samples_split'],
min_samples_leaf=params['min_samples_leaf'],
ent_ent_feat_dict=ent_ent_feat_dict,
beam_width=params['beam_width'],
num_thread=params['num_thread'])
start_time = time.time()
print("Start", start_time)
clf = clf.fit(train_set, dev_set)
end_time = time.time()
print ("Training take %.2f secs" %(end_time - start_time))
output_path = os.path.abspath(os.path.join("project", "linking_models", "linking_model_v4.pkl"))
joblib.dump({'clf': clf}, output_path)
test_X, test_y, test_indices, test_ent_ids = test_set
test_acc = clf.get_acc(test_X, test_y, test_indices, test_ent_ids)
print ("Test acc %.2f" %(test_acc))
sum_match_count = 0
sum_gold_count = 0
for doc in test_docs:
idx_input = make_idx_data([doc])
y = idx_input[1]
indices = idx_input[2]
predictions, _ = clf.predict(idx_input[0], indices, idx_input[3])
match_cnt = np.sum(y[np.array(predictions, dtype='int32')])
if indices != []:
gold_cnt = len(indices[0][1])
else:
gold_cnt = 0
sum_match_count += match_cnt
sum_gold_count += gold_cnt
acc = float(sum_match_count*100)/sum_gold_count
print("Test acc with predict %.2f" %(acc))
def make_idx_data(docs, ncand=30, skip=False):
"""
Convert data to fit sklearn regression trees.
Inputs
-------
docs: a document list '[[[(mention_str, offset, wikiID), [(entity, label), [feature]]]]]'
ncand: number of entity candidates for a mention
skip: whether to skip mentions whose gold entities are not in candidates
(used for training data only)
Outputs
-------
    X: a local feature matrix (one row per entity candidate)
y: a label array
indices: a list of pair '(a list of mention indices, a list of gold entity ids)'
ent_ids: wikiID for entities (used for querying entity-entity features)
"""
X, y, indices, ent_ids = [], [], [], []
i = 0
for doc in docs:
doc_idx = []
gold_ids, skip_ids = [], []
for mentcand in doc:
ment_idx = []
flag = False
tX, ty, tids = [], [], []
#mentcand[1] is the entity candidates
for entcand in mentcand[1][:ncand]:
#entcand[1] is the features for this entity candidate
tX.append(entcand[1])
#entcand[0][1] is the label for this entity candidate
ty.append(entcand[0][1])
if ty[-1] == 1: flag = True
tids.append(entcand[0][0])
ment_idx.append(i)
i += 1
# flag is whether the gold entity is in the candidates
# reset the index if skipping mentions without a gold candidate
if skip and not flag:
i = len(y)
continue
else:
X += tX
y += ty
ent_ids += tids
if len(ment_idx) > 0:
doc_idx.append(ment_idx)
gold_ids.append(mentcand[0][-1])
else: # must be a false negative
skip_ids.append(mentcand[0][-1])
if len(doc_idx) > 0:
# append skip_ids after gold_ids, in order to properly evaluate
# note len(doc_idx) != len(gold_ids+skip_ids)
indices.append((doc_idx, gold_ids+skip_ids))
X = np.array(X, dtype='float32')
y = np.array(y, dtype='int')
return X, y, indices, ent_ids
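# Illustrative sketch of the nested document format expected above; the ids, offsets and
# feature values are made-up placeholders:
#
#   doc = [                                          # one document = list of mention candidates
#       [("NLSY79", 1204, "42"),                     # (mention_str, offset, gold entity id)
#        [[("42", 1), [0.8, 0.1, 0.3]],              # candidate: (entity id, label), feature vector
#         [("7", 0), [0.2, 0.0, 0.1]]]],
#   ]
#   X, y, indices, ent_ids = make_idx_data([doc], ncand=30, skip=True)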
def main():
parser = argparse.ArgumentParser(description="""Named entity disambiguation with
Structured Gradient Tree Boosting""")
parser.add_argument('--dataset', type=str, default="/project/data/sgtb.json",
help="""Processed dataset file in json format. A document is represented as
'[[[(mention, offset, goldEntity), [(entity,label), [feature]]]]]'""")
parser.add_argument('--train-dev-split-idx', type=int, default=2870,
help="Number of training instances.")
parser.add_argument('--dev-test-split-idx', type=int, default=3188,
help="Number of training and development instances.")
parser.add_argument('--num-candidate', type=int, default=30,
help="Number of entity candidates for each mention.")
parser.add_argument('--entity-features', type=str, default="data/ent_ent_feats.txt.gz",
help="""Pre-computed feature vectors for entity-entity pairs.
Format: 'ent1 ent2<TAB>feat1 feat2 feat3'""")
parser.add_argument('--num-epoch', type=int, default=50,
help="Number of iterations, aka, number of ensembled trees.")
parser.add_argument('--beam-width', type=int, default=4,
help="Beam width, used by beam search.")
parser.add_argument('--learning-rate', type=float, default=1.,
help="Learning rate. It is fixed to 1.")
parser.add_argument('--max-depth', type=int, default=3,
help="Maximum depth of a regression tree.")
parser.add_argument('--min-samples-split', type=int, default=2,
help="Minimum samples required for splitting a node.")
parser.add_argument('--min-samples-leaf', type=int, default=1,
help="Minimum instances required to be a leaf node.")
parser.add_argument('--num-thread', type=int, default=8,
help="SGTB can be easily parallelized. Number of threads.")
args = parser.parse_args()
params = {
'n_estimators' : args.num_epoch,
'beam_width' : args.beam_width,
'learning_rate' : args.learning_rate,
'max_depth' : args.max_depth,
'min_samples_split' : args.min_samples_split,
'min_samples_leaf' : args.min_samples_leaf,
'num_candidate' : args.num_candidate,
'num_thread' : args.num_thread
}
# data
processed_docs = []
with open(args.dataset, 'rb') as f:
for line in f:
processed_docs.append(json.loads(line.strip()))
train_docs, dev_docs, test_docs = processed_docs[ : args.train_dev_split_idx],\
processed_docs[args.train_dev_split_idx : args.dev_test_split_idx],\
processed_docs[args.dev_test_split_idx : ]
# entity-entity features
ent_ent_feat_dict = {}
# with gzip.open(args.entity_features, 'rb') as f:
# for line in f:
# line = line.decode('utf-8')
# ep, feat_str = line.split('\t')
# e1, e2 = ep.split()
# feats = list(map(float, feat_str.split()))
# ent_ent_feat_dict[(e1,e2)] = feats
# train and evaluate
train_model(train_docs, dev_docs, test_docs, ent_ent_feat_dict, params)
if __name__ == '__main__':
main()
| coleridge-rich-context-ai2-master | project/structured_learner.py |
import os
import json
import re
from s2base import scispacy_util
from tqdm import tqdm
import nltk
from nltk.corpus import stopwords
from collections import defaultdict
from create_sgtb_dataset import get_scispacy_doc
from math import log
class MethodExtractor():
def __init__(self, train_path, dev_path, sage_methods_path, leipzig_word_counts_path):
# path to the data folder for the train set
self.train_path = train_path
# path to the data folder for the dev set
self.dev_path = dev_path
# read the list of sage methods and prepare a regex to match them.
sage_method_entries = json.load(open(sage_methods_path, mode='rt'))["@graph"]
method_names = []
for entry in sage_method_entries:
if "skos:prefLabel" in entry:
method_names.append(entry["skos:prefLabel"]["@value"])
if "skos:altLabel" in entry:
if type(entry["skos:altLabel"]) == list:
for label in entry["skos:altLabel"]:
method_names.append(label["@value"])
else:
method_names.append(entry["skos:altLabel"]["@value"])
# lowercase and remove duplicates.
method_names = [name for name in set([name.lower() for name in method_names])]
# remove very short names.
method_regexes = [re.escape(method_name) for method_name in method_names]
methods_regex_string = r'\b(?P<method_name>' + '|'.join(method_regexes) + r')\b'
# to debug the regex: print(methods_regex_string)
self.sage_methods_regex = re.compile(methods_regex_string, re.IGNORECASE)
# set of english stopwords
self._stopwords = set(stopwords.words('english'))
# an instance of a scispacy parser
self._scispacy_parser = scispacy_util.SciSpaCyParser()
# read word counts in the Leipzig corpus.
self._read_leipzig_word_counts_file(leipzig_word_counts_path)
def _read_leipzig_word_counts_file(self, leipzig_word_counts_path):
"""Read word counts in a background corpus. This can be useful for estimating
the confidence of extracted terms. Leipzig corpora are available for download
at http://wortschatz.uni-leipzig.de/en/download/
"""
self._background_word_counts = defaultdict(int)
self._background_lowercased_word_counts = defaultdict(int)
with open(leipzig_word_counts_path, mode='rt') as word_count_file:
for line in word_count_file.readlines():
splits = line.strip().split('\t')
assert len(splits) == 4
                WORD_INDEX = 1
                FREQUENCY_INDEX = 3
                frequency = int(splits[FREQUENCY_INDEX])
                self._background_word_counts[splits[WORD_INDEX]] = frequency
                self._background_lowercased_word_counts[splits[WORD_INDEX].lower()] += frequency
def predict_from_publications_list(self, publication_list, predict_path):
"""Predict datasets for a list of publications, with each publication
in the format provided in publications.json file.
@param publication_list: the result of json.load('publications.json')
"""
citation_list = []
for publication in tqdm(publication_list, desc='predict methods'):
spacy_doc = get_scispacy_doc(predict_path, str(publication["publication_id"]), self._scispacy_parser)
predictions = self.predict(publication["publication_id"], spacy_doc)
citation_list += predictions
return citation_list
def is_sentence(self, spacy_sentence):
"""
        Checks whether a spaCy sentence span looks like a well-formed English sentence.
        :param spacy_sentence: a spaCy Span for one sentence
:return: True / False
"""
tokens = [t for t in spacy_sentence]
# Minimum number of words per sentence
MIN_TOKEN_COUNT = 6
if len(tokens) < MIN_TOKEN_COUNT:
return False
# Most tokens should be words
MIN_WORD_TOKENS_RATIO = 0.5
if sum([t.is_alpha for t in tokens]) / len(tokens) < MIN_WORD_TOKENS_RATIO:
return False
text = spacy_sentence.text
# A sentence has to end with a period
if not text.strip().endswith('.'):
return False
# Most characters should be letters, not numbers and not special characters
MIN_LETTER_CHAR_RATIO = 0.5
if sum([c.isalpha() for c in text]) / len(text) < MIN_LETTER_CHAR_RATIO:
return False
return True
def predict(self, pub_id, doc, debug=False):
"""Reads the text file of a publication, extracts methods, and returns
a list of dict objects such as:
{ "publication_id": 876, "method": "opinion poll", "score": 0.680 }
@param txt_file_path: path to the publication text file.
"""
suffix_pattern = re.compile(r'\b(?P<method_name>([A-Z][-\w]+ )+([Aa]nalysis|[Mm]odel|[Tt]heory))\b')
regex_list = [ self.sage_methods_regex, suffix_pattern ]
# This dictionary maps the lowercased version of an extracted method to its original case,
# which simultaneously removes embarrassing duplicates and report an easier to read casing
# for human evaluation.
methods_dict = {}
methods_to_contexts = defaultdict(list)
all_text = ' '.join([t.text for t in doc])
all_text_lowercased = all_text.lower()
for sent in doc.sents:
if not self.is_sentence(sent): continue
tokens = [t.text for t in sent]
sent_str = ' '.join(tokens)
for regex in regex_list:
for match in re.finditer(regex, sent_str):
match_str = match.group('method_name')
if match_str.lower() not in methods_dict:
# skip matches which include stopwords.
stopwords_count = len([token for token in match_str.lower().split() if token in self._stopwords])
if stopwords_count > 0: continue
# skip matches which appear in sentences with title casing.
all_non_stopwords_start_with_capital_letters = True
for token in sent_str.split():
if token in self._stopwords: continue
if len(token) > 0 and token[0].islower():
all_non_stopwords_start_with_capital_letters = False
break
if all_non_stopwords_start_with_capital_letters: continue
methods_dict[match_str.lower()] = match_str
# record the context in which this match was found for debugging purposes.
methods_to_contexts[match_str.lower()].append(sent_str)
# filter out short method names, all lower case, single tokens.
MIN_CHAR_COUNT = 3
MIN_TOKEN_COUNT = 2
methods_dict = { method_lower : method_name for method_lower, method_name in methods_dict.items() \
if len(method_name) >= MIN_CHAR_COUNT \
and len(method_name.split()) >= MIN_TOKEN_COUNT \
and method_name != method_lower }
# score and prepare output.
output = []
for method_lower, method_name in methods_dict.items():
# compute confidence score based on background frequencies, capitalization, length, term frequency.
term_frequency = all_text_lowercased.count(method_lower) + 1.
assert term_frequency > 0
method_lowercased_tokens = [token.lower() for token in method_name.split()]
background_frequencies = [self._background_lowercased_word_counts[token] for token in method_lowercased_tokens]
min_background_frequency = min(background_frequencies) + 1
capitalization_multiplier = 2. if method_name[0].isupper() else 1.
length_multiplier = 0.5 if len(method_lowercased_tokens) < 2 else 1.
score = length_multiplier * capitalization_multiplier * log(term_frequency) / (1. + log(min_background_frequency))
MIN_THRESHOLD_FOR_METHODS = 0.2
if score < MIN_THRESHOLD_FOR_METHODS:
continue
# normalize score
score = log(1+score)
if score > 1.0: score = 1.0
record = { 'publication_id': int(pub_id), 'method': method_name, 'score': round(score, 2) }
if debug:
assert len(methods_to_contexts[method_lower]) > 0
record['contexts'] = methods_to_contexts[method_lower]
output.append(record)
return output
def _filter_references_section(self, text):
"""Helper function to return the text with the references section stripped out.
It is probably not perfect, but it looks for the last instance of 'reference'
and removes all text after it
@param text: the text to filter the references section from
"""
references_pattern = r"(REFERENCE)|(reference)|(Reference)"
references_found = [i.start() for i in re.finditer(references_pattern, text)]
if references_found != []:
last_reference_index = references_found[-1]
return text[:last_reference_index]
else:
return text
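# Minimal usage sketch (illustrative; the paths mirror the ones wired up in project.py and
# are assumptions about the local checkout, not guarantees):
#
#   extractor = MethodExtractor("project/dataset_split_data/train",
#                               "project/dataset_split_data/dev",
#                               "project/data/sage_research_methods.json",
#                               "project/data/eng_wikipedia_2016_1M-words.txt")
#   with open("data/input/publications.json") as f:
#       methods = extractor.predict_from_publications_list(json.load(f), "data")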
| coleridge-rich-context-ai2-master | project/method_extractor.py |
"""This is the main script for outputting predictions for the competition
"""
from rule_based_model import RuleBasedModel
from xgboost import XGBClassifier
from typing import Dict, Union, List
import json
import os
import create_linking_dataset
import create_sgtb_dataset
import structured_gradient_boosting
import structured_learner
from method_extractor import MethodExtractor
from sklearn.externals import joblib
import argparse
import xgboost_linking
from ner_model import NerModel
import logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger('allennlp.common.params').disabled = True
logging.getLogger('allennlp.nn.initializers').disabled = True
logging.getLogger('allennlp.common.from_params').disabled = True
logging.getLogger('ner_rcc.rcc_ner').disabled = True
logging.getLogger('matplotlib.font_manager').disabled = True
logging.getLogger('allennlp.models.archival').disabled = True
logging.getLogger('allennlp.data.vocabulary').disabled = True
logging.getLogger('allennlp.models.model').disabled = True
from allennlp.models.archival import load_archive
from allennlp.service.predictors import Predictor
from field_classifier.classifier import Classifier
from field_classifier.predictor import ClassifierPredictor
from field_classifier.textcat import TextCatReader
import os
import json
import numpy as np
import random
from collections import defaultdict
from tqdm import tqdm
import text_utils
from nltk.corpus import stopwords
from s2base import scispacy_util
def perform_evaluation(rule_based_model: RuleBasedModel,
linking_model: XGBClassifier,
publications_path: str,
labels_path: str,
data_folder_path: str,
ner_predicted_citations: List[Dict[str, Union[str, float, int]]],
rule_based_output_path: str = None,
rule_based_input_path: str = None,
predict_input_path: str = None,
verbose: bool = True):
"""Performs end task evaluation for the competition
@param rule_based_model: the rule based model object to use
@param linking_model: the linking model object to use (SGTB)
@param publications_path: path to the publications.json to evaluate predictions on
@param labels_path: path to the labels for the input publications
@param data_folder_path: path to the data folder
@param ner_predicted_citations: predicted citations based on predicted mentions from the NER model
@param rule_based_output_path: (optional) path to save the rule based model output
@param rule_based_input_path: (optional) path to the rule based model output if saved
@param predict_input_path: (optional) path to the text input files if the rule based output is not saved
"""
citation_list = []
if rule_based_input_path:
citation_list = joblib.load(rule_based_input_path)["citation_list"]
else:
print("Making rule based predictions...")
with open(publications_path) as publications_file:
json_publications = json.load(publications_file)
citation_list = rule_based_model.predict_from_publications_list(json_publications, predict_input_path)
joblib.dump({"citation_list": citation_list, "rule_based_version": "10"}, rule_based_output_path)
citation_list += ner_predicted_citations
print()
if verbose:
print("PRE LINKING EVALUATION")
print()
rule_based_model.evaluate(citation_list, labels_path)
# convert the rule based candidates into the expected format for the linking model
print("Preparing rule based input...")
print(len(citation_list))
rule_based_input = create_linking_dataset.create_rule_based_input(citation_list)
print("Preparing sgtb input...")
sgtb_input, pub_ids = create_sgtb_dataset.create_dataset_input(rule_based_input, os.path.join(data_folder_path, "mention_context_cache.pkl"), data_folder_path, is_test=True)
print("Preparing xgboost input...")
xgb_input_X, xgb_input_y, xgb_pub_ids, xgb_dataset_ids = xgboost_linking.processed_docs_to_xgboost_dataset(sgtb_input, pub_ids)
# create a mapping of publication id to dataset ids predicted by the linking model
print("Making linking predictions...")
all_y_probs = linking_model.predict_proba(xgb_input_X)
linking_predictions = {}
for y_probs, pub_id, dataset_id in zip(all_y_probs, xgb_pub_ids, xgb_dataset_ids):
# 0 is the "no link" label.
if y_probs[0] > 0.75:
continue
if pub_id not in linking_predictions:
linking_predictions[pub_id] = defaultdict(float)
# add up linking probabilities when the same dataset id is linked with multiple
# mentions in the same paper.
assert y_probs[1] > -0.001 and y_probs[1] < 1.001
linking_predictions[pub_id][dataset_id] += y_probs[1]
# commented out code for working with the SGTB model as we are using XGBoost
# linking_predictions = {}
# for doc, pub_id in zip(sgtb_input, pub_ids):
# sgtb_idx_input = structured_learner.make_idx_data([doc])
# _, predictions = linking_model.predict(sgtb_idx_input[0], sgtb_idx_input[2], sgtb_idx_input[3])
# linking_predictions[pub_id] = set(predictions)
# filter the rule based candidates based on the results of the linking model
linking_filtered_citation_list = []
for citation in citation_list:
citation_dataset_id = str(citation["data_set_id"])
if citation["publication_id"] in linking_predictions and \
citation_dataset_id in linking_predictions[citation["publication_id"]]:
# update score.
citation['score'] = min(1., linking_predictions[citation["publication_id"]][citation_dataset_id])
linking_filtered_citation_list.append(citation)
print()
print("POST LINKING EVALUATION")
rule_based_model.evaluate(linking_filtered_citation_list, labels_path)
return linking_filtered_citation_list
def generate_citations_from_ner_mentions(ner_mentions: List[Dict[str, Union[int, str, float]]],
kb_path: str):
"""Generate candidate citations for the mentions produced by the ner model by using TFIDF
weighted overlap with dataset titles
@param ner_mentions: list of the ner_mentions
@param kb_path: path to the knowledge base of datasets
"""
nltk_stopwords = set(stopwords.words('english'))
scispacy_parser = scispacy_util.SciSpaCyParser()
substring_matches = set()
tfidf_vectorizer = text_utils.get_tfidf_vectorizer()
with open(kb_path) as kb_file_:
kb = json.load(kb_file_)
dataset_titles = []
tokenized_dataset_titles = []
dataset_ids = []
dataset_id_to_title = {}
for dataset in tqdm(kb, desc="processing kb"):
dataset_title = text_utils.text_preprocess(dataset["title"])
dataset_id = dataset["data_set_id"]
dataset_titles.append(dataset_title)
tokenized_dataset_titles.append(dataset_title.split(" "))
dataset_ids.append(dataset_id)
dataset_id_to_title[dataset_id] = dataset_title.split(" ")
output_citations = []
num_candidates = []
i = 0
mention_citations = []
for mention in tqdm(ner_mentions, desc="Generating candidates from ner mentions"):
publication_id = mention["publication_id"]
mention_text = mention["mention"]
instance = mention["instance"]
if len(instance) - len(mention_text.split()) < 5:
continue
if len(mention_text.split()) == 1 and not mention_text.isupper():
continue
parsed_sentence = scispacy_parser.scispacy_create_doc(' '.join(instance))
pos_counts = defaultdict(int)
for t in parsed_sentence:
pos_counts[t.pos_] += 1
if pos_counts["NOUN"] + pos_counts["VERB"] == 0:
continue
if (pos_counts["NUM"] + pos_counts["SYM"] + pos_counts["PUNCT"]) > 0.4*len(parsed_sentence) and pos_counts["VERB"] == 0:
continue
mention_citations.append({"publication_id": publication_id, "mention": mention_text, "score": mention["score"]})
mention_text = text_utils.text_preprocess(mention_text)
dataset_candidates = text_utils.get_substring_candidates(dataset_ids,
dataset_titles,
tokenized_dataset_titles,
mention_text,
instance,
nltk_stopwords,
scispacy_parser,
tfidf_vectorizer)
num_candidates.append(0)
sorted_candidates = []
for dataset_id, match_count in zip(dataset_candidates[0], dataset_candidates[1]):
sorted_candidates.append((dataset_id, match_count))
sorted_candidates = sorted(sorted_candidates, key = lambda x: x[1], reverse=True)
filtered_candidates = []
for candidate in sorted_candidates:
score = candidate[1]
if score > 0.0:
filtered_candidates.append((candidate[0], score))
for top_candidate in range(0, min(30, len(filtered_candidates))):
if sorted_candidates != []:
num_candidates[i] += 1
output_dict = {}
output_dict["publication_id"] = publication_id
output_dict["data_set_id"] = sorted_candidates[top_candidate][0]
output_dict["score"] = sorted_candidates[top_candidate][1]
output_dict["mention_list"] = [mention["mention"]]
output_citations.append(output_dict)
i += 1
print("Num mentions:", len(num_candidates))
print("Average candidates per mention:", np.mean(num_candidates))
print("Min, median, max candidates per mention:", np.min(num_candidates), np.median(num_candidates), np.max(num_candidates))
print("unique:", sum(np.unique(num_candidates, return_counts=True)[1]))
return output_citations, mention_citations
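# Shape of the two return values (values illustrative):
#   output_citations:  [{"publication_id": 876, "data_set_id": 42, "score": 0.63, "mention_list": ["NLSY79"]}, ...]
#   mention_citations: [{"publication_id": 876, "mention": "NLSY79", "score": 0.91}, ...]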
def main(dev_evaluation, error_analysis, methods_only, holdout_eval, ner_only=False):
random.seed(2018)
train_path = os.path.abspath(os.path.join("project", "dataset_split_data", "train"))
dev_path = os.path.abspath(os.path.join("project", "dataset_split_data", "dev"))
kb_path = os.path.abspath(os.path.join("project", "data", "data_sets.json"))
sage_methods_path = os.path.abspath(os.path.join("project", "data", "sage_research_methods.json"))
leipzig_word_counts_path = os.path.abspath(os.path.join("project", "data", "eng_wikipedia_2016_1M-words.txt"))
test_path = os.path.abspath(os.path.join("data"))
sgtb_model = joblib.load(os.path.abspath(os.path.join("project", "linking_models", "linking_model_v4.pkl")))["clf"]
xgboost_model = joblib.load(os.path.abspath(os.path.join("project", "linking_models", "xgboost_linking_model_v24_full.pkl")))["clf"]
ner_model_path = os.path.abspath(os.path.join("project", "ner_model", "tweaked_model.tar.gz"))
if holdout_eval:
print("HOLDOUT SET EVALUATION - citations")
holdout_ner_model = NerModel(os.path.abspath(os.path.join("project", "holdout_sampled", "ner-conll")), ner_model_path)
        # the next few lines recreate the ner output for the sampled holdout set; this is only needed if the ner model
        # or the holdout sample changes, and the variable is then overwritten below with the cached output that includes instance context
holdout_ner_predictions = holdout_ner_model.predict_from_publication_list()
with open(os.path.abspath(os.path.join("project", "holdout_sampled", "ner_output.json")), "w") as ner_output_file_:
json.dump(holdout_ner_predictions, ner_output_file_)
with open(os.path.abspath(os.path.join("project", "holdout_sampled", "ner_output_with_instances.json"))) as ner_output_file_:
holdout_ner_predictions = json.load(ner_output_file_)
ner_predicted_citations, ner_predicted_mentions = generate_citations_from_ner_mentions(holdout_ner_predictions, kb_path)
with open(os.path.abspath(os.path.join("project", "holdout_sampled", "ner_candidates.json")), "w") as fp:
json.dump(ner_predicted_citations, fp)
holdout_rule_based_model = RuleBasedModel(train_path, dev_path, kb_path, test_path)
holdout_publications_path = os.path.abspath(os.path.join("project", "holdout_sampled", "publications.json"))
holdout_labels_path = os.path.abspath(os.path.join("project", "holdout_sampled", "data_set_citations.json"))
holdout_rule_based_output_path = os.path.abspath(os.path.join("project", "holdout_sampled", "rule_based_output_v10.pkl"))
holdout_predict_input_path = os.path.abspath(os.path.join("project", "holdout_sampled"))
holdout_predictions = perform_evaluation(holdout_rule_based_model,
xgboost_model,
holdout_publications_path,
holdout_labels_path,
holdout_predict_input_path,
ner_predicted_citations,
rule_based_input_path=holdout_rule_based_output_path,
rule_based_output_path=holdout_rule_based_output_path,
predict_input_path=holdout_predict_input_path)
return
if not ner_only:
# Predict methods.
print("Predicting methods...")
method_extractor = MethodExtractor(train_path, dev_path, sage_methods_path, leipzig_word_counts_path)
output_file_path = os.path.abspath(os.path.join("data", "output", "methods.json"))
test_publications_path = os.path.abspath(os.path.join("data", "input", "publications.json"))
test_predict_input_path = os.path.abspath(os.path.join("data"))
with open(test_publications_path) as publications_file, open(output_file_path, 'w') as output_file:
json_publications = json.load(publications_file)
method_predictions = method_extractor.predict_from_publications_list(json_publications, test_predict_input_path)
json.dump(method_predictions, output_file)
if methods_only: return
if dev_evaluation and not ner_only:
print("DEV SET EVALUATION - citations")
dev_rule_based_model = RuleBasedModel(train_path, dev_path, kb_path)
dev_publications_path = os.path.abspath(os.path.join("project", "data", "dev", "publications.json"))
dev_labels_path = os.path.abspath(os.path.join("project", "data", "dev", "data_set_citations.json"))
dev_rule_based_output_path = os.path.abspath(os.path.join("project", "data", "dev", "rule_based_output_v10.pkl"))
dev_predict_input_path = os.path.abspath(os.path.join("project", "data", "dev"))
        dev_predictions = perform_evaluation(dev_rule_based_model,
                                             xgboost_model,
                                             dev_publications_path,
                                             dev_labels_path,
                                             dev_predict_input_path,
                                             [],  # no NER-predicted citations are generated on the dev path
                                             rule_based_input_path=None,
                                             rule_based_output_path=dev_rule_based_output_path,
                                             predict_input_path=dev_predict_input_path)
print("TEST SET EVALUATION - citations")
test_predictions_mentions = []
test_publications_path = os.path.abspath(os.path.join("data", "input", "publications.json"))
test_labels_path = os.path.abspath(os.path.join("rich-context-competition", "evaluate", "data_set_citations.json"))
test_rule_based_output_path = os.path.abspath(os.path.join("project", "data", "test", "rule_based_output_v10.pkl"))
test_predict_input_path = os.path.abspath(os.path.join("data"))
# make additional dataset mention predictions using the trained NER model
model = NerModel("/data/ner-conll", ner_model_path)
ner_predictions_list = model.predict_from_publication_list()
ner_predicted_citations, ner_predicted_mentions = generate_citations_from_ner_mentions(ner_predictions_list, kb_path)
test_rule_based_model = RuleBasedModel(train_path, dev_path, kb_path, test_path)
test_predictions_original = perform_evaluation(test_rule_based_model,
xgboost_model,
test_publications_path,
test_labels_path,
test_predict_input_path,
ner_predicted_citations,
rule_based_input_path=None,
rule_based_output_path=test_rule_based_output_path,
predict_input_path=test_predict_input_path)
test_predictions_dict = {}
pub_dataset_to_longest_mention_list = {}
for prediction in test_predictions_original:
dataset_id = prediction["data_set_id"]
publication_id = prediction["publication_id"]
mention_list = prediction["mention_list"]
mention_list_length = len(mention_list)
key = str(dataset_id) + "_" + str(publication_id)
if key in test_predictions_dict:
if mention_list_length > pub_dataset_to_longest_mention_list[key]:
pub_dataset_to_longest_mention_list[key] = mention_list_length
test_predictions_dict[key] = prediction.copy()
else:
pub_dataset_to_longest_mention_list[key] = mention_list_length
test_predictions_dict[key] = prediction.copy()
test_predictions = []
for prediction in test_predictions_dict:
test_predictions.append(test_predictions_dict[prediction])
# write dataset citation predictions to file
output_file_path = os.path.abspath(os.path.join("data", "output", "data_set_citations.json"))
with open(output_file_path, "w") as output_file:
json.dump(test_predictions, output_file)
print("Predicting dataset mentions...")
# build up list of predicted mentions from predicted citations
pub_id_to_mention = {}
for test_prediction in test_predictions:
output_mention_base = test_prediction.copy()
pub_id = output_mention_base["publication_id"]
output_mention_base.pop("mention_list")
output_mention_base.pop("data_set_id")
for mention in test_prediction["mention_list"]:
output_mention = output_mention_base.copy()
output_mention["mention"] = mention
if pub_id in pub_id_to_mention:
if mention in pub_id_to_mention[pub_id]:
continue
else:
pub_id_to_mention[pub_id].add(mention)
else:
pub_id_to_mention[pub_id] = set([mention])
test_predictions_mentions.append(output_mention)
# write dataset mention predictions to file
test_predictions_mentions += ner_predicted_mentions
output_file_path_mentions = os.path.abspath(os.path.join("data", "output", "data_set_mentions.json"))
with open(output_file_path_mentions, "w") as output_file:
json.dump(test_predictions_mentions, output_file)
if ner_only: return
print("Predicting research fields...")
# predict field of study of publications using a trained AllenNLP model
l0_archive = load_archive(
os.path.abspath(os.path.join("project", "data", "model_logs", "l0_model.tar.gz"))
)
l0_predictor = Predictor.from_archive(l0_archive, 'classifier')
l1_archive = load_archive(
os.path.abspath(os.path.join("project", "data", "model_logs", "l1_model.tar.gz"))
)
l1_predictor = Predictor.from_archive(l1_archive, 'classifier')
with open(test_publications_path, 'r') as file_:
test_pubs = json.load(file_)
clf_output = []
l0_label_map = l0_archive.model.vocab.get_index_to_token_vocabulary("labels")
l1_label_map = l1_archive.model.vocab.get_index_to_token_vocabulary("labels")
for test_pub in tqdm(test_pubs, desc="Predicting research fields"):
if test_pub['title'] == '':
continue
l0_prediction = l0_predictor.predict_json({"title": test_pub['title']})
l1_prediction = l1_predictor.predict_json({"title": test_pub['title']})
pred = {}
pred['publication_id'] = test_pub['publication_id']
l0_score = np.max(l0_prediction['label_probs'])
l1_score = np.max(l1_prediction['label_probs'])
l0_field = l0_label_map[np.argmax(l0_prediction['label_probs'])]
l1_field = l1_label_map[np.argmax(l1_prediction['label_probs'])]
if l1_score > 0.4 and l0_field != l1_field:
output_score = round((float(l0_score) + float(l1_score))/2., 2)
output_field = "{}:{}".format(l0_field, l1_field)
else:
output_score = round(float(l0_score), 2)
output_field = "{}".format(l0_field)
pred['score'] = output_score
pred['research_field'] = output_field
clf_output.append(pred)
# write predictions for research fields to file
output_path = os.path.abspath(os.path.join("data", "output", "research_fields.json"))
with open(output_path, "w") as output_file:
json.dump(clf_output, output_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--dev_evaluation',
action='store_true',
help="Whether to perform evaluation on the dev set in addition to the test set"
)
parser.add_argument(
'--error_analysis',
action='store_true'
)
parser.add_argument(
'--methods-only',
action='store_true',
help="Only predict methods then halt."
)
parser.add_argument(
'--holdout_eval',
action='store_true',
help="Evaluate on the phase 1 holdout set"
)
parser.add_argument(
'--ner_only',
action='store_true',
help="Only output the mentions file then halt"
)
parser.set_defaults(dev_evaluation=False,
error_analysis=False,
methods_only=False,
holdout_eval=False,
ner_only=False)
args = parser.parse_args()
main(args.dev_evaluation,
args.error_analysis,
args.methods_only,
args.holdout_eval,
args.ner_only)
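# Example invocations (illustrative):
#   python project.py                    # full pipeline: citations, mentions, methods, research fields
#   python project.py --methods-only     # only write data/output/methods.json
#   python project.py --dev_evaluation   # additionally report dev-set citation metrics
#   python project.py --holdout_eval     # evaluate on the phase 1 holdout sample and exit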
| coleridge-rich-context-ai2-master | project/project.py |
"""This file can be run to convert the test files to conll format, saved in the ner-conll folder.
This is mostly copy pasted from to_conll.py as a quick workaround to run the conll parsing code
at test time. A cleaner implementation would not need this file, and would just make use of
to_conll.py
"""
import os
import json
from sklearn.externals import joblib
from s2base import scispacy_util
from tqdm import tqdm
import re
from create_sgtb_dataset import get_scispacy_doc
import logging
logging.basicConfig(level=logging.ERROR)
# the path to the test publications.json
PUB_PATH = os.path.abspath(os.path.join("data", "input", "publications.json"))
# the path to the test text files
TEXT_FILES_PATH = os.path.abspath(os.path.join("data", "input", "files", "text"))
# an instance of SciSpaCyParser
SCISPACY_PARSER = scispacy_util.SciSpaCyParser()
def create_conll_line(token):
"""Create one line of the output conll file
@param token: the token for the line being created
    (at test time there are no gold mentions, so every token is tagged "O")
"""
word = token.text
pos = token.pos_
tag = "O"
linking_tag = "_"
entity_tag = "O"
output_line = word + " " + pos + " " + tag + " " + entity_tag
extraction_line = output_line
linking_line = output_line + " " + linking_tag
return extraction_line, linking_line
def create_conll_sentence(sentence):
"""Creates one sentence of the output conll file
@param sentence: the spacy sentence for the sentence being created
"""
extraction_sentence = ""
linking_sentence = ""
for token in sentence:
# spacy includes space tokens, which we can safely ignore
if token.pos_ == "SPACE" or token.text == "\n" or token.text == " ":
continue
extraction_line, linking_line = create_conll_line(token)
extraction_sentence += extraction_line + "\n"
linking_sentence += linking_line + "\n"
return extraction_sentence, linking_sentence
def create_conll_text(doc):
"""Creates one document of conll output
@param doc: the spacy doc to process
"""
extraction_text = ""
linking_text = ""
prev_sent = None
for sent in doc.sents:
extraction_sentence, linking_sentence = create_conll_sentence(sent)
# conll format includes an extra new line between each sentence
# we will omit the line (merge sentences) if an entity spans sentences due to a spacy
# sentence splitting error
strip_new_line = False
if prev_sent and prev_sent.endswith("-DATA\n"):
# if previous sentence ends with -DATA, search for the end of the first token in
# the next sentence and see if it ends with -DATA
for i in range(len(extraction_sentence)):
if extraction_sentence[i] == "\n" and extraction_sentence[i-5:i] == "-DATA":
strip_new_line = True
break
if strip_new_line:
extraction_text = extraction_text[:-1]
extraction_text += extraction_sentence + "\n"
linking_text += linking_sentence + "\n"
prev_sent = extraction_sentence
return extraction_text, linking_text
def parse_publication(publication):
"""Parses one raw text file into conll format and writes to
    the ner-conll folder as <publication_id>_extraction.conll
@param publication: the json publication being processed
"""
publication_id = publication["publication_id"]
datasets_and_mentions = []
publication_text_path = os.path.join(TEXT_FILES_PATH, str(publication_id) + ".txt")
with open(publication_text_path) as publication_text_file:
full_text = publication_text_file.read()
doc = get_scispacy_doc(os.path.join(TEXT_FILES_PATH, os.pardir, os.pardir, os.pardir), publication_id, SCISPACY_PARSER)
if not os.path.isdir(os.path.join(TEXT_FILES_PATH, os.pardir, os.pardir, os.pardir, "ner-conll")):
os.makedirs(os.path.join(TEXT_FILES_PATH, os.pardir, os.pardir, os.pardir, "ner-conll"))
extraction_file_path = os.path.join(TEXT_FILES_PATH,
os.pardir,
os.pardir,
os.pardir,
"ner-conll",
str(publication_id) +
"_" +
"extraction" +
".conll")
extraction_text, _ = create_conll_text(doc)
with open(extraction_file_path, "w") as publication_conll_file:
publication_conll_file.write(extraction_text)
def parse_text_files_to_conll_format():
"""Parses all the input text files into conll format and writes them to ../conll
"""
# parse each text file into a conll file
with open(PUB_PATH) as json_publications_file:
publications = json.load(json_publications_file)
with joblib.Parallel(n_jobs=os.cpu_count() - 1) as pool:
pool(joblib.delayed(parse_publication)(publications[i])
for i in tqdm(range(len(publications)), desc='convert text files to conll format in to_conll_test.py'))
#for i in tqdm(range(len(publications)), desc='convert text files to conll format in to_conll_test.py'):
# parse_publication(publications[i])
if __name__ == "__main__":
parse_text_files_to_conll_format()
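# Expected output (one file per publication), e.g.:
#   data/ner-conll/<publication_id>_extraction.conll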
| coleridge-rich-context-ai2-master | project/to_conll_test.py |
import xgboost as xgb
import os
import json
from sklearn.metrics import accuracy_score, roc_auc_score, precision_score, recall_score, f1_score, precision_recall_curve
import numpy as np
from sklearn.externals import joblib
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import PredefinedSplit, RandomizedSearchCV
import random
def processed_docs_to_xgboost_dataset(processed_docs, input_pub_ids=None):
"""Turns the list of processed docs that the sgtb model expects into
binary classification input for sklearn
@param processed_docs: list of processed docs in the format that the sgtb model expects
@param input_pub_ids: (optional) a list of publication ids for each doc, only needed at test time
"""
feature_idx_to_name = {0: "p(e)",
1: "p(e|m)",
2: "p(m|e)",
3: "year_match",
4: "m_length_chars",
5: "m_length_tokens",
6: "max_length_sentence",
7: "min_length_sentence",
8: "is_acronym",
9: "background",
10: "methods",
11: "results",
12: "abstract",
13: "intro",
14: "introduction",
15: "keywords",
16: "objectives",
17: "conclusion",
18: "measures",
19: "discussion",
20: "method",
21: "references",
22: "contribution",
23: "data",
24: "no_section_found",
25: "context_word_overlap",
26: "score",
27: "is_null_candidate"}
feature_idx_to_aggregation_method = {0: np.max,
1: np.max,
2: np.max,
3: np.max,
4: np.max,
5: np.max,
6: np.max,
7: np.min,
8: np.mean,
25: np.max,
26: np.max,
27: np.max,}
for i in range(9, 25):
feature_idx_to_aggregation_method[i] = np.max
idx_dataset_to_X = {}
idx_dataset_to_y = {}
dataset_ids = []
output_pub_ids = []
idx_dataset_to_predicted_dataset = {}
for i, doc in enumerate(processed_docs):
for mention_candidate in doc:
mention = mention_candidate[0]
entity_candidates = mention_candidate[1]
for entity_candidate in entity_candidates:
entity_id, label = entity_candidate[0]
features = entity_candidate[1]
if entity_id == "NULL":
continue
key = str(i) + "_" + str(entity_id)
idx_dataset_to_predicted_dataset[key] = entity_id
if key in idx_dataset_to_X:
idx_dataset_to_X[key].append(features)
else:
idx_dataset_to_X[key] = [features]
idx_dataset_to_y[key] = label
X = []
y = []
for idx_dataset in idx_dataset_to_X:
idx = idx_dataset.split("_")[0]
dataset_id = idx_dataset.split("_")[1]
np_feature_array = np.array(idx_dataset_to_X[idx_dataset])
combined_features = [feature_idx_to_aggregation_method[i](np_feature_array[:, i]) for i in range(np_feature_array.shape[1]-1)]
X.append(combined_features)
y.append(idx_dataset_to_y[idx_dataset])
dataset_ids.append(idx_dataset_to_predicted_dataset[idx_dataset])
        if input_pub_ids is not None:
output_pub_ids.append(input_pub_ids[int(idx)])
return np.array(X), np.array(y), output_pub_ids, dataset_ids
def main():
train_sgtb_path = os.path.abspath(os.path.join("project", "dataset_split_data", "train", "sgtb_scores.json"))
dev_sgtb_path = os.path.abspath(os.path.join("project", "dataset_split_data", "dev", "sgtb_scores.json"))
test_sgtb_path = os.path.abspath(os.path.join("project", "dataset_split_data", "test", "sgtb_scores.json"))
train_processed_docs = []
with open(train_sgtb_path, 'rb') as train_sgtb_file:
for line in train_sgtb_file:
train_processed_docs.append(json.loads(line.strip()))
dev_processed_docs = []
with open(dev_sgtb_path, 'rb') as dev_sgtb_file:
for line in dev_sgtb_file:
dev_processed_docs.append(json.loads(line.strip()))
test_processed_docs = []
with open(test_sgtb_path, 'rb') as test_sgtb_file:
for line in test_sgtb_file:
test_processed_docs.append(json.loads(line.strip()))
train_dataset = processed_docs_to_xgboost_dataset(train_processed_docs)
dev_dataset = processed_docs_to_xgboost_dataset(dev_processed_docs)
test_dataset = processed_docs_to_xgboost_dataset(test_processed_docs)
train_dmatrix = xgb.DMatrix(data=train_dataset[0], label=train_dataset[1])
dev_dmatrix = xgb.DMatrix(data=dev_dataset[0], label=dev_dataset[1])
test_dmatrix = xgb.DMatrix(data=test_dataset[0], label=test_dataset[1])
    np.set_printoptions(threshold=np.inf)  # np.nan is rejected as a threshold by newer numpy; inf keeps full-array printing
def eval_metric(preds, d):
labels = d.get_label()
return 'roc', -1*roc_auc_score(labels, preds)
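    # Stack train and dev so RandomizedSearchCV can use a single predefined split:
    # rows labelled -1 are always used for fitting, rows labelled 0 form the validation fold.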
train_dev = (np.vstack((train_dataset[0], dev_dataset[0])), np.hstack((train_dataset[1], dev_dataset[1])))
val_fold = np.hstack((-1*np.ones(train_dataset[0].shape[0]), np.zeros(dev_dataset[0].shape[0])))
predefined_fold = PredefinedSplit(val_fold.astype(int))
# specify parameters and distributions to sample from
param_dist = {"max_depth": range(2, 8),
"learning_rate": [10**x for x in range(-1, 0)],
"n_estimators": range(1, 50),
"colsample_by_tree": np.linspace(0.1, 0.5, 5),
"min_child_weight": range(5, 11)}
base_clf = xgb.XGBClassifier(objective="binary:logistic",
silent=True)
seed = 2345345
n_iter_search = 100
search_clf = RandomizedSearchCV(base_clf, param_distributions=param_dist,
n_iter=n_iter_search, cv=predefined_fold,
n_jobs=-1, verbose=2, random_state=seed)
search_clf.fit(train_dev[0], train_dev[1])
# xgb_clf.fit(train_dataset[0],
# train_dataset[1],
# eval_set = [(train_dataset[0], train_dataset[1]), (dev_dataset[0], dev_dataset[1])],
# eval_metric=eval_metric,
# verbose=True,
# early_stopping_rounds=10)
print(search_clf.best_score_)
print(search_clf.best_params_)
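    # Refit with the best hyperparameters on the training split alone so the dev and test
    # metrics reported below are computed on held-out data.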
xgb_clf = xgb.XGBClassifier(objective="binary:logistic",
silent=True,
**search_clf.best_params_)
xgb_clf.fit(train_dataset[0], train_dataset[1])
print(xgb_clf.feature_importances_)
test_pred = xgb_clf.predict_proba(test_dataset[0])
dev_pred = xgb_clf.predict_proba(dev_dataset[0])
train_pred = xgb_clf.predict_proba(train_dataset[0])
test_pred_threshold = xgb_clf.predict(test_dataset[0])
dev_pred_threshold = xgb_clf.predict(dev_dataset[0])
train_pred_threshold = xgb_clf.predict(train_dataset[0])
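    # predict_proba returns one column per class; keep the positive-class probability for ROC AUC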
test_pred = [probs[1] for probs in test_pred]
dev_pred = [probs[1] for probs in dev_pred]
train_pred = [probs[1] for probs in train_pred]
print(roc_auc_score(test_dataset[1], test_pred))
print(roc_auc_score(dev_dataset[1], dev_pred))
print(roc_auc_score(train_dataset[1], train_pred))
print("test")
print(precision_score(test_dataset[1], test_pred_threshold))
print(recall_score(test_dataset[1], test_pred_threshold))
print(f1_score(test_dataset[1], test_pred_threshold))
# print(precision_recall_curve(test_dataset[1], test_pred))
print("dev")
print(precision_score(dev_dataset[1], dev_pred_threshold))
print(recall_score(dev_dataset[1], dev_pred_threshold))
print(f1_score(dev_dataset[1], dev_pred_threshold))
print("train")
print(precision_score(train_dataset[1], train_pred_threshold))
print(recall_score(train_dataset[1], train_pred_threshold))
print(f1_score(train_dataset[1], train_pred_threshold))
print(xgb_clf.feature_importances_)
model_name = "xgboost_linking_model_v24"
output_model_path = os.path.abspath(os.path.join("project", "linking_models", model_name + ".pkl"))
joblib.dump({"clf": xgb_clf}, output_model_path)
output_model_path_full = os.path.abspath(os.path.join("project", "linking_models", model_name + "_full.pkl"))
xgb_clf.fit(train_dev[0], train_dev[1])
test_pred_threshold = xgb_clf.predict(test_dataset[0])
dev_pred_threshold = xgb_clf.predict(dev_dataset[0])
train_pred_threshold = xgb_clf.predict(train_dataset[0])
print(f1_score(train_dataset[1], train_pred_threshold))
print(f1_score(dev_dataset[1], dev_pred_threshold))
print(f1_score(test_dataset[1], test_pred_threshold))
joblib.dump({"clf": xgb_clf}, output_model_path_full)
train_dev_test = (np.vstack((train_dataset[0], dev_dataset[0], test_dataset[0])), np.hstack((train_dataset[1], dev_dataset[1], test_dataset[1])))
output_model_path_full_test = os.path.abspath(os.path.join("project", "linking_models", model_name + "_full_test.pkl"))
xgb_clf.fit(train_dev_test[0], train_dev_test[1])
test_pred_threshold = xgb_clf.predict(test_dataset[0])
dev_pred_threshold = xgb_clf.predict(dev_dataset[0])
train_pred_threshold = xgb_clf.predict(train_dataset[0])
print(f1_score(train_dataset[1], train_pred_threshold))
print(f1_score(dev_dataset[1], dev_pred_threshold))
print(f1_score(test_dataset[1], test_pred_threshold))
joblib.dump({"clf": xgb_clf}, output_model_path_full_test)
if __name__ == "__main__":
main() | coleridge-rich-context-ai2-master | project/xgboost_linking.py |
"""Script to write all the needed files to a new folder based on splits provided in text files"""
import os
import json
from typing import Dict, Union, List
from collections import defaultdict
from tqdm import tqdm
def load_all_publications(old_base_path: str):
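    """Load publications and citations from the existing train/dev/test folders and index them by publication id."""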
train_path = os.path.join(old_base_path, "train")
train_pubs_path = os.path.join(train_path, "publications.json")
train_citations_path = os.path.join(train_path, "data_set_citations.json")
dev_path = os.path.join(old_base_path, "dev")
dev_pubs_path = os.path.join(dev_path, "publications.json")
dev_citations_path = os.path.join(dev_path, "data_set_citations.json")
test_path = os.path.join(old_base_path, "test")
test_pubs_path = os.path.join(test_path, "publications.json")
test_citations_path = os.path.join(test_path, "data_set_citations.json")
with open(train_pubs_path) as fp:
train_pubs = json.load(fp)
with open(train_citations_path) as fp:
train_citations = json.load(fp)
with open(dev_pubs_path) as fp:
dev_pubs = json.load(fp)
with open(dev_citations_path) as fp:
dev_citations = json.load(fp)
with open(test_pubs_path) as fp:
test_pubs = json.load(fp)
with open(test_citations_path) as fp:
test_citations = json.load(fp)
all_pubs = train_pubs + dev_pubs + test_pubs
all_citations = train_citations + dev_citations + test_citations
pub_id_to_pub = {}
pub_id_to_citation = defaultdict(list)
for pub_entry in all_pubs:
publication_id = pub_entry["publication_id"]
pub_id_to_pub[publication_id] = pub_entry
for citation_entry in all_citations:
publication_id = citation_entry["publication_id"]
pub_id_to_citation[publication_id].append(citation_entry)
return pub_id_to_pub, pub_id_to_citation
def make_split_folder(papers: set,
new_folder_path: str,
old_base_path: str,
                      all_pubs: Dict[int, Dict[str, Union[str, int]]],
                      all_citations: Dict[int, List[Dict[str, Union[int, List, float]]]]):
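    """Create one split folder: copy the text, pdf, ner-conll, and linking-conll files for the
    given paper ids from their original train/dev/test location, then write the filtered
    publications.json and data_set_citations.json for the split."""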
os.system("mkdir {}".format(new_folder_path))
linking_conll_path = os.path.join(new_folder_path, "linking-conll")
ner_conll_path = os.path.join(new_folder_path, "ner-conll")
input_path = os.path.join(new_folder_path, "input")
files_path = os.path.join(input_path, "files")
pdf_path = os.path.join(files_path, "pdf")
text_path = os.path.join(files_path, "text")
os.system("mkdir {}".format(linking_conll_path))
os.system("mkdir {}".format(ner_conll_path))
os.system("mkdir {}".format(input_path))
os.system("mkdir {}".format(files_path))
os.system("mkdir {}".format(pdf_path))
os.system("mkdir {}".format(text_path))
new_publications = []
new_citations = []
for paper in tqdm(papers):
text_file_name = paper + ".txt"
pdf_file_name = paper + ".pdf"
ner_file_name = paper + "_extraction.conll"
linking_file_name = paper + "_linking.conll"
publication_id = int(paper)
publication_entry = all_pubs[publication_id]
citation_entries = all_citations[publication_id]
new_publications.append(publication_entry)
new_citations += citation_entries
if os.path.isfile(os.path.join(old_base_path, "train", "input", "files", "text", text_file_name)):
folder_path = os.path.join(old_base_path, "train")
elif os.path.isfile(os.path.join(old_base_path, "dev", "input", "files", "text", text_file_name)):
folder_path = os.path.join(old_base_path, "dev")
else:
folder_path = os.path.join(old_base_path, "test")
os.system("cp {} {}".format(os.path.join(folder_path, "input", "files", "text", text_file_name),
os.path.join(text_path, text_file_name)))
os.system("cp {} {}".format(os.path.join(folder_path, "input", "files", "pdf", pdf_file_name),
os.path.join(pdf_path, pdf_file_name)))
os.system("cp {} {}".format(os.path.join(folder_path, "ner-conll", ner_file_name),
os.path.join(ner_conll_path, ner_file_name)))
os.system("cp {} {}".format(os.path.join(folder_path, "linking-conll", linking_file_name),
os.path.join(linking_conll_path, linking_file_name)))
with open(os.path.join(new_folder_path, "publications.json"), "w") as fp:
json.dump(new_publications, fp, indent=4)
with open(os.path.join(new_folder_path, "data_set_citations.json"), "w") as fp:
json.dump(new_citations, fp, indent=4)
def load_papers_set_from_file(path: str) -> set:
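    """Read a file with one publication id per line into a set of id strings."""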
paper_ids = set()
with open(path) as fp:
line = fp.readline()
while line:
paper_ids.add(line.rstrip())
line = fp.readline()
return paper_ids
def main():
old_folder_base_path = os.path.abspath(os.path.join("project", "data"))
new_folder_path = os.path.abspath(os.path.join("project", "dataset_split_data"))
new_split_path = os.path.abspath(os.path.join("project", "ner_retraining", "data"))
train_papers_path = os.path.join(new_split_path, "train_papers.txt")
dev_papers_path = os.path.join(new_split_path, "dev_papers.txt")
test_papers_path = os.path.join(new_split_path, "test_papers.txt")
train_papers = load_papers_set_from_file(train_papers_path)
dev_papers = load_papers_set_from_file(dev_papers_path)
test_papers = load_papers_set_from_file(test_papers_path)
pub_id_to_pub, pub_id_to_citation = load_all_publications(old_folder_base_path)
make_split_folder(train_papers,
os.path.join(new_folder_path, "train"),
old_folder_base_path,
pub_id_to_pub,
pub_id_to_citation)
make_split_folder(dev_papers,
os.path.join(new_folder_path, "dev"),
old_folder_base_path,
pub_id_to_pub,
pub_id_to_citation)
make_split_folder(test_papers,
os.path.join(new_folder_path, "test"),
old_folder_base_path,
pub_id_to_pub,
pub_id_to_citation)
if __name__ == '__main__':
main() | coleridge-rich-context-ai2-master | project/create_dataset_split_folder.py |
coleridge-rich-context-ai2-master | project/ner_rcc/__init__.py |