python_code | repo_name | file_path
---|---|---|
from overrides import overrides
from keras import backend as K
from keras.engine import InputSpec
from ..masked_layer import MaskedLayer
class BOWEncoder(MaskedLayer):
'''
Bag of Words Encoder takes a matrix of shape (num_words, word_dim) and returns a vector of size (word_dim),
which is an average of the (unmasked) rows in the input matrix. This could have been done using a Lambda
layer, except that the Lambda layer does not support masking (as of Keras 1.0.7).
'''
def __init__(self, **kwargs):
self.input_spec = [InputSpec(ndim=3)]
# For consistency of handling sentence encoders, we will often get passed this parameter.
# We don't use it, but Layer will complain if it's there, so we get rid of it here.
kwargs.pop('units', None)
super(BOWEncoder, self).__init__(**kwargs)
@overrides
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[2]) # removing second dimension
@overrides
def call(self, inputs, mask=None):
# pylint: disable=redefined-variable-type
if mask is None:
return K.mean(inputs, axis=1)
else:
# Compute weights such that masked elements have zero weight and the remaining
# weight is distributed equally among the unmasked elements.
# The mask (samples, num_words) has 0s for masked elements and 1s everywhere else.
# The mask is of type int8. While Theano would automatically make weighted_mask below
# of type float32 even if the mask remains int8, TensorFlow would complain. Let's cast it
# explicitly to remain compatible with TensorFlow.
float_mask = K.cast(mask, 'float32')
# Keeping dims of the denominator so it broadcasts against the numerator; epsilon is added to avoid
# division by zero.
# (samples, num_words)
weighted_mask = float_mask / (K.sum(float_mask, axis=1, keepdims=True) + K.epsilon())
if K.ndim(weighted_mask) < K.ndim(inputs):
weighted_mask = K.expand_dims(weighted_mask)
return K.sum(inputs * weighted_mask, axis=1) # (samples, word_dim)
@overrides
def compute_mask(self, inputs, mask=None):
# We need to override this method because Layer passes the input mask unchanged since this layer
# supports masking. We don't want that. After the input is averaged, we can stop propagating
# the mask.
return None
| deep_qa-master | deep_qa/layers/encoders/bag_of_words.py |
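As a minimal NumPy sketch (not part of the repository) of what ``BOWEncoder.call`` computes: the masked average gives zero weight to masked words and splits the remaining weight evenly among the rest. The function name is illustrative only.

import numpy as np

def bow_encode(inputs, mask=None, epsilon=1e-8):
    # inputs: (samples, num_words, word_dim); mask: (samples, num_words) of 0/1.
    if mask is None:
        return inputs.mean(axis=1)
    float_mask = mask.astype('float32')
    # Each unmasked word gets weight 1 / (number of unmasked words in its sample).
    weights = float_mask / (float_mask.sum(axis=1, keepdims=True) + epsilon)
    return (inputs * weights[:, :, None]).sum(axis=1)  # (samples, word_dim)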
from keras import backend as K
from keras.engine import InputSpec
from overrides import overrides
from ..masked_layer import MaskedLayer
from ...tensors.backend import switch
class PositionalEncoder(MaskedLayer):
'''
A ``PositionalEncoder`` is very similar to a kind of weighted bag of words encoder,
where the weighting is done by an index-dependent vector, not a scalar. If you think
this is an odd thing to do, it is. The original authors provide no real reasoning behind
the exact method other than it takes into account word order. This is here mainly to reproduce
results for comparison.
It takes a matrix of shape (num_words, word_dim) and returns a vector of size (word_dim),
which implements the following linear combination of the rows:
representation = sum_(j=1)^(n) { l_j * w_j }
where w_j is the j-th word representation in the sentence and l_j is a vector defined as follows:
l_kj = (1 - j/m) - (k/d) * (1 - 2j/m)
where:
- j is the index of the word in the sentence.
- m is the sentence length.
- k is the vector index (i.e. the k-th element of a vector).
- d is the dimension of the embedding.
- * represents element-wise multiplication.
This method was originally introduced in End-To-End Memory Networks (pp. 4-5):
https://arxiv.org/pdf/1503.08895v5.pdf
'''
def __init__(self, **kwargs):
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
# For consistency of handling sentence encoders, we will often get passed this parameter.
# We don't use it, but Layer will complain if it's there, so we get rid of it here.
kwargs.pop('units', None)
super(PositionalEncoder, self).__init__(**kwargs)
@overrides
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[2]) # removing second dimension
@overrides
def call(self, inputs, mask=None):
# pylint: disable=redefined-variable-type
# This section implements the positional encoder on all the vectors at once.
# The general idea is to use ones matrices in the shape of `inputs` to create indexes per
# word.
if mask is None:
ones_like_x = K.ones_like(inputs)
else:
float_mask = K.cast(mask, 'float32')
ones_like_x = K.ones_like(inputs) * K.expand_dims(float_mask, 2)
# This is an odd way to get the number of words (i.e. the second dimension of inputs).
# However, if the input is masked, using the dimension directly does not
# equate to the correct number of words. We fix this by adding up a relevant
# row of ones which has been masked if required.
masked_m = K.expand_dims(K.sum(ones_like_x, 1), 1)
if mask is None:
one_over_m = ones_like_x / masked_m
j_index = K.cumsum(ones_like_x, 1)
else:
one_over_m = switch(ones_like_x, ones_like_x/masked_m, K.zeros_like(ones_like_x))
j_index = K.cumsum(ones_like_x, 1) * K.expand_dims(float_mask, 2)
k_over_d = K.cumsum(ones_like_x, 2) * 1.0/K.cast(K.shape(inputs)[2], 'float32')
l_weighting_vectors = (ones_like_x - (j_index * one_over_m)) - \
(k_over_d * (ones_like_x - 2 * j_index * one_over_m))
return K.sum(l_weighting_vectors * inputs, 1)
@overrides
def compute_mask(self, inputs, mask=None):
# We need to override this method because Layer passes the input mask unchanged since this
# layer supports masking. We don't want that. After the input is merged we can stop
# propagating the mask.
return None
| deep_qa-master | deep_qa/layers/encoders/positional_encoder.py |
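A small NumPy sketch (illustrative, not from the repository) of the position weights l_kj = (1 - j/m) - (k/d)(1 - 2j/m) that ``PositionalEncoder`` applies; masking is ignored here to keep it short.

import numpy as np

def positional_weights(m, d):
    # l[j-1, k-1] = (1 - j/m) - (k/d) * (1 - 2*j/m), with j and k starting at 1.
    j = np.arange(1, m + 1, dtype='float32')[:, None]  # (m, 1) word positions
    k = np.arange(1, d + 1, dtype='float32')[None, :]  # (1, d) embedding dimensions
    return (1.0 - j / m) - (k / d) * (1.0 - 2.0 * j / m)  # (m, d)

def positional_encode(sentence):
    # sentence: (num_words, word_dim); returns the l-weighted sum of its rows, (word_dim,).
    m, d = sentence.shape
    return (positional_weights(m, d) * sentence).sum(axis=0)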
from overrides import overrides
from keras.engine import InputSpec
from keras import backend as K
from keras.layers.recurrent import GRU, _time_distributed_dense
class AttentiveGru(GRU):
"""
GRUs typically operate over sequences of words. The motivation behind this encoding is that
a weighted average loses ordering information over its inputs - for instance, this is important
in the bAbI tasks.
See Dynamic Memory Networks for more information: https://arxiv.org/pdf/1603.01417v1.pdf.
This class extends the Keras Gated Recurrent Unit by implementing a method which replaces
the GRU update gate (normally a vector, z - it is noted below where it is normally computed) with a scalar
attention weight (one per input, such as from the output of a softmax over the input vectors), which is
pre-computed. As mentioned above, instead of using word embedding sequences as input to the GRU,
we are using sentence encoding sequences.
The implementation of this class is subtle - it is only very slightly different from a standard GRU.
When it is initialised, the Keras backend will call the build method. It uses this to check that inputs being
passed to this function are the correct size, so we allow this to be the actual input size as normal.
However, for the internal implementation, everywhere this global shape is used, we override it to be one
less, because we pass in a tensor of shape (batch, knowledge_length, 1 + encoding_dim) which includes
the attention mask. Therefore, we need all of the weights to have shape (*, encoding_dim),
NOT (*, 1 + encoding_dim). All of the below methods which are overridden use some
form of this dimension, so we correct them.
"""
def __init__(self, output_dim, input_length, **kwargs):
self.name = kwargs.pop('name')
super(AttentiveGru, self).__init__(output_dim,
input_length=input_length,
input_dim=output_dim + 1,
name=self.name, **kwargs)
@overrides
def step(self, inputs, states):
# pylint: disable=invalid-name
"""
The input to step is a tensor of shape (batch, 1 + encoding_dim), i.e. a timeslice of
the input to this AttentiveGRU, where the time axis is the knowledge_length.
Before we start, we strip off the attention from the beginning. Then we do the equations for a
normal GRU, except we don't calculate the output gate z, substituting the attention weight for
it instead.
Note that there is some redundancy here - for instance, in the GPU mode, we do a
larger matrix multiplication than required, as we don't use one part of it. However, for
readability and similarity to the original GRU code in Keras, it has not been changed. In each section,
there are commented out lines which contain code. If you were to uncomment these, remove the differences
in the input size and replace the attention with the z gate at the output, you would have a standard
GRU back again. We literally copied the Keras GRU code here, making some small modifications.
"""
attention = inputs[:, 0]
inputs = inputs[:, 1:]
h_tm1 = states[0] # previous memory
B_U = states[1] # dropout matrices for recurrent units
B_W = states[2]
if self.implementation == 2:
matrix_x = K.dot(inputs * B_W[0], self.kernel)
if self.use_bias:
matrix_x = K.bias_add(matrix_x, self.bias)
matrix_inner = K.dot(h_tm1 * B_U[0], self.recurrent_kernel[:, :2 * self.units])
x_r = matrix_x[:, self.units: 2 * self.units]
inner_r = matrix_inner[:, self.units: 2 * self.units]
# x_z = matrix_x[:, :self.units]
# inner_z = matrix_inner[:, :self.units]
# z = self.recurrent_activation(x_z + inner_z)
r = self.recurrent_activation(x_r + inner_r)
x_h = matrix_x[:, 2 * self.units:]
inner_h = K.dot(r * h_tm1 * B_U[0], self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + inner_h)
else:
if self.implementation == 0:
# x_z = inputs[:, :self.units]
x_r = inputs[:, self.units: 2 * self.units]
x_h = inputs[:, 2 * self.units:]
elif self.implementation == 1:
# x_z = K.dot(inputs * B_W[0], self.W_z) + self.b_z
x_r = K.dot(inputs * B_W[1], self.kernel_r)
x_h = K.dot(inputs * B_W[2], self.kernel_h)
if self.use_bias:
x_r = K.bias_add(x_r, self.bias_r)
x_h = K.bias_add(x_h, self.bias_h)
else:
raise Exception('Unknown implementation')
# z = self.recurrent_activation(x_z + K.dot(h_tm1 * B_U[0], self.U_z))
r = self.recurrent_activation(x_r + K.dot(h_tm1 * B_U[1], self.recurrent_kernel_r))
hh = self.activation(x_h + K.dot(r * h_tm1 * B_U[2], self.recurrent_kernel_h))
# Here is the KEY difference between a GRU and an AttentiveGRU. Instead of using
# a learnt output gate (z), we use a scalar attention vector (batch, 1) for this
# particular background knowledge vector.
h = K.expand_dims(attention, 1) * hh + (1 - K.expand_dims(attention, 1)) * h_tm1
return h, [h]
@overrides
def build(self, input_shape):
"""
This is used by Keras to verify things, but also to build the weights.
The only differences from the Keras GRU (which we copied exactly
other than the below) are:
We generate weights with dimension input_shape[2] - 1, rather than
dimension input_shape[2].
There are a few variables which are created in non-'gpu' modes which
are not required. These are commented out but left in for clarity below.
"""
new_input_shape = list(input_shape)
new_input_shape[2] -= 1
super(AttentiveGru, self).build(tuple(new_input_shape))
self.input_spec = [InputSpec(shape=input_shape)]
@overrides
def preprocess_input(self, inputs, training=None):
"""
We have to override this preprocessing step, because if we are using the cpu,
we do the weight - input multiplications in the internals of the GRU as separate,
smaller matrix multiplications and concatenate them after. Therefore, before this
happens, we split off the attention and then add it back afterwards.
"""
if self.implementation == 0:
attention = inputs[:, :, 0] # Shape:(samples, knowledge_length)
inputs = inputs[:, :, 1:] # Shape:(samples, knowledge_length, word_dim)
input_shape = self.input_spec[0].shape
input_dim = input_shape[2] - 1
timesteps = input_shape[1]
x_z = _time_distributed_dense(inputs, self.kernel_z, self.bias_z,
self.dropout, input_dim, self.units,
timesteps, training=training)
x_r = _time_distributed_dense(inputs, self.kernel_r, self.bias_r,
self.dropout, input_dim, self.units,
timesteps, training=training)
x_h = _time_distributed_dense(inputs, self.kernel_h, self.bias_h,
self.dropout, input_dim, self.units,
timesteps, training=training)
# Add attention back onto its original place.
return K.concatenate([K.expand_dims(attention, 2), x_z, x_r, x_h], axis=2)
else:
return inputs
| deep_qa-master | deep_qa/layers/encoders/attentive_gru.py |
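The key difference from a standard GRU is the final state update; a minimal NumPy sketch of that update (a hypothetical helper, not part of the layer) is:

import numpy as np

def attentive_gru_update(attention, candidate, previous_state):
    # attention: (batch,) precomputed scalar weight per example; candidate (hh) and
    # previous_state (h_tm1): (batch, units). A standard GRU would use its learned
    # update gate z here; the AttentiveGru substitutes the attention weight.
    a = attention[:, None]
    return a * candidate + (1.0 - a) * previous_state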
from keras import backend as K
from keras.layers import GRU, InputSpec
class ShareableGRU(GRU):
def __init__(self, *args, **kwargs):
super(ShareableGRU, self).__init__(*args, **kwargs)
def call(self, x, mask=None, **kwargs):
input_shape = K.int_shape(x)
res = super(ShareableGRU, self).call(x, mask, **kwargs)
self.input_spec = [InputSpec(shape=(self.input_spec[0].shape[0],
None,
self.input_spec[0].shape[2]))]
if K.ndim(x) == K.ndim(res):
# A recent change in Keras
# (https://github.com/fchollet/keras/commit/a9b6bef0624c67d6df1618ca63d8e8141b0df4d0)
# made it so that K.rnn with a tensorflow backend does not retain shape information for
# the sequence length, even if it's present in the input. We need to fix that here so
# that our models have the right shape information. A simple K.reshape is good enough
# to fix this.
result_shape = K.int_shape(res)
if input_shape[1] is not None and result_shape[1] is None:
shape = (input_shape[0] if input_shape[0] is not None else -1,
input_shape[1], result_shape[2])
res = K.reshape(res, shape=shape)
return res
| deep_qa-master | deep_qa/layers/encoders/shareable_gru.py |
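A minimal sketch (illustrative names) of the static-shape bookkeeping in ``ShareableGRU.call``: when the backend has dropped the sequence length from the result's static shape but the input still knows it, we rebuild the full shape before reshaping.

def restored_shape(input_shape, result_shape):
    # input_shape: (batch, timesteps, input_dim); result_shape: (batch, None, units).
    batch = input_shape[0] if input_shape[0] is not None else -1
    return (batch, input_shape[1], result_shape[2])

# e.g. restored_shape((32, 20, 100), (32, None, 64)) == (32, 20, 64)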
from keras import backend as K
from keras import activations
from overrides import overrides
from .word_alignment import WordAlignmentEntailment
from ..attention import WeightedSum
from ...tensors.backend import switch, apply_feed_forward
class DecomposableAttentionEntailment(WordAlignmentEntailment):
"""
This layer is a reimplementation of the entailment algorithm described in
"A Decomposable Attention Model for Natural Language Inference", Parikh et
al., 2016. The algorithm has three main steps:
(1) Attend: Compute dot products between all pairs of projections of words
in the hypothesis and the premise, normalize those dot products to use
them to align each word in the premise to a phrase in the hypothesis and
vice-versa. These alignments are then used to summarize the aligned
phrase in the other sentence as a weighted sum. The initial word
projections are computed using a feed forward NN, F.
(2) Compare: Pass a concatenation of each word in the premise and the
summary of its aligned phrase in the hypothesis through a feed forward
NN, G, to get a projected comparison. Do the same with the hypothesis
and the aligned phrase from the premise.
(3) Aggregate: Sum over the comparisons to get a single vector each for
premise-hypothesis comparison, and hypothesis-premise comparison. Pass
them through a third feed forward NN (H), to get the entailment
decision.
This layer can take either a tuple (premise, hypothesis) or a concatenation
of them as input.
Input:
- Tuple input: a premise sentence and a hypothesis sentence, both with shape ``(batch_size,
sentence_length, embed_dim)`` and masks of shape ``(batch_size, sentence_length)``
- Single input: a single tensor of shape ``(batch_size, sentence_length * 2, embed_dim)``, with
a mask of shape ``(batch_size, sentence_length * 2)``, which we will split in half to get the
premise and hypothesis sentences.
Output:
- Entailment decisions with the given ``output_dim``.
Parameters
----------
num_hidden_layers: int, optional (default=1)
Number of hidden layers in each of the feed forward neural nets described above.
hidden_layer_width: int, optional (default=50)
Width of each hidden layer in each of the feed forward neural nets described above.
hidden_layer_activation: str, optional (default='relu')
Activation for each hidden layer in each of the feed forward neural nets described above.
final_activation: str, optional (default='softmax')
Activation to use for the final output. Should almost certainly be 'softmax'.
output_dim: int, optional (default=3)
Dimensionality of the final output. If this is the last layer in your model, this needs to
be the same as the number of labels you have.
initializer: str, optional (default='uniform')
Will be passed to ``self.add_weight()`` for each of the weight matrices in the feed forward
neural nets described above.
Notes
-----
premise_length = hypothesis_length = sentence_length below.
"""
def __init__(self,
num_hidden_layers: int=1,
hidden_layer_width: int=50,
hidden_layer_activation: str='relu',
final_activation: str='softmax',
output_dim: int=3,
initializer: str='uniform',
**kwargs):
self.num_hidden_layers = num_hidden_layers
self.hidden_layer_width = hidden_layer_width
self.hidden_layer_activation = hidden_layer_activation
self.final_activation = final_activation
self.output_dim = output_dim
self.initializer = initializer
# Weights will be initialized in the build method.
self.premise_length = None
self.hypothesis_length = None
self.attend_weights = [] # weights related to F
self.compare_weights = [] # weights related to G
self.aggregate_weights = [] # weights related to H
self.scorer = None
super(DecomposableAttentionEntailment, self).__init__(**kwargs)
@overrides
def build(self, input_shape):
'''
This model has three feed forward NNs (F, G and H in the paper). We assume that all three
NNs have the same hyper-parameters: num_hidden_layers, hidden_layer_width and
hidden_layer_activation. That is, F, G and H have the same structure and activations. Their
actual weights are different, though. H has a separate softmax layer at the end.
'''
super(DecomposableAttentionEntailment, self).build(input_shape)
if isinstance(input_shape, list):
# input_shape is a list containing the shapes of the two inputs.
self.premise_length = input_shape[0][1]
self.hypothesis_length = input_shape[1][1]
# input_dim below is embedding dim for the model in the paper since they feed embedded
# input directly into this layer.
self.input_dim = input_shape[0][-1]
else:
# NOTE: This will probably fail silently later on in this code if your premise and
# hypothesis actually have different lengths.
self.premise_length = self.hypothesis_length = int(input_shape[1] / 2)
self.input_dim = input_shape[-1]
attend_input_dim = self.input_dim
compare_input_dim = 2 * self.input_dim
aggregate_input_dim = self.hidden_layer_width * 2
for i in range(self.num_hidden_layers):
self.attend_weights.append(self.add_weight(shape=(attend_input_dim, self.hidden_layer_width),
name='%s_attend_%d' % (self.name, i),
initializer=self.initializer))
self.compare_weights.append(self.add_weight(shape=(compare_input_dim, self.hidden_layer_width),
name='%s_compare_%d' % (self.name, i),
initializer=self.initializer))
self.aggregate_weights.append(self.add_weight(shape=(aggregate_input_dim, self.hidden_layer_width),
name='%s_aggregate_%d' % (self.name, i),
initializer=self.initializer))
attend_input_dim = self.hidden_layer_width
compare_input_dim = self.hidden_layer_width
aggregate_input_dim = self.hidden_layer_width
self.scorer = self.add_weight(shape=(self.hidden_layer_width, self.output_dim),
initializer=self.initializer,
name='%s_score' % self.name)
@overrides
def compute_output_shape(self, input_shape):
# (batch_size, output_dim)
if isinstance(input_shape, list):
return (input_shape[0][0], self.output_dim)
else:
return (input_shape[0], self.output_dim)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
return None
@overrides
def call(self, inputs, mask=None):
# premise_length = hypothesis_length in the following lines, but the names are kept separate to keep
# track of the axes being normalized.
# The inputs can be two different tensors, or a concatenation. Hence, the conditional below.
if isinstance(inputs, list) or isinstance(inputs, tuple):
premise_embedding, hypothesis_embedding = inputs
# (batch_size, premise_length), (batch_size, hypothesis_length)
premise_mask, hypothesis_mask = mask
else:
premise_embedding = inputs[:, :self.premise_length, :]
hypothesis_embedding = inputs[:, self.premise_length:, :]
# (batch_size, premise_length), (batch_size, hypothesis_length)
premise_mask = None if mask is None else mask[:, :self.premise_length]
hypothesis_mask = None if mask is None else mask[:, self.premise_length:]
if premise_mask is not None:
premise_embedding = switch(K.expand_dims(premise_mask), premise_embedding,
K.zeros_like(premise_embedding))
if hypothesis_mask is not None:
hypothesis_embedding = switch(K.expand_dims(hypothesis_mask), hypothesis_embedding,
K.zeros_like(hypothesis_embedding))
activation = activations.get(self.hidden_layer_activation)
# (batch_size, premise_length, hidden_dim)
projected_premise = apply_feed_forward(premise_embedding, self.attend_weights, activation)
# (batch_size, hypothesis_length, hidden_dim)
projected_hypothesis = apply_feed_forward(hypothesis_embedding, self.attend_weights, activation)
## Step 1: Attend
p2h_alignment = self._align(projected_premise, projected_hypothesis, premise_mask, hypothesis_mask)
# beta in the paper (equation 2)
# (batch_size, premise_length, emb_dim)
p2h_attention = self._attend(hypothesis_embedding, p2h_alignment)
h2p_alignment = self._align(projected_hypothesis, projected_premise, hypothesis_mask, premise_mask)
# alpha in the paper (equation 2)
# (batch_size, hyp_length, emb_dim)
h2p_attention = self._attend(premise_embedding, h2p_alignment)
## Step 2: Compare
# Equation 3 in the paper.
compared_premise = self._compare(premise_embedding, p2h_attention)
compared_hypothesis = self._compare(hypothesis_embedding, h2p_attention)
## Step 3: Aggregate
# Equations 4 and 5.
# (batch_size, hidden_dim * 2)
aggregated_input = K.concatenate([K.sum(compared_premise, axis=1), K.sum(compared_hypothesis, axis=1)])
# (batch_size, hidden_dim)
input_to_scorer = apply_feed_forward(aggregated_input, self.aggregate_weights, activation)
# (batch_size, output_dim)
final_activation = activations.get(self.final_activation)
scores = final_activation(K.dot(input_to_scorer, self.scorer))
return scores
@staticmethod
def _attend(target_embedding, s2t_alignment):
'''
Takes a target embedding and source-target alignment attention, and produces a weighted average of the
target embedding for each source word.
target_embedding: (batch_size, target_length, embed_dim)
s2t_alignment: (batch_size, source_length, target_length)
'''
# NOTE: This Layer was written before we had things like WeightedSum. We could probably
# implement this whole thing a lot more easily now, but I'm just replacing bits of it at a
# time.
return WeightedSum().call([target_embedding, s2t_alignment])
def _compare(self, source_embedding, s2t_attention):
'''
Takes word embeddings from a sentence, and aggregated representations of words aligned to each of those
words from another sentence, and returns a projection of their concatenation.
source_embedding: (batch_size, source_length, embed_dim)
s2t_attention: (batch_size, source_length, embed_dim)
'''
activation = activations.get(self.hidden_layer_activation)
comparison_input = K.concatenate([source_embedding, s2t_attention])
# Equation 3 in the paper.
compared_representation = apply_feed_forward(comparison_input, self.compare_weights, activation)
return compared_representation
@overrides
def get_config(self):
config = {
'num_hidden_layers': self.num_hidden_layers,
'hidden_layer_width': self.hidden_layer_width,
'hidden_layer_activation': self.hidden_layer_activation,
'final_activation': self.final_activation,
'output_dim': self.output_dim,
'initializer': self.initializer,
}
base_config = super(DecomposableAttentionEntailment, self).get_config()
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/entailment_models/decomposable_attention.py |
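A rough NumPy sketch of the attend/compare/aggregate steps for a single (premise, hypothesis) pair, with the feed forward nets F, G and H omitted (treated as identities) and masking ignored; this is only an outline of the computation, not the layer's implementation.

import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def decomposable_attention_sketch(premise, hypothesis):
    # premise: (premise_length, embed_dim); hypothesis: (hypothesis_length, embed_dim).
    # Step 1: Attend - align each premise word to a hypothesis phrase and vice-versa.
    similarities = premise @ hypothesis.T                 # (premise_length, hypothesis_length)
    p2h = softmax(similarities, axis=1) @ hypothesis      # beta: (premise_length, embed_dim)
    h2p = softmax(similarities.T, axis=1) @ premise       # alpha: (hypothesis_length, embed_dim)
    # Step 2: Compare - concatenate each word with the summary of its aligned phrase.
    compared_premise = np.concatenate([premise, p2h], axis=1)
    compared_hypothesis = np.concatenate([hypothesis, h2p], axis=1)
    # Step 3: Aggregate - sum over words and concatenate the two directions.
    return np.concatenate([compared_premise.sum(axis=0), compared_hypothesis.sum(axis=0)])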
'''
Word alignment entailment models operate on word level representations, and define entailment
as a function of how well the words in the premise align with those in the hypothesis. These
are different from the encoded sentence entailment models where both the premise and hypothesis
are encoded as single vectors and entailment functions are defined on top of them.
At this point this doesn't quite fit into the memory network setup because the model doesn't
operate on the encoded sentence representations, but instead consumes the word level
representations.
TODO(pradeep): Make this work with the memory network eventually.
'''
from keras import backend as K
from ..masked_layer import MaskedLayer
from ...tensors.masked_operations import masked_softmax, masked_batch_dot
from ...tensors.backend import last_dim_flatten
class WordAlignmentEntailment(MaskedLayer): # pylint: disable=abstract-method
'''
This is an abstract class for word alignment entailment. It defines an _align function.
'''
def __init__(self, **kwargs):
self.input_dim = None
super(WordAlignmentEntailment, self).__init__(**kwargs)
@staticmethod
def _align(source_embedding, target_embedding, source_mask, target_mask, normalize_alignment=True):
'''
Takes source and target sequence embeddings and returns source-to-target alignment weights.
That is, for each word in the source sentence, returns a probability distribution over target_sequence
that shows how well each target word aligns (i.e. is similar) to it.
source_embedding: (batch_size, source_length, embed_dim)
target_embedding: (batch_size, target_length, embed_dim)
source_mask: None or (batch_size, source_length, 1)
target_mask: None or (batch_size, target_length, 1)
normalize_alignment (bool): Will apply a (masked) softmax over alignments if True.
Returns:
s2t_attention: (batch_size, source_length, target_length)
'''
source_dot_target = masked_batch_dot(source_embedding, target_embedding, source_mask, target_mask)
if normalize_alignment:
alignment_shape = K.shape(source_dot_target)
flattened_products_with_source = last_dim_flatten(source_dot_target)
if source_mask is None and target_mask is None:
flattened_s2t_attention = K.softmax(flattened_products_with_source)
elif source_mask is not None and target_mask is not None:
float_source_mask = K.cast(source_mask, 'float32')
float_target_mask = K.cast(target_mask, 'float32')
# (batch_size, source_length, target_length)
s2t_mask = K.expand_dims(float_source_mask, axis=-1) * K.expand_dims(float_target_mask, axis=1)
flattened_s2t_mask = last_dim_flatten(s2t_mask)
flattened_s2t_attention = masked_softmax(flattened_products_with_source, flattened_s2t_mask)
else:
# One of the two inputs is masked, and the other isn't. How did this happen??
raise NotImplementedError('Cannot handle only one of the inputs being masked.')
# (batch_size, source_length, target_length)
s2t_attention = K.reshape(flattened_s2t_attention, alignment_shape)
return s2t_attention
else:
return source_dot_target
| deep_qa-master | deep_qa/layers/entailment_models/word_alignment.py |
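A NumPy sketch (hypothetical helper) of the masked, normalized alignment ``_align`` produces for a single example: every source word gets a distribution over the unmasked target words.

import numpy as np

def masked_alignment(source, target, source_mask=None, target_mask=None):
    # source: (source_length, embed_dim); target: (target_length, embed_dim);
    # masks: 0/1 vectors of length source_length / target_length.
    scores = source @ target.T                            # (source_length, target_length)
    if source_mask is not None and target_mask is not None:
        pair_mask = np.outer(source_mask, target_mask).astype('float32')
    else:
        pair_mask = np.ones_like(scores)
    exp_scores = np.exp(scores - scores.max(axis=-1, keepdims=True)) * pair_mask
    return exp_scores / (exp_scores.sum(axis=-1, keepdims=True) + 1e-13)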
from .decomposable_attention import DecomposableAttentionEntailment
from .multiple_choice_tuple_entailment import MultipleChoiceTupleEntailment
entailment_models = { # pylint: disable=invalid-name
'decomposable_attention': DecomposableAttentionEntailment,
'multiple_choice_tuple_attention': MultipleChoiceTupleEntailment,
}
| deep_qa-master | deep_qa/layers/entailment_models/__init__.py |
from keras import backend as K
from .word_alignment import WordAlignmentEntailment
from ...tensors.backend import switch
class MultipleChoiceTupleEntailment(WordAlignmentEntailment):
'''A kind of decomposable attention where the premise (or background) is in
the form of SVO triples, and entailment is computed by finding the answer
in a multiple choice setting that aligns best with the tuples that align
with the question. This happens in two steps:
(1) We use the _align function from WordAlignmentEntailment to find the
premise tuples whose SV, or VO pairs align best with the question.
(2) We then use the _align function again to find the answer that aligns
best with the unaligned part of the tuples, weighted by how much they
partially align with the question in step 1.
TODO(pradeep): Also match S with question, VO with answer, O with question
and SV with answer.
'''
def __init__(self, **kwargs):
self.tuple_size = None
self.num_tuples = None
self.num_options = None
self.question_length = None
super(MultipleChoiceTupleEntailment, self).__init__(**kwargs)
def build(self, input_shape):
#NOTE: This layer currently has no trainable parameters.
super(MultipleChoiceTupleEntailment, self).build(input_shape)
# knowledge_shape: (batch_size, num_tuples, tuple_size, embed_dim)
# question_shape: (batch_size, question_length, embed_dim)
# answer_shape: (batch_size, num_options, embed_dim)
knowledge_shape, question_shape, answer_shape = input_shape
self.tuple_size = knowledge_shape[2]
if self.tuple_size != 3:
raise NotImplementedError("Only SVO tuples are currently supported.")
self.num_tuples = knowledge_shape[1]
self.question_length = question_shape[1]
self.num_options = answer_shape[1]
self.input_dim = knowledge_shape[-1]
def compute_output_shape(self, input_shape):
return (input_shape[0][0], self.num_options)
def compute_mask(self, x, mask=None):
# pylint: disable=unused-argument
return None
def call(self, x, mask=None):
# We assume the tuples are SVO and each slot is represented as vector.
# Moreover, we assume each answer option is encoded as a single vector.
# knowledge_embedding: (batch_size, num_tuples, tuple_size, embed_dim)
# question_embedding: (batch_size, question_length, embed_dim)
# answer_embedding: (batch_size, num_options, embed_dim)
knowledge_embedding, question_embedding, answer_embedding = x
if mask is None:
knowledge_mask = question_mask = answer_mask = None
else:
knowledge_mask, question_mask, answer_mask = mask
if knowledge_mask is None:
sv_knowledge_mask = vo_knowledge_mask = subj_knowledge_mask = obj_knowledge_mask = None
else:
# Take out the relevant parts for each part of the tuple and reshape SV and VO masks using
# batch_flatten.
# (batch_size, num_tuples*2)
sv_knowledge_mask = K.batch_flatten(knowledge_mask[:, :, :2])
# (batch_size, num_tuples*2)
vo_knowledge_mask = K.batch_flatten(knowledge_mask[:, :, 1:])
# (batch_size, num_tuples)
subj_knowledge_mask = knowledge_mask[:, :, 0]
# (batch_size, num_tuples)
obj_knowledge_mask = knowledge_mask[:, :, 2]
batch_size = K.shape(knowledge_embedding)[0]
sv_knowledge = K.reshape(knowledge_embedding[:, :, :2, :],
(batch_size, self.num_tuples*2, self.input_dim))
vo_knowledge = K.reshape(knowledge_embedding[:, :, 1:, :],
(batch_size, self.num_tuples*2, self.input_dim))
# (batch_size, num_tuples, embed_dim)
subj_knowledge = knowledge_embedding[:, :, 0, :]
# (batch_size, num_tuples, embed_dim)
obj_knowledge = knowledge_embedding[:, :, 2, :]
## Step A1: Align SV with question.
# Source is question, target is SV knowledge
# (batch_size, question_length, num_tuples*2)
sv_question_knowledge_alignment = self._align(question_embedding, sv_knowledge, question_mask,
sv_knowledge_mask, normalize_alignment=False)
# Sum probabilities over S and V slots. This is still a valid probability distribution.
# (batch_size, question_length, num_tuples)
sv_question_tuple_weights = K.sum(K.reshape(sv_question_knowledge_alignment,
(batch_size, self.question_length, self.num_tuples, 2)),
axis=-1)
# Average over question length. This is essentially the weights of tuples based on how well their
# S and V slots align to any word in the question.
# Insight: This is essentially \sum_{i} p_align(tuple | q_word_i) * p_imp(q_word_i), where q_word_i is
# the ith word in the question, p_align is the alignment weight and p_imp is the importance of the
# question word, which here is uniform.
# (batch_size, num_tuples)
sv_tuple_weights = K.mean(sv_question_tuple_weights, axis=1)
## Step A2: Align answer with Obj.
# Source is obj knowledge, target is answer
# (batch_size, num_tuples, num_options)
obj_knowledge_answer_alignment = self._align(obj_knowledge, answer_embedding, obj_knowledge_mask,
answer_mask, normalize_alignment=False)
# (batch_size, num_tuples, num_options)
tiled_sv_tuple_weights = K.dot(K.expand_dims(sv_tuple_weights), K.ones((1, self.num_options)))
# Now we compute a weighted average over the tuples dimension, with the weights coming from how well
# the tuples align with the question.
# (batch_size, num_options)
obj_answer_weights = K.sum(tiled_sv_tuple_weights * obj_knowledge_answer_alignment, axis=1)
# Following steps are similar to what we did so far. Just substitute VO for SV and S for O.
## Step B1: Align VO with question
vo_question_knowledge_alignment = self._align(question_embedding, vo_knowledge, question_mask,
vo_knowledge_mask, normalize_alignment=False)
vo_question_tuple_weights = K.sum(K.reshape(vo_question_knowledge_alignment,
(batch_size, self.question_length, self.num_tuples, 2)),
axis=-1)
vo_tuple_weights = K.mean(vo_question_tuple_weights, axis=1)
## Step B2: Align answer with Subj
subj_knowledge_answer_alignment = self._align(subj_knowledge, answer_embedding, subj_knowledge_mask,
answer_mask, normalize_alignment=False)
tiled_vo_tuple_weights = K.dot(K.expand_dims(vo_tuple_weights), K.ones((1, self.num_options)))
subj_answer_weights = K.sum(tiled_vo_tuple_weights * subj_knowledge_answer_alignment, axis=1)
# We now select the element wise max of obj_answer_weights and subj_answer_weights as our final weights.
# (batch_size, num_options)
max_answer_weights = switch(K.greater(obj_answer_weights, subj_answer_weights),
obj_answer_weights, subj_answer_weights)
# Renormalizing max weights.
return K.softmax(max_answer_weights)
| deep_qa-master | deep_qa/layers/entailment_models/multiple_choice_tuple_entailment.py |
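A minimal NumPy sketch of steps A1-A2 above for a single example (illustrative only; masking and the symmetric B steps are skipped): tuples are weighted by how well their S and V slots align with the question, and those weights then reweight the object-to-answer alignments.

import numpy as np

def tuple_answer_scores(sv_question_alignment, obj_answer_alignment):
    # sv_question_alignment: (question_length, num_tuples * 2) alignment of each question
    # word with the S and V slots; obj_answer_alignment: (num_tuples, num_options).
    question_length, twice_num_tuples = sv_question_alignment.shape
    num_tuples = twice_num_tuples // 2
    # Sum over the S and V slots, then average over question words.
    per_tuple = sv_question_alignment.reshape(question_length, num_tuples, 2).sum(axis=-1)
    tuple_weights = per_tuple.mean(axis=0)                                  # (num_tuples,)
    # Weight each tuple's answer alignment by how well its SV slots matched the question.
    return (tuple_weights[:, None] * obj_answer_alignment).sum(axis=0)      # (num_options,)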
from typing import List, Tuple
from collections import defaultdict
import tensorflow
def pin_variable_device_scope(device, variable_device="/cpu:0"):
"""
Tensorflow device scopes can take functions which give a device
for a given op in the graph. Here, we use the device that is
passed to the scope *unless* the operation which is being created
in the graph is a Variable creation op; in this case, we place it
on the cpu.
"""
def _assign(graph_op):
node_def = graph_op if isinstance(graph_op, tensorflow.NodeDef) else graph_op.node_def
if node_def.op in ['Variable', 'VariableV2']:
return variable_device
else:
return device
return _assign
def average_gradients(tower_gradients: List[List[Tuple[tensorflow.Tensor, tensorflow.Tensor]]]):
"""
Given a list of (gradient, variable) pairs from the result of
a gradient calculation from multiple GPUs, calculate their
average.
"""
# Make a map from variables -> [gradients that are not none].
gradient_map = defaultdict(list)
for tower in tower_gradients:
for grad, variable in tower:
if grad is not None:
gradient_map[variable].append(grad)
average_gradient_list = []
for variable, gradients in gradient_map.items():
# variable is a tensor.
# gradients is a list of gradients for this tensor to average.
# Pick any one of the gradients to see if it is an IndexedSlice.
first_actual_grad = gradients[0]
if isinstance(first_actual_grad, tensorflow.IndexedSlices):
sparse_averaged_gradient = _get_sparse_gradient_average(gradients)
average_gradient_list.append((sparse_averaged_gradient, variable))
else:
dense_averaged_gradient = _get_dense_gradient_average(gradients)
average_gradient_list.append((dense_averaged_gradient, variable))
assert len(average_gradient_list) == len(gradient_map)
return average_gradient_list
def _get_dense_gradient_average(gradients: List[tensorflow.Tensor]):
"""
A normal tensor can just do a simple average. Here, we stack all the gradients into a
tensor and then average over the dimension which they were stacked into.
Parameters
----------
gradients: List[tensorflow.Tensor]
The list of gradients to average.
Returns
-------
An average gradient.
"""
grads_expanded = []
for grad in gradients:
# Add a leading 'tower' dimension to each gradient, which we will
# average over below.
grads_expanded.append(tensorflow.expand_dims(grad, 0))
# Average over the 'tower' dimension.
grad = tensorflow.concat(grads_expanded, 0)
mean_grad = tensorflow.reduce_mean(grad, 0)
return mean_grad
def _get_sparse_gradient_average(gradients: List[tensorflow.IndexedSlices]):
"""
If the gradient is an instance of an IndexedSlices then this is a sparse
gradient with attributes indices and values. To average, we
need to concat them individually and then create a new
IndexedSlices object. This case frequently occurs in the embedding layers
of neural network models, as for a given input, only some indices of the
embedding are updated, so performing sparse updates using IndexedSlices
is considerably more efficient.
Parameters
----------
gradients: List[tensorflow.IndexedSlices]
The list of sparse gradients to average.
Returns
-------
An average gradient.
"""
indices = []
values = []
first_actual_gradient = gradients[0]
for grad in gradients:
indices.append(grad.indices)
values.append(grad.values)
all_indices = tensorflow.concat(indices, 0)
avg_values = tensorflow.concat(values, 0) / len(gradients)
# NOTE(Mark): tf.unique has no GPU implementation in tensorflow,
# so if you use a network which requires sparse gradients for an op which
# occurs on the GPU (such as tf.gather, tf.scatter), this will be slow.
# This is not a problem for the embedding lookup, because this already happens
# on the CPU. See this issue:
# https://github.com/tensorflow/tensorflow/issues/10270
# Deduplicate across indices.
unique_indices, new_index_positions = tensorflow.unique(all_indices)
deduplicated_values = tensorflow.unsorted_segment_sum(
avg_values, new_index_positions,
tensorflow.shape(unique_indices)[0])
mean_grad = tensorflow.IndexedSlices(deduplicated_values,
unique_indices,
dense_shape=first_actual_gradient.dense_shape)
return mean_grad
def slice_batch(batch_inputs: List[tensorflow.Tensor], num_gpus: int):
"""
Given a list of Tensor inputs to a model, split each input into a list of
num_gpus tensors, where the first dimension of each element is
equal to the original batch dimension divided by the number of gpus.
Parameters
----------
batch_inputs: List[tensorflow.Tensor]
The list of model inputs to split up.
num_gpus: int
The number of gpus to split the inputs across.
Returns
-------
all_slices: List[List[tensorflow.Tensor]]
A list of lists of tensors split across their first dimension by num_gpus.
"""
all_slices = []
for placeholder in batch_inputs:
# Slice the placeholder into batches split across the number of gpus specified.
batch_size = int(int(placeholder.shape[0]) / num_gpus)
placeholder_slices = []
for i in range(num_gpus):
placeholder_slices.append(placeholder[(i * batch_size):((i + 1) * batch_size), ...])
all_slices.append(placeholder_slices)
return all_slices
| deep_qa-master | deep_qa/training/train_utils.py |
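For intuition, here is a NumPy-only sketch of what ``slice_batch`` does (names illustrative; the real function operates on TensorFlow tensors): each input is cut into ``num_gpus`` equal slices along the batch dimension.

import numpy as np

def slice_batch_sketch(batch_inputs, num_gpus):
    # Assumes the batch size divides evenly by num_gpus.
    all_slices = []
    for tensor in batch_inputs:
        slice_size = tensor.shape[0] // num_gpus
        all_slices.append([tensor[i * slice_size:(i + 1) * slice_size]
                           for i in range(num_gpus)])
    return all_slices

# Two inputs with batch size 8 split across 2 gpus -> two lists of two (4, ...) arrays.
slices = slice_batch_sketch([np.zeros((8, 5)), np.zeros((8, 3))], num_gpus=2)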
import logging
import os
from typing import Dict, List
from overrides import overrides
from keras.models import Model, Sequential
from keras.engine.training import _batch_shuffle, _make_batches, _slice_arrays
from keras.callbacks import History, CallbackList, ProgbarLogger, BaseLogger, Callback
import keras.backend as K
import tensorflow
import numpy
from .step import Step
from ..common.params import Params, ConfigurationError
from .train_utils import slice_batch
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class DeepQaModel(Model):
"""
This is a Model that adds functionality to Keras' ``Model`` class. In
particular, we use tensorflow optimisers directly in order to make use
of sparse gradient updates, which Keras does not handle. Additionally,
we provide some nicer summary functions which include mask information.
We are overriding key components of Keras here and you should probably
have a pretty good grip on the internals of Keras before you change
stuff below, as there could be unexpected consequences.
"""
# TODO(Mark): Tensorflow optimisers are not compatible with Keras' LearningRateScheduler.
def __init__(self, *args, **kwargs):
super(DeepQaModel, self).__init__(*args, **kwargs)
# We want to add a few things to the summary that's printed out by Keras. Unfortunately, Keras
# makes that very difficult. We have to copy large portions of code in order to make this
# work, because `print_summary()` is in `keras.utils.layer_utils`, instead of a member on
# `Container`...
@overrides
def summary(self, show_masks=False, **kwargs):
if show_masks:
self._summary_with_mask_info()
else:
self._keras_summary(**kwargs)
def _keras_summary(self):
super(DeepQaModel, self).summary()
def _summary_with_mask_info(self):
flattened_layers = getattr(self, 'flattened_layers', self.layers)
print_summary_with_masking(flattened_layers, getattr(self, 'container_nodes', None))
@overrides
def compile(self, params: Params): # pylint: disable=arguments-differ
# pylint: disable=attribute-defined-outside-init
"""
The only reason we are overriding this method is because keras automatically wraps
our tensorflow optimiser in a keras wrapper, which we don't want. We override the
only method in ``Model`` which uses this attribute, ``_make_train_function``, which
raises an error if compile is not called first.
As we move towards using a Tensorflow first optimisation loop, more things will be
added here which add functionality to the way Keras runs tensorflow Session calls.
"""
optimizer = params.get('optimizer')
self.num_gpus = params.pop('num_gpus', 0)
self.tensorboard_log = params.pop('tensorboard_log', None)
self.tensorboard_frequency = params.pop('tensorboard_frequency', 0)
gradient_clipping = params.pop("gradient_clipping", None)
self.gradient_clipping = gradient_clipping.as_dict() if gradient_clipping is not None else None
super(DeepQaModel, self).compile(**params.as_dict())
self.optimizer = optimizer
@overrides
def train_on_batch(self,
x: List[numpy.array],
y: List[numpy.array],
sample_weight: List[numpy.array]=None,
class_weight: Dict[int, numpy.array]=None):
"""
Runs a single gradient update on a single batch of data. We override this
method in order to provide multi-gpu training capability.
Parameters
----------
x: List[numpy.array], required
Numpy array of training data, or list of Numpy arrays if the model
has multiple inputs. If all inputs in the model are named, you can also
pass a dictionary mapping input names to Numpy arrays.
y: List[numpy.array], required
A Numpy array of labels, or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: List[numpy.array], optional (default = None)
optional array of the same length as x, containing weights to apply to
the model's loss for each sample. In the case of temporal data, you
can pass a 2D array with shape (samples, sequence_length), to apply a
different weight to every timestep of every sample. In this case you
should make sure to specify sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping
class indices (integers) to a weight (float) to apply to the model's
loss for the samples from this class during training. This can be useful
to tell the model to "pay more attention" to samples from an under-represented class.
Returns
-------
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
inputs, targets, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
batch_inputs = inputs + targets + sample_weights + [1.]
else:
batch_inputs = inputs + targets + sample_weights
# Here is the main difference between a single gpu model and one split
# across multiple gpus. In our multiple gpu model, all of the inputs
# are replicated num_gpus times, so we need to split our large batch
# into the corresponding sets of smaller batches for each model.
if self.num_gpus > 1:
# The Keras learning phase is a global variable used across model towers.
# If it is present, we remove it before splitting up the inputs
# and add it back on afterwards.
if isinstance(batch_inputs[-1], float):
model_inputs = self._multi_gpu_batch(batch_inputs[:-1])
model_inputs.append(batch_inputs[-1])
else:
model_inputs = self._multi_gpu_batch(batch_inputs)
batch_inputs = model_inputs
self._make_train_function()
outputs = self.train_function(batch_inputs)
if len(outputs) == 1:
return outputs[0]
return outputs
@overrides
def _make_train_function(self):
# pylint: disable=attribute-defined-outside-init
"""
We override this method so that we can use tensorflow optimisers directly.
This is desirable as tensorflow handles gradients of sparse tensors efficiently.
"""
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
if self.train_function is None:
inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
tensorflow.summary.scalar("total_loss", self.total_loss)
# Here we override Keras to use tensorflow optimizers directly.
self.global_step = tensorflow.train.get_or_create_global_step()
gradients = tensorflow.gradients(self.total_loss, self._collected_trainable_weights)
if self.gradient_clipping is not None:
# Don't pop from the gradient clipping dict here as
# if we call fit more than once we need it to still be there.
clip_type = self.gradient_clipping.get("type")
clip_value = self.gradient_clipping.get("value")
if clip_type == 'clip_by_norm':
gradients, _ = tensorflow.clip_by_global_norm(gradients, clip_value)
elif clip_type == 'clip_by_value':
gradients = [tensorflow.clip_by_value(x, -clip_value, clip_value) for x in gradients]
else:
raise ConfigurationError("{} is not a supported type of gradient clipping.".format(clip_type))
zipped_grads_with_weights = zip(gradients, self._collected_trainable_weights)
# pylint: disable=no-member
training_updates = self.optimizer.apply_gradients(zipped_grads_with_weights,
global_step=self.global_step)
# pylint: enable=no-member
updates = self.updates + [training_updates]
outputs = [self.total_loss] + self.metrics_tensors
# Gets loss and metrics. Updates weights at each call.
if self.tensorboard_log is not None:
train_summary_writer = tensorflow.summary.FileWriter(os.path.join(self.tensorboard_log, "train"))
else:
train_summary_writer = None
self.train_function = Step(inputs, outputs, self.global_step, train_summary_writer,
self.tensorboard_frequency, updates=updates)
@overrides
def _make_test_function(self):
# pylint: disable=attribute-defined-outside-init
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
if not hasattr(self, 'global_step'):
self.global_step = tensorflow.train.get_or_create_global_step()
self.test_function = Step(inputs, [self.total_loss] + self.metrics_tensors,
self.global_step, updates=self.state_updates)
@overrides
def _make_predict_function(self):
# pylint: disable=attribute-defined-outside-init
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
if not hasattr(self, 'global_step'):
self.global_step = tensorflow.train.get_or_create_global_step()
self.predict_function = Step(inputs, self.outputs, self.global_step,
updates=self.state_updates)
@overrides
def _fit_loop(self,
f: callable,
ins: List[numpy.array],
out_labels: List[str]=None,
batch_size: int=32,
epochs: int=100,
verbose: int=1,
callbacks: List[Callback]=None,
val_f: callable=None,
val_ins: List[numpy.array]=None,
shuffle: bool=True,
callback_metrics: List[str]=None,
initial_epoch: int=0):
"""
Abstract fit function which preprocesses and batches
data before training a model. We override this keras backend
function to support multi-gpu training via splitting a large
batch size across multiple gpus. This function is broadly the
same as the Keras backend version aside from this - changed elements
have corresponding comments attached.
Note that this should not be called directly - it is used by calling
model.fit().
Assume that step_function returns a list, labeled by out_labels.
Parameters
----------
f: A callable ``Step`` or a Keras ``Function``, required.
A DeepQA Step or Keras Function returning a list of tensors.
ins: List[numpy.array], required.
The list of tensors to be fed to ``step_function``.
out_labels: List[str], optional (default = None).
The display names of the outputs of ``step_function``.
batch_size: int, optional (default = 32).
The integer batch size.
epochs: int, optional (default = 100).
Number of times to iterate over the data.
verbose: int, optional, (default = 1)
Verbosity mode, 0, 1 or 2.
callbacks: List[Callback], optional (default = None).
A list of Keras callbacks to be called during training.
val_f: A callable ``Step`` or a Keras ``Function``, optional (default = None).
The Keras function to call for validation.
val_ins: List[numpy.array], optional (default = None).
A list of tensors to be fed to ``val_f``.
shuffle: bool, optional (default = True).
Whether to shuffle the data at the beginning of each epoch.
callback_metrics: List[str], optional (default = None).
A list of strings, the display names of the validation metrics
passed to the callbacks. They should be the concatenation of the list of display
names of the outputs of ``f`` and the list of display names of the outputs of ``f_val``.
initial_epoch: int, optional (default = 0).
The epoch at which to start training (useful for resuming a previous training run).
Returns
-------
A Keras ``History`` object.
"""
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
if ins and hasattr(ins[0], 'shape'):
num_train_samples = ins[0].shape[0]
else:
# May happen if we are running `fit` without Numpy input data,
# i.e. if all inputs to the models are data tensors
# instead of placeholders.
# In that case we will run `fit` over a single batch.
num_train_samples = batch_size
verbose = 2
index_array = numpy.arange(num_train_samples)
out_labels = out_labels or []
callbacks, callback_model = self._prepare_callbacks(callbacks, val_ins, epochs, batch_size,
num_train_samples, callback_metrics,
do_validation, verbose)
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = _batch_shuffle(index_array, batch_size)
elif shuffle:
numpy.random.shuffle(index_array)
batches = _make_batches(num_train_samples, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
# Here is the main difference between a single gpu model and one split
# across multiple gpus. In our multiple gpu model, all of the inputs
# are replicated num_gpus times, so we need to split our large batch
# into the corresponding sets of smaller batches for each model.
if self.num_gpus > 1:
# The Keras learning phase is a global variable used across model towers.
# If it is present, we remove it before splitting up the inputs
# and add it back on afterwards.
if isinstance(ins_batch[-1], float):
model_inputs = self._multi_gpu_batch(ins_batch[:-1])
model_inputs.append(ins_batch[-1])
else:
model_inputs = self._multi_gpu_batch(ins_batch)
ins_batch = model_inputs
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if not isinstance(outs, list):
outs = [outs]
for label, output in zip(out_labels, outs):
batch_logs[label] = output
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # Last batch.
if do_validation:
# If we are using multiple gpus, our batch size will be
# scaled up accordingly. However, validation will run
# on a single gpu, so we divide by the number of gpus
# to avoid OOM errors.
if self.num_gpus > 1:
val_batch_size = int(batch_size/self.num_gpus) # pylint: disable=no-member
else:
val_batch_size = batch_size
val_outs = self._test_loop(val_f, val_ins,
batch_size=val_batch_size,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for label, output in zip(out_labels, val_outs):
epoch_logs['val_' + label] = output
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training: # pylint: disable=no-member
break
callbacks.on_train_end()
return self.history
def _multi_gpu_batch(self, variable_list):
# Splits up and orders a list of inputs for a single
# model into a single list of inputs for that model
# in a towered fashion, with each input split across the batch size.
split_batch = slice_batch(variable_list, self.num_gpus) # pylint: disable=no-member
ordered_var_list = []
for single_model_variables in zip(*split_batch):
ordered_var_list.extend(single_model_variables)
return ordered_var_list
def _prepare_callbacks(self,
callbacks: List[Callback],
val_ins: List[numpy.array],
epochs: int,
batch_size: int,
num_train_samples: int,
callback_metrics: List[str],
do_validation: bool,
verbose: int):
"""
Sets up Keras callbacks to perform various monitoring functions during training.
"""
self.history = History() # pylint: disable=attribute-defined-outside-init
callbacks = [BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [ProgbarLogger()]
callbacks = CallbackList(callbacks)
# it's possible to callback a different model than self
# (used by Sequential models).
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self # pylint: disable=redefined-variable-type
callbacks.set_model(callback_model)
callbacks.set_params({
'batch_size': batch_size,
'epochs': epochs,
'samples': num_train_samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
})
callbacks.on_train_begin()
callback_model.stop_training = False
for cbk in callbacks:
cbk.validation_data = val_ins
return callbacks, callback_model
def print_summary_with_masking(layers, relevant_nodes=None):
line_length = 150
positions = [40, 60, 68, 98, 124, 150]
headers = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to', 'Input mask', 'Output mask']
print('_' * line_length)
print_row(headers, positions)
print('=' * line_length)
for i, layer in enumerate(layers):
print_layer_summary(layer, relevant_nodes, positions)
if i == len(layers) - 1:
print('=' * line_length)
else:
print('_' * line_length)
print('Total params: %s' % count_total_params(layers))
print('_' * line_length)
def print_row(fields, positions):
line = ''
for field, position in zip(fields, positions):
line += str(field)
line = line[:position - 1]
line += ' ' * (position - len(line))
print(line)
def print_layer_summary(layer, relevant_nodes, positions):
try:
output_shape = layer.output_shape
except Exception: # pylint: disable=broad-except
output_shape = 'multiple'
connections = []
input_masks = []
output_masks = []
for node_index, node in enumerate(layer.inbound_nodes):
input_mask = layer.get_input_mask_at(node_index)
if isinstance(input_mask, list):
input_masks.extend(input_mask)
else:
input_masks.append(input_mask)
output_masks.append(layer.get_output_mask_at(node_index))
if relevant_nodes:
node_key = layer.name + '_ib-' + str(node_index)
if node_key not in relevant_nodes:
# node is not part of the current network
continue
for i in range(len(node.inbound_layers)):
inbound_layer = node.inbound_layers[i].name
inbound_node_index = str(node.node_indices[i])
inbound_tensor_index = str(node.tensor_indices[i])
connections.append(inbound_layer + '[' + inbound_node_index + '][' + inbound_tensor_index + ']')
name = layer.name
cls_name = layer.__class__.__name__
first_connection = '' if not connections else connections[0]
first_input_mask = '' if not input_masks else input_masks[0]
first_output_mask = '' if not output_masks else output_masks[0]
fields = [
name + ' (' + cls_name + ')',
output_shape,
layer.count_params(),
first_connection,
first_input_mask,
first_output_mask,
]
print_row(fields, positions)
rows_needed = max(len(connections), len(output_masks), len(input_masks))
for i in range(1, rows_needed):
connection = '' if i >= len(connections) else connections[i]
input_mask = '' if i >= len(input_masks) else input_masks[i]
output_mask = '' if i >= len(output_masks) else output_masks[i]
fields = ['', '', '', connection, input_mask, output_mask]
print_row(fields, positions)
def count_total_params(layers, layer_set=None):
if layer_set is None:
layer_set = set()
total_params = 0
for layer in layers:
if layer in layer_set:
continue
layer_set.add(layer)
if isinstance(layer, Model) or isinstance(layer, Sequential):
total_params += count_total_params(layer.layers, layer_set)
else:
total_params += layer.count_params()
return total_params
| deep_qa-master | deep_qa/training/models.py |
from copy import deepcopy
from typing import Any, Dict, List, Tuple
import logging
import dill as pickle
from keras import backend as K
from keras.layers import Dense, Dropout, Layer, TimeDistributed, Embedding
from overrides import overrides
import numpy
import tensorflow
from ..common.checks import ConfigurationError
from ..common.params import Params
from ..common.util import clean_layer_name
from ..data import tokenizers, DataIndexer, DataGenerator, IndexedDataset, TextDataset
from ..data.embeddings import PretrainedEmbeddings
from ..data.instances import Instance, TextInstance
from ..data.datasets import concrete_datasets
from ..layers.encoders import encoders, set_regularization_params, seq2seq_encoders
from .trainer import Trainer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class TextTrainer(Trainer):
# pylint: disable=line-too-long
"""
This is a Trainer that deals with word sequences as its fundamental data type (any TextDataset
or TextInstance subtype is fine). That means we have to deal with padding, with converting
words (or characters) to indices, and encoding word sequences. This class adds methods on top
of Trainer to deal with all of that stuff.
This class has five kinds of methods:
    (1) protected methods that are overridden from :class:`~deep_qa.training.trainer.Trainer`, and
which you shouldn't need to worry about
(2) utility methods for building models, intended for use by subclasses
(3) abstract methods that determine a few key points of behavior in concrete subclasses (e.g.,
what your input data type is)
(4) model-specific methods that you `might` have to override, depending on what your model
looks like - similar to (3), but simple models don't need to override these
(5) private methods that you shouldn't need to worry about
There are two main ways you're intended to interact with this class, then: by calling the
utility methods when building your model, and by customizing the behavior of your concrete
model by using the parameters to this class.
Parameters
----------
embeddings : Dict[str, Any], optional (default=50 dim word embeddings, 8 dim character
embeddings, 0.5 dropout on both)
These parameters specify the kind of embeddings to use for words, character, tags, or
whatever you want to embed. This dictionary behaves similarly to the ``encoder`` and
``seq2seq_encoder`` parameter dictionaries. Valid keys are ``dimension``, ``dropout``,
``pretrained_file``, ``fine_tune``, and ``project``. The value for ``dimension`` is an
``int`` specifying the dimensionality of the embedding (default 50 for words, 8 for
characters); ``dropout`` is a float, specifying the amount of dropout to use on the
embedding layer (default ``0.5``); ``pretrained_file`` is a (string) path to a glove-formatted file
containing pre-trained embeddings; ``fine_tune`` is a boolean specifying whether the
pretrained embeddings should be trainable (default ``False``); and ``project`` is a boolean
specifying whether to add a projection layer after the embedding layer (only really useful
in conjunction with pre-trained embeddings, to get them into a lower-dimensional space;
        default ``False``). An example configuration is sketched at the end of this docstring.
data_generator: Dict[str, Any], optional (default=None)
If not ``None``, we will pass these parameters to a :class:`DataGenerator` object to create
data batches, instead of creating one big array for all of our training data. See
:class:`DataGenerator` for the available options here. Note that in order to take full
advantage of the capabilities of a ``DataGenerator``, you should make sure your model
correctly implements :func:`~TextTrainer._set_padding_lengths`,
:func:`~TextTrainer.get_padding_lengths`,
:func:`~TextTrainer.get_padding_memory_scaling`, and
:func:`~TextTrainer.get_instance_sorting_keys`. Also note that some of the things
``DataGenerator`` does can change the behavior of your learning algorithm, so you should
think carefully about how exactly you want batches to be structured before you choose these
parameters.
num_sentence_words: int, optional (default=None)
Upper limit on length of word sequences in the training data. Ignored during testing (we
use the value set at training time, either from this parameter or from a loaded model). If
this is not set, we'll calculate a max length from the data.
num_word_characters: int, optional (default=None)
Upper limit on length of words in the training data. Only applicable for "words and
characters" text encoding.
tokenizer: Dict[str, Any], optional (default={})
Which tokenizer to use for ``TextInstances``. See
:mod:``deep_qa.data.tokenizers.tokenizer`` for more information.
encoder: Dict[str, Dict[str, Any]], optional (default={'default': {}})
These parameters specify the kind of encoder used to encode any word sequence input. An
encoder takes a sequence of vectors and returns a single vector.
If given, this must be a dict, where each key is a name that can be used for encoders in
the model, and the value corresponding to the key is a set of parameters that will be
passed on to the constructor of the encoder. We will use the "type" key in this dict
(which must match one of the keys in `encoders`) to determine the type of the encoder, then
pass the remaining args to the encoder constructor.
Hint: Use ``"lstm"`` or ``"cnn"`` for sentences, ``"treelstm"`` for logical forms, and
``"bow"`` for either.
encoder_fallback_behavior: string, optional (default="crash")
Determines the behavior when an encoder is asked for by name, but you have not given
parameters for an encoder with that name. See ``_get_encoder`` for more information.
seq2seq_encoder: Dict[str, Dict[str, Any]], optional (default={'default': {'encoder_params': {}, 'wrapper_params: {}}})
Like ``encoder``, except seq2seq encoders return a sequence of vectors instead of a single
vector (the difference between our "encoders" and "seq2seq encoders" is the difference in
Keras between ``LSTM()`` and ``LSTM(return_sequences=True)``).
seq2seq_encoder_fallback_behavior: string, optional (default="crash")
Determines the behavior when a seq2seq encoder is asked for by name, but you have not given
parameters for an encoder with that name. See ``_get_seq2seq_encoder`` for more
information.
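    As a purely illustrative sketch (the particular sizes and encoder choices here are invented,
    not defaults), the text-related portion of a parameter file might look something like::

        "embeddings": {"words": {"dimension": 100, "dropout": 0.2},
                       "characters": {"dimension": 8, "dropout": 0.2}},
        "encoder": {"default": {"type": "lstm", "units": 200}},
        "seq2seq_encoder": {"default": {"encoder_params": {"type": "lstm", "units": 200},
                                        "wrapper_params": {}}},
        "num_sentence_words": 50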
"""
# pylint: enable=line-too-long
def __init__(self, params: Params):
        self.embedding_params = params.pop('embeddings',
                                           {'words': {'dimension': 50, 'dropout': 0.5},
                                            'characters': {'dimension': 8, 'dropout': 0.5}})
data_generator_params = params.pop('data_generator', None)
if data_generator_params is not None:
self.data_generator = DataGenerator(self, data_generator_params)
else:
self.data_generator = None
self.dataset_params = params.pop("dataset", {})
dataset_type_key = self.dataset_params.pop_choice("type", list(concrete_datasets.keys()),
default_to_first_choice=True)
self.dataset_type = concrete_datasets[dataset_type_key]
self.num_sentence_words = params.pop('num_sentence_words', None)
self.num_word_characters = params.pop('num_word_characters', None)
tokenizer_params = params.pop('tokenizer', {})
tokenizer_choice = tokenizer_params.pop_choice('type', list(tokenizers.keys()),
default_to_first_choice=True)
self.tokenizer = tokenizers[tokenizer_choice](tokenizer_params)
# Note that the way this works is a little odd - we need each Instance object to do the
# right thing when we call instance.words() and instance.to_indexed_instance(). So we set
# a class variable on TextInstance so that _all_ TextInstance objects use the setting that
# we read here.
TextInstance.tokenizer = self.tokenizer
self.encoder_params = params.pop('encoder', {'default': {}})
fallback_choices = ['crash', 'use default encoder', 'use default params']
self.encoder_fallback_behavior = params.pop_choice('encoder_fallback_behavior', fallback_choices,
default_to_first_choice=True)
self.seq2seq_encoder_params = params.pop('seq2seq_encoder',
{'default': {"encoder_params": {},
"wrapper_params": {}}})
self.seq2seq_encoder_fallback_behavior = params.pop_choice('seq2seq_encoder_fallback_behavior',
fallback_choices,
default_to_first_choice=True)
super(TextTrainer, self).__init__(params)
self.name = "TextTrainer"
self.data_indexer = DataIndexer()
# These keep track of which names you've used to get embeddings and encoders, so that we
# reuse layers that you want to reuse.
self.embedding_layers = {}
self.encoder_layers = {}
self.seq2seq_encoder_layers = {}
###########################
    # Overridden Trainer methods - you shouldn't have to worry about these, though for some
# advanced uses you might override some of them, especially _get_custom_objects.
###########################
@overrides
def create_data_arrays(self, dataset: IndexedDataset, batch_size: int=None):
if batch_size is None:
batch_size = self.batch_size
if self.data_generator is not None:
return self.data_generator.create_generator(dataset, batch_size)
else:
dataset.pad_instances(self.get_padding_lengths())
return dataset.as_training_data()
@overrides
def load_dataset_from_files(self, files: List[str]):
"""
This method assumes you have a TextDataset that can be read from a single file. If you
have something more complicated, you'll need to override this method (though, a solver that
has background information could call this method, then do additional processing on the
rest of the list, for instance).
"""
dataset_params = deepcopy(self.dataset_params)
return self.dataset_type.read_from_file(files[0], self._instance_type(), dataset_params)
@overrides
def score_dataset(self, dataset: TextDataset):
"""
See the superclass docs (:func:`Trainer.score_dataset`) for usage info. Just a note here
that we `do not` use data generators for this method, even if you've said elsewhere that
you want to use them, so that we can easily return the labels for the data. This means
that we'll do whole-dataset padding, and this could be slow. We could probably fix this,
but it's good enough for now.
"""
# TODO(matt): for some reason the reference to the super class docs above isn't getting
# linked properly. I'm guessing it's because of an indexing issue in sphinx, but I
# couldn't figure it out. Once that works, it can be changed to "See :func:`the superclass
# docs <Trainer.score_dataset>` for usage info").
indexed_dataset = dataset.to_indexed_dataset(self.data_indexer)
# Because we're not using data generators here, we need to save and hide
# `self.data_generator`. TODO(matt): it _should_ be as easy as iterating over the data
# again to pull out the labels, so we can still use data generators, but I'm waiting on
# implementing that.
data_generator = self.data_generator
self.data_generator = None
inputs, labels = self.create_data_arrays(indexed_dataset)
predictions = self.model.predict(inputs)
self.data_generator = data_generator
return predictions, labels
@overrides
def set_model_state_from_dataset(self, dataset: TextDataset):
logger.info("Fitting data indexer word dictionary.")
self.data_indexer.fit_word_dictionary(dataset)
@overrides
def set_model_state_from_indexed_dataset(self, dataset: IndexedDataset):
self._set_padding_lengths(dataset.padding_lengths())
def _dataset_indexing_kwargs(self) -> Dict[str, Any]:
return {'data_indexer': self.data_indexer}
@overrides
def _set_params_from_model(self):
self._set_padding_lengths_from_model()
@overrides
def _save_auxiliary_files(self):
super(TextTrainer, self)._save_auxiliary_files()
data_indexer_file = open("%s_data_indexer.pkl" % self.model_prefix, "wb")
pickle.dump(self.data_indexer, data_indexer_file)
data_indexer_file.close()
@overrides
def _load_auxiliary_files(self):
super(TextTrainer, self)._load_auxiliary_files()
data_indexer_file = open("%s_data_indexer.pkl" % self.model_prefix, "rb")
self.data_indexer = pickle.load(data_indexer_file)
data_indexer_file.close()
@overrides
def _overall_debug_output(self, output_dict: Dict[str, numpy.array]) -> str:
"""
We'll do something different here: if "embedding" is in output_dict, we'll output the
embedding matrix at the top of the debug file. Note that this could be _huge_ - you should
only do this for debugging on very simple datasets.
"""
result = super(TextTrainer, self)._overall_debug_output(output_dict)
if any('embedding' in layer_name for layer_name in output_dict.keys()):
embedding_layers = set([n for n in output_dict.keys() if 'embedding' in n])
for embedding_layer in embedding_layers:
if '_projection' in embedding_layer:
continue
if embedding_layer.startswith('combined_'):
continue
result += self.__render_embedding_matrix(embedding_layer.replace("_embedding", ""))
return result
@overrides
def _uses_data_generators(self):
return self.data_generator is not None
@classmethod
def _get_custom_objects(cls):
custom_objects = super(TextTrainer, cls)._get_custom_objects()
for value in encoders.values():
if value.__name__ not in ['LSTM']:
custom_objects[value.__name__] = value
for name, layer in TextInstance.tokenizer.get_custom_objects().items():
custom_objects[name] = layer
return custom_objects
#################
    # Utility methods - meant to be called by subclasses, not overridden
#################
def _get_sentence_shape(self, sentence_length: int=None) -> Tuple[int]:
"""
Returns a tuple specifying the shape of a tensor representing a sentence. This is not
necessarily just (self.num_sentence_words,), because different text_encodings lead to
different tensor shapes. If you have an input that is a sequence of words, you need to
call this to get the shape to pass to an ``Input`` layer. If you don't, your model won't
work correctly for all tokenizers.
"""
if sentence_length is None:
# This can't be the default value for the function argument, because
# self.num_sentence_words will not have been set at class creation time.
sentence_length = self.num_sentence_words
return self.tokenizer.get_sentence_shape(sentence_length, self.num_word_characters)
def _embed_input(self, input_layer: Layer, embedding_suffix: str=""):
"""
This function embeds a word sequence input, using an embedding defined by
``embedding_suffix``. You should call this function in your ``_build_model`` method any time
you want to convert word indices into word embeddings. Note that if this is used in
conjunction with ``_get_sentence_shape``, we will do the correct thing for whatever
:class:`~deep_qa.data.tokenizers.tokenizer.Tokenizer` you use. The actual input to this
might be words and characters, and we might actually do a concatenation of a word embedding
and a character-level encoder. All of this is handled transparently to your concrete model
subclass, if you use the API correctly, calling ``_get_sentence_shape()`` to get the shape
for your ``Input`` layer, and passing that input layer into this ``_embed_input()`` method.
We need to take the input Layer here, instead of just returning a Layer that you can use as
you wish, because we might have to apply several layers to the input, depending on the
parameters you specified for embedding things. So we return, essentially,
``embedding(input_layer)``.
The input layer can have arbitrary shape, as long as it ends with a word sequence. For
example, you could pass in a single sentence, a set of sentences, or a set of sets of
sentences, and we will handle them correctly.
Internally, we will create a dictionary mapping embedding names to embedding layers, so if
you have several things you want to embed with the same embedding layer, be sure you use
the same name each time (or just don't pass a name, which accomplishes the same thing). If
for some reason you want to have different embeddings for different inputs, use a different
name for the embedding.
In this function, we pass the work off to self.tokenizer, which might need to do some
additional processing to actually give you a word embedding (e.g., if your text encoder
uses both words and characters, we need to run the character encoder and concatenate the
result with a word embedding).
Note that the ``embedding_suffix`` parameter is a `suffix` to whatever name the tokenizer
will give to the embeddings it creates. Typically, the tokenizer will use the name
``words``, though it could also use ``characters``, or something else. So if you pass
``_A`` for ``embedding_suffix``, you will end up with actual embedding names like
``words_A`` and ``characters_A``. These are the keys you need to specify in your parameter
file, for embedding sizes etc. When constructing actual ``Embedding``
layers, we will further append the string ``_embedding``, so the layer would be named
``words_A_embedding``.
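        As a rough usage sketch inside a hypothetical ``_build_model`` (the variable names here
        are purely illustrative, and the usual Keras ``Input`` import is assumed)::

            question_input = Input(shape=self._get_sentence_shape(), dtype='int32', name='question_input')
            embedded_question = self._embed_input(question_input)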
"""
return self.tokenizer.embed_input(input_layer,
self.__get_embedded_input,
self,
embedding_suffix)
def _get_encoder(self, name="default", fallback_behavior: str=None):
"""
This method is intended to be used in your ``_build_model`` implementation, any time you
want to convert a sequence of vectors into a single vector. The encoder ``name``
corresponds to entries in the ``encoder`` parameter passed to the constructor of this
object, allowing you to customize the kind and behavior of the encoder just through
parameters.
A sentence encoder takes as input a sequence of word embeddings, and returns as output a
single vector encoding the sentence. This is typically either a simple RNN or an LSTM, but
could be more complex, if the "sentence" is actually a logical form.
Parameters
----------
name : str, optional (default="default")
The name of the encoder. Multiple calls to ``_get_encoder`` using the same name will
return the same encoder. To get parameters for creating the encoder, we look in
``self.encoder_params``, which is specified by the ``encoder`` parameter in
``self.__init__``. If ``name`` is not a key in ``self.encoder_params``, the behavior
is defined by the ``fallback_behavior`` parameter.
fallback_behavior : str, optional (default=None)
Determines what to do when ``name`` is not a key in ``self.encoder_params``. If you
pass ``None`` (the default), we will use ``self.encoder_fallback_behavior``, specified
by the ``encoder_fallback_behavior`` parameter to ``self.__init__``. There are three
options:
- ``"crash"``: raise an error. This is the default for
``self.encoder_fallback_behavior``. The intention is to help you find bugs - if you
specify a particular encoder name in ``self._build_model`` without giving a fallback
behavior, you probably wanted to use a particular set of parameters, so we crash if
they are not provided.
- ``"use default params"``: In this case, we return a new encoder created with
``self.encoder_params["default"]``.
- ``"use default encoder"``: In this case, we `reuse` the encoder created with
``self.encoder_params["default"]``. This effectively changes the ``name`` parameter
to ``"default"`` when the given ``name`` is not in ``self.encoder_params``.
"""
if fallback_behavior is None:
fallback_behavior = self.encoder_fallback_behavior
if name in self.encoder_layers:
# If we've already created this encoder, we can just return it.
return self.encoder_layers[name]
if name not in self.encoder_params and name != "default":
# If we haven't, we need to check that we _can_ create it, and decide _how_ to create
# it.
if fallback_behavior == "crash":
raise ConfigurationError("You asked for a named encoder (" + name + "), but "
"did not provide parameters for that encoder")
elif fallback_behavior == "use default encoder":
name = "default"
params = deepcopy(self.encoder_params.get(name, {}))
elif fallback_behavior == "use default params":
params = deepcopy(self.encoder_params["default"])
else:
raise ConfigurationError("Unrecognized fallback behavior: " + fallback_behavior)
else:
params = deepcopy(self.encoder_params.get(name, {}))
if name not in self.encoder_layers:
            # We need to check again whether we've already created this encoder, because in some
            # cases we change the name in the logic above.
encoder_layer_name = name + "_encoder"
new_encoder = self.__get_new_encoder(params, encoder_layer_name)
self.encoder_layers[name] = new_encoder
return self.encoder_layers[name]
def _get_seq2seq_encoder(self, name="default", fallback_behavior: str=None):
"""
This method is intended to be used in your ``_build_model`` implementation, any time you
want to convert a sequence of vectors into another sequence of vector. The encoder
``name`` corresponds to entries in the ``encoder`` parameter passed to the constructor of
this object, allowing you to customize the kind and behavior of the encoder just through
parameters.
A seq2seq encoder takes as input a sequence of vectors, and returns as output a sequence of
vectors. This method is essentially identical to ``_get_encoder``, except that it gives an
encoder that returns a sequence of vectors instead of a single vector.
Parameters
----------
name : str, optional (default="default")
The name of the encoder. Multiple calls to ``_get_seq2seq_encoder`` using the same
name will return the same encoder. To get parameters for creating the encoder, we look
in ``self.seq2seq_encoder_params``, which is specified by the ``seq2seq_encoder``
parameter in ``self.__init__``. If ``name`` is not a key in
``self.seq2seq_encoder_params``, the behavior is defined by the ``fallback_behavior``
parameter.
fallback_behavior : str, optional (default=None)
Determines what to do when ``name`` is not a key in ``self.seq2seq_encoder_params``.
If you pass ``None`` (the default), we will use
``self.seq2seq_encoder_fallback_behavior``, specified by the
``seq2seq_encoder_fallback_behavior`` parameter to ``self.__init__``. There are three
options:
- ``"crash"``: raise an error. This is the default for
``self.seq2seq_encoder_fallback_behavior``. The intention is to help you find bugs -
if you specify a particular encoder name in ``self._build_model`` without giving a
fallback behavior, you probably wanted to use a particular set of parameters, so we
crash if they are not provided.
- ``"use default params"``: In this case, we return a new encoder created with
``self.seq2seq_encoder_params["default"]``.
- ``"use default encoder"``: In this case, we `reuse` the encoder created with
``self.seq2seq_encoder_params["default"]``. This effectively changes the ``name``
parameter to ``"default"`` when the given ``name`` is not in
``self.seq2seq_encoder_params``.
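        A rough sketch, mirroring the ``_get_encoder`` example (``embedded_passage`` is assumed
        to be an embedded word sequence)::

            seq2seq_encoder = self._get_seq2seq_encoder(name="passage_encoder",
                                                        fallback_behavior="use default params")
            encoded_passage = seq2seq_encoder(embedded_passage)  # still a sequence of vectors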
"""
if fallback_behavior is None:
fallback_behavior = self.seq2seq_encoder_fallback_behavior
if name in self.seq2seq_encoder_layers:
# If we've already created this encoder, we can just return it.
return self.seq2seq_encoder_layers[name]
if name not in self.seq2seq_encoder_params:
# If we haven't, we need to check that we _can_ create it, and decide _how_ to create
# it.
if fallback_behavior == "crash":
raise ConfigurationError("You asked for a named seq2seq encoder (" + name + "), "
"but did not provide parameters for that encoder")
elif fallback_behavior == "use default encoder":
name = "default"
params = deepcopy(self.seq2seq_encoder_params[name])
elif fallback_behavior == "use default params":
params = deepcopy(self.seq2seq_encoder_params["default"])
else:
raise ConfigurationError("Unrecognized fallback behavior: " + fallback_behavior)
else:
params = deepcopy(self.seq2seq_encoder_params[name])
if name not in self.seq2seq_encoder_layers:
            # We need to check again whether we've already created this encoder, because in some
            # cases we change the name in the logic above.
encoder_layer_name = name + "_encoder"
new_encoder = self.__get_new_seq2seq_encoder(params, encoder_layer_name)
self.seq2seq_encoder_layers[name] = new_encoder
return self.seq2seq_encoder_layers[name]
def _set_text_lengths_from_model_input(self, input_slice):
"""
Given an input slice (a tuple) from a model representing the max length of the sentences
and the max length of each words, set the padding max lengths. This gets called when
loading a model, and is necessary to get padding correct when using loaded models.
Subclasses need to call this in their ``_set_padding_lengths_from_model`` method.
Parameters
----------
input_slice : tuple
A slice from a concrete model class that represents an input word sequence. The tuple
must be of length one or two, and the first dimension should correspond to the length
of the sentences while the second dimension (if provided) should correspond to the
max length of the words in each sentence.
"""
if len(input_slice) > 2:
raise ValueError("Length of input tuple must be "
"2 or 1, got input tuple of "
"length {}".format(len(input_slice)))
self.num_sentence_words = input_slice[0]
if len(input_slice) == 2:
self.num_word_characters = input_slice[1]
##################
# Abstract methods - you MUST override these
##################
def _instance_type(self) -> Instance:
"""
When reading datasets, what :class:`~deep_qa.data.instances.instance.Instance` type should
we create? The ``Instance`` class contains code that creates actual numpy arrays, so this
instance type determines the inputs that you will get to your model, and the outputs that
are used for training.
"""
raise NotImplementedError
def _set_padding_lengths_from_model(self):
"""
This gets called when loading a saved model. It is analogous to ``_set_padding_lengths``,
but needs to set all of the values set in that method just by inspecting the loaded model.
If we didn't have this, we would not be able to correctly pad data after loading a model.
"""
# TODO(matt): I wonder if we can be fancy here and remove this method, instead using
# `self._instance_type` to figure out what this should be ourselves, or delegating it to
# the `Instance` type. But that might run into issues with dynamic padding, though,
# actually - how can the `Instance` know which things you want your model to pad
# dynamically?
raise NotImplementedError
########################
# Model-specific methods - if you do anything complicated, you probably need to override these,
# but simple models might be able to get by with just the default implementation. Some of
# these methods are also callable by non-TextTrainer objects, so that we can separate out the
# DataGenerator and other functionality.
########################
def get_instance_sorting_keys(self) -> List[str]: # pylint: disable=no-self-use
"""
If we're using dynamic padding, we want to group the instances by padding length, so that
we minimize the amount of padding necessary per batch. This variable sets what exactly
gets sorted by. We'll call
:func:`~deep_qa.data.instances.instance.IndexedInstance.get_padding_lengths()` on each
instance, pull out these keys, and sort by them in the order specified. You'll want to
override this in your model class if you have more complex models.
The default implementation is to sort first by ``num_sentence_words``, then by
``num_word_characters`` (if applicable).
"""
sorting_keys = ['num_sentence_words']
if isinstance(self.tokenizer, tokenizers['words and characters']):
# TODO(matt): This is a bit brittle, because other tokenizers might need similar
# handling. We could consider adding an API call to Tokenizer classes to get this kind
# of information. If we find ourselves adding more tokenizers, it might be worth it.
sorting_keys.append('num_word_characters')
return sorting_keys
def get_padding_lengths(self) -> Dict[str, int]:
"""
This is about padding. Any solver will have some number of things that need padding in
order to make consistently-sized data arrays, like the length of a sentence. This method
returns a dictionary of all of those things, mapping a length key to an int.
If any of the entries in this dictionary is ``None``, the padding code will calculate a
padding length from the data itself. This could either be a good idea or a bad idea - if
you have outliers in your data, you could be wasting a whole lot of memory and computation
time if you pad the whole dataset to the size of the outlier. On the other hand, if you do
batch-specific padding, this can save you a whole lot of time, if you group batches by
similar lengths.
Here we return the lengths that are applicable to encoding words and sentences. If you
have additional padding dimensions, call super().get_padding_lengths() and then update the
dictionary.
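        An illustrative override in a hypothetical subclass (``num_passage_words`` is an invented
        key, not something this class defines) might look like::

            def get_padding_lengths(self):
                padding_lengths = super(MyModel, self).get_padding_lengths()
                padding_lengths['num_passage_words'] = self.num_passage_words
                return padding_lengths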
"""
return self.tokenizer.get_padding_lengths(self.num_sentence_words, self.num_word_characters)
def _set_padding_lengths(self, dataset_padding_lengths: Dict[str, int]):
"""
This is about padding. Any model will have some number of things that need padding in
order to make a consistent set of input arrays, like the length of a sentence. This method
sets those variables given a dictionary of lengths from a dataset.
Note that you might choose not to update some of these lengths, either because you want to
keep the model flexible to allow for dynamic (batch-specific) padding, or because you've
set a hard limit in the class parameters and don't want to change it.
"""
if self.data_generator is not None and self.data_generator.dynamic_padding:
return
if self.num_sentence_words is None:
self.num_sentence_words = dataset_padding_lengths.get('num_sentence_words', None)
if self.num_word_characters is None:
self.num_word_characters = dataset_padding_lengths.get('num_word_characters', None)
# pylint: disable=no-self-use,unused-argument
def get_padding_memory_scaling(self, padding_lengths: Dict[str, int]) -> int:
"""
This method is for computing adaptive batch sizes. We assume that memory usage is a
function that looks like this: :math:`M = b * O(p) * c`, where :math:`M` is the memory
usage, :math:`b` is the batch size, :math:`c` is some constant that depends on how much GPU
memory you have and various model hyperparameters, and :math:`O(p)` is a function outlining
how memory usage asymptotically varies with the padding lengths. Our approach will be to
let the user effectively set :math:`\\frac{M}{c}` using the
``adaptive_memory_usage_constant`` parameter in :class:`DataGenerator`. The model (this
method) specifies :math:`O(p)`, so we can solve for the batch size :math:`b`. The more
specific you get in specifying :math:`O(p)` in this function, the better a job we can do in
optimizing memory usage.
Parameters
----------
padding_lengths: Dict[str, int]
Dictionary containing padding lengths, mapping keys like ``num_sentence_words`` to
ints. This method computes a function of these ints.
Returns
-------
O(p): int
The big-O complexity of the model, evaluated with the specific ints given in
``padding_lengths`` dictionary.
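        As a sketch: for a hypothetical model whose memory is dominated by a word-by-word
        attention matrix over the sentence, an implementation might simply be::

            def get_padding_memory_scaling(self, padding_lengths):
                return padding_lengths['num_sentence_words'] ** 2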
"""
# This is a RuntimeError instead of a NotImplementedError because it's not required to
# implement this method to have a valid TextTrainer. You only have to implement it if you
# want to use adaptive batch sizes.
raise RuntimeError("You need to implement this method for your model!")
# pylint: enable=no-self-use,unused-argument
#################
    # Private methods - you shouldn't override these. If you find yourself needing to, we can
# consider making them protected instead.
#################
def __get_embedded_input(self,
input_layer: Layer,
embedding_name: str,
vocab_name: str='words'):
"""
This function does most of the work for self._embed_input. We pass this method to the
tokenizer, so it can get whatever embedding layers it needs.
We allow for multiple vocabularies, e.g., if you want to embed both characters and words
with separate embedding matrices.
"""
if embedding_name not in self.embedding_layers:
self.embedding_layers[embedding_name] = self.__get_new_embedding(embedding_name, vocab_name)
embedding_layer, projection_layer, dropout = self.embedding_layers[embedding_name]
embedded_input = embedding_layer(input_layer)
layer_name = clean_layer_name(input_layer.name, strip_numerics_after_underscores=False)
if projection_layer is not None:
# 1 here to account for batch_size, which we don't need
# to TimeDistribute.
for i in range(1, K.ndim(input_layer)):
projection_layer_name = layer_name + "/" + projection_layer.name + "_{}".format(i)
projection_layer = TimeDistributed(projection_layer, name=projection_layer_name)
embedded_input = projection_layer(embedded_input)
if dropout > 0.0:
embedded_input = Dropout(dropout)(embedded_input)
return embedded_input
def __get_new_embedding(self, name: str, vocab_name: str='words'):
"""
Creates an Embedding Layer (and possibly also a Dense projection Layer) based on the
parameters you've passed to the TextTrainer. These could be pre-trained embeddings or not,
could include a projection or not, and so on.
Parameters
----------
name : ``str``
The name of the embedding. This needs to correspond to one of the keys in the
``embeddings`` parameter dictionary passed to the constructor.
"""
embedding_params = self.embedding_params.pop(name)
with tensorflow.device("/cpu:0"):
pretrained_file = embedding_params.pop('pretrained_file', None)
projection_layer = None
if pretrained_file:
embedding_layer = PretrainedEmbeddings.get_embedding_layer(
pretrained_file,
self.data_indexer,
embedding_params.pop('fine_tune', False),
name=name + '_embedding')
if embedding_params.pop('project', False):
# This projection layer is not time distributed, because we handle it later
# in __get_embedded_input - this allows us to more easily reuse embeddings
# for inputs with different shapes, as Keras sets layer attributes such as
# input shape the first time the layer is called, which is overly restrictive
# in the case of sharing embedding lookup tables.
projection_layer = Dense(units=embedding_params.pop('dimension'), name=name + "_projection")
else:
embedding_dimension = embedding_params.pop('dimension', None)
if embedding_dimension is not None and embedding_dimension != embedding_layer.output_dim:
raise ConfigurationError("You have specified both 'pretrained_file' "
" and 'dimension' in your embedding parameters, but "
"the 'project' argument was either False or unset and the "
"dimension you specified was not equal to the pretrained"
" embedding size. Refusing to continue without clarification"
" of parameters.")
else:
embedding_layer = Embedding(
input_dim=self.data_indexer.get_vocab_size(vocab_name),
output_dim=embedding_params.pop('dimension'),
mask_zero=True, # this handles padding correctly
name=name + '_embedding')
if embedding_params.pop('project', False):
raise ConfigurationError("You are projecting randomly initialised embeddings. Change "
" 'project' to false or add pretrained_file to your config. ")
dropout = embedding_params.pop('dropout', 0.5)
# We now should have popped all parameters from this
# embedding scope, so we check for any which remain.
embedding_params.assert_empty("embedding with name {}".format(name))
return embedding_layer, projection_layer, dropout
def __get_new_encoder(self, params: Params, name: str):
encoder_type = params.pop_choice("type", list(encoders.keys()),
default_to_first_choice=True)
params["name"] = name
params.setdefault("units", self.embedding_layers['words'][0].output_dim)
set_regularization_params(encoder_type, params)
return encoders[encoder_type](**params)
def __get_new_seq2seq_encoder(self, params: Params, name="seq2seq_encoder"):
encoder_params = params["encoder_params"]
wrapper_params = params["wrapper_params"]
wrapper_params["name"] = name
seq2seq_encoder_type = encoder_params.pop_choice("type", list(seq2seq_encoders.keys()),
default_to_first_choice=True)
encoder_params.setdefault("units", self.embedding_layers['words'][0].output_dim)
set_regularization_params(seq2seq_encoder_type, encoder_params)
return seq2seq_encoders[seq2seq_encoder_type](**params)
def __render_embedding_matrix(self, embedding_name: str) -> str:
result = 'Embedding matrix for %s:\n' % embedding_name
embedding_weights = self.embedding_layers[embedding_name][0].get_weights()[0]
for i in range(self.data_indexer.get_vocab_size()):
word = self.data_indexer.get_word_from_index(i)
word_vector = '[' + ' '.join('%.4f' % x for x in embedding_weights[i]) + ']'
result += '%s\t%s\n' % (word, word_vector)
result += '\n'
return result
| deep_qa-master | deep_qa/training/text_trainer.py |
from .text_trainer import TextTrainer
from .trainer import Trainer
| deep_qa-master | deep_qa/training/__init__.py |
r"""
It turns out that Keras' design is somewhat crazy\*, and there is no list of
optimizers that you can just import from Keras. So, this module specifies a
list, and a helper function or two for dealing with optimizer parameters.
Unfortunately, this means that we have a list that must be kept in sync with
Keras. Oh well.
\* Have you seen their get_from_module() method? See here:
https://github.com/fchollet/keras/blob/6e42b0e4a77fb171295b541a6ae9a3a4a79f9c87/keras/utils/generic_utils.py#L10.
That method means I could pass in 'clip_norm' as an optimizer, and it would try
to use that function as an optimizer. It also means there is no simple list of
implemented optimizers I can grab.
\* I should also note that Keras is an incredibly useful library that does a lot
of things really well. It just has a few quirks...
"""
import logging
from typing import Union
# pylint: disable=no-name-in-module
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
# pylint: enable=no-name-in-module
from ..common.params import Params
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
optimizers = { # pylint: disable=invalid-name
'sgd': GradientDescentOptimizer,
'rmsprop': RMSPropOptimizer,
'adagrad': AdagradOptimizer,
'adadelta': AdadeltaOptimizer,
'adam': AdamOptimizer
}
def optimizer_from_params(params: Union[Params, str]):
"""
This method converts from a parameter object like we use in our Trainer
code into an optimizer object suitable for use with Keras. The simplest
case for both of these is a string that shows up in `optimizers` above - if
`params` is just one of those strings, we return it, and everyone is happy.
If not, we assume `params` is a Dict[str, Any], with a "type" key, where
the value for "type" must be one of those strings above. We take the rest
of the parameters and pass them to the optimizer's constructor.
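    A minimal sketch of both accepted forms (the learning rate is illustrative, and we assume
    ``Params`` is constructed from a plain parameter dictionary, as elsewhere in this library)::

        optimizer = optimizer_from_params("adam")
        optimizer = optimizer_from_params(Params({"type": "adagrad", "learning_rate": 0.01}))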
"""
if isinstance(params, str):
optimizer = params
params = {}
else:
optimizer = params.pop_choice("type", optimizers.keys())
return optimizers[optimizer](**params)
| deep_qa-master | deep_qa/training/optimizers.py |
from typing import List
import tensorflow
import numpy
import keras.backend as K
class Step:
"""
Runs a computation graph.
Parameters
----------
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
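    A rough usage sketch (``input_placeholder``, ``label_placeholder``, ``loss`` and ``train_op``
    are assumed to come from a graph you have already built)::

        global_step = tensorflow.Variable(0, trainable=False, name='global_step')
        train_step = Step(inputs=[input_placeholder, label_placeholder],
                          outputs=[loss],
                          global_step=global_step,
                          updates=[train_op])
        loss_value = train_step([input_batch, label_batch])[0]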
"""
def __init__(self,
inputs: List,
outputs: List,
global_step: tensorflow.Variable,
summary_writer: tensorflow.summary.FileWriter=None,
summary_frequency: int=10,
updates=None):
updates = updates or []
if not isinstance(inputs, (list, tuple)):
raise TypeError('`inputs` to a TensorFlow backend function '
'should be a list or tuple.')
if not isinstance(outputs, (list, tuple)):
raise TypeError('`outputs` of a TensorFlow backend function '
'should be a list or tuple.')
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a TensorFlow backend function '
'should be a list or tuple.')
self.inputs = list(inputs)
self.outputs = list(outputs)
self.summary_writer = summary_writer
self.summary_frequency = summary_frequency
self.global_step = global_step
self.summary_operation = tensorflow.summary.merge_all()
with tensorflow.control_dependencies(self.outputs):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
variable, new_value = update
updates_ops.append(tensorflow.assign(variable, new_value))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = tensorflow.group(*updates_ops)
def __call__(self, inputs):
current_step = K.eval(self.global_step)
run_summary = ((self.summary_frequency > 0)
and (current_step % self.summary_frequency == 0)
and (self.summary_writer is not None))
if not isinstance(inputs, (list, tuple)):
raise TypeError('`inputs` should be a list or tuple.')
feed_dict = {}
for tensor, value in zip(self.inputs, inputs):
if K.is_sparse(tensor):
sparse_coo = value.tocoo()
indices = numpy.concatenate((numpy.expand_dims(sparse_coo.row, 1),
numpy.expand_dims(sparse_coo.col, 1)), 1)
value = (indices, sparse_coo.data, sparse_coo.shape)
feed_dict[tensor] = value
fetches = self.outputs + [self.updates_op]
if run_summary:
fetches += [self.summary_operation]
session = K.get_session()
returned_fetches = session.run(fetches, feed_dict=feed_dict)
if run_summary:
self.summary_writer.add_summary(returned_fetches[-1], current_step)
self.summary_writer.flush()
return returned_fetches[:len(self.outputs)]
| deep_qa-master | deep_qa/training/step.py |
from keras import backend as K
from ..tensors.backend import VERY_NEGATIVE_NUMBER, VERY_LARGE_NUMBER
def ranking_loss(y_pred, y_true):
"""
Using this loss trains the model to give scores to all correct elements in y_true that are
higher than all scores it gives to incorrect elements in y_true.
For example, let ``y_true = [0, 0, 1, 1, 0]``, and let ``y_pred = [-1, 1, 2, 0, -2]``. We will
find the lowest score assigned to correct elements in ``y_true`` (``0`` in this case), and the
highest score assigned to incorrect elements in ``y_true`` (``1`` in this case). We will then
compute a sigmoided loss given these values: ``-K.sigmoid(0 - 1)`` (we're minimizing the loss,
so the negative sign in front of the sigmoid means we want the correct element to have a higher
score than the incorrect element).
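    Concretely, the loss for this example works out to ``-sigmoid(0 - 1) = -sigmoid(-1)``, which
    is approximately ``-0.269``.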
Note that the way we do this uses ``K.max()`` and ``K.min()`` over the elements in ``y_true``,
which means that if you have a lot of values in here, you'll only get gradients backpropping
through two of them (the ones on the margin). This could be an inefficient use of your
computation time. Think carefully about the data that you're using with this loss function.
Because of the way masking works with Keras loss functions, also, you need to be sure that any
masked elements in ``y_pred`` have very negative values before they get passed into this loss
function.
"""
correct_elements = y_pred + (1.0 - y_true) * VERY_LARGE_NUMBER
lowest_scoring_correct = K.min(correct_elements, axis=-1)
incorrect_elements = y_pred + y_true * VERY_NEGATIVE_NUMBER
highest_scoring_incorrect = K.max(incorrect_elements, axis=-1)
return K.mean(-K.sigmoid(lowest_scoring_correct - highest_scoring_incorrect))
def ranking_loss_with_margin(y_pred, y_true):
"""
Using this loss trains the model to give scores to all correct elements in y_true that are
higher than all scores it gives to incorrect elements in y_true, plus a margin.
For example, let ``y_true = [0, 0, 1, 1, 0]``, and let ``y_pred = [-1, 1, 2, 0, -2]``. We will
find the lowest score assigned to correct elements in ``y_true`` (``0`` in this case), and the
highest score assigned to incorrect elements in ``y_true`` (``1`` in this case). We will then
compute a hinge loss given these values: ``K.maximum(0.0, 1 + 1 - 0)``.
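    For this example the loss therefore works out to ``max(0, 1 + 1 - 0) = 2.0``.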
Note that the way we do this uses ``K.max()`` and ``K.min()`` over the elements in ``y_true``,
which means that if you have a lot of values in here, you'll only get gradients backpropping
through two of them (the ones on the margin). This could be an inefficient use of your
computation time. Think carefully about the data that you're using with this loss function.
Because of the way masking works with Keras loss functions, also, you need to be sure that any
masked elements in ``y_pred`` have very negative values before they get passed into this loss
function.
"""
correct_elements = y_pred + (1.0 - y_true) * VERY_LARGE_NUMBER
lowest_scoring_correct = K.min(correct_elements, axis=-1)
incorrect_elements = y_pred + y_true * VERY_NEGATIVE_NUMBER
highest_scoring_incorrect = K.max(incorrect_elements, axis=-1)
return K.mean(K.maximum(0.0, 1.0 + highest_scoring_incorrect - lowest_scoring_correct))
| deep_qa-master | deep_qa/training/losses.py |
import logging
import os
from typing import Any, Dict, List, Tuple
import numpy
from keras.callbacks import CallbackList, EarlyStopping, LambdaCallback, ModelCheckpoint
from keras.models import model_from_json
from ..data.datasets import Dataset, IndexedDataset
from ..common.checks import ConfigurationError
from ..common.params import Params
from ..data.instances.instance import Instance
from ..layers.wrappers import OutputMask
from .models import DeepQaModel
from .optimizers import optimizer_from_params
from .multi_gpu import compile_parallel_model
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Trainer:
"""
A Trainer object specifies data, a model, and a way to train the model with the data. Here we
group all of the common code related to these things, making only minimal assumptions about
what kind of data you're using or what the structure of your model is.
The main benefits of this class are having a common place for setting parameters related to
training, actually running the training with those parameters, and code for saving and loading
models.
The intended use of this class is that you construct a subclass that defines a model,
overriding the abstract methods and (optionally) some of the protected methods in this class.
Thus there are four kinds of methods in this class: (1) public methods, that are typically only
used by ``deep_qa/run.py`` (or some other driver that you create), (2) abstract methods
(beginning with ``_``), which `must` be overridden by any concrete subclass, (3) protected
methods (beginning with ``_``) that you are meant to override in concrete subclasses, and (4)
private methods (beginning with ``__``) that you should not need to mess with. We only include
the first three in the public docs.
Parameters
----------
train_files: List[str], optional (default=None)
The files containing the data that should be used for training. See
:func:`~Trainer.load_dataset_from_files()` for more information.
validation_files: List[str], optional (default=None)
The files containing the data that should be used for validation, if you do not want to use
a split of the training data for validation. The default of None means to just use the
`validation_split` parameter to split the training data for validation.
test_files: List[str], optional (default=None)
The files containing the data that should be used for evaluation. The default of None
means to just not perform test set evaluation.
max_training_instances: int, optional (default=None)
Upper limit on the number of training instances. If this is set, and we get more than
this, we will truncate the data. Mostly useful for testing things out on small datasets
before running them on large datasets.
max_validation_instances: int, optional (default=None)
Upper limit on the number of validation instances, analogous to ``max_training_instances``.
max_test_instances: int, optional (default=None)
Upper limit on the number of test instances, analogous to ``max_training_instances``.
train_steps_per_epoch: int, optional (default=None)
If :func:`~Trainer.create_data_arrays` returns a generator instead of actual arrays, how
many steps should we run from this generator before declaring an "epoch" finished? The
default here is reasonable - if this is None, we will set it from the data.
validation_steps: int, optional (default=None)
Like ``train_steps_per_epoch``, but for validation data.
test_steps: int, optional (default=None)
Like ``train_steps_per_epoch``, but for test data.
save_models: bool, optional (default=True)
Should we save the models that we train? If this is True, you are required to also set the
model_serialization_prefix parameter, or the code will crash.
model_serialization_prefix: str, optional (default=None)
Prefix for saving and loading model files. Must be set if ``save_models`` is ``True``.
    num_gpus: int, optional (default=1)
        Number of GPUs to use. In DeepQA we use data parallelism, meaning that we create copies
        of the full model for each GPU, allowing the batch size of your model to be scaled
        depending on the number of GPUs. Note that using multiple GPUs effectively increases
        your batch size by the number of GPUs you have, meaning that other code which depends on
        the batch size will be affected - for example, if you are using dynamic padding, the
        batches will be larger and hence more padded, as the dataset is chunked into fewer
        overall batches.
batch_size: int, optional (default=32)
Batch size to use when training.
num_epochs: int, optional (default=20)
Number of training epochs.
validation_split: float, optional (default=0.1)
Amount of training data to use for validation. If ``validation_files`` is not set, we will
split the training data into train/dev, using this proportion as dev. If
``validation_files`` is set, this parameter gets ignored.
optimizer: str or Dict[str, Any], optional (default='adam')
If this is a str, it must correspond to an optimizer available in Keras (see the list in
:mod:`deep_qa.training.optimizers`). If it is a dictionary, it must contain a "type" key,
with a value that is one of the optimizers in that list. The remaining parameters in the
dict are passed as kwargs to the optimizer's constructor.
loss: str, optional (default='categorical_crossentropy')
The loss function to pass to ``model.fit()``. This is currently limited to only loss
functions that are available as strings in Keras. If you want to use a custom loss
function, simply override ``self.loss`` in the constructor of your model, after the call to
``super().__init__``.
metrics: List[str], optional (default=['accuracy'])
The metrics to evaluate and print after each epoch of training. This is currently limited
to only loss functions that are available as strings in Keras. If you want to use a custom
metric, simply override ``self.metrics`` in the constructor of your model, after the call
to ``super().__init__``.
validation_metric: str, optional (default='val_acc')
Metric to monitor on the validation data for things like early stopping and saving the best
model.
patience: int, optional (default=1)
Number of epochs to be patient before early stopping. I.e., if the ``validation_metric``
does not improve for this many epochs, we will stop training.
fit_kwargs: Dict[str, Any], optional (default={})
A dict of additional arguments to Keras' ``model.fit()`` method, in case you want to set
something that we don't already have options for. These get added to the options already
captured by other arguments.
tensorboard_log: str, optional (default=None)
If set, we will output tensorboard log information here.
    tensorboard_frequency: int, optional (default=0)
        Tensorboard histogram frequency: note that activating the tensorboard histogram (frequency >
        0) can drastically increase model training time. Please set frequency with consideration
to desired runtime.
debug: Dict[str, Any], optional (default={})
        This should be a dict, containing the following keys (a hypothetical example is sketched
        at the end of this docstring):
- "layer_names", which has as a value a list of names that must match layer names in the
model built by this Trainer.
- "data", which has as a value either "training", "validation", or a list of file names.
If you give "training" or "validation", we'll use those datasets, otherwise we'll load
data from the provided files. Note that currently "validation" only works if you provide
validation files, not if you're just using Keras to split the training data.
- "masks", an optional key that functions identically to "layer_names", except we output
the mask at each layer given here.
show_summary_with_masking_info: bool, optional (default=False)
This is a debugging setting, mostly - we have written a custom model.summary() method that
supports showing masking info, to help understand what's going on with the masks.
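    As a purely illustrative sketch, the ``debug`` parameter might be specified like this in a
    parameter file (the layer names here are made up)::

        "debug": {"layer_names": ["question_encoder", "final_softmax"],
                  "masks": ["question_encoder"],
                  "data": "training"}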
"""
def __init__(self, params: Params):
self.name = "Trainer"
# Data specification parameters.
self.train_files = params.pop('train_files', None)
self.validation_files = params.pop('validation_files', None)
self.test_files = params.pop('test_files', None)
self.max_training_instances = params.pop('max_training_instances', None)
self.max_validation_instances = params.pop('max_validation_instances', None)
self.max_test_instances = params.pop('max_test_instances', None)
# Data generator parameters.
self.train_steps_per_epoch = params.pop('train_steps_per_epoch', None)
        self.validation_steps = params.pop('validation_steps', None)
        self.test_steps = params.pop('test_steps', None)
# Model serialization parameters.
self.save_models = params.pop('save_models', True)
self.model_prefix = params.pop('model_serialization_prefix', None)
if self.model_prefix:
parent_directory = os.path.dirname(self.model_prefix)
os.makedirs(parent_directory, exist_ok=True)
# `model.fit()` parameters.
self.num_gpus = params.pop("num_gpus", 1)
self.validation_split = params.pop('validation_split', 0.1)
self.batch_size = params.pop('batch_size', 32)
# If you've got more than one gpu, we make a mega batch, which then
# gets split across the number of gpus you have.
if self.num_gpus > 1:
self.batch_size *= self.num_gpus
self.num_epochs = params.pop('num_epochs', 20)
self.optimizer = optimizer_from_params(params.pop('optimizer', 'adam'))
self.gradient_clipping = params.pop('gradient_clipping', {'type': 'clip_by_norm', "value": 10})
self.loss = params.pop('loss', 'categorical_crossentropy')
self.metrics = params.pop('metrics', ['accuracy'])
self.validation_metric = params.pop('validation_metric', 'val_acc')
self.patience = params.pop('patience', 1)
self.fit_kwargs = params.pop('fit_kwargs', {})
# Debugging / logging / misc parameters.
self.tensorboard_log = params.pop('tensorboard_log', None)
self.tensorboard_frequency = params.pop('tensorboard_frequency', 0)
self.debug_params = params.pop('debug', {})
self.show_summary_with_masking = params.pop('show_summary_with_masking_info', False)
# We've now processed all of the parameters, and we're the base class, so there should not
# be anything left.
params.assert_empty("Trainer")
# Model-specific member variables that will get set and used later.
self.model = None
self.debug_model = None
# Should we update state when loading the training data in `self.train()`? Generally, yes,
# you need to do this. But if you've loaded a pre-trained model, the model state has
# already been frozen, and trying to update things like the vocabulary will break things.
# So we set this to false when loading a saved model.
self.update_model_state_with_training_data = True
# Training-specific member variables that will get set and used later.
self.best_epoch = -1
# We store the datasets used for training and validation, both before processing and after
# processing, in case a subclass wants to modify it between epochs for whatever reason.
self.training_dataset = None
self.training_arrays = None
self.validation_dataset = None
self.validation_arrays = None
self.test_dataset = None
self.test_arrays = None
self.debug_dataset = None
self.debug_arrays = None
################
# Public methods
################
def can_train(self):
return self.train_files is not None
def load_data_arrays(self,
data_files: List[str],
batch_size: int=None,
                          max_instances: int=None) -> Tuple[Dataset, Any]:
"""
        Loads a :class:`Dataset` from a list of files, then converts it into numpy arrays for
        both inputs and outputs, returning the dataset and those arrays to you. This literally just calls
``self.load_dataset_from_files``, then ``self.create_data_arrays``; it's just a convenience
method if you want to do both of these at the same time, and also lets you truncate the
dataset if you want.
Note that if you have any kind of state in your model that depends on a training dataset
(e.g., a vocabulary, or padding dimensions) those must be set prior to calling this method.
Parameters
----------
data_files: List[str]
The files to load. These will get passed to ``self.load_dataset_from_files()``, which
subclasses must implement.
batch_size: int, optional (default = None)
Optionally pass a specific batch size to load the data arrays with. If this is not
specified, we use the default self.batch_size attribute. This is a parameter so
you can specify different batch sizes for training vs validation, for instance, which
is useful if you are doing multi-gpu training.
max_instances: int, optional (default=None)
If not ``None``, we will restrict the dataset to only this many instances. This is
mostly useful for testing models out on subsets of your data.
Returns
-------
dataset: Dataset
A :class:`Dataset` object containing the instances read from the data files
input_arrays: numpy.array
An array or tuple of arrays suitable to be passed as inputs ``x`` to Keras'
``model.fit(x, y)``, ``model.evaluate(x, y)`` or ``model.predict(x)`` methods
label_arrays: numpy.array
An array or tuple of arrays suitable to be passed as outputs ``y`` to Keras'
``model.fit(x, y)`` or ``model.evaluate(x, y)`` methods
"""
if batch_size is None:
batch_size = self.batch_size
logger.info("Loading data from %s", str(data_files))
dataset = self.load_dataset_from_files(data_files)
if max_instances is not None:
logger.info("Truncating the dataset to %d instances", max_instances)
dataset = dataset.truncate(max_instances)
logger.info("Indexing dataset")
indexing_kwargs = self._dataset_indexing_kwargs()
indexed_dataset = dataset.to_indexed_dataset(**indexing_kwargs)
data_arrays = self.create_data_arrays(indexed_dataset, batch_size)
return (dataset, data_arrays)
def train(self):
'''
Trains the model.
All training parameters have already been passed to the constructor, so we need no
arguments to this method.
'''
logger.info("Running training (%s)", self.name)
# First we need to prepare the data that we'll use for training. For the training data, we
# might need to update model state based on this dataset, so we handle it differently than
        # we do the validation and test data.
self.training_dataset = self.load_dataset_from_files(self.train_files)
if self.max_training_instances:
self.training_dataset = self.training_dataset.truncate(self.max_training_instances)
if self.update_model_state_with_training_data:
self.set_model_state_from_dataset(self.training_dataset)
logger.info("Indexing training data")
indexing_kwargs = self._dataset_indexing_kwargs()
indexed_training_dataset = self.training_dataset.to_indexed_dataset(**indexing_kwargs)
if self.update_model_state_with_training_data:
self.set_model_state_from_indexed_dataset(indexed_training_dataset)
self.training_arrays = self.create_data_arrays(indexed_training_dataset, self.batch_size)
if self._uses_data_generators():
self.train_steps_per_epoch = self.data_generator.last_num_batches # pylint: disable=no-member
if self.validation_files:
            batch_size_for_validation = self.batch_size // self.num_gpus if self.num_gpus > 1 else None
            self.validation_dataset, self.validation_arrays = \
                self.load_data_arrays(self.validation_files,
                                      batch_size=batch_size_for_validation,
                                      max_instances=self.max_validation_instances)
if self._uses_data_generators():
self.validation_steps = self.data_generator.last_num_batches # pylint: disable=no-member
# Then we build the model and compile it.
logger.info("Building the model")
if self.num_gpus <= 1:
self.model = self._build_model()
self.model.compile(self.__compile_kwargs())
else:
if self._uses_data_generators():
if self.data_generator.adaptive_batch_sizes: # pylint: disable=no-member
raise ConfigurationError("Multi-gpu training is currently only supported for "
"training which does not utilise adaptive batching."
"Please remove 'adaptive_batch_sizes'from your "
"configuration file to proceed.")
self.model = compile_parallel_model(self._build_model, self.__compile_kwargs())
self.model.summary(show_masks=self.show_summary_with_masking)
if self.debug_params:
# Get the list of layers whose outputs will be visualized as per the
# solver definition and build a debug model.
debug_layer_names = self.debug_params['layer_names']
debug_masks = self.debug_params.get('masks', [])
debug_data = self.debug_params['data']
if debug_data == "training":
self.debug_dataset = self.training_dataset
self.debug_arrays = self.training_arrays
elif debug_data == "validation":
# NOTE: This currently only works if you've specified specific validation data, not
# if you are just splitting the training data for validation.
self.debug_dataset = self.validation_dataset
self.debug_arrays = self.validation_arrays
else:
# If the `data` param is not "training" or "validation", we assume it's a list of
# file names.
self.debug_dataset, self.debug_arrays = self.load_data_arrays(debug_data)
self.debug_model = self.__build_debug_model(debug_layer_names, debug_masks)
# Now we actually train the model using various Keras callbacks to control training.
callbacks = self._get_callbacks()
kwargs = {'epochs': self.num_epochs, 'callbacks': [callbacks], 'batch_size': self.batch_size}
# We'll check for explicit validation data first; if you provided this, you definitely
# wanted to use it for validation. self.validation_split is non-zero by default,
        # so you may have left it above zero by accident.
if self.validation_arrays is not None:
kwargs['validation_data'] = self.validation_arrays
elif self.validation_split > 0.0 and not self._uses_data_generators():
kwargs['validation_split'] = self.validation_split
# Add the user-specified arguments to fit.
kwargs.update(self.fit_kwargs)
# We now pass all the arguments to the model's fit function, which does all of the training.
if not self._uses_data_generators():
history = self.model.fit(self.training_arrays[0], self.training_arrays[1], **kwargs)
else:
# If the data was produced by a generator, we have a bit more work to do to get the
# arguments right.
kwargs.pop('batch_size')
kwargs['steps_per_epoch'] = self.train_steps_per_epoch
if self.validation_arrays is not None and self._uses_data_generators():
kwargs['validation_steps'] = self.validation_steps
history = self.model.fit_generator(self.training_arrays, **kwargs)
# After finishing training, we save the best weights and
        # any auxiliary files, such as the model config.
self.best_epoch = int(numpy.argmax(history.history[self.validation_metric]))
if self.save_models:
self.__save_best_model()
self._save_auxiliary_files()
# If there are test files, we evaluate on the test data.
if self.test_files:
self.evaluate_model(self.test_files, self.max_test_instances)
def load_model(self, epoch: int=None):
"""
Loads a serialized model, using the ``model_serialization_prefix`` that was passed to the
constructor. If epoch is not None, we try to load the model from that epoch. If epoch is
not given, we load the best saved model.
"""
logger.info("Loading serialized model")
# Loading serialized model
model_config_file = open("%s_config.json" % self.model_prefix)
model_config_json = model_config_file.read()
model_config_file.close()
self.model = model_from_json(model_config_json,
custom_objects=self._get_custom_objects())
if epoch is not None:
model_file = "%s_weights_epoch=%d.h5" % (self.model_prefix, epoch)
else:
model_file = "%s_weights.h5" % self.model_prefix
logger.info("Loading weights from file %s", model_file)
self.model.load_weights(model_file)
self.model.summary(show_masks=self.show_summary_with_masking)
self._load_auxiliary_files()
self._set_params_from_model()
self.model.compile(self.__compile_kwargs())
self.update_model_state_with_training_data = False
def evaluate_model(self, data_files: List[str], max_instances: int=None):
# We call self.load_model() first, to be sure that we load the best model we have, if we've
# trained for a while.
self.load_model()
        _, arrays = self.load_data_arrays(data_files, max_instances=max_instances)
        logger.info("Evaluating model on the test set.")
if not self._uses_data_generators():
scores = self.model.evaluate(arrays[0], arrays[1])
else:
steps = self.data_generator.last_num_batches # pylint: disable=no-member
scores = self.model.evaluate_generator(arrays, steps)
for idx, metric in enumerate(self.model.metrics_names):
print("{}: {}".format(metric, scores[idx]))
##################
# Abstract methods - you MUST override these
##################
def score_dataset(self, dataset: Dataset) -> Tuple[numpy.array, numpy.array]:
"""
Takes a ``Dataset``, indexes it, and returns the output of evaluating the model on all
instances, and labels for the instances from the data, if they were given. The specifics of
the numpy array that are returned depend on the model and the instance type in the dataset.
Parameters
----------
dataset: Dataset
            A ``Dataset`` read by :func:`~Trainer.load_dataset_from_files()`.
Returns
-------
predictions: numpy.array
Predictions for each ``Instance`` in the ``Dataset``. This could actually be a
tuple/list of arrays, if your model has multiple outputs
labels: numpy.array
The labels for each ``Instance`` in the ``Dataset``, if there were any (this will be
``None`` if there were no labels). We return this so you can easily compute metrics
over these predictions if you wish. It's hard to get numpy arrays with the labels from
a non-indexed-and-padded ``Dataset``, so we return it here so you don't have to do any
funny business to get the label array.
"""
raise NotImplementedError
def load_dataset_from_files(self, files: List[str]) -> Dataset:
"""
Given a list of file inputs, load a raw dataset from the files. This is a list because
some datasets are specified in more than one file (e.g., a file containing the instances,
and a file containing background information about those instances).
"""
raise NotImplementedError
def set_model_state_from_dataset(self, dataset: Dataset):
"""
Given a raw :class:`Dataset` object, set whatever model state is necessary. The most
obvious use case for this is for computing a vocabulary in
:class:`~deep_qa.training.text_trainer.TextTrainer`. Note that this is not an
:class:`IndexedDataset`, and you should not make it one. Use
:func:`~Trainer.set_model_state_from_indexed_dataset()` for setting state that depends on
the data having already been indexed; otherwise you'll duplicate the work of doing the
indexing.
"""
raise NotImplementedError
def set_model_state_from_indexed_dataset(self, dataset: IndexedDataset):
"""
Given an :class:`IndexedDataset`, set whatever model state is necessary. This is typically
stuff around padding.
"""
raise NotImplementedError
def create_data_arrays(self, dataset: IndexedDataset,
batch_size: int=None) -> Tuple[numpy.array, numpy.array]:
"""
Takes a raw dataset and converts it into training inputs and labels that can be used to
either train a model or make predictions. Depending on parameters passed to the
constructor of this ``Trainer``, this could either return two actual array objects, or a
single generator that generates batches of two array objects.
Parameters
----------
dataset: Dataset
A ``Dataset`` of the same format as read by ``load_dataset_from_files()`` (we will
call this directly with the output from that method, in fact)
batch_size: int, optional (default = None)
The batch size with which the dataset should be created. If this is None,
the default self.batch_size will be used.
Returns
-------
input_arrays: numpy.array or Tuple[numpy.array]
label_arrays: numpy.array, Tuple[numpy.array], or None
generator: a Python generator returning Tuple[input_arrays, label_arrays]
If this is returned, it is the only return value. We `either` return a
``Tuple[input_arrays, label_arrays]``, `or` this generator.
"""
raise NotImplementedError
def _build_model(self) -> DeepQaModel:
"""Constructs and returns a DeepQaModel (which is a wrapper around a Keras Model) that will
take the output of self._get_training_data as input, and produce as output a true/false
decision for each input. Note that in the multiple gpu case, this function will be
called multiple times for the different GPUs. As such, you should be wary of this function
having side effects unrelated to building a computation graph.
The returned model will be used to call model.fit(train_input, train_labels).
"""
raise NotImplementedError
def _set_params_from_model(self):
"""
Called after a model is loaded, this lets you update member variables that contain model
parameters, like max sentence length, that are not stored as weights in the model object.
This is necessary if you want to process a new data instance to be compatible with the
model for prediction, for instance.
"""
raise NotImplementedError
def _dataset_indexing_kwargs(self) -> Dict[str, Any]:
"""
In order to index a dataset, we may need some parameters (e.g., an object that stores the
vocabulary of your model, in order to convert words into indices). You can pass those
        here, or return an empty dictionary if there's nothing. These will get passed to
:func:`Dataset.to_indexed_dataset`.
"""
raise NotImplementedError
###################
# Protected methods - you CAN override these, if you want
###################
def _get_callbacks(self):
"""
Returns a set of Callbacks which are used to perform various functions within Keras' .fit method.
Here, we use an early stopping callback to add patience with respect to the validation metric and
a Lambda callback which performs the model specific callbacks which you might want to build into
a model, such as re-encoding some background knowledge.
Additionally, there is also functionality to create Tensorboard log files. These can be visualised
using 'tensorboard --logdir /path/to/log/files' after training.
"""
early_stop = EarlyStopping(monitor=self.validation_metric, patience=self.patience)
model_callbacks = LambdaCallback(on_epoch_begin=lambda epoch, logs: self._pre_epoch_hook(epoch),
on_epoch_end=lambda epoch, logs: self._post_epoch_hook(epoch))
callbacks = [early_stop, model_callbacks]
if self.debug_params:
debug_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:
self.__debug(self.debug_params["layer_names"],
self.debug_params.get("masks", []), epoch))
callbacks.append(debug_callback)
return CallbackList(callbacks)
        # Some witchcraft is happening here - we don't fill in the epoch placeholder in the
        # checkpointing filename ourselves, because Keras does that within the callback.
if self.save_models:
checkpointing = ModelCheckpoint(self.model_prefix + "_weights_epoch={epoch:d}.h5",
save_best_only=True, save_weights_only=True,
monitor=self.validation_metric)
callbacks.append(checkpointing)
return CallbackList(callbacks)
def _pre_epoch_hook(self, epoch: int):
"""
This method gets called before each epoch of training. If you want to do any kind of
processing in between epochs (e.g., updating the training data for whatever reason), here
is your chance to do so.
"""
pass
def _post_epoch_hook(self, epoch: int):
"""
This method gets called directly after model.fit(), before making any early stopping
decisions. If you want to modify anything after each iteration (e.g., computing a
different kind of validation loss to use for early stopping, or just computing and printing
accuracy on some other held out data), you can do that here. If you require extra parameters,
use calls to local methods rather than passing new parameters, as this hook is run via a
        Keras Callback, which is fairly strict in its interface.
"""
pass
def _output_debug_info(self, output_dict: Dict[str, numpy.array], epoch: int):
logger.info("Outputting debug results")
debug_output_file = open("%s_debug_%d.txt" % (self.model_prefix, epoch), "w")
overall_debug_info = self._overall_debug_output(output_dict)
debug_output_file.write(overall_debug_info)
for instance_index, instance in enumerate(self.debug_dataset.instances):
instance_output_dict = {}
for layer_name, output in output_dict.items():
if layer_name == 'masks':
instance_output_dict['masks'] = {}
for mask_name, mask_output in output.items():
instance_output_dict['masks'][mask_name] = mask_output[instance_index]
else:
instance_output_dict[layer_name] = output[instance_index]
instance_info = self._instance_debug_output(instance, instance_output_dict)
debug_output_file.write(instance_info + '\n')
debug_output_file.close()
def _overall_debug_output(self, output_dict: Dict[str, numpy.array]) -> str: # pylint: disable=unused-argument
return "Number of instances: %d\n" % len(self.debug_dataset.instances)
def _instance_debug_output(self, instance: Instance, outputs: Dict[str, numpy.array]) -> str:
"""
This method takes an Instance and all of the debug outputs for that Instance, puts them
into some human-readable format, and returns that as a string. `outputs` will have one key
corresponding to each item in the `debug.layer_names` parameter given to the constructor of
this object.
The default here is `pass` instead of `raise NotImplementedError`, because you're not
required to implement debugging for your model.
"""
pass
def _load_auxiliary_files(self):
"""
Called during model loading. If you have some auxiliary pickled object, such as an object
storing the vocabulary of your model, you can load it here.
"""
pass
def _save_auxiliary_files(self):
"""
Called after training. If you have some auxiliary object, such as an object storing
the vocabulary of your model, you can save it here. The model config is saved by default.
"""
model_config = self.model.to_json()
model_config_file = open("%s_config.json" % (self.model_prefix), "w")
print(model_config, file=model_config_file)
model_config_file.close()
def _uses_data_generators(self): # pylint: disable=no-self-use
"""
        Training models with Keras requires a different API if you produce data in batches using a
        generator than if you just provide one big numpy array with all of your data, which Keras has
to split into batches. This method tells us which Keras API we should use. If your model
class produces data using a generator, return ``True`` here; otherwise, return ``False``.
        The default implementation just returns ``False``.
"""
return False
@classmethod
def _get_custom_objects(cls):
"""
If you've used any Layers that Keras doesn't know about, you need to specify them in this
dictionary, so we can load them correctly.
"""
return {
"DeepQaModel": DeepQaModel
}
#################
    # Private methods - you can't override these. If you find yourself needing to, we can
# consider making them protected instead.
#################
def __save_best_model(self):
"""
Copies the weights from the best epoch to a final weight file.
The point of this is so that the input/output spec of the NNSolver is simpler. Someone
calling this as a subroutine doesn't have to worry about which epoch ended up being the
best, they can just use the final weight file. You can still use models from other epochs
if you really want to.
"""
from shutil import copyfile
epoch_weight_file = "%s_weights_epoch=%d.h5" % (self.model_prefix, self.best_epoch)
final_weight_file = "%s_weights.h5" % self.model_prefix
copyfile(epoch_weight_file, final_weight_file)
logger.info("Saved the best model to %s", final_weight_file)
def __build_debug_model(self, debug_layer_names: List[str], debug_masks: List[str]):
"""
Here we build a very simple kind of debug model: one that takes the same inputs as
self.model, and runs the model up to some particular layers, and outputs the values at
those layers.
In addition, you can optionally specify some number of layers for which you want to output
the mask computed by that layer.
If you want something more complicated, override this method.
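
        For reference, the ``debug`` parameter dict that drives this might look like the
        following sketch (the layer names here are hypothetical and must match layer names
        in your model)::

            debug_params = {
                    "layer_names": ["sentence_encoder", "entailment_softmax"],
                    "masks": ["sentence_encoder"],
                    "data": "validation",
                    }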
"""
debug_inputs = self.model.get_input_at(0) # list of all input_layers
debug_output_dict = {}
layer_names = set(debug_layer_names)
mask_names = set(debug_masks)
for layer in self.model.layers:
if layer.name in layer_names:
debug_output_dict[layer.name] = layer.get_output_at(0)
layer_names.remove(layer.name)
if layer.name in mask_names:
mask = OutputMask()(layer.get_output_at(0))
debug_output_dict['mask_for_' + layer.name] = mask
mask_names.remove(layer.name)
if len(layer_names) != 0 or len(mask_names):
raise ConfigurationError("Unmatched debug layer names: " + str(layer_names | mask_names))
# The outputs need to be in the same order as `debug_layer_names`, or downstream code will
# have issues.
debug_outputs = [debug_output_dict[name] for name in debug_layer_names]
debug_outputs.extend([debug_output_dict['mask_for_' + name] for name in debug_masks])
return DeepQaModel(input=debug_inputs, output=debug_outputs)
def __debug(self, debug_layer_names: List[str], debug_masks: List[str], epoch: int):
"""
Runs the debug model and saves the results to a file.
"""
logger.info("Running debug model")
# Shows intermediate outputs of the model on validation data
outputs = self.debug_model.predict(self.debug_arrays[0])
output_dict = {}
if len(debug_layer_names) == 1:
output_dict[debug_layer_names[0]] = outputs
else:
for layer_name, output in zip(debug_layer_names, outputs[:len(debug_layer_names)]):
output_dict[layer_name] = output
for layer_name, output in zip(debug_masks, outputs[len(debug_layer_names):]):
if 'masks' not in output_dict:
output_dict['masks'] = {}
output_dict['masks'][layer_name] = output
self._output_debug_info(output_dict, epoch)
def __compile_kwargs(self):
"""
Because we call model.compile() in a few different places in the code, and we have a few
member variables that we use to set arguments for model.compile(), we group those arguments
together here, to only specify them once.
"""
# TODO(mark): factor all compile kwargs into a single dict in the json config.
return Params({
'tensorboard_log': self.tensorboard_log,
'tensorboard_frequency': self.tensorboard_frequency,
'gradient_clipping': self.gradient_clipping,
'loss': self.loss,
'optimizer': self.optimizer,
'metrics': self.metrics,
'num_gpus': self.num_gpus
})
| deep_qa-master | deep_qa/training/trainer.py |
from typing import Callable
import os
from copy import deepcopy
import tensorflow
import keras.backend as K
from .train_utils import pin_variable_device_scope, average_gradients
from .models import DeepQaModel
from .step import Step
from ..common.params import Params, ConfigurationError
def compile_parallel_model(model_builder: Callable[[], DeepQaModel],
compile_arguments: Params) -> DeepQaModel:
"""
This function compiles a multi-gpu version of your model. This is done using data
parallelism, by making N copies of the model on the different GPUs, all of which
share parameters. Gradients are updated synchronously, using the average gradient
from all of the outputs of the various models. This effectively allows you to scale
a model up to batch_sizes which cannot fit on a single GPU.
    This method returns a "primary" copy of the model, whose Keras training function has been
    overridden with one that trains all of the towers of the model. The other towers never
    have their training functions
initialised or used and are completely hidden from the user. The returned model
can be serialised in the same way as any other model and has no dependency on
multiple gpus being available when it is loaded.
Note that by calling this function, the model_builder function will be called multiple times
for the different GPUs. As such, you should be wary of this function having side
effects unrelated to building a computation graph.
Parameters
----------
    model_builder: Callable[[], DeepQaModel], required.
A function which returns an uncompiled DeepQaModel.
compile_arguments: Params, required
Model parameters which are passed to compile. These should be the same as if you
were building a single GPU model, with the exception of the ``num_gpus`` field.
Returns
-------
The "primary" copy of the DeepQaModel, which holds the training function which
trains all of the copies of the model.
"""
optimizer = compile_arguments.get("optimizer")
num_gpus = compile_arguments.get("num_gpus")
gradient_clipping = compile_arguments.get("gradient_clipping", None)
tower_models = []
tower_gradients = []
global_step = tensorflow.train.get_or_create_global_step()
train_loss = tensorflow.get_variable('train_loss', [],
initializer=tensorflow.constant_initializer(0.0),
trainable=False)
# Place a copy of the model on each GPU, each getting a slice of the batch.
for gpu_index in range(num_gpus):
with tensorflow.device(pin_variable_device_scope('/gpu:%d' % gpu_index)):
with tensorflow.name_scope('tower_%d' % gpu_index): # pylint: disable=not-context-manager
# This is a new model object every time.
model = model_builder()
compile_kwargs = deepcopy(compile_arguments)
model.compile(compile_kwargs)
loss = model.total_loss
tower_models.append(model)
grads = optimizer.compute_gradients(loss)
tower_gradients.append(grads)
train_loss += loss
grads_and_variables = average_gradients(tower_gradients)
gradients, variables = list(zip(*grads_and_variables))
if gradient_clipping is not None:
clip_type = gradient_clipping.pop("type")
clip_value = gradient_clipping.pop("value")
if clip_type == 'clip_by_norm':
gradients, _ = tensorflow.clip_by_global_norm(gradients, clip_value)
elif clip_type == 'clip_by_value':
gradients = [tensorflow.clip_by_value(x, -clip_value, clip_value) for x in gradients]
else:
raise ConfigurationError("{} is not a supported type of gradient clipping.".format(clip_type))
train_operation = optimizer.apply_gradients(zip(gradients, variables), global_step=global_step)
train_summary = tensorflow.summary.scalar('train_loss', train_loss/ num_gpus)
summary_operations = [train_summary]
# any metrics that keras has collected
merged_metrics = []
if tower_models[0].metrics is not None:
# merge the metrics across GPUs
for i in range(len(tower_models[0].metrics)):
            name = tower_models[0].metrics[i]
tensor = tensorflow.reduce_mean([mm.metrics_tensors[i] for mm in tower_models])
summary_operations.append(tensorflow.summary.scalar(name, tensor))
merged_metrics.append(tensor)
inputs = []
updates = []
for model in tower_models:
# pylint: disable=protected-access
model_inputs = (model._feed_inputs + model._feed_targets + model._feed_sample_weights)
# pylint: enable=protected-access
inputs.extend(model_inputs)
updates.extend(model.updates)
# Just check any one, as we just made copies of them.
if tower_models[0].uses_learning_phase and \
not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
primary_model = tower_models[0]
if primary_model.tensorboard_log is not None:
train_summary_writer = tensorflow.summary.FileWriter(os.path.join(primary_model.tensorboard_log, "train"))
else:
train_summary_writer = None
# Add the multi-gpu update operation.
updates += [train_operation]
# Gets loss and metrics. Updates weights at each call.
primary_model.train_function = Step(inputs,
[train_loss] + merged_metrics,
global_step,
summary_writer=train_summary_writer,
summary_frequency=primary_model.tensorboard_frequency,
updates=updates)
return primary_model
| deep_qa-master | deep_qa/training/multi_gpu.py |
"""
These are utility functions that are similar to calls to Keras' backend. Some of these are here
because a current function in keras.backend is broken, some are things that just haven't been
implemented.
"""
import keras.backend as K
import tensorflow as tf
VERY_LARGE_NUMBER = 1e30
VERY_SMALL_NUMBER = 1e-30
VERY_NEGATIVE_NUMBER = -VERY_LARGE_NUMBER
def switch(cond, then_tensor, else_tensor):
"""
    Keras' implementation of K.switch currently uses tensorflow's switch function, which only
    accepts scalar value conditions, rather than boolean tensors which are treated in an
    elementwise fashion. This doesn't match Theano's implementation of switch, but using
    tensorflow's where, we can exactly recover this functionality.
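
    For example, a minimal sketch (assuming a TensorFlow backend; the import path just
    mirrors this module's location)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.backend import switch

        cond = K.variable(np.array([[1, 0, 1]]))
        then_tensor = K.variable(np.array([[1.0, 2.0, 3.0]]))
        else_tensor = K.variable(np.array([[-1.0, -2.0, -3.0]]))
        # Elementwise selection: takes then_tensor where cond is nonzero, else_tensor elsewhere.
        print(K.eval(switch(cond, then_tensor, else_tensor)))  # [[ 1., -2.,  3.]]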
"""
cond_shape = cond.get_shape()
input_shape = then_tensor.get_shape()
if cond_shape[-1] != input_shape[-1] and cond_shape[-1] == 1:
# This happens when the last dim in the input is an embedding dimension. Keras usually does not
# mask the values along that dimension. Theano broadcasts the value passed along this dimension,
# but TF does not. Using K.dot() since cond can be a tensor.
cond = K.dot(tf.cast(cond, tf.float32), tf.ones((1, input_shape[-1])))
return tf.where(tf.cast(cond, dtype=tf.bool), then_tensor, else_tensor)
def very_negative_like(tensor):
return K.ones_like(tensor) * VERY_NEGATIVE_NUMBER
def last_dim_flatten(input_tensor):
'''
Takes a tensor and returns a matrix while preserving only the last dimension from the input.
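
    For example, a minimal sketch (assuming a TensorFlow backend): a tensor of shape
    (2, 3, 4) becomes a matrix of shape (6, 4)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.backend import last_dim_flatten

        tensor = K.variable(np.random.rand(2, 3, 4))
        # All leading dimensions are collapsed into one; the last dimension is preserved.
        print(K.eval(last_dim_flatten(tensor)).shape)  # (6, 4)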
'''
input_ndim = K.ndim(input_tensor)
shuffle_pattern = (input_ndim - 1,) + tuple(range(input_ndim - 1))
dim_shuffled_input = K.permute_dimensions(input_tensor, shuffle_pattern)
return K.transpose(K.batch_flatten(dim_shuffled_input))
def tile_vector(vector, matrix):
"""
NOTE: If your matrix has known shape (i.e., the relevant dimension from `K.int_shape(matrix) is
not None`), you should just use `K.repeat_elements(vector)` instead of this. This method
works, however, when the number of rows in your matrix is unknown at graph compilation time.
This method takes a (collection of) vector(s) (shape: (batch_size, vector_dim)), and tiles that
vector a number of times, giving a matrix of shape (batch_size, tile_length, vector_dim). (I
say "vector" and "matrix" here because I'm ignoring the batch_size). We need the matrix as
input so we know what the tile_length is - the matrix is otherwise ignored.
This is necessary in a number of places in the code. For instance, if you want to do a dot
product of a vector with all of the vectors in a matrix, the most efficient way to do that is
to tile the vector first, then do an element-wise product with the matrix, then sum out the
last mode. So, we capture this functionality here.
This is not done as a Keras Layer, however; if you want to use this function, you'll need to do
it _inside_ of a Layer somehow, either in a Lambda or in the call() method of a Layer you're
writing.
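
    For example, a minimal sketch of the shapes involved (assuming a TensorFlow backend)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.backend import tile_vector

        vector = K.variable(np.random.rand(4, 10))     # (batch_size, vector_dim)
        matrix = K.variable(np.random.rand(4, 7, 10))  # (batch_size, tile_length, vector_dim)
        # Each vector is repeated tile_length times along a new second dimension.
        print(K.eval(tile_vector(vector, matrix)).shape)  # (4, 7, 10)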
"""
# Tensorflow can't use unknown sizes at runtime, so we have to make use of the broadcasting
# ability of TF and Theano instead to create the tiled sentence encoding.
# Shape: (tile_length, batch_size, vector_dim)
k_ones = K.permute_dimensions(K.ones_like(matrix), [1, 0, 2])
# Now we have a (tile_length, batch_size, vector_dim)*(batch_size, vector_dim)
# elementwise multiplication which is broadcast. We then reshape back.
tiled_vector = K.permute_dimensions(k_ones * vector, [1, 0, 2])
return tiled_vector
def tile_scalar(scalar, vector):
"""
NOTE: If your vector has known shape (i.e., the relevant dimension from `K.int_shape(vector) is
not None`), you should just use `K.repeat_elements(scalar)` instead of this. This method
works, however, when the number of entries in your vector is unknown at graph compilation time.
This method takes a (collection of) scalar(s) (shape: (batch_size, 1)), and tiles that
    scalar a number of times, giving a vector of shape (batch_size, tile_length). (I say "scalar"
and "vector" here because I'm ignoring the batch_size). We need the vector as input so we know
what the tile_length is - the vector is otherwise ignored.
This is not done as a Keras Layer, however; if you want to use this function, you'll need to do
it _inside_ of a Layer somehow, either in a Lambda or in the call() method of a Layer you're
writing.
TODO(matt): we could probably make a more general `tile_tensor` method, which can do this for
    any dimensionality. There is another place in the code where we do this with a matrix and a
tensor; all three of these can probably be one function.
"""
# Tensorflow can't use unknown sizes at runtime, so we have to make use of the broadcasting
# ability of TF and Theano instead to create the tiled sentence encoding.
# Shape: (tile_length, batch_size)
k_ones = K.permute_dimensions(K.ones_like(vector), [1, 0])
# Now we have a (tile_length, batch_size) * (batch_size, 1) elementwise multiplication which is
# broadcast. We then reshape back.
tiled_scalar = K.permute_dimensions(k_ones * K.squeeze(scalar, axis=1), [1, 0])
return tiled_scalar
def hardmax(unnormalized_attention, knowledge_length):
"""
A similar operation to softmax, except all of the weight is placed on the mode of the
distribution. So, e.g., this function transforms [.34, .2, -1.4] -> [1, 0, 0].
TODO(matt): we really should have this take an optional mask...
"""
# (batch_size, knowledge_length)
max_values = K.max(unnormalized_attention, axis=1, keepdims=True)
tiled_max_values = K.tile(max_values, (1, knowledge_length))
# We now have a matrix where every column in each row has the max knowledge score value from
# the corresponding row in the unnormalized attention matrix. Next, we will compare that
# all-max matrix with the original input, resulting in ones where the column equals max and
# zero everywhere else.
# Shape: (batch_size, knowledge_length)
max_attention = K.equal(unnormalized_attention, tiled_max_values)
# Needs to be cast to be compatible with TensorFlow
return K.cast(max_attention, 'float32')
def apply_feed_forward(input_tensor, weights, activation):
'''
Takes an input tensor, sequence of weights and an activation and builds an MLP.
This can also be achieved by defining a sequence of Dense layers in Keras, but doing this
might be desirable if the operation needs to be done within the call method of a more complex
    layer. Moreover, we are not applying biases here. The input tensor can have any number of
    dimensions, but its last dimension and the sequence of weights must be compatible.
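
    For example, a minimal sketch (assuming a TensorFlow backend)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.backend import apply_feed_forward

        input_tensor = K.variable(np.random.rand(4, 8))
        # A two-layer MLP with no biases: 8 -> 6 -> 3.
        weights = [K.variable(np.random.rand(8, 6)), K.variable(np.random.rand(6, 3))]
        output = apply_feed_forward(input_tensor, weights, K.relu)
        print(K.eval(output).shape)  # (4, 3)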
'''
current_tensor = input_tensor
for weight in weights:
current_tensor = activation(K.dot(current_tensor, weight))
return current_tensor
def l1_normalize(tensor_to_normalize, mask=None):
"""
Normalize a tensor by its L1 norm. Takes an optional mask.
When the vector to be normalized is all 0's we return the uniform
distribution (taking masking into account, so masked values are still 0.0).
When the vector to be normalized is completely masked, we return the
uniform distribution over the max padding length of the tensor.
See the tests for concrete examples of the aforementioned behaviors.
Parameters
----------
tensor_to_normalize : Tensor
Tensor of shape (batch size, x) to be normalized, where
x is arbitrary.
mask: Tensor, optional
Tensor of shape (batch size, x) indicating which elements
of ``tensor_to_normalize`` are padding and should
not be considered when normalizing.
Returns
-------
normalized_tensor : Tensor
Normalized tensor with shape (batch size, x).
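
    For example, a minimal sketch (assuming a TensorFlow backend)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.backend import l1_normalize

        tensor = K.variable(np.array([[1.0, 3.0, 0.0]]))
        mask = K.variable(np.array([[1, 1, 0]]))
        # Only the unmasked entries are normalized: [[0.25, 0.75, 0.0]].
        print(K.eval(l1_normalize(tensor, mask)))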
"""
if mask is None:
mask = K.ones_like(tensor_to_normalize)
# We cast the mask to float32 to prevent dtype
# issues when multiplying it with other things
mask = K.cast(mask, "float32")
# We apply the mask to the tensor and take the sum
# of the values in each row.
row_sum = K.sum(mask * tensor_to_normalize, axis=-1, keepdims=True)
# We divide the tensor by the sum of the elements in the rows,
# and then apply the mask. This is the result a naive
# implementation would yield; we instead return the uniform distribution
# in a host of special cases (see the docstring and tests for more detail).
normal_result = (tensor_to_normalize / row_sum) * mask
mask_row_sum = K.sum(mask, axis=1, keepdims=True)
# The number of non-masked elements in the tensor to normalize.
# If all the elements in the tensor to normalize are masked,
# we set it to be the number of elements in the tensor to normalize.
divisor = K.sum(switch(mask_row_sum, mask, K.ones_like(mask)), axis=1,
keepdims=True)
# This handles the case where mask is all 0 and all values are 0.
# If the sum of mask_row_sum and row_sum is 0, make mask all ones,
# else just keep the mask as it is.
temp_mask = switch(mask_row_sum+row_sum, mask, K.ones_like(mask))
uniform = (K.ones_like(mask)/(divisor)) * temp_mask
normalized_tensors = switch(row_sum, normal_result, uniform)
return normalized_tensors
| deep_qa-master | deep_qa/tensors/backend.py |
deep_qa-master | deep_qa/tensors/__init__.py |
|
from keras import backend as K
from .backend import switch
def masked_batch_dot(tensor_a, tensor_b, mask_a, mask_b):
'''
The simplest case where this function is applicable is the following:
tensor_a: (batch_size, a_length, embed_dim)
tensor_b: (batch_size, b_length, embed_dim)
mask_a: None or (batch_size, a_length)
mask_b: None or (batch_size, b_length)
Returns:
a_dot_b: (batch_size, a_length, b_length), with zeros for masked elements.
This function will also work for larger tensors, as long as `abs(K.ndim(tensor_a) -
    K.ndim(tensor_b)) < 2` (this is due to the limitations of `K.batch_dot`). We always assume the
dimension to perform the dot is the last one, and that the masks have one fewer dimension than
the tensors.
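
    For example, a minimal sketch of the simplest case (assuming a TensorFlow backend)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.masked_operations import masked_batch_dot

        tensor_a = K.variable(np.random.rand(1, 2, 2))   # (batch_size, a_length, embed_dim)
        tensor_b = K.variable(np.random.rand(1, 3, 2))   # (batch_size, b_length, embed_dim)
        mask_a = K.variable(np.array([[1, 1]]))
        mask_b = K.variable(np.array([[1, 1, 0]]))
        result = masked_batch_dot(tensor_a, tensor_b, mask_a, mask_b)
        # Shape (1, 2, 3); the last column is zeroed out because of mask_b.
        print(K.eval(result).shape)  # (1, 2, 3)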
'''
if K.ndim(tensor_a) < K.ndim(tensor_b):
# To simplify the logic below, we'll make sure that tensor_a is always the bigger one.
tensor_a, tensor_b = tensor_b, tensor_a
mask_a, mask_b = mask_b, mask_a
a_dot_axis = K.ndim(tensor_a) - 1
b_dot_axis = K.ndim(tensor_b) - 1
if b_dot_axis < a_dot_axis:
tensor_b = K.expand_dims(tensor_b, axis=-1)
# (batch_size, a_length, b_length)
a_dot_b = K.batch_dot(tensor_a, tensor_b, axes=(a_dot_axis, b_dot_axis))
if b_dot_axis < a_dot_axis:
a_dot_b = K.squeeze(a_dot_b, axis=-1)
if mask_a is None and mask_b is None:
return a_dot_b
elif mask_a is None:
# (batch_size, a_length)
mask_a = K.sum(K.ones_like(tensor_a), axis=-1)
elif mask_b is None:
# (batch_size, b_length)
sum_axis = -1
if b_dot_axis < a_dot_axis:
sum_axis -= 1
mask_b = K.sum(K.ones_like(tensor_b), axis=sum_axis)
    # Casting masks to float since TF would complain if we multiplied bools.
float_mask_a = K.cast(mask_a, 'float32')
float_mask_b = K.cast(mask_b, 'float32')
if b_dot_axis < a_dot_axis:
float_mask_b = K.expand_dims(float_mask_b, axis=-1)
else:
float_mask_a = K.expand_dims(float_mask_a, axis=-1)
float_mask_b = K.expand_dims(float_mask_b, axis=-2)
# (batch_size, a_length, b_length)
a2b_mask = float_mask_a * float_mask_b
result = switch(a2b_mask, a_dot_b, K.zeros_like(a_dot_b))
return result
def masked_softmax(vector, mask):
"""
`K.softmax(vector)` does not work if some elements of `vector` should be masked. This performs
a softmax on just the non-masked portions of `vector` (passing None in for the mask is also
acceptable; you'll just get a regular softmax).
We assume that both `vector` and `mask` (if given) have shape (batch_size, vector_dim).
In the case that the input vector is completely masked, this function returns an array
of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model
    that uses categorical cross-entropy loss.
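
    For example, a minimal sketch (assuming a TensorFlow backend)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.masked_operations import masked_softmax

        vector = K.variable(np.array([[1.0, 2.0, 3.0]]))
        mask = K.variable(np.array([[1, 1, 0]]))
        # The softmax is computed over the first two entries only; the masked entry gets 0.0.
        print(K.eval(masked_softmax(vector, mask)))  # approximately [[0.269, 0.731, 0.0]]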
"""
# We calculate masked softmax in a numerically stable fashion, as done
# in https://github.com/rkadlec/asreader/blob/master/asreader/custombricks/softmax_mask_bricks.py
if mask is not None:
# Here we get normalized log probabilities for
# enhanced numerical stability.
mask = K.cast(mask, "float32")
input_masked = mask * vector
shifted = mask * (input_masked - K.max(input_masked, axis=1,
keepdims=True))
# We add epsilon to avoid numerical instability when
# the sum in the log yields 0.
normalization_constant = K.log(K.sum(mask * K.exp(shifted), axis=1,
keepdims=True) + K.epsilon())
normalized_log_probabilities = mask * (shifted - normalization_constant)
unmasked_probabilities = K.exp(normalized_log_probabilities)
return switch(mask, unmasked_probabilities, K.zeros_like(unmasked_probabilities))
else:
# There is no mask, so we use the provided ``K.softmax`` function.
return K.softmax(vector)
| deep_qa-master | deep_qa/tensors/masked_operations.py |
"""
Similarity functions take a pair of tensors with the same shape, and compute a similarity function
on the vectors in the last dimension. For example, the tensors might both have shape
`(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two
vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a
tensor of shape `(batch_size, sentence_length)`.
The similarity function could be as simple as a dot product, or it could be a more complex,
parameterized function. The SimilarityFunction class exposes an API for a Layer that wants to
allow for multiple similarity functions, such as for initializing and returning weights.
If you want to compute a similarity between tensors of different sizes, you need to first tile them
in the appropriate dimensions to make them the same before you can use these functions. The
Attention and MatrixAttention layers do this.
"""
from typing import List
from keras import activations, initializers
class SimilarityFunction:
def __init__(self, name: str, initialization: str='glorot_uniform', activation: str='linear'):
self.name = name
self.init = initializers.get(initialization)
self.activation = activations.get(activation)
def initialize_weights(self, tensor_1_dim: int, tensor_2_dim: int) -> List['K.variable']:
"""
Called in a `Layer.build()` method that uses this SimilarityFunction, here we both
initialize whatever weights are necessary for this similarity function, and return them so
they can be included in `Layer.trainable_weights`.
Parameters
----------
tensor_1_dim : int
The last dimension (typically ``embedding_dim``) of the first input tensor. We need
this so we can initialize weights appropriately.
tensor_2_dim : int
The last dimension (typically ``embedding_dim``) of the second input tensor. We need
this so we can initialize weights appropriately.
"""
raise NotImplementedError
def compute_similarity(self, tensor_1, tensor_2):
"""
Takes two tensors of the same shape, such as (batch_size, length_1, length_2,
embedding_dim). Computes a (possibly parameterized) similarity on the final dimension and
returns a tensor with one less dimension, such as (batch_size, length_1, length_2).
"""
raise NotImplementedError
| deep_qa-master | deep_qa/tensors/similarity_functions/similarity_function.py |
from typing import List
from keras import backend as K
from overrides import overrides
from ...common.checks import ConfigurationError
from .similarity_function import SimilarityFunction
class Linear(SimilarityFunction):
"""
This similarity function performs a dot product between a vector of weights and some
combination of the two input vectors. The combination done is configurable.
If the two vectors are `x` and `y`, we allow the following kinds of combinations: `x`, `y`,
`x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations is performed elementwise.
You can list as many combinations as you want, comma separated. For example, you might give
"x,y,x*y" as the `combination` parameter to this class. The computed similarity function would
then be `w^T [x; y; x*y] + b`, where `w` is a vector of weights, `b` is a bias parameter, and
`[;]` is vector concatenation.
Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the
similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can
accomplish that with this class by using "x*y" for `combination`.
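
    As a rough usage sketch (this class is normally used inside a Layer's ``build()`` and
    ``call()`` methods rather than directly; a TensorFlow backend is assumed)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.similarity_functions.linear import Linear

        similarity = Linear(combination='x,y,x*y', name='similarity')
        # With 'x,y,x*y' and 5-dimensional inputs, [x; y; x*y] has 15 entries,
        # so the weight vector has shape (15, 1).
        weights = similarity.initialize_weights(tensor_1_dim=5, tensor_2_dim=5)
        tensor_1 = K.variable(np.random.rand(4, 5))
        tensor_2 = K.variable(np.random.rand(4, 5))
        print(K.eval(similarity.compute_similarity(tensor_1, tensor_2)).shape)  # (4,)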
"""
def __init__(self, combination: str='x,y', **kwargs):
super(Linear, self).__init__(**kwargs)
self.combinations = combination.split(',')
self.num_combinations = len(self.combinations)
self.weight_vector = None
self.bias = None
@overrides
def initialize_weights(self, tensor_1_dim: int, tensor_2_dim: int) -> List['K.variable']:
combined_dim = self._get_combined_dim(tensor_1_dim, tensor_2_dim)
self.weight_vector = K.variable(self.init((combined_dim, 1)), name=self.name + "_weights")
self.bias = K.variable(self.init((1,)), name=self.name + "_bias")
return [self.weight_vector, self.bias]
@overrides
def compute_similarity(self, tensor_1, tensor_2):
combined_tensors = self._combine_tensors(tensor_1, tensor_2)
dot_product = K.squeeze(K.dot(combined_tensors, self.weight_vector), axis=-1)
return self.activation(dot_product + self.bias)
def _combine_tensors(self, tensor_1, tensor_2):
combined_tensor = self._get_combination(self.combinations[0], tensor_1, tensor_2)
for combination in self.combinations[1:]:
to_concatenate = self._get_combination(combination, tensor_1, tensor_2)
combined_tensor = K.concatenate([combined_tensor, to_concatenate], axis=-1)
return combined_tensor
def _get_combination(self, combination: str, tensor_1, tensor_2):
if combination == 'x':
return tensor_1
elif combination == 'y':
return tensor_2
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = self._get_combination(combination[0], tensor_1, tensor_2)
second_tensor = self._get_combination(combination[2], tensor_1, tensor_2)
operation = combination[1]
if operation == '*':
return first_tensor * second_tensor
elif operation == '/':
return first_tensor / second_tensor
elif operation == '+':
return first_tensor + second_tensor
elif operation == '-':
return first_tensor - second_tensor
else:
raise ConfigurationError("Invalid operation: " + operation)
def _get_combined_dim(self, tensor_1_dim: int, tensor_2_dim: int) -> int:
combination_dims = [self._get_combination_dim(combination, tensor_1_dim, tensor_2_dim)
for combination in self.combinations]
return sum(combination_dims)
def _get_combination_dim(self, combination: str, tensor_1_dim: int, tensor_2_dim: int) -> int:
if combination == 'x':
return tensor_1_dim
elif combination == 'y':
return tensor_2_dim
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor_dim = self._get_combination_dim(combination[0], tensor_1_dim, tensor_2_dim)
second_tensor_dim = self._get_combination_dim(combination[2], tensor_1_dim, tensor_2_dim)
operation = combination[1]
if first_tensor_dim != second_tensor_dim:
raise ConfigurationError("Tensor dims must match for operation \"{}\"".format(operation))
return first_tensor_dim
| deep_qa-master | deep_qa/tensors/similarity_functions/linear.py |
from typing import List
from keras import backend as K
from overrides import overrides
from .similarity_function import SimilarityFunction
class Bilinear(SimilarityFunction):
"""
This similarity function performs a bilinear transformation of the two input vectors. This
function has a matrix of weights W and a bias b, and the similarity between two vectors x and y
is computed as `x^T W y + b`.
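
    As a rough usage sketch (this class is normally used inside a Layer rather than
    directly; a TensorFlow backend is assumed)::

        import numpy as np
        import keras.backend as K
        from deep_qa.tensors.similarity_functions.bilinear import Bilinear

        similarity = Bilinear(name='bilinear')
        similarity.initialize_weights(tensor_1_dim=4, tensor_2_dim=6)
        tensor_1 = K.variable(np.random.rand(2, 4))
        tensor_2 = K.variable(np.random.rand(2, 6))
        # One similarity score per row in the batch.
        print(K.eval(similarity.compute_similarity(tensor_1, tensor_2)).shape)  # (2,)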
"""
def __init__(self, **kwargs):
super(Bilinear, self).__init__(**kwargs)
self.weight_matrix = None
self.bias = None
@overrides
def initialize_weights(self, tensor_1_dim: int, tensor_2_dim: int) -> List['K.variable']:
self.weight_matrix = K.variable(self.init((tensor_1_dim, tensor_2_dim)),
name=self.name + "_weights")
self.bias = K.variable(self.init((1,)), name=self.name + "_bias")
return [self.weight_matrix, self.bias]
@overrides
def compute_similarity(self, tensor_1, tensor_2):
dot_product = K.sum(K.dot(tensor_1, self.weight_matrix) * tensor_2, axis=-1)
return self.activation(dot_product + self.bias)
| deep_qa-master | deep_qa/tensors/similarity_functions/bilinear.py |
from collections import OrderedDict
from .bilinear import Bilinear
from .dot_product import DotProduct
from .linear import Linear
from .cosine_similarity import CosineSimilarity
# The first item added here will be used as the default in some cases.
similarity_functions = OrderedDict() # pylint: disable=invalid-name
similarity_functions['dot_product'] = DotProduct
similarity_functions['bilinear'] = Bilinear
similarity_functions['linear'] = Linear
similarity_functions['cosine_similarity'] = CosineSimilarity
| deep_qa-master | deep_qa/tensors/similarity_functions/__init__.py |
from typing import List
from keras import backend as K
from overrides import overrides
from ...common.checks import ConfigurationError
from .similarity_function import SimilarityFunction
class CosineSimilarity(SimilarityFunction):
"""
This similarity function simply computes the cosine similarity between each pair of vectors. It has
no parameters.
"""
def __init__(self, **kwargs):
super(CosineSimilarity, self).__init__(**kwargs)
@overrides
def initialize_weights(self, tensor_1_dim: int, tensor_2_dim: int) -> List['K.variable']:
if tensor_1_dim != tensor_2_dim:
raise ConfigurationError("Tensor dims must match for cosine product similarity, but "
"were {} and {}".format(tensor_1_dim, tensor_2_dim))
return []
@overrides
def compute_similarity(self, tensor_1, tensor_2):
return K.sum(K.l2_normalize(tensor_1, axis=-1) * K.l2_normalize(tensor_2, axis=-1),
axis=-1)
| deep_qa-master | deep_qa/tensors/similarity_functions/cosine_similarity.py |
from typing import List
from keras import backend as K
from overrides import overrides
from ...common.checks import ConfigurationError
from .similarity_function import SimilarityFunction
class DotProduct(SimilarityFunction):
"""
This similarity function simply computes the dot product between each pair of vectors. It has
no parameters.
"""
def __init__(self, **kwargs):
super(DotProduct, self).__init__(**kwargs)
@overrides
def initialize_weights(self, tensor_1_dim: int, tensor_2_dim: int) -> List['K.variable']:
if tensor_1_dim != tensor_2_dim:
raise ConfigurationError("Tensor dims must match for dot product similarity, but "
"were {} and {}".format(tensor_1_dim, tensor_2_dim))
return []
@overrides
def compute_similarity(self, tensor_1, tensor_2):
return K.sum(tensor_1 * tensor_2, axis=-1)
| deep_qa-master | deep_qa/tensors/similarity_functions/dot_product.py |
from .entailment import concrete_models as entailment_models
from .sequence_tagging import concrete_models as sequence_tagging_models
from .reading_comprehension import concrete_models as reading_comprehension_models
from .text_classification import concrete_models as text_classification_models
concrete_models = {} # pylint: disable=invalid-name
__concrete_task_models = [ # pylint: disable=invalid-name
entailment_models,
sequence_tagging_models,
reading_comprehension_models,
text_classification_models,
]
for models_for_task in __concrete_task_models:
for model_name, model_class in models_for_task.items():
if model_name in concrete_models:
raise RuntimeError("Duplicate model name found: " + model_name)
concrete_models[model_name] = model_class
| deep_qa-master | deep_qa/models/__init__.py |
from keras.layers import Dense, Input, TimeDistributed
from overrides import overrides
from ...common.params import Params
from ...data.instances.sequence_tagging import concrete_instances
from ...training.text_trainer import TextTrainer
from ...training.models import DeepQaModel
class SimpleTagger(TextTrainer):
"""
This ``SimpleTagger`` simply encodes a sequence of text with some number of stacked
``seq2seq_encoders``, then predicts a tag at each index.
Parameters
----------
num_stacked_rnns : int, optional (default: ``1``)
The number of ``seq2seq_encoders`` that we should stack on top of each other before
predicting tags.
instance_type : str
Specifies the particular subclass of ``TaggedSequenceInstance`` to use for loading data,
which in turn defines things like how the input data is formatted and tokenized.
"""
def __init__(self, params: Params):
self.num_stacked_rnns = params.pop('num_stacked_rnns', 1)
instance_type_choice = params.pop_choice("instance_type", concrete_instances.keys())
self.instance_type = concrete_instances[instance_type_choice]
super(SimpleTagger, self).__init__(params)
@overrides
def _instance_type(self): # pylint: disable=no-self-use
return self.instance_type
@overrides
def _build_model(self):
# shape: (batch_size, text_length)
text_input = Input(shape=self._get_sentence_shape(), dtype='int32', name='text_input')
# shape: (batch_size, text_length, embedding_dim)
text_embedding = self._embed_input(text_input)
for i in range(self.num_stacked_rnns):
encoder = self._get_seq2seq_encoder(name="encoder_{}".format(i),
fallback_behavior="use default params")
# shape still (batch_size, text_length, embedding_dim)
text_embedding = encoder(text_embedding)
# The -2 below is because we are ignoring the padding and unknown tokens that the
# DataIndexer has by default.
predicted_tags = TimeDistributed(Dense(self.data_indexer.get_vocab_size('tags') - 2,
activation='softmax'))(text_embedding)
return DeepQaModel(input=text_input, output=predicted_tags)
@overrides
def _set_padding_lengths_from_model(self):
self._set_text_lengths_from_model_input(self.model.get_input_shape_at(0)[1:])
| deep_qa-master | deep_qa/models/sequence_tagging/simple_tagger.py |
from .simple_tagger import SimpleTagger
concrete_models = { # pylint: disable=invalid-name
'SimpleTagger': SimpleTagger,
}
| deep_qa-master | deep_qa/models/sequence_tagging/__init__.py |
from typing import Dict
from keras.layers import Input
from overrides import overrides
from ...data.instances.entailment.snli_instance import SnliInstance
from ...training.text_trainer import TextTrainer
from ...layers.entailment_models import DecomposableAttentionEntailment
from ...training.models import DeepQaModel
from ...common.params import Params
class DecomposableAttention(TextTrainer):
'''
This ``TextTrainer`` implements the Decomposable Attention model described in "A Decomposable
Attention Model for Natural Language Inference", by Parikh et al., 2016, with some optional
enhancements before the decomposable attention actually happens. Specifically, Parikh's
original model took plain word embeddings as input to the decomposable attention; we allow
    other operations that transform these word embeddings, such as running a biLSTM on them, before
running the decomposable attention layer.
Inputs:
- A "text" sentence, with shape (batch_size, sentence_length)
- A "hypothesis" sentence, with shape (batch_size, sentence_length)
Outputs:
- An entailment decision per input text/hypothesis pair, in {entails, contradicts, neutral}.
Parameters
----------
num_seq2seq_layers : int, optional (default=0)
After getting a word embedding, how many stacked seq2seq encoders should we use before
doing the decomposable attention? The default of 0 recreates the original decomposable
attention model.
share_encoders : bool, optional (default=True)
Should we use the same seq2seq encoder for the text and hypothesis, or different ones?
decomposable_attention_params : Dict[str, Any], optional (default={})
These parameters get passed to the
:class:`~deep_qa.layers.entailment_models.decomposable_attention.DecomposableAttentionEntailment`
layer object, and control things like the number of output labels, number of hidden layers
in the entailment MLPs, etc. See that class for a complete description of options here.
'''
def __init__(self, params: Params):
self.num_seq2seq_layers = params.pop('num_seq2seq_layers', 0)
self.share_encoders = params.pop('share_encoders', True)
self.decomposable_attention_params = params.pop('decomposable_attention_params', {})
super(DecomposableAttention, self).__init__(params)
@overrides
def _instance_type(self):
return SnliInstance
@overrides
def _build_model(self):
text_input = Input(shape=self._get_sentence_shape(), dtype='int32', name="text_input")
hypothesis_input = Input(shape=self._get_sentence_shape(), dtype='int32', name="hypothesis_input")
text_embedding = self._embed_input(text_input)
hypothesis_embedding = self._embed_input(hypothesis_input)
for i in range(self.num_seq2seq_layers):
text_encoder_name = "hidden_{}".format(i) if self.share_encoders else "text_{}".format(i)
text_encoder = self._get_seq2seq_encoder(name=text_encoder_name,
fallback_behavior="use default params")
text_embedding = text_encoder(text_embedding)
hypothesis_encoder_name = "hidden_{}".format(i) if self.share_encoders else "hypothesis_{}".format(i)
hypothesis_encoder = self._get_seq2seq_encoder(name=hypothesis_encoder_name,
fallback_behavior="use default params")
hypothesis_embedding = hypothesis_encoder(hypothesis_embedding)
entailment_layer = DecomposableAttentionEntailment(**self.decomposable_attention_params)
entailment_probabilities = entailment_layer([text_embedding, hypothesis_embedding])
return DeepQaModel(inputs=[text_input, hypothesis_input], outputs=entailment_probabilities)
@overrides
def get_padding_memory_scaling(self, padding_lengths: Dict[str, int]) -> int:
return padding_lengths['num_sentence_words'] ** 2
@overrides
def _set_padding_lengths_from_model(self):
print("Model input shape:", self.model.get_input_shape_at(0))
self._set_text_lengths_from_model_input(self.model.get_input_shape_at(0)[0][1:])
@classmethod
def _get_custom_objects(cls):
custom_objects = super(DecomposableAttention, cls)._get_custom_objects()
custom_objects["DecomposableAttentionEntailment"] = DecomposableAttentionEntailment
return custom_objects
| deep_qa-master | deep_qa/models/entailment/decomposable_attention.py |
from .decomposable_attention import DecomposableAttention
concrete_models = { # pylint: disable=invalid-name
'DecomposableAttention': DecomposableAttention,
}
| deep_qa-master | deep_qa/models/entailment/__init__.py |
from typing import Dict, List
from keras.layers import Dense, Input, Concatenate, TimeDistributed
from overrides import overrides
from ...data.instances.reading_comprehension import CharacterSpanInstance
from ...layers import ComplexConcat, Highway
from ...layers.attention import MatrixAttention, MaskedSoftmax, WeightedSum
from ...layers.backend import Max, RepeatLike, Repeat
from ...training import TextTrainer
from ...training.models import DeepQaModel
from ...common.params import Params
class BidirectionalAttentionFlow(TextTrainer):
"""
This class implements Minjoon Seo's `Bidirectional Attention Flow model
<https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/7586b7cca1deba124af80609327395e613a20e9d>`_
for answering reading comprehension questions (ICLR 2017).
The basic layout is pretty simple: encode words as a combination of word embeddings and a
character-level encoder, pass the word representations through a bi-LSTM/GRU, use a matrix of
attentions to put question information into the passage word representations (this is the only
part that is at all non-standard), pass this through another few layers of bi-LSTMs/GRUs, and
do a softmax over span start and span end.
Parameters
----------
num_hidden_seq2seq_layers : int, optional (default: ``2``)
At the end of the model, we add a few stacked biLSTMs (or similar), to give the model some
depth. This parameter controls how many deep layers we should use.
num_passage_words : int, optional (default: ``None``)
If set, we will truncate (or pad) all passages to this length. If not set, we will pad all
passages to be the same length as the longest passage in the data.
num_question_words : int, optional (default: ``None``)
Same as ``num_passage_words``, but for the number of words in the question. (default:
``None``)
num_highway_layers : int, optional (default: ``2``)
After constructing a word embedding, but before the first biLSTM layer, Min has some
``Highway`` layers operating on the word embedding layer. This parameter specifies how
many of those to do. (default: ``2``)
highway_activation : string, optional (default: ``'relu'``)
Specifies the activation function to use for the ``Highway`` layers mentioned above. Any
Keras activation function is acceptable here.
similarity_function : Dict[str, Any], optional (default: ``{'type': 'linear', 'combination': 'x,y,x*y'}``)
Specifies the similarity function to use when computing a similarity matrix between
question words and passage words. By default we use the function Min used in his paper.
Notes
-----
Min's code uses tensors of shape ``(batch_size, num_sentences, sentence_length)`` to represent
the passage, splitting it up into sentences, where here we just have one long passage sequence.
I was originally afraid this might mean he applied the biLSTM on each sentence independently,
but it looks like he flattens it to our shape before he does any actual operations on it. So,
I `think` this is implementing pretty much exactly what he did, but I'm not totally certain.
"""
def __init__(self, params: Params):
# There are a couple of defaults from TextTrainer that we want to override: we want to
# default to using joint word and character-level embeddings, and we want to use a CNN
# encoder to get a character-level encoding. We set those here.
params.setdefault('tokenizer', {'type': 'words and characters'})
encoder_params = params.pop('encoder', {'default': {}}).as_dict()
encoder_params.setdefault('word', {'type': 'cnn', 'ngram_filter_sizes': [5], 'num_filters': 100})
params['encoder'] = encoder_params
self.num_hidden_seq2seq_layers = params.pop('num_hidden_seq2seq_layers', 2)
self.num_passage_words = params.pop('num_passage_words', None)
self.num_question_words = params.pop('num_question_words', None)
self.num_highway_layers = params.pop('num_highway_layers', 2)
self.highway_activation = params.pop('highway_activation', 'relu')
self.similarity_function_params = params.pop('similarity_function',
{'type': 'linear', 'combination': 'x,y,x*y'}).as_dict()
# We have two outputs, so using "val_acc" doesn't work.
params.setdefault('validation_metric', 'val_loss')
super(BidirectionalAttentionFlow, self).__init__(params)
@overrides
def _build_model(self):
# PART 1:
# First we create input layers and pass the inputs through an embedding.
question_input = Input(shape=self._get_sentence_shape(self.num_question_words),
dtype='int32', name="question_input")
passage_input = Input(shape=self._get_sentence_shape(self.num_passage_words),
dtype='int32', name="passage_input")
# Shape: (batch_size, num_question_words, embedding_dim * 2) (embedding_dim * 2 because,
# by default in this class, self._embed_input concatenates a word embedding with a
# character-level encoder).
question_embedding = self._embed_input(question_input)
# Shape: (batch_size, num_passage_words, embedding_dim * 2)
passage_embedding = self._embed_input(passage_input)
# Min's model has some highway layers here, with relu activations. Note that highway
# layers don't change the tensor's shape. We need to have two different `TimeDistributed`
# layers instantiated here, because Keras doesn't like it if a single `TimeDistributed`
# layer gets applied to two inputs with different numbers of time steps.
for i in range(self.num_highway_layers):
highway_layer = Highway(activation=self.highway_activation, name='highway_{}'.format(i))
question_layer = TimeDistributed(highway_layer, name=highway_layer.name + "_qtd")
question_embedding = question_layer(question_embedding)
passage_layer = TimeDistributed(highway_layer, name=highway_layer.name + "_ptd")
passage_embedding = passage_layer(passage_embedding)
# Then we pass the question and passage through a seq2seq encoder (like a biLSTM). This
# essentially pushes phrase-level information into the embeddings of each word.
phrase_layer = self._get_seq2seq_encoder(name="phrase",
fallback_behavior="use default params")
# Shape: (batch_size, num_question_words, embedding_dim * 2)
encoded_question = phrase_layer(question_embedding)
# Shape: (batch_size, num_passage_words, embedding_dim * 2)
encoded_passage = phrase_layer(passage_embedding)
# PART 2:
# Now we compute a similarity between the passage words and the question words, and
# normalize the matrix in a couple of different ways for input into some more layers.
matrix_attention_layer = MatrixAttention(similarity_function=self.similarity_function_params,
name='passage_question_similarity')
# Shape: (batch_size, num_passage_words, num_question_words)
passage_question_similarity = matrix_attention_layer([encoded_passage, encoded_question])
# Shape: (batch_size, num_passage_words, num_question_words), normalized over question
# words for each passage word.
passage_question_attention = MaskedSoftmax()(passage_question_similarity)
# Shape: (batch_size, num_passage_words, embedding_dim * 2)
weighted_sum_layer = WeightedSum(name="passage_question_vectors", use_masking=False)
passage_question_vectors = weighted_sum_layer([encoded_question, passage_question_attention])
# Min's paper finds, for each document word, the most similar question word to it, and
# computes a single attention over the whole document using these max similarities.
# Shape: (batch_size, num_passage_words)
question_passage_similarity = Max(axis=-1)(passage_question_similarity)
# Shape: (batch_size, num_passage_words)
question_passage_attention = MaskedSoftmax()(question_passage_similarity)
# Shape: (batch_size, embedding_dim * 2)
weighted_sum_layer = WeightedSum(name="question_passage_vector", use_masking=False)
question_passage_vector = weighted_sum_layer([encoded_passage, question_passage_attention])
# Then he repeats this question/passage vector for every word in the passage, and uses it
# as an additional input to the hidden layers above.
repeat_layer = RepeatLike(axis=1, copy_from_axis=1)
# Shape: (batch_size, num_passage_words, embedding_dim * 2)
tiled_question_passage_vector = repeat_layer([question_passage_vector, encoded_passage])
# Shape: (batch_size, num_passage_words, embedding_dim * 8)
complex_concat_layer = ComplexConcat(combination='1,2,1*2,1*3', name='final_merged_passage')
final_merged_passage = complex_concat_layer([encoded_passage,
passage_question_vectors,
tiled_question_passage_vector])
# PART 3:
# Having computed a combined representation of the document that includes attended question
# vectors, we'll pass this through a few more bi-directional encoder layers, then predict
# the span_begin word. Hard to find a good name for this; Min calls this part of the
# network the "modeling layer", so we'll call this the `modeled_passage`.
modeled_passage = final_merged_passage
for i in range(self.num_hidden_seq2seq_layers):
hidden_layer = self._get_seq2seq_encoder(name="hidden_seq2seq_{}".format(i),
fallback_behavior="use default params")
modeled_passage = hidden_layer(modeled_passage)
        # To predict the span begin word, we pass the merged representation through a Dense layer with
# output size 1 (basically a dot product of a vector of weights and the passage vectors),
# then do a softmax to get a position.
span_begin_input = Concatenate()([final_merged_passage, modeled_passage])
span_begin_weights = TimeDistributed(Dense(units=1))(span_begin_input)
# Shape: (batch_size, num_passage_words)
span_begin_probabilities = MaskedSoftmax(name="span_begin_softmax")(span_begin_weights)
# PART 4:
# Given what we predicted for span_begin, we'll pass it through a final encoder layer and
# predict span_end. NOTE: I'm following what Min did in his _code_, not what it says he
# did in his _paper_. The equations in his paper do not mention that he did this last
# weighted passage representation and concatenation before doing the final biLSTM (though
# his figure makes it clear this is what he intended; he just wrote the equations wrong).
# Shape: (batch_size, num_passage_words, embedding_dim * 2)
sum_layer = WeightedSum(name="passage_weighted_by_predicted_span", use_masking=False)
repeat_layer = RepeatLike(axis=1, copy_from_axis=1)
passage_weighted_by_predicted_span = repeat_layer([sum_layer([modeled_passage,
span_begin_probabilities]),
encoded_passage])
span_end_representation = ComplexConcat(combination="1,2,3,2*3")([final_merged_passage,
modeled_passage,
passage_weighted_by_predicted_span])
final_seq2seq = self._get_seq2seq_encoder(name="final_seq2seq",
fallback_behavior="use default params")
span_end_representation = final_seq2seq(span_end_representation)
span_end_input = Concatenate()([final_merged_passage, span_end_representation])
span_end_weights = TimeDistributed(Dense(units=1))(span_end_input)
span_end_probabilities = MaskedSoftmax(name="span_end_softmax")(span_end_weights)
return DeepQaModel(inputs=[question_input, passage_input],
outputs=[span_begin_probabilities, span_end_probabilities])
@overrides
def _instance_type(self): # pylint: disable=no-self-use
return CharacterSpanInstance
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
padding_lengths = super(BidirectionalAttentionFlow, self).get_padding_lengths()
padding_lengths['num_passage_words'] = self.num_passage_words
padding_lengths['num_question_words'] = self.num_question_words
return padding_lengths
@overrides
def _set_padding_lengths(self, padding_lengths: Dict[str, int]):
super(BidirectionalAttentionFlow, self)._set_padding_lengths(padding_lengths)
if self.data_generator is not None and self.data_generator.dynamic_padding:
return
if self.num_passage_words is None:
self.num_passage_words = padding_lengths['num_passage_words']
if self.num_question_words is None:
self.num_question_words = padding_lengths['num_question_words']
@overrides
def _set_padding_lengths_from_model(self):
self.num_question_words = self.model.get_input_shape_at(0)[0][1]
self.num_passage_words = self.model.get_input_shape_at(0)[1][1]
# We need to pass this slice of the passage input shape to the superclass
# mainly to set self.num_word_characters. The decision of whether to pass
# the passage input or the question input is arbitrary, as the
# two word lengths are guaranteed to be the same and BiDAF ignores
# self.num_sentence_words.
self._set_text_lengths_from_model_input(self.model.get_input_shape_at(0)[1][1:])
@overrides
def get_instance_sorting_keys(self) -> List[str]: # pylint: disable=no-self-use
return ['num_passage_words', 'num_question_words']
@overrides
def get_padding_memory_scaling(self, padding_lengths: Dict[str, int]) -> int:
num_passage_words = padding_lengths['num_passage_words']
num_question_words = padding_lengths['num_question_words']
return num_passage_words * num_question_words
@classmethod
@overrides
def _get_custom_objects(cls):
custom_objects = super(BidirectionalAttentionFlow, cls)._get_custom_objects()
custom_objects["ComplexConcat"] = ComplexConcat
custom_objects["MaskedSoftmax"] = MaskedSoftmax
custom_objects["MatrixAttention"] = MatrixAttention
custom_objects["Max"] = Max
custom_objects["Repeat"] = Repeat
custom_objects["RepeatLike"] = RepeatLike
custom_objects["WeightedSum"] = WeightedSum
return custom_objects
@staticmethod
def get_best_span(span_begin_probs, span_end_probs):
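        """
        Given probability distributions over the span begin and span end positions (each of shape
        ``(num_passage_words,)`` or ``(1, num_passage_words)``), return the ``(begin, end)`` pair
        maximizing ``span_begin_probs[begin] * span_end_probs[end]``, using a single greedy pass
        that tracks the best begin position seen so far for each candidate end position.
        """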
if len(span_begin_probs.shape) > 2 or len(span_end_probs.shape) > 2:
raise ValueError("Input shapes must be (X,) or (1,X)")
if len(span_begin_probs.shape) == 2:
assert span_begin_probs.shape[0] == 1, "2D input must have an initial dimension of 1"
span_begin_probs = span_begin_probs.flatten()
if len(span_end_probs.shape) == 2:
assert span_end_probs.shape[0] == 1, "2D input must have an initial dimension of 1"
span_end_probs = span_end_probs.flatten()
max_span_probability = 0
best_word_span = (0, 1)
begin_span_argmax = 0
for j, _ in enumerate(span_begin_probs):
val1 = span_begin_probs[begin_span_argmax]
val2 = span_end_probs[j]
if val1 * val2 > max_span_probability:
best_word_span = (begin_span_argmax, j)
max_span_probability = val1 * val2
            # We need to update begin_span_argmax here _after_ we've checked the current span
# position, so that we don't allow things like (1, 1), which are empty spans. We've
# added a special stop symbol to the end of the passage, so this still allows for all
# valid spans over the passage.
if val1 < span_begin_probs[j]:
val1 = span_begin_probs[j]
begin_span_argmax = j
return (best_word_span[0], best_word_span[1])
| deep_qa-master | deep_qa/models/reading_comprehension/bidirectional_attention.py |
from typing import Dict
from overrides import overrides
from keras.layers import Input, Dropout, Concatenate
from ...data.instances.reading_comprehension.mc_question_passage_instance import McQuestionPassageInstance
from ...common.checks import ConfigurationError
from ...layers.backend import BatchDot
from ...layers.attention import Attention, MaskedSoftmax, GatedAttention
from ...layers import OptionAttentionSum, Overlap, L1Normalize
from ...layers import VectorMatrixSplit, BiGRUIndexSelector
from ...common.params import Params
from ...training.text_trainer import TextTrainer
from ...training.models import DeepQaModel
class GatedAttentionReader(TextTrainer):
"""
This TextTrainer implements the Gated Attention Reader model described in
"Gated-Attention Readers for Text Comprehension" by Dhingra et. al 2016. It encodes
the document with a variable number of gated attention layers, and then encodes
the query. It takes the dot product of these two final encodings to generate an
attention over the words in the document, and it then selects the option with the
highest summed or mean weight as the answer.
Parameters
----------
multiword_option_mode: str, optional (default="mean")
Describes how to calculate the probability of options
that contain multiple words. If "mean", the probability of
the option is taken to be the mean of the probabilities of
its constituent words. If "sum", the probability of the option
is taken to be the sum of the probabilities of its constituent
words.
num_gated_attention_layers: int, optional (default=3)
The number of gated attention layers to pass the document
embedding through. Must be at least 1.
cloze_token: str, optional (default=None)
If not None, the string that represents the cloze token in a cloze question.
Used to calculate the attention over the document, as the model does it
differently for cloze vs non-cloze datasets.
gating_function: str, optional (default="*")
The gating function to use in the Gated Attention layer. ``"*"`` is for
elementwise multiplication, ``"+"`` is for elementwise addition, and
``"|"`` is for concatenation.
gated_attention_dropout: float, optional (default=0.3)
The proportion of units to drop out after each gated attention layer.
qd_common_feature: boolean, optional (default=True)
Whether to use the question-document common word feature. This feature simply
indicates, for each word in the document, whether it appears in the query
and has been shown to improve reading comprehension performance.
"""
def __init__(self, params: Params):
self.max_question_length = params.pop('max_question_length', None)
self.max_passage_length = params.pop('max_passage_length', None)
self.max_option_length = params.pop('max_option_length', None)
self.num_options = params.pop('num_options', None)
# either "mean" or "sum"
self.multiword_option_mode = params.pop('multiword_option_mode', "mean")
# number of gated attention layers to use
self.num_gated_attention_layers = params.pop('num_gated_attention_layers', 3)
# gating function to use, either "*", "+", or "|"
self.gating_function = params.pop('gating_function', "*")
# dropout proportion after each gated attention layer.
self.gated_attention_dropout = params.pop('gated_attention_dropout', 0.3)
# If you are using the model on a cloze (fill in the blank) dataset,
# indicate what token indicates the blank.
self.cloze_token = params.pop('cloze_token', None)
self.cloze_token_index = None
# use the question document common word feature
self.use_qd_common_feature = params.pop('qd_common_feature', True)
super(GatedAttentionReader, self).__init__(params)
@overrides
def _build_model(self):
"""
The basic outline here is that we'll pass the questions and the
document / passage (think of this as a collection of possible answer
choices) into a word embedding layer.
"""
# get the index of the cloze token, if applicable
if self.cloze_token is not None:
self.cloze_token_index = self.data_indexer.get_word_index(self.cloze_token)
# First we create input layers and pass the question and document
# through embedding layers.
# shape: (batch size, question_length)
question_input_shape = self._get_sentence_shape(self.max_question_length)
question_input = Input(shape=question_input_shape,
dtype='int32', name="question_input")
# if using character embeddings, split off the question word indices.
if len(question_input_shape) > 1:
question_indices = VectorMatrixSplit(split_axis=-1)(question_input)[0]
else:
question_indices = question_input
# shape: (batch size, document_length)
document_input_shape = self._get_sentence_shape(self.max_passage_length)
        document_input = Input(shape=document_input_shape,
                               dtype='int32',
                               name="document_input")
# if using character embeddings, split off the document word indices.
if len(document_input_shape) > 1:
document_indices = VectorMatrixSplit(split_axis=-1)(document_input)[0]
else:
document_indices = document_input
# shape: (batch size, number of options, num words in option)
options_input_shape = ((self.num_options,) +
self._get_sentence_shape(self.max_option_length))
options_input = Input(shape=options_input_shape,
dtype='int32', name="options_input")
# if using character embeddings, split off the option word indices.
if len(options_input_shape) > 2:
options_indices = VectorMatrixSplit(split_axis=-1)(options_input)[0]
else:
options_indices = options_input
# shape: (batch size, question_length, embedding size)
question_embedding = self._embed_input(question_input)
# shape: (batch size, document_length, embedding size)
document_embedding = self._embed_input(document_input)
# We pass the question and document embedding through a variable
# number of gated-attention layers.
if self.num_gated_attention_layers < 1:
raise ConfigurationError("Need at least one gated attention layer.")
for i in range(self.num_gated_attention_layers-1):
# Note that the size of the last dimension of the input
# is not necessarily the embedding size in the second gated
# attention layer and beyond.
# We encode the question embeddings with a seq2seq encoder.
question_encoder = self._get_seq2seq_encoder(name="question_{}".format(i))
# shape: (batch size, question_length, 2*seq2seq hidden size)
encoded_question = question_encoder(question_embedding)
# We encode the document embeddings with a seq2seq encoder.
# Note that this is not the same encoder as used for the question.
document_encoder = self._get_seq2seq_encoder(name="document_{}".format(i))
# shape: (batch size, document_length, 2*seq2seq hidden size)
encoded_document = document_encoder(document_embedding)
# (batch size, document length, question length)
qd_attention = BatchDot()([encoded_document, encoded_question])
# (batch size, document length, question length)
normalized_qd_attention = MaskedSoftmax()(qd_attention)
gated_attention_layer = GatedAttention(self.gating_function,
name="gated_attention_{}".format(i))
# shape: (batch size, document_length, 2*seq2seq hidden size)
document_embedding = gated_attention_layer([encoded_document,
encoded_question,
normalized_qd_attention])
gated_attention_dropout = Dropout(self.gated_attention_dropout)
# shape: (batch size, document_length, 2*seq2seq hidden size)
document_embedding = gated_attention_dropout(document_embedding)
# Last Layer
if self.use_qd_common_feature:
            # get the one-hot features for common occurrence
            # shape: (batch size, document_length, 2)
qd_common_feature = Overlap()([document_indices,
question_indices])
# We concatenate qd_common_feature with the document embeddings.
# shape: (batch size, document_length, (2*seq2seq hidden size) + 2)
document_embedding = Concatenate()([document_embedding, qd_common_feature])
# We encode the document embeddings with a final seq2seq encoder.
document_encoder = self._get_seq2seq_encoder(name="document_final")
# shape: (batch size, document_length, 2*seq2seq hidden size)
final_encoded_document = document_encoder(document_embedding)
if self.cloze_token is None:
# Get a final encoding of the question from a biGRU that does not return
# the sequence, and use it to calculate attention over the document.
final_question_encoder = self._get_encoder(name="question_final")
# shape: (batch size, 2*seq2seq hidden size)
final_encoded_question = final_question_encoder(question_embedding)
else:
# We get a final encoding of the question by concatenating the forward
# and backward GRU at the index of the cloze token.
final_question_encoder = self._get_seq2seq_encoder(name="question_final")
# each are shape (batch size, question_length, seq2seq hidden size)
encoded_question_f, encoded_question_b = final_question_encoder(question_embedding)
# extract the gru outputs at the cloze token from the forward and
# backwards passes
index_selector = BiGRUIndexSelector(self.cloze_token_index)
final_encoded_question = index_selector([question_indices,
encoded_question_f,
encoded_question_b])
# take the softmax of the document_embedding after it has been passed
# through gated attention layers to get document probabilities
# shape: (batch size, document_length)
document_probabilities = Attention(name='question_document_softmax')([final_encoded_question,
final_encoded_document])
# We sum together the weights of words that match each option
# and use the multiword_option_mode to determine how to calculate
# the total probability of the option.
options_sum_layer = OptionAttentionSum(self.multiword_option_mode,
name="options_probability_sum")
# shape: (batch size, num_options)
options_probabilities = options_sum_layer([document_indices,
document_probabilities,
options_indices])
# We normalize the option_probabilities by dividing it by its L1 norm.
l1_norm_layer = L1Normalize()
# shape: (batch size, num_options)
option_normalized_probabilities = l1_norm_layer(options_probabilities)
        return DeepQaModel(inputs=[question_input, document_input, options_input],
                           outputs=option_normalized_probabilities)
@overrides
def _instance_type(self):
"""
Return the instance type that the model trains on.
"""
return McQuestionPassageInstance
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
"""
Return a dictionary with the appropriate padding lengths.
"""
padding_lengths = super(GatedAttentionReader, self).get_padding_lengths()
padding_lengths['num_question_words'] = self.max_question_length
padding_lengths['num_passage_words'] = self.max_passage_length
padding_lengths['num_option_words'] = self.max_option_length
padding_lengths['num_options'] = self.num_options
return padding_lengths
@overrides
def _set_padding_lengths(self, padding_lengths: Dict[str, int]):
"""
Set the padding lengths of the model.
"""
# TODO(nelson): superclass complains that there is no
# num_sentence_words key, so we set it to None here.
# We should probably patch up / organize the API.
padding_lengths["num_sentence_words"] = None
super(GatedAttentionReader, self)._set_padding_lengths(padding_lengths)
if self.max_question_length is None:
self.max_question_length = padding_lengths['num_question_words']
if self.max_passage_length is None:
self.max_passage_length = padding_lengths['num_passage_words']
if self.max_option_length is None:
self.max_option_length = padding_lengths['num_option_words']
if self.num_options is None:
self.num_options = padding_lengths['num_options']
@overrides
def _set_padding_lengths_from_model(self):
self._set_text_lengths_from_model_input(self.model.get_input_shape_at(0)[1][1:])
self.max_question_length = self.model.get_input_shape_at(0)[0][1]
self.max_passage_length = self.model.get_input_shape_at(0)[1][1]
self.num_options = self.model.get_input_shape_at(0)[2][1]
self.max_option_length = self.model.get_input_shape_at(0)[2][2]
@classmethod
def _get_custom_objects(cls):
custom_objects = super(GatedAttentionReader, cls)._get_custom_objects()
custom_objects["Attention"] = Attention
custom_objects["BatchDot"] = BatchDot
custom_objects["BiGRUIndexSelector"] = BiGRUIndexSelector
custom_objects["GatedAttention"] = GatedAttention
custom_objects["L1Normalize"] = L1Normalize
custom_objects["MaskedSoftmax"] = MaskedSoftmax
custom_objects["OptionAttentionSum"] = OptionAttentionSum
custom_objects["Overlap"] = Overlap
custom_objects["VectorMatrixSplit"] = VectorMatrixSplit
return custom_objects
| deep_qa-master | deep_qa/models/reading_comprehension/gated_attention_reader.py |
from .attention_sum_reader import AttentionSumReader
from .bidirectional_attention import BidirectionalAttentionFlow
from .gated_attention_reader import GatedAttentionReader
concrete_models = { # pylint: disable=invalid-name
'AttentionSumReader': AttentionSumReader,
'BidirectionalAttentionFlow': BidirectionalAttentionFlow,
'GatedAttentionReader': GatedAttentionReader,
}
| deep_qa-master | deep_qa/models/reading_comprehension/__init__.py |
from typing import Dict
from overrides import overrides
from keras.layers import Input
from ...data.instances.reading_comprehension import McQuestionPassageInstance
from ...layers import L1Normalize
from ...layers import OptionAttentionSum
from ...layers.attention import Attention
from ...training import TextTrainer
from ...common.params import Params
from ...training.models import DeepQaModel
class AttentionSumReader(TextTrainer):
"""
This TextTrainer implements the Attention Sum Reader model described by
    Kadlec et al. (2016). It takes a question and document as input, encodes the
document and question words with two separate Bidirectional GRUs, and then
takes the dot product of the question embedding with the document embedding
of each word in the document. This creates an attention over words in the
document, and it then selects the option with the highest summed or mean
weight as the answer.
"""
def __init__(self, params: Params):
self.max_question_length = params.pop('max_question_length', None)
self.max_passage_length = params.pop('max_passage_length', None)
self.max_option_length = params.pop('max_option_length', None)
self.num_options = params.pop('num_options', None)
# either "mean" or "sum"
self.multiword_option_mode = params.pop('multiword_option_mode', "mean")
super(AttentionSumReader, self).__init__(params)
@overrides
def _build_model(self):
"""
The basic outline here is that we'll pass the questions and the
document / passage (think of this as a collection of possible answer
choices) into a word embedding layer.
Then, we run the word embeddings from the document (a sequence) through
a bidirectional GRU and output a sequence that is the same length as
the input sequence size. For each time step, the output item
("contextual embedding") is the concatenation of the forward and
backward hidden states in the bidirectional GRU encoder at that time
step.
To get the encoded question, we pass the words of the question into
another bidirectional GRU. This time, the output encoding is a vector
containing the concatenation of the last hidden state in the forward
network with the last hidden state of the backward network.
We then take the dot product of the question embedding with each of the
contextual embeddings for the words in the documents. We sum up all the
        occurrences of a word ("total attention"), and pick the word with the
highest total attention in the document as the answer.
"""
# First we create input layers and pass the inputs through embedding layers.
# shape: (batch size, question_length)
question_input = Input(shape=self._get_sentence_shape(self.max_question_length),
dtype='int32', name="question_input")
# shape: (batch size, document_length)
document_input = Input(shape=self._get_sentence_shape(self.max_passage_length),
dtype='int32',
name="document_input")
# shape: (batch size, num_options, options_length)
options_input = Input(shape=(self.num_options,) + self._get_sentence_shape(self.max_option_length),
dtype='int32', name="options_input")
# shape: (batch size, question_length, embedding size)
question_embedding = self._embed_input(question_input)
# shape: (batch size, document_length, embedding size)
document_embedding = self._embed_input(document_input)
# We encode the question embeddings with some encoder.
question_encoder = self._get_encoder()
# shape: (batch size, 2*embedding size)
encoded_question = question_encoder(question_embedding)
# We encode the document with a seq2seq encoder. Note that this is not the same encoder as
# used for the question.
# TODO(nelson): Enable using the same encoder for both document and question. (This would
# be hard in our current code; you would need a method to transform an encoder into a
# seq2seq encoder.)
document_encoder = self._get_seq2seq_encoder()
# shape: (batch size, document_length, 2*embedding size)
encoded_document = document_encoder(document_embedding)
# Here we take the dot product of `encoded_question` and each word
# vector in `encoded_document`.
# shape: (batch size, max document length in words)
document_probabilities = Attention(name='question_document_softmax')([encoded_question,
encoded_document])
# We sum together the weights of words that match each option.
options_sum_layer = OptionAttentionSum(self.multiword_option_mode,
name="options_probability_sum")
# shape: (batch size, num_options)
options_probabilities = options_sum_layer([document_input,
document_probabilities, options_input])
# We normalize the option_probabilities by dividing each
        # element by the L1 norm (sum) of the whole tensor.
l1_norm_layer = L1Normalize()
# shape: (batch size, num_options)
option_normalized_probabilities = l1_norm_layer(options_probabilities)
        return DeepQaModel(inputs=[question_input, document_input, options_input],
                           outputs=option_normalized_probabilities)
@overrides
def _instance_type(self):
"""
Return the instance type that the model trains on.
"""
return McQuestionPassageInstance
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
"""
Return a dictionary with the appropriate padding lengths.
"""
padding_lengths = super(AttentionSumReader, self).get_padding_lengths()
padding_lengths['num_question_words'] = self.max_question_length
padding_lengths['num_passage_words'] = self.max_passage_length
padding_lengths['num_option_words'] = self.max_option_length
padding_lengths['num_options'] = self.num_options
return padding_lengths
@overrides
def _set_padding_lengths(self, padding_lengths: Dict[str, int]):
"""
Set the padding lengths of the model.
"""
# TODO(nelson): superclass complains that there is no
# num_sentence_words key, so we set it to None here.
# We should probably patch up / organize the API.
padding_lengths["num_sentence_words"] = None
super(AttentionSumReader, self)._set_padding_lengths(padding_lengths)
if self.max_question_length is None:
self.max_question_length = padding_lengths['num_question_words']
if self.max_passage_length is None:
self.max_passage_length = padding_lengths['num_passage_words']
if self.max_option_length is None:
self.max_option_length = padding_lengths['num_option_words']
if self.num_options is None:
self.num_options = padding_lengths['num_options']
@overrides
def _set_padding_lengths_from_model(self):
self._set_text_lengths_from_model_input(self.model.get_input_shape_at(0)[1][1:])
self.max_question_length = self.model.get_input_shape_at(0)[0][1]
self.max_passage_length = self.model.get_input_shape_at(0)[1][1]
self.num_options = self.model.get_input_shape_at(0)[2][1]
self.max_option_length = self.model.get_input_shape_at(0)[2][2]
@classmethod
def _get_custom_objects(cls):
custom_objects = super(AttentionSumReader, cls)._get_custom_objects()
custom_objects["Attention"] = Attention
custom_objects["L1Normalize"] = L1Normalize
custom_objects["OptionAttentionSum"] = OptionAttentionSum
return custom_objects
| deep_qa-master | deep_qa/models/reading_comprehension/attention_sum_reader.py |
from overrides import overrides
from keras.layers import Dense, Dropout, Input
from ...data.instances.text_classification.text_classification_instance import TextClassificationInstance
from ...training.text_trainer import TextTrainer
from ...training.models import DeepQaModel
from ...common.params import Params
class ClassificationModel(TextTrainer):
"""
A TextTrainer that simply takes word sequences as input (could be either sentences or logical
forms), encodes the sequence using a sentence encoder, then uses a few dense layers to decide
on some classification label for the text sequence (currently hard-coded for a binary
classification decision, but that's easy to fix if we need to).
We don't really expect this model to work for question answering - it's just a sentence
classification model. The best it can do is basically to learn word cooccurrence information,
similar to how the Salience solver works, and I'm not at all confident that this does that job
better than Salience. We've implemented this mostly as a simple baseline.
Note that this also can't actually answer questions at this point. You have to do some
post-processing to get from true/false decisions to question answers, and I removed that from
TextTrainer to make the code simpler.
"""
def __init__(self, params: Params):
super(ClassificationModel, self).__init__(params)
@overrides
def _build_model(self):
'''
train_input: numpy array: int32 (samples, num_words). Left padded arrays of word indices
from sentences in training data
'''
# Step 1: Convert the sentence input into sequences of word vectors.
sentence_input = Input(shape=self._get_sentence_shape(), dtype='int32', name="sentence_input")
word_embeddings = self._embed_input(sentence_input)
# Step 2: Pass the sequences of word vectors through the sentence encoder to get a sentence
        # vector.
sentence_encoder = self._get_encoder()
sentence_encoding = sentence_encoder(word_embeddings)
        # Add dropout after the sentence encoder.
regularized_sentence_encoding = Dropout(0.2)(sentence_encoding)
        # Step 3: Find p(true | proposition) by passing the encoded sentence through an MLP with
        # a ReLU projection layer and a softmax output layer.
projection_layer = Dense(int(self.embedding_layers['words'][0].output_dim / 2),
activation='relu',
name='projector')
softmax_layer = Dense(2, activation='softmax', name='softmax')
output_probabilities = softmax_layer(projection_layer(regularized_sentence_encoding))
# Step 4: Define crossentropy against labels as the loss.
return DeepQaModel(inputs=sentence_input, outputs=output_probabilities)
def _instance_type(self):
return TextClassificationInstance
@overrides
def _set_padding_lengths_from_model(self):
self._set_text_lengths_from_model_input(self.model.get_input_shape_at(0)[1:])
| deep_qa-master | deep_qa/models/text_classification/classification_model.py |
from .classification_model import ClassificationModel
concrete_models = { # pylint: disable=invalid-name
'ClassificationModel': ClassificationModel,
}
| deep_qa-master | deep_qa/models/text_classification/__init__.py |
 | deep_qa-master | deep_qa/testing/__init__.py |
# pylint: disable=invalid-name,protected-access
from copy import deepcopy
from unittest import TestCase
import codecs
import gzip
import logging
import os
import shutil
from keras import backend as K
import numpy
from numpy.testing import assert_allclose
from deep_qa.common.checks import log_keras_version_info
from deep_qa.data.instances.instance import TextInstance
from deep_qa.data.tokenizers import tokenizers
from deep_qa.common.params import Params
class DeepQaTestCase(TestCase): # pylint: disable=too-many-public-methods
TEST_DIR = './TMP_TEST/'
TRAIN_FILE = TEST_DIR + 'train_file'
VALIDATION_FILE = TEST_DIR + 'validation_file'
TEST_FILE = TEST_DIR + 'test_file'
TRAIN_BACKGROUND = TEST_DIR + 'train_background'
VALIDATION_BACKGROUND = TEST_DIR + 'validation_background'
SNLI_FILE = TEST_DIR + 'snli_file'
PRETRAINED_VECTORS_FILE = TEST_DIR + 'pretrained_glove_vectors_file'
PRETRAINED_VECTORS_GZIP = TEST_DIR + 'pretrained_glove_vectors_file.gz'
def setUp(self):
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.DEBUG)
log_keras_version_info()
os.makedirs(self.TEST_DIR, exist_ok=True)
def tearDown(self):
shutil.rmtree(self.TEST_DIR)
TextInstance.tokenizer = tokenizers["words"](Params({}))
K.clear_session()
def get_model_params(self, additional_arguments=None):
params = Params({})
params['save_models'] = False
params['model_serialization_prefix'] = self.TEST_DIR
params['train_files'] = [self.TRAIN_FILE]
params['validation_files'] = [self.VALIDATION_FILE]
params['embeddings'] = {'words': {'dimension': 6}, 'characters': {'dimension': 2}}
params['encoder'] = {"default": {'type': 'bow'}}
params['num_epochs'] = 1
params['validation_split'] = 0.0
if additional_arguments:
for key, value in additional_arguments.items():
params[key] = deepcopy(value)
return params
def get_model(self, model_class, additional_arguments=None):
params = self.get_model_params(additional_arguments)
return model_class(params)
def ensure_model_trains_and_loads(self, model_class, args: Params):
args['save_models'] = True
# Our loading tests work better if you're not using data generators. Unless you
# specifically request it in your test, we'll avoid using them here, and if you _do_ use
# them, we'll skip some of the stuff below that isn't compatible.
args.setdefault('data_generator', None)
model = self.get_model(model_class, args)
model.train()
# load the model that we serialized
loaded_model = self.get_model(model_class, args)
loaded_model.load_model()
# verify that original model and the loaded model predict the same outputs
if model._uses_data_generators():
# We shuffle the data in the data generator. Instead of making that logic more
# complicated, we'll just pass on the loading tests here. See comment above.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(model.validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
# We should get the same result if we index the data from the original model and the loaded
# model.
_, indexed_validation_arrays = loaded_model.load_data_arrays(model.validation_files)
if model._uses_data_generators():
# As above, we'll just pass on this.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(indexed_validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
return model, loaded_model
@staticmethod
def one_hot(index, length):
vector = numpy.zeros(length)
vector[index] = 1
return vector
def write_snli_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ttext 1\thypothesis1\tentails\n')
train_file.write('2\ttext 2\thypothesis2\tcontradicts\n')
train_file.write('3\ttext3\thypothesis3\tentails\n')
train_file.write('4\ttext 4\thypothesis4\tneutral\n')
train_file.write('5\ttext5\thypothesis 5\tentails\n')
train_file.write('6\ttext6\thypothesis6\tcontradicts\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\ttext 1 with extra words\thypothesis1\tentails\n')
validation_file.write('2\ttext 2\tlonger hypothesis 2\tcontradicts\n')
validation_file.write('3\ttext3\thypothesis withreallylongfakeword\tentails\n')
def write_sequence_tagging_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('cats###N\tare###V\tanimals###N\t.###N\n')
train_file.write('dogs###N\tare###V\tanimals###N\t.###N\n')
train_file.write('snakes###N\tare###V\tanimals###N\t.###N\n')
train_file.write('birds###N\tare###V\tanimals###N\t.###N\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('horses###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('blue###N\tcows###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('monkeys###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('caterpillars###N\tare###V\tanimals###N\t.###N\n')
def write_verb_semantics_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
train_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
train_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
validation_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
validation_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
def write_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq1a1\t0\n')
validation_file.write('2\tq1a2\t1\n')
validation_file.write('3\tq1a3\t0\n')
validation_file.write('4\tq1a4\t0\n')
validation_file.write('5\tq2a1\t0\n')
validation_file.write('6\tq2a2\t0\n')
validation_file.write('7\tq2a3\t1\n')
validation_file.write('8\tq2a4\t0\n')
validation_file.write('9\tq3a1\t0\n')
validation_file.write('10\tq3a2\t0\n')
validation_file.write('11\tq3a3\t0\n')
validation_file.write('12\tq3a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence1\t0\n')
train_file.write('2\tsentence2 word2 word3\t1\n')
train_file.write('3\tsentence3 word2\t0\n')
train_file.write('4\tsentence4\t1\n')
train_file.write('5\tsentence5\t0\n')
train_file.write('6\tsentence6\t0\n')
with codecs.open(self.TEST_FILE, 'w', 'utf-8') as test_file:
test_file.write('1\ttestsentence1\t0\n')
test_file.write('2\ttestsentence2 word2 word3\t1\n')
test_file.write('3\ttestsentence3 word2\t0\n')
test_file.write('4\ttestsentence4\t1\n')
test_file.write('5\ttestsentence5 word4\t0\n')
test_file.write('6\ttestsentence6\t0\n')
def write_additional_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq4a1\t0\n')
validation_file.write('2\tq4a2\t1\n')
validation_file.write('3\tq4a3\t0\n')
validation_file.write('4\tq4a4\t0\n')
validation_file.write('5\tq5a1\t0\n')
validation_file.write('6\tq5a2\t0\n')
validation_file.write('7\tq5a3\t1\n')
validation_file.write('8\tq5a4\t0\n')
validation_file.write('9\tq6a1\t0\n')
validation_file.write('10\tq6a2\t0\n')
validation_file.write('11\tq6a3\t0\n')
validation_file.write('12\tq6a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence7\t0\n')
train_file.write('2\tsentence8 word4 word5\t1\n')
train_file.write('3\tsentence9 word4\t0\n')
train_file.write('4\tsentence10\t1\n')
train_file.write('5\tsentence11 word3 word2\t0\n')
train_file.write('6\tsentence12\t0\n')
def write_question_answer_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion1\tanswer1###answer2\t0\n')
with codecs.open(self.VALIDATION_BACKGROUND, 'w', 'utf-8') as validation_background:
validation_background.write('1\tvb1\tvb2\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ta b e i d\tanswer 1###answer2\t0\n')
train_file.write('2\ta b c d\tanswer3###answer4\t1\n')
train_file.write('3\te d w f d s a b\tanswer5###answer6###answer9\t2\n')
train_file.write('4\te fj k w q\tanswer7###answer8\t0\n')
with codecs.open(self.TRAIN_BACKGROUND, 'w', 'utf-8') as train_background:
train_background.write('1\tsb1\tsb2\n')
train_background.write('2\tsb3\n')
train_background.write('3\tsb4\n')
train_background.write('4\tsb5\tsb6\n')
def write_who_did_what_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tHe went to the store to buy goods, because he wanted to.'
'\tHe bought xxxxx\tgoods###store\t0\n')
validation_file.write('1\tShe hiking on the weekend with her friend.'
'\tShe went xxxxx\thiking###friend###weekend###her friend\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tFred hit the ball with the bat.\tHe hit the ball with the xxxxx\tbat###ball\t0\n')
train_file.write('1\tShe walked the dog today.\tThe xxxxx was walked today.\tShe###dog###today\t1\n')
train_file.write('1\tHe kept typing at his desk.\tHe typed at his xxxxx\tdesk###kept\t0\n')
train_file.write('1\tThe pup at the bone but not the biscuit.\tThe pup ate the xxxxx\t'
'bone###biscuit\t0\n')
def write_tuple_inference_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tss<>v f d<>oo o<>c$$$s<>v ff<>o i###ss r<>v<>o e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tss<>v<>oo o<>c$$$s e<>ff<>o ii i###ss r<>rr<>o e<>o ee\t'
'ss<>ve gg<>o sd<>ccs\t0\n')
train_file.write('2\tsg g<>vg<>oo o<>c$$$s e<>v ff<>o ii i###ss<>v rr<>o e<>o ee'
'###hh kk<>hdj d<>hh\tss ss<>ve gg<>o sd<>ccs\t2\n')
train_file.write('3\ts r<>v f d<>o ss<>c$$$s e<>v ff<>o ss i$$$r<>v ss<>s o e<>o ee\t'
'ss ss<>v g<>o sd<>ccs\t0\n')
train_file.write('4\tty y<>cf fv ss<>s ss<>c$$$rt e<>vv f<>oss i i###ss<>v<>os e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t1\n')
def write_span_prediction_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion 1 with extra words\t'
'passage with answer and a reallylongword\t13,18\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tquestion 1\tpassage1 with answer1\t14,20\n')
train_file.write('2\tquestion 2\tpassage2 with answer2\t0,8\n')
train_file.write('3\tquestion 3\tpassage3 with answer3\t9,13\n')
train_file.write('4\tquestion 4\tpassage4 with answer4\t14,20\n')
def write_sentence_selection_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tWhere is Paris?\tParis is the capital of France.###It '
'is by the Seine.###It is quite old###this is a '
'very long sentence meant to test that loading '
'and padding works properly in the model.\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tWho won Super Bowl 50?\tSuper Bowl 50 was in Santa '
'Clara.###The Patriots beat the Broncos.\t1\n')
train_file.write('2\tWhen is Thanksgiving?\tFolk tales tell '
'of the Pilgrims celebrating the holiday.###Many '
'people eat a lot.###It is in November.\t2\n')
train_file.write('3\tWhen were computers invented?\tThe ancient Chinese used '
'abacuses.###Alan Turing cracked Enigma.###It is hard to '
'pinpoint an inventor of the computer.\t2\n')
def write_pretrained_vector_files(self):
# write the file
with codecs.open(self.PRETRAINED_VECTORS_FILE, 'w', 'utf-8') as vector_file:
vector_file.write('word2 0.21 0.57 0.51 0.31\n')
vector_file.write('sentence1 0.81 0.48 0.19 0.47\n')
# compress the file
with open(self.PRETRAINED_VECTORS_FILE, 'rb') as f_in:
with gzip.open(self.PRETRAINED_VECTORS_GZIP, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def write_sentence_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write("This is a sentence for language modelling.\n")
train_file.write("Here's another one for language modelling.\n")
def write_original_snli_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# pylint: disable=line-too-long
train_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
# pylint: disable=line-too-long
validation_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
| deep_qa-master | deep_qa/testing/test_case.py |
import io
import os
class TeeLogger:
"""
This class is an attempt to maintain logs of both stdout and stderr for when models are run.
To use this class, at the beginning of your script insert these lines::
sys.stdout = TeeLogger("stdout.log", sys.stdout)
sys.stderr = TeeLogger("stdout.log", sys.stderr)
"""
def __init__(self, filename: str, terminal: io.TextIOWrapper):
self.terminal = terminal
parent_directory = os.path.dirname(filename)
os.makedirs(parent_directory, exist_ok=True)
self.log = open(filename, 'a')
def write(self, message):
self.terminal.write(message)
# We'll special case a particular thing that keras does, to make the log file more
# readable. Keras uses ^H characters to get the training line to update for each batch
# without adding more lines to the terminal output. Displaying those in a file won't work
# correctly, so we'll just make sure that each batch shows up on its own line.
if '\x08' in message:
message = message.replace('\x08', '')
if len(message) == 0 or message[-1] != '\n':
message += '\n'
self.log.write(message)
def flush(self):
self.terminal.flush()
self.log.flush()
| deep_qa-master | deep_qa/common/tee_logger.py |
from typing import Any, Dict, List
from collections import MutableMapping
import logging
import pyhocon
from overrides import overrides
from .checks import ConfigurationError
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PARAMETER = 60
logging.addLevelName(PARAMETER, "PARAM")
def __param(self, message, *args, **kws):
"""
Add a method to logger which allows us to always log parameters unless you set the logging
level to be higher than 60 (which is higher than the standard highest level of 50,
    corresponding to CRITICAL).
"""
# Logger takes its '*args' as 'args'.
if self.isEnabledFor(PARAMETER):
self._log(PARAMETER, message, args, **kws) # pylint: disable=protected-access
logging.Logger.param = __param
class Params(MutableMapping):
"""
Represents a parameter dictionary with a history, and contains other functionality around
parameter passing and validation for DeepQA.
There are currently two benefits of a ``Params`` object over a plain dictionary for parameter
passing:
#. We handle a few kinds of parameter validation, including making sure that parameters
representing discrete choices actually have acceptable values, and making sure no extra
parameters are passed.
#. We log all parameter reads, including default values. This gives a more complete
specification of the actual parameters used than is given in a JSON / HOCON file, because
those may not specify what default values were used, whereas this will log them.
The convention for using a ``Params`` object in DeepQA is that you will consume the parameters
as you read them, so that there are none left when you've read everything you expect. This
lets us easily validate that you didn't pass in any `extra` parameters, just by making sure
that the parameter dictionary is empty. You should do this when you're done handling
parameters, by calling :func:`Params.assert_empty`.
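    As a rough illustration (the keys and the class name here are just made-up examples),
    typical usage looks something like this::
        params = Params({"num_epochs": 20, "patience": 5})
        num_epochs = params.pop("num_epochs")  # 20
        patience = params.pop("patience", 10)  # 5, since the key is present
        params.assert_empty("MyModel")  # fine, because everything has been consumed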
"""
# This allows us to check for the presence of "None" as a default argument,
    # which we require because we make a distinction between passing a value of "None"
# and passing no value to the default parameter of "pop".
DEFAULT = object()
def __init__(self, params: Dict[str, Any], history: str=""):
self.params = params
self.history = history
@overrides
def pop(self, key: str, default: Any=DEFAULT):
"""
Performs the functionality associated with dict.pop(key), along with checking for
returned dictionaries, replacing them with Param objects with an updated history.
If ``key`` is not present in the dictionary, and no default was specified, we raise a
``ConfigurationError``, instead of the typical ``KeyError``.
"""
if default is self.DEFAULT:
try:
value = self.params.pop(key)
except KeyError:
raise ConfigurationError("key \"{}\" is required at location \"{}\"".format(key, self.history))
else:
value = self.params.pop(key, default)
logger.param(self.history + key + " = " + str(value))
return self.__check_is_dict(key, value)
@overrides
def get(self, key: str, default: Any=DEFAULT):
"""
Performs the functionality associated with dict.get(key) but also checks for returned
dicts and returns a Params object in their place with an updated history.
"""
if default is self.DEFAULT:
try:
                value = self.params[key]
except KeyError:
raise ConfigurationError("key \"{}\" is required at location \"{}\"".format(key, self.history))
else:
value = self.params.get(key, default)
return self.__check_is_dict(key, value)
def pop_choice(self, key: str, choices: List[Any], default_to_first_choice: bool=False):
"""
Gets the value of ``key`` in the ``params`` dictionary, ensuring that the value is one of
the given choices. Note that this `pops` the key from params, modifying the dictionary,
consistent with how parameters are processed in this codebase.
Parameters
----------
key: str
Key to get the value from in the param dictionary
choices: List[Any]
A list of valid options for values corresponding to ``key``. For example, if you're
specifying the type of encoder to use for some part of your model, the choices might be
the list of encoder classes we know about and can instantiate. If the value we find in
the param dictionary is not in ``choices``, we raise a ``ConfigurationError``, because
the user specified an invalid value in their parameter file.
default_to_first_choice: bool, optional (default=False)
If this is ``True``, we allow the ``key`` to not be present in the parameter
            dictionary. If the key is not present, we will use the first choice in the
            ``choices`` list as the return value. If this is ``False``, we raise a
``ConfigurationError``, because specifying the ``key`` is required (e.g., you `have` to
specify your model class when running an experiment, but you can feel free to use
default settings for encoders if you want).
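        A small sketch of typical usage (the parameter name and choices here are hypothetical)::
            params = Params({"encoder": "lstm"})
            encoder_choice = params.pop_choice("encoder", ["bow", "lstm", "gru"])  # "lstm"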
"""
default = choices[0] if default_to_first_choice else self.DEFAULT
value = self.pop(key, default)
if value not in choices:
message = '%s not in acceptable choices for %s: %s' % (value, self.history, str(choices))
raise ConfigurationError(message)
return value
def as_dict(self, quiet=False):
"""
Sometimes we need to just represent the parameters as a dict, for instance when we pass
        them to a Keras layer (so that they can be serialised).
Parameters
----------
quiet: bool, optional (default = False)
Whether to log the parameters before returning them as a dict.
"""
if quiet:
return self.params
def log_recursively(parameters, history):
for key, value in parameters.items():
if isinstance(value, dict):
new_local_history = history + key + "."
log_recursively(value, new_local_history)
else:
logger.param(history + key + " = " + str(value))
logger.info("Converting Params object to dict; logging of default "
"values will not occur when dictionary parameters are "
"used subsequently.")
logger.info("CURRENTLY DEFINED PARAMETERS: ")
log_recursively(self.params, self.history)
return self.params
def assert_empty(self, class_name: str):
"""
Raises a ``ConfigurationError`` if ``self.params`` is not empty. We take ``class_name`` as
an argument so that the error message gives some idea of where an error happened, if there
was one. ``class_name`` should be the name of the `calling` class, the one that got extra
parameters (if there are any).
"""
if len(self.params) != 0:
raise ConfigurationError("Extra parameters passed to {}: {}".format(class_name, self.params))
def __getitem__(self, key):
if key in self.params:
return self.__check_is_dict(key, self.params[key])
else:
raise KeyError
def __setitem__(self, key, value):
self.params[key] = value
def __delitem__(self, key):
del self.params[key]
def __iter__(self):
return iter(self.params)
def __len__(self):
return len(self.params)
def __check_is_dict(self, new_history, value):
if isinstance(value, dict):
new_history = self.history + new_history + "."
return Params(value, new_history)
else:
return value
def pop_choice(params: Dict[str, Any],
key: str,
choices: List[Any],
default_to_first_choice: bool=False,
history: str="?.") -> Any:
"""
Performs the same function as :func:`Params.pop_choice`, but is required in order to deal with
places that the Params object is not welcome, such as inside Keras layers. See the docstring
of that method for more detail on how this function works.
This method adds a ``history`` parameter, in the off-chance that you know it, so that we can
reproduce :func:`Params.pop_choice` exactly. We default to using "?." if you don't know the
history, so you'll have to fix that in the log if you want to actually recover the logged
parameters.
"""
value = Params(params, history).pop_choice(key, choices, default_to_first_choice)
return value
def replace_none(dictionary: Dict[str, Any]) -> Dict[str, Any]:
for key in dictionary.keys():
if dictionary[key] == "None":
dictionary[key] = None
elif isinstance(dictionary[key], pyhocon.config_tree.ConfigTree):
dictionary[key] = replace_none(dictionary[key])
return dictionary
| deep_qa-master | deep_qa/common/params.py |
from typing import List
from keras.models import Model
from ..training.models import DeepQaModel
def get_submodel(model: Model,
input_layer_names: List[str],
output_layer_names: List[str],
train_model: bool=False,
name=None):
"""
Returns a new model constructed from ``model``. This model will be a subset of the given
``Model``, with the inputs specified by ``input_layer_names`` and the outputs specified by
    ``output_layer_names``. For example, if the input model is :class:`BidirectionalAttentionFlow
    <deep_qa.models.reading_comprehension.bidirectional_attention.BidirectionalAttentionFlow>`, you can use
this to get a model that outputs the passage embedding, just before the span prediction
layers, by calling
``get_submodel(bidaf.model, ['question_input', 'passage_input'], ['final_merged_passage'])``.
"""
layer_input_dict = {}
layer_output_dict = {}
for layer in model.layers:
layer_input_dict[layer.name] = layer.get_input_at(0)
layer_output_dict[layer.name] = layer.get_output_at(0)
input_layers = [layer_input_dict[name] for name in input_layer_names]
output_layers = [layer_output_dict[name] for name in output_layer_names]
submodel = DeepQaModel(inputs=input_layers, outputs=output_layers, name=name)
if not train_model:
submodel.trainable = False
return submodel
| deep_qa-master | deep_qa/common/models.py |
from itertools import zip_longest
from typing import Any, Dict, List
import random
def group_by_count(iterable: List[Any], count: int, default_value: Any) -> List[List[Any]]:
"""
Takes a list and groups it into sublists of size ``count``, using ``default_value`` to pad the
    list at the end if the list is not divisible by ``count``.
For example:
>>> group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 0)
[[1, 2, 3], [4, 5, 6], [7, 0, 0]]
This is a short method, but it's complicated and hard to remember as a one-liner, so we just
make a function out of it.
"""
return [list(l) for l in zip_longest(*[iter(iterable)] * count, fillvalue=default_value)]
def add_noise_to_dict_values(dictionary: Dict[Any, float], noise_param: float) -> Dict[Any, float]:
"""
Returns a new dictionary with noise added to every key in ``dictionary``. The noise is
uniformly distributed within ``noise_param`` percent of the value for every value in the
dictionary.
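    For example (the exact values vary from run to run, because the noise is random)::
        >>> noisy = add_noise_to_dict_values({'a': 10.0, 'b': 20.0}, 0.1)
        >>> 9.0 <= noisy['a'] <= 11.0
        True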
"""
new_dict = {}
for key, value in dictionary.items():
noise_value = value * noise_param
noise = random.uniform(-noise_value, noise_value)
new_dict[key] = value + noise
return new_dict
def clean_layer_name(input_name: str,
strip_right_of_last_backslash: bool=True,
strip_numerics_after_underscores: bool=True):
"""
There exist cases when layer names need to be concatenated in order to create new, unique
layer names. However, the indices added to layer names designating the ith output of calling
the layer cannot occur within a layer name apart from at the end, so this utility function
removes these.
Parameters
----------
input_name: str, required
A Keras layer name.
strip_right_of_last_backslash: bool, optional, (default = True)
        Should we strip anything past the last forward slash ('/') in the name?
This can be useful for controlling scopes.
strip_numerics_after_underscores: bool, optional, (default = True)
If there are numerical values after an underscore at the end of the layer name,
this flag specifies whether or not to remove them.
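    For example, with the default flags (the layer names here are just illustrative)::
        >>> clean_layer_name('dense_1/kernel:0')
        'dense'
        >>> clean_layer_name('question_input_2:1')
        'question_input'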
"""
# Always strip anything after :, as these will be numerical
# counts of the number of times the layer has been called,
# which cannot be included in a layer name.
if ':' in input_name:
input_name = input_name.split(':')[0]
if '/' in input_name and strip_right_of_last_backslash:
input_name = input_name.rsplit('/', 1)[0]
if input_name.split('_')[-1].isdigit() and strip_numerics_after_underscores:
input_name = '_'.join(input_name.split('_')[:-1])
return input_name
| deep_qa-master | deep_qa/common/util.py |
import logging
import os
REQUIRED_PYTHONHASHSEED = '2157'
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ConfigurationError(Exception):
def __init__(self, message):
super(ConfigurationError, self).__init__()
self.message = message
def __str__(self):
return repr(self.message)
def ensure_pythonhashseed_set():
message = """You must set PYTHONHASHSEED to %s so we get repeatable results and tests pass.
You can do this with the command `export PYTHONHASHSEED=%s`.
See https://docs.python.org/3/using/cmdline.html#envvar-PYTHONHASHSEED for more info.
"""
assert os.environ.get('PYTHONHASHSEED', None) == REQUIRED_PYTHONHASHSEED, \
message % (REQUIRED_PYTHONHASHSEED, REQUIRED_PYTHONHASHSEED)
def log_keras_version_info():
import keras
logger.info("Keras version: " + keras.__version__)
from keras import backend as K
try:
backend = K.backend()
except AttributeError:
backend = K._BACKEND # pylint: disable=protected-access
if backend == 'theano':
import theano
logger.info("Theano version: " + theano.__version__)
logger.warning("Using Keras' theano backend is not supported! Expect to crash...")
elif backend == 'tensorflow':
import tensorflow
logger.info("Tensorflow version: " + tensorflow.__version__) # pylint: disable=no-member
| deep_qa-master | deep_qa/common/checks.py |
deep_qa-master | deep_qa/common/__init__.py |
|
from .datasets.dataset import Dataset, IndexedDataset, TextDataset
from .data_generator import DataGenerator
from .data_indexer import DataIndexer
from .tokenizers import tokenizers
| deep_qa-master | deep_qa/data/__init__.py |
from collections import defaultdict
import codecs
import logging
import tqdm
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class DataIndexer:
"""
A DataIndexer maps strings to integers, allowing for strings to be mapped to an
out-of-vocabulary token.
DataIndexers are fit to a particular dataset, which we use to decide which words are
in-vocabulary.
DataIndexers also allow for several different namespaces, so you can have separate word indices
for 'a' as a word, and 'a' as a character, for instance. Most of the methods on this class
allow you to pass in a namespace; by default we use the 'words' namespace, and you can omit the
namespace argument everywhere and just use the default.
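    A minimal sketch of typical usage (the words here are arbitrary examples; indices 0 and 1
    are reserved for padding and OOV)::
        >>> data_indexer = DataIndexer()
        >>> data_indexer.add_word_to_index("horse")
        2
        >>> data_indexer.get_word_index("horse")
        2
        >>> data_indexer.get_word_index("zebra")  # unseen words map to the OOV token
        1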
"""
def __init__(self):
# Typically all input words to this code are lower-cased, so we could simply use "PADDING"
# for this. But doing it this way, with special characters, future-proofs the code in case
# it is used later in a setting where not all input is lowercase.
self._padding_token = "@@PADDING@@"
        self._oov_token = "@@UNKNOWN@@"
self.word_indices = defaultdict(lambda: {self._padding_token: 0, self._oov_token: 1})
self.reverse_word_indices = defaultdict(lambda: {0: self._padding_token, 1: self._oov_token})
self._finalized = False
def set_from_file(self, filename: str, oov_token: str="@@UNKNOWN@@", namespace: str="words"):
self._oov_token = oov_token
self.word_indices[namespace] = {self._padding_token: 0}
self.reverse_word_indices[namespace] = {0: self._padding_token}
with codecs.open(filename, 'r', 'utf-8') as input_file:
for i, line in enumerate(input_file.readlines()):
token = line[:-1] # remove the newline
self.word_indices[namespace][token] = i + 1
self.reverse_word_indices[namespace][i + 1] = token
def finalize(self):
logger.info("Finalizing data indexer")
self._finalized = True
def fit_word_dictionary(self, dataset, min_count: int=1):
"""
Given a ``Dataset``, this method decides which words are given an index, and which ones are
mapped to an OOV token (in this case "UNK"). This method must be called before any dataset
is indexed with this ``DataIndexer``. If you don't first fit the word dictionary, you'll
basically map every token onto "UNK".
We call ``instance.words()`` for each instance in the dataset, and then keep all words that
appear at least ``min_count`` times.
Parameters
----------
dataset: ``TextDataset``
The dataset to index.
min_count: int, optional (default=1)
            The minimum number of occurrences a word must have in the dataset
in order to be assigned an index.
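        A minimal sketch (assuming ``dataset`` is a ``TextDataset`` whose instances have already
        been read in)::
            data_indexer = DataIndexer()
            data_indexer.fit_word_dictionary(dataset, min_count=2)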
"""
logger.info("Fitting word dictionary with min count of %d, finalized is %s",
min_count, self._finalized)
if self._finalized:
logger.warning("Trying to fit a finalized DataIndexer. This is a no-op. Did you "
"really want to do this?")
return
namespace_word_counts = defaultdict(lambda: defaultdict(int))
for instance in tqdm.tqdm(dataset.instances):
namespace_dict = instance.words()
for namespace in namespace_dict:
for word in namespace_dict[namespace]:
namespace_word_counts[namespace][word] += 1
for namespace in tqdm.tqdm(namespace_word_counts):
for word, count in namespace_word_counts[namespace].items():
if count >= min_count:
self.add_word_to_index(word, namespace)
def add_word_to_index(self, word: str, namespace: str='words') -> int:
"""
Adds `word` to the index, if it is not already present. Either way, we return the index of
the word.
"""
if self._finalized:
logger.warning("Trying to add a word to a finalized DataIndexer. This is a no-op. "
"Did you really want to do this?")
return self.word_indices[namespace].get(word, -1)
if word not in self.word_indices[namespace]:
index = len(self.word_indices[namespace])
self.word_indices[namespace][word] = index
self.reverse_word_indices[namespace][index] = word
return index
else:
return self.word_indices[namespace][word]
def words_in_index(self, namespace: str='words'):
return self.word_indices[namespace].keys()
def get_word_index(self, word: str, namespace: str='words'):
if word in self.word_indices[namespace]:
return self.word_indices[namespace][word]
else:
return self.word_indices[namespace][self._oov_token]
def get_word_from_index(self, index: int, namespace: str='words'):
return self.reverse_word_indices[namespace][index]
def get_vocab_size(self, namespace: str='words'):
return len(self.word_indices[namespace])
| deep_qa-master | deep_qa/data/data_indexer.py |
import codecs
import gzip
import logging
import numpy
from keras.layers import Embedding
from .data_indexer import DataIndexer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class PretrainedEmbeddings:
@staticmethod
def initialize_random_matrix(shape, seed=1337):
# TODO(matt): we now already set the random seed, in run_solver.py. This should be
# changed.
numpy_rng = numpy.random.RandomState(seed)
        return numpy_rng.uniform(size=shape, low=-0.05, high=0.05)
@staticmethod
def get_embedding_layer(embeddings_filename: str,
data_indexer: DataIndexer,
trainable=False,
log_misses=False,
name="pretrained_embedding"):
"""
Reads a pre-trained embedding file and generates a Keras Embedding layer that has weights
initialized to the pre-trained embeddings. The Embedding layer can either be trainable or
not.
We use the DataIndexer to map from the word strings in the embeddings file to the indices
that we need, and to know which words from the embeddings file we can safely ignore. If we
come across a word in DataIndexer that does not show up with the embeddings file, we give
it a zero vector.
The embeddings file is assumed to be gzipped, formatted as [word] [dim 1] [dim 2] ...
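        A rough usage sketch (the filename is hypothetical, and ``data_indexer`` is assumed to
        have already been fit on your dataset)::
            embedding_layer = PretrainedEmbeddings.get_embedding_layer(
                    "glove.840B.300d.txt.gz", data_indexer, trainable=False)
            embedded_words = embedding_layer(word_input_layer)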
"""
words_to_keep = set(data_indexer.words_in_index())
vocab_size = data_indexer.get_vocab_size()
embeddings = {}
embedding_dim = None
# TODO(matt): make this a parameter
embedding_misses_filename = 'embedding_misses.txt'
# First we read the embeddings from the file, only keeping vectors for the words we need.
logger.info("Reading embeddings from file")
with gzip.open(embeddings_filename, 'rb') as embeddings_file:
for line in embeddings_file:
fields = line.decode('utf-8').strip().split(' ')
if embedding_dim is None:
embedding_dim = len(fields) - 1
assert embedding_dim > 1, "Found embedding size of 1; do you have a header?"
else:
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
continue
word = fields[0]
if word in words_to_keep:
vector = numpy.asarray(fields[1:], dtype='float32')
embeddings[word] = vector
# Now we initialize the weight matrix for an embedding layer, starting with random vectors,
# then filling in the word vectors we just read.
logger.info("Initializing pre-trained embedding layer")
if log_misses:
logger.info("Logging embedding misses to %s", embedding_misses_filename)
embedding_misses_file = codecs.open(embedding_misses_filename, 'w', 'utf-8')
embedding_matrix = PretrainedEmbeddings.initialize_random_matrix((vocab_size, embedding_dim))
# The 2 here is because we know too much about the DataIndexer. Index 0 is the padding
# index, and the vector for that dimension is going to be 0. Index 1 is the OOV token, and
# we can't really set a vector for the OOV token.
for i in range(2, vocab_size):
word = data_indexer.get_word_from_index(i)
# If we don't have a pre-trained vector for this word, we'll just leave this row alone,
# so the word has a random initialization.
if word in embeddings:
embedding_matrix[i] = embeddings[word]
elif log_misses:
print(word, file=embedding_misses_file)
if log_misses:
embedding_misses_file.close()
# The weight matrix is initialized, so we construct and return the actual Embedding layer.
return Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
mask_zero=True,
weights=[embedding_matrix],
trainable=trainable,
name=name)
| deep_qa-master | deep_qa/data/embeddings.py |
from typing import List
import logging
import random
from copy import deepcopy
from ..common.params import Params
from ..common.util import group_by_count
from . import IndexedDataset
from .instances import IndexedInstance
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class DataGenerator:
"""
A ``DataGenerator`` takes an :class:`~.dataset.IndexedDataset` and converts it into a
generator, yielding batches suitable for training. You might want to do this instead of just
creating one large set of numpy arrays for a few reasons:
#. Creating large arrays for your whole data could take a whole lot of memory, maybe more than
is available on your machine.
#. Creating one large array means padding all of your instances to the same length. This
typically means you waste a whole lot of computation on padding tokens. Using a
``DataGenerator`` instead allows you to only pad each `batch` to the same length, instead of
all of your instances across your whole dataset. We've typically seen a 4-5x speed up just
from doing this (partially because Keras is pretty bad at doing variable-length computation;
the speed-up isn't quite as large with plain tensorflow, I think).
#. If we're varying the padding lengths in each batch, we can also vary the batch size, to
optimize GPU memory usage. This means we'll use smaller batch sizes for big instances, and
larger batch sizes for small instances. We've seen speedups up to 10-12x (on top of the
4-5x speed up above) from doing this.
Parameters
----------
text_trainer: TextTrainer
We need access to the ``TextTrainer`` object so we can call some methods on it, such as
:func:`~deep_qa.training.TextTrainer.get_instance_sorting_keys`.
dynamic_padding: bool, optional (default=False)
If ``True``, we will set padding lengths based on the data `per batch`, instead of on the
whole dataset. This only works if your model is structured to allow variable-length
sequences (typically using ``None`` for specific dimensions when you build your model), and
if you don't set padding values in
:func:`~deep_qa.training.TextTrainer._set_padding_lengths`. This flag specifically is read
in :func:`~deep_qa.training.TextTrainer._set_padding_lengths` to know if we should set
certain padding values or not. It's handled correctly for ``num_sentence_words`` and
``num_word_characters`` in :class:`~deep_qa.training.TextTrainer`, but you need to be sure
to implement it correctly in subclasses for this to work.
    padding_noise: double, optional (default=0.2)
When sorting by padding length, we add a bit of noise to the lengths, so that the sorting
isn't deterministic. This parameter determines how much noise we add, as a percentage of
the actual padding value for each instance.
sort_every_epoch: bool, optional (default=True)
If ``True``, we will re-sort the data after every epoch, then re-group the instances into
batches. If ``padding_noise`` is zero, this does nothing, but if it's non-zero, this will
give you a slightly different ordering, so you don't have exactly the same batches at every
epoch. If you're doing adaptive batch sizes, this will lead to re-computing the adaptive
batches each epoch, which could give a different number of batches for the whole dataset,
which means each "epoch" might no longer correspond to `exactly` one pass over the data.
This is probably a pretty minor issue, though.
adaptive_batch_sizes: bool, optional (default=False)
Only relevant if ``dynamic_padding`` is ``True``. If ``adaptive_batch_sizes`` is ``True``,
we will vary the batch size to try to optimize GPU memory usage. Because padding lengths
are done dynamically, we can have larger batches when padding lengths are smaller,
maximizing our usage of the GPU. In order for this to work, you need to do two things: (1)
override :func:`~TextTrainer._get_padding_memory_scaling` to give a big-O bound on memory
usage given padding lengths, and (2) tune the `adaptive_memory_usage_constant` parameter
for your particular model and GPU. See the documentation for
:func:`~TextTrainer._get_padding_memory_scaling` for more information.
adaptive_memory_usage_constant: int, optional (default=None)
Only relevant if ``adaptive_batch_sizes`` is ``True``. This is a manually-tuned parameter,
specific to a particular model architecture and amount of GPU memory (e.g., if you change
the number of hidden layers in your model, this number will need to change). See
:func:`~TextTrainer._get_padding_memory_scaling` for more detail. The recommended way to
tune this parameter is to (1) use a fixed batch size, with ``biggest_batch_first`` set to
``True``, and find out the maximum batch size you can handle on your biggest instances
without running out of memory. Then (2) turn on ``adaptive_batch_sizes``, and set this
parameter so that you get the right batch size for your biggest instances. If you set the
log level to ``DEBUG`` in ``scripts/run_model.py``, you can see the batch sizes that are
computed.
maximum_batch_size: int, optional (default=1000000)
If we're using adaptive batch sizes, you can use this to be sure you do not create batches
larger than this, even if you have enough memory to handle it on your GPU. You might
choose to do this to keep smaller batches because you like the noisier gradient estimates
that come from smaller batches, for instance.
biggest_batch_first: bool, optional (default=False)
This is largely for testing, to see how large of a batch you can safely use with your GPU.
It's only meaningful if you're using dynamic padding - this will let you try out the
largest batch that you have in the data `first`, so that if you're going to run out of
memory, you know it early, instead of waiting through the whole batch to find out at the
end that you're going to crash.
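    A rough usage sketch (assuming ``text_trainer`` is an instantiated ``TextTrainer``,
    ``dataset`` is an ``IndexedDataset``, and ``model`` is your compiled Keras model)::
        generator = DataGenerator(text_trainer, Params({"dynamic_padding": True}))
        batches = generator.create_generator(dataset)
        model.fit_generator(batches, steps_per_epoch=generator.last_num_batches)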
"""
def __init__(self, text_trainer, params: Params):
self.text_trainer = text_trainer
self.dynamic_padding = params.pop('dynamic_padding', False)
self.padding_noise = params.pop('padding_noise', 0.2)
self.sort_every_epoch = params.pop('sort_every_epoch', True)
self.adaptive_batch_sizes = params.pop('adaptive_batch_sizes', False)
self.adaptive_memory_usage_constant = params.pop('adaptive_memory_usage_constant', False)
self.maximum_batch_size = params.pop('maximum_batch_size', 1000000)
self.biggest_batch_first = params.pop('biggest_batch_first', False)
#: This field can be read after calling ``create_generator`` to get the number of steps you
#: should take per epoch in ``model.fit_generator`` or ``model.evaluate_generator`` for
#: this data.
self.last_num_batches = None
def create_generator(self, dataset: IndexedDataset, batch_size: int=None):
"""
Main external API call: converts an ``IndexedDataset`` into a data generator suitable for
use with Keras' ``fit_generator`` and related methods.
"""
if batch_size is None:
batch_size = self.text_trainer.batch_size
grouped_instances = self.__create_batches(dataset, batch_size)
self.last_num_batches = len(grouped_instances)
def generator():
while True:
if self.sort_every_epoch:
unpadded_dataset = deepcopy(dataset)
groups = self.__create_batches(unpadded_dataset, batch_size)
else:
groups = grouped_instances
for group in groups:
batch = IndexedDataset(group)
batch.pad_instances(self.text_trainer.get_padding_lengths(), verbose=False)
yield batch.as_training_data()
return generator()
def __create_batches(self, dataset: IndexedDataset, batch_size: int) -> List[List[IndexedInstance]]:
if self.dynamic_padding:
dataset.sort_by_padding(self.text_trainer.get_instance_sorting_keys(), self.padding_noise)
instances = dataset.instances
if self.adaptive_batch_sizes:
grouped_instances = self.__adaptive_grouping(instances)
else:
grouped_instances = group_by_count(instances, batch_size, None)
grouped_instances[-1] = [instance for instance in grouped_instances[-1] if instance is not None]
if self.biggest_batch_first:
# We'll actually pop the last _two_ batches, because the last one might not
# be full.
last_batch = grouped_instances.pop()
penultimate_batch = grouped_instances.pop()
random.shuffle(grouped_instances)
grouped_instances.insert(0, penultimate_batch)
grouped_instances.insert(0, last_batch)
else:
random.shuffle(grouped_instances)
return grouped_instances
def __adaptive_grouping(self, instances: List[IndexedInstance]):
batches = []
current_batch = []
current_lengths = {}
logger.debug("Creating adatpive groups")
for instance in instances:
current_batch.append(instance)
instance_lengths = instance.get_padding_lengths()
for key in instance_lengths:
current_lengths[key] = max(instance_lengths[key], current_lengths.get(key, -1))
big_o_memory_constant = self.text_trainer.get_padding_memory_scaling(current_lengths)
if (len(current_batch) * big_o_memory_constant > self.adaptive_memory_usage_constant
or len(current_batch) > self.maximum_batch_size):
current_batch.pop()
if logger.getEffectiveLevel() <= logging.DEBUG:
padding_lengths = IndexedDataset(current_batch).padding_lengths()
logger.debug("Batch size: %d; padding: %s", len(current_batch), padding_lengths)
batches.append(current_batch)
current_batch = [instance]
current_lengths = instance_lengths
if logger.getEffectiveLevel() <= logging.DEBUG:
padding_lengths = IndexedDataset(current_batch).padding_lengths()
logger.debug("Batch size: %d; padding: %s", len(current_batch), padding_lengths)
batches.append(current_batch)
return batches
| deep_qa-master | deep_qa/data/data_generator.py |
from collections import OrderedDict
from typing import List
from overrides import overrides
class WordSplitter:
"""
A ``WordSplitter`` splits strings into words. This is typically called a "tokenizer" in NLP,
but we need ``Tokenizer`` to refer to something else, so we're using ``WordSplitter`` here
instead.
"""
def split_words(self, sentence: str) -> List[str]:
raise NotImplementedError
class SimpleWordSplitter(WordSplitter):
"""
Does really simple tokenization. NLTK was too slow, so we wrote our own simple tokenizer
instead. This just does an initial split(), followed by some heuristic filtering of each
whitespace-delimited token, separating contractions and punctuation. We assume lower-cased,
reasonably well-formed English sentences as input.
"""
def __init__(self):
# These are certainly incomplete. But at least it's a start.
self.special_cases = set(['mr.', 'mrs.', 'etc.', 'e.g.', 'cf.', 'c.f.', 'eg.', 'al.'])
self.contractions = set(["n't", "'s", "'ve", "'re", "'ll", "'d", "'m"])
self.contractions |= set([x.replace("'", "’") for x in self.contractions])
self.ending_punctuation = set(['"', "'", '.', ',', ';', ')', ']', '}', ':', '!', '?', '%', '”', "’"])
self.beginning_punctuation = set(['"', "'", '(', '[', '{', '#', '$', '“', "‘"])
@overrides
def split_words(self, sentence: str) -> List[str]:
"""
Splits a sentence into word tokens. We handle four kinds of things: words with punctuation
        that should be ignored as a special case (Mr., Mrs., etc.), contractions/genitives (isn't,
        don't, Matt's), and beginning and ending punctuation ("antennagate", (parentheticals), and
        such).
The basic outline is to split on whitespace, then check each of these cases. First, we
strip off beginning punctuation, then strip off ending punctuation, then strip off
contractions. When we strip something off the beginning of a word, we can add it to the
list of tokens immediately. When we strip it off the end, we have to save it to be added
to after the word itself has been added. Before stripping off any part of a token, we
first check to be sure the token isn't in our list of special cases.
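        For example (this illustrates the special-case, contraction, and punctuation handling
        described above)::
            >>> SimpleWordSplitter().split_words("Mr. Smith doesn't like it (much).")
            ['mr.', 'smith', 'does', "n't", 'like', 'it', '(', 'much', ')', '.']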
"""
fields = sentence.lower().split()
tokens = []
for field in fields: # type: str
add_at_end = []
while self._can_split(field) and field[0] in self.beginning_punctuation:
tokens.append(field[0])
field = field[1:]
while self._can_split(field) and field[-1] in self.ending_punctuation:
add_at_end.insert(0, field[-1])
field = field[:-1]
# There could (rarely) be several contractions in a word, but we check contractions
            # one at a time, in an arbitrary order. If we've removed one, we need to check again to be
# sure there aren't others.
remove_contractions = True
while remove_contractions:
remove_contractions = False
for contraction in self.contractions:
if self._can_split(field) and field.endswith(contraction):
field = field[:-len(contraction)]
add_at_end.insert(0, contraction)
remove_contractions = True
if field:
tokens.append(field)
tokens.extend(add_at_end)
return tokens
def _can_split(self, token: str):
return token and token not in self.special_cases
class NltkWordSplitter(WordSplitter):
"""
A tokenizer that uses nltk's word_tokenize method.
I found that nltk is very slow, so I switched to using my own simple one, which is a good deal
faster. But I'm adding this one back so that there's consistency with older versions of the
code, if you really want it.
"""
def __init__(self):
pass
@overrides
def split_words(self, sentence: str) -> List[str]:
# Import is here because it's slow, and by default unnecessary.
from nltk.tokenize import word_tokenize
return word_tokenize(sentence.lower())
class SpacyWordSplitter(WordSplitter):
"""
A tokenizer that uses spaCy's Tokenizer, which is much faster than the others.
"""
def __init__(self):
        # Import is here because it's slow, and can be unnecessary.
import spacy
self.en_nlp = spacy.load('en')
@overrides
def split_words(self, sentence: str) -> List[str]:
return [str(token.lower_) for token in self.en_nlp.tokenizer(sentence)]
class NoOpWordSplitter(WordSplitter):
"""
This is a word splitter that does nothing. We're playing a little loose with python's dynamic
typing, breaking the typical WordSplitter API a bit and assuming that you've already split
``sentence`` into a list somehow, so you don't need to do anything else here. For example, the
``PreTokenizedTaggingInstance`` requires this word splitter, because it reads in pre-tokenized
data from a file.
"""
@overrides
def __init__(self):
pass
@overrides
def split_words(self, sentence: str) -> List[str]:
assert isinstance(sentence, list), "This splitter is only meant to be used for pre-split text"
return sentence
word_splitters = OrderedDict() # pylint: disable=invalid-name
word_splitters['simple'] = SimpleWordSplitter
word_splitters['nltk'] = NltkWordSplitter
word_splitters['spacy'] = SpacyWordSplitter
word_splitters['no_op'] = NoOpWordSplitter
| deep_qa-master | deep_qa/data/tokenizers/word_splitter.py |
from collections import OrderedDict
from .character_tokenizer import CharacterTokenizer
from .word_and_character_tokenizer import WordAndCharacterTokenizer
from .word_tokenizer import WordTokenizer
# The first item added here will be used as the default in some cases.
tokenizers = OrderedDict() # pylint: disable=invalid-name
tokenizers['words'] = WordTokenizer
tokenizers['characters'] = CharacterTokenizer
tokenizers['words and characters'] = WordAndCharacterTokenizer
| deep_qa-master | deep_qa/data/tokenizers/__init__.py |
from typing import List
from .word_splitter import word_splitters
from .word_stemmer import word_stemmers
from .word_filter import word_filters
from ...common.params import Params
class WordProcessor:
"""
A WordProcessor handles the splitting of strings into words (with the use of a WordSplitter) as well as any
desired post-processing (e.g., stemming, filtering, etc.)
Parameters
----------
word_splitter: str, default="simple"
The string name of the ``WordSplitter`` of choice (see the options at the bottom of
``word_splitter.py``).
word_filter: str, default="pass_through"
The name of the ``WordFilter`` to use (see the options at the bottom of
``word_filter.py``).
word_stemmer: str, default="pass_through"
The name of the ``WordStemmer`` to use (see the options at the bottom of
``word_stemmer.py``).
"""
def __init__(self, params: Params):
word_splitter_choice = params.pop_choice('word_splitter', list(word_splitters.keys()),
default_to_first_choice=True)
self.word_splitter = word_splitters[word_splitter_choice]()
word_filter_choice = params.pop_choice('word_filter', list(word_filters.keys()),
default_to_first_choice=True)
self.word_filter = word_filters[word_filter_choice]()
word_stemmer_choice = params.pop_choice('word_stemmer', list(word_stemmers.keys()),
default_to_first_choice=True)
self.word_stemmer = word_stemmers[word_stemmer_choice]()
params.assert_empty("WordProcessor")
def get_tokens(self, sentence: str) -> List[str]:
"""
Does whatever processing is required to convert a string of text into a sequence of tokens.
At a minimum, this uses a ``WordSplitter`` to split words into text. It may also do
stemming or stopword removal, depending on the parameters given to the constructor.
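        With the default processors (simple splitting, no filtering, no stemming), the behavior
        looks roughly like this::
            >>> processor = WordProcessor(Params({}))
            >>> processor.get_tokens("Dogs don't fly.")
            ['dogs', 'do', "n't", 'fly', '.']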
"""
words = self.word_splitter.split_words(sentence)
filtered_words = self.word_filter.filter_words(words)
stemmed_words = [self.word_stemmer.stem_word(word) for word in filtered_words]
return stemmed_words
| deep_qa-master | deep_qa/data/tokenizers/word_processor.py |
from typing import Callable, Dict, List, Tuple
from overrides import overrides
from keras.layers import Layer
from .tokenizer import Tokenizer
from .word_processor import WordProcessor
from ..data_indexer import DataIndexer
from ...common.params import Params
class WordTokenizer(Tokenizer):
"""
A ``WordTokenizer`` splits strings into word tokens.
There are several ways that you can split a string into words, so we rely on a
``WordProcessor`` to do that work for us. Note that we're using the word "tokenizer" here for
something different than is typical in NLP - we're referring here to how strings are
represented as numpy arrays, not the linguistic notion of splitting sentences into tokens.
Those things are handled in the ``WordProcessor``, which is a common dependency in several
``Tokenizers``.
Parameters
----------
processor: Dict[str, Any], default={}
Contains parameters for processing text strings into word tokens, including, e.g.,
splitting, stemming, and filtering words. See ``WordProcessor`` for a complete description
of available parameters.
"""
def __init__(self, params: Params):
self.word_processor = WordProcessor(params.pop('processor', {}))
super(WordTokenizer, self).__init__(params)
@overrides
def tokenize(self, text: str) -> List[str]:
return self.word_processor.get_tokens(text)
@overrides
def get_words_for_indexer(self, text: str) -> Dict[str, List[str]]:
return {'words': self.tokenize(text)}
@overrides
def index_text(self, text: str, data_indexer: DataIndexer) -> List:
return [data_indexer.get_word_index(word, namespace='words') for word in self.tokenize(text)]
@overrides
def embed_input(self,
input_layer: Layer,
embed_function: Callable[[Layer, str, str], Layer],
text_trainer,
embedding_suffix: str=""):
# pylint: disable=protected-access
return embed_function(input_layer,
embedding_name='words' + embedding_suffix,
vocab_name='words')
@overrides
def get_sentence_shape(self, sentence_length: int, word_length: int) -> Tuple[int]:
return (sentence_length,)
@overrides
def get_padding_lengths(self, sentence_length: int, word_length: int) -> Dict[str, int]:
return {'num_sentence_words': sentence_length}
| deep_qa-master | deep_qa/data/tokenizers/word_tokenizer.py |
from typing import Callable, Dict, List, Tuple
from keras.layers import Layer
from ..data_indexer import DataIndexer
from ...common.params import Params
class Tokenizer:
"""
A Tokenizer splits strings into sequences of tokens that can be used in a model. The "tokens"
here could be words, characters, or words and characters. The Tokenizer object handles various
things involved with this conversion, including getting a list of tokens for pre-computing a
vocabulary, getting the shape of a word sequence in a model, etc. The Tokenizer needs to
handle these things because the tokenization you do could affect the shape of word sequence
tensors in the model (e.g., a sentence could have shape (num_words,), (num_characters,), or
(num_words, num_characters)).
"""
def __init__(self, params: Params):
# This class does not take any parameters, but for consistency in the API we take a params
# dict as an argument.
params.assert_empty("Tokenizer")
def get_custom_objects(self) -> Dict[str, 'Layer']: # pylint: disable=no-self-use
"""
If you use any custom ``Layers`` in your ``embed_input`` method, you need to return them
here, so that the ``TextTrainer`` can correctly load models.
"""
return {}
def tokenize(self, text: str) -> List[str]:
"""
Actually splits the string into a sequence of tokens. Note that this will only give you
top-level tokenization! If you're using a word-and-character tokenizer, for instance, this
will only return the word tokenization.
"""
raise NotImplementedError
def get_words_for_indexer(self, text: str) -> Dict[str, List[str]]:
"""
The DataIndexer needs to assign indices to whatever strings we see in the training data
(possibly doing some frequency filtering and using an OOV token). This method takes some
text and returns whatever the DataIndexer would be asked to index from that text. Note
that this returns a dictionary of token lists keyed by namespace. Typically, the key would
be either 'words' or 'characters'. An example for indexing the string 'the' might be
{'words': ['the'], 'characters': ['t', 'h', 'e']}, if you are indexing both words and
characters.
"""
raise NotImplementedError
def index_text(self,
text: str,
data_indexer: DataIndexer) -> List:
"""
This method actually converts some text into an indexed list. This could be a list of
integers (for either word tokens or characters), or it could be a list of arrays (for word
tokens combined with characters), or something else.
"""
raise NotImplementedError
def embed_input(self,
input_layer: Layer,
embed_function: Callable[[Layer, str, str], Layer],
text_trainer,
embedding_suffix: str=''):
"""
Applies embedding layers to the input_layer. See :func:`TextTrainer._embed_input
<deep_qa.training.text_trainer.TextTrainer._embed_input>` for a more detailed comment on
what this method does.
Parameters
----------
input_layer: Keras ``Input()`` layer
The layer to embed.
embed_function: Callable[['Layer', str, str], 'Tensor']
This should be the __get_embedded_input method from your instantiated ``TextTrainer``.
This function actually applies an ``Embedding`` layer (and maybe also a projection and
dropout) to the input layer.
text_trainer: TextTrainer
Simple ``Tokenizers`` will just need to use the ``embed_function`` that gets passed as
a parameter here, but complex ``Tokenizers`` might need more than just an embedding
function. So that you can get an encoder or other things from the ``TextTrainer`` here
if you need them, we take this object as a parameter.
embedding_suffix: str, optional (default="")
A suffix to add to embedding keys that we use, so that, e.g., you could specify several
different word embedding matrices, for whatever reason.
"""
raise NotImplementedError
def get_sentence_shape(self, sentence_length: int, word_length: int) -> Tuple[int]:
"""
If we have a text sequence of length `sentence_length`, what shape would that correspond to
with this encoding? For words or characters only, this would just be (sentence_length,).
For an encoding that contains both words and characters, it might be (sentence_length,
word_length).
"""
raise NotImplementedError
def get_padding_lengths(self, sentence_length: int, word_length: int) -> Dict[str, int]:
"""
When dealing with padding in TextTrainer, TextInstances need to know what to pad and how
much. This function takes a potential max sentence length and word length, and returns a
`lengths` dictionary containing keys for the padding that is applicable to this encoding.
"""
raise NotImplementedError
def char_span_to_token_span(self,
sentence: str,
span: Tuple[int, int],
slack: int=3) -> Tuple[int, int]:
"""
Converts a character span from a sentence into the corresponding token span in the
tokenized version of the sentence. If you pass in a character span that does not
correspond to complete tokens in the tokenized version, we'll do our best, but the behavior
is officially undefined.
The basic outline of this method is to find the token that starts the same number of
characters into the sentence as the given character span. We try to handle a bit of error
in the tokenization by checking `slack` tokens in either direction from that initial
estimate.
The returned ``(begin, end)`` indices are `inclusive` for ``begin``, and `exclusive` for
``end``. So, for example, ``(2, 2)`` is an empty span, ``(2, 3)`` is the one-word span
beginning at token index 2, and so on.
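        For example (assuming ``tokenizer`` is a ``WordTokenizer`` with the default word
        splitter)::
            >>> sentence = "The dog ate it."
            >>> sentence[4:11]
            'dog ate'
            >>> tokenizer.char_span_to_token_span(sentence, (4, 11))
            (1, 3)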
"""
# First we'll tokenize the span and the sentence, so we can count tokens and check for
# matches.
span_chars = sentence[span[0]:span[1]]
tokenized_span = self.tokenize(span_chars)
tokenized_sentence = self.tokenize(sentence)
# Then we'll find what we think is the first token in the span
chars_seen = 0
index = 0
while index < len(tokenized_sentence) and chars_seen < span[0]:
chars_seen += len(tokenized_sentence[index]) + 1
index += 1
# index is now the span start index. Is it a match?
if self._spans_match(tokenized_sentence, tokenized_span, index):
return (index, index + len(tokenized_span))
for i in range(1, slack + 1):
if self._spans_match(tokenized_sentence, tokenized_span, index + i):
                return (index + i, index + i + len(tokenized_span))
if self._spans_match(tokenized_sentence, tokenized_span, index - i):
return (index - i, index - i + len(tokenized_span))
# No match; we'll just return our best guess.
return (index, index + len(tokenized_span))
@staticmethod
def _spans_match(sentence_tokens: List[str], span_tokens: List[str], index: int) -> bool:
if index < 0 or index >= len(sentence_tokens):
return False
if sentence_tokens[index] == span_tokens[0]:
span_index = 1
while (span_index < len(span_tokens) and
sentence_tokens[index + span_index] == span_tokens[span_index]):
span_index += 1
if span_index == len(span_tokens):
return True
return False
| deep_qa-master | deep_qa/data/tokenizers/tokenizer.py |
from collections import OrderedDict
from nltk.stem import PorterStemmer as NltkPorterStemmer
from overrides import overrides
class WordStemmer:
"""
A ``WordStemmer`` lemmatizes words. This means that we map words to their root form, so that,
e.g., "have", "has", and "had" all have the same internal representation.
You should think carefully about whether and how much stemming you want in your model. Kind of
the whole point of using word embeddings is so that you don't have to do this, but in a highly
inflected language, or in a low-data setting, you might need it anyway. The default
    ``WordStemmer`` does nothing, just returning the word token as-is.
"""
def stem_word(self, word: str) -> str:
"""Converts a word to its lemma"""
raise NotImplementedError
class PassThroughWordStemmer(WordStemmer):
"""
Does not stem words; it's a no-op. This is the default word stemmer.
"""
@overrides
def stem_word(self, word: str) -> str:
return word
class PorterStemmer(WordStemmer):
"""
Uses NLTK's PorterStemmer to stem words.
"""
def __init__(self):
self.stemmer = NltkPorterStemmer()
@overrides
def stem_word(self, word: str) -> str:
return self.stemmer.stem(word)
word_stemmers = OrderedDict() # pylint: disable=invalid-name
word_stemmers['pass_through'] = PassThroughWordStemmer
word_stemmers['porter'] = PorterStemmer
| deep_qa-master | deep_qa/data/tokenizers/word_stemmer.py |
from typing import Any, Callable, Dict, List, Tuple
from overrides import overrides
from keras import backend as K
from keras.layers import Concatenate, Layer
from .tokenizer import Tokenizer
from .word_processor import WordProcessor
from ..data_indexer import DataIndexer
from ...layers.backend import CollapseToBatch
from ...layers.backend import ExpandFromBatch
from ...layers.wrappers import EncoderWrapper
from ...layers import VectorMatrixSplit
from ...common.params import Params
from ...common.util import clean_layer_name
class WordAndCharacterTokenizer(Tokenizer):
"""
A ``WordAndCharacterTokenizer`` first splits strings into words, then splits those words into
characters, and returns a representation that contains `both` a word index and a sequence of
    character indices for each word. See the documentation for ``WordTokenizer`` for a note about
naming, and the typical notion of "tokenization" in NLP.
Notes
-----
In ``embed_input``, this ``Tokenizer`` uses an encoder to get a character-level word embedding,
which then gets concatenated with a standard word embedding from an embedding matrix. To
specify the encoder to use for this character-level word embedding, use the ``"word"`` key in
the ``encoder`` parameter to your model (which should be a ``TextTrainer`` subclass - see the
documentation there for some more info). If you do not give a ``"word"`` key in the
``encoder`` dict, we'll create a new encoder using the ``"default"`` parameters.
"""
def __init__(self, params: Params):
self.word_processor = WordProcessor(params.pop('processor', {}))
super(WordAndCharacterTokenizer, self).__init__(params)
@overrides
def tokenize(self, text: str) -> List[str]:
return self.word_processor.get_tokens(text)
@overrides
def get_words_for_indexer(self, text: str) -> Dict[str, List[str]]:
words = self.tokenize(text)
characters = [char for word in words for char in word]
return {'words': words, 'characters': characters}
@overrides
def index_text(self, text: str, data_indexer: DataIndexer) -> List:
words = self.tokenize(text)
arrays = []
for word in words:
word_index = data_indexer.get_word_index(word, namespace='words')
            # TODO(matt): It'd be nice to keep the capitalization of the word in the character
# representation. Doing that would require pretty fancy logic here, though.
char_indices = [data_indexer.get_word_index(char, namespace='characters') for char in word]
arrays.append([word_index] + char_indices)
return arrays
@overrides
def embed_input(self,
input_layer: Layer,
embed_function: Callable[[Layer, str, str], Layer],
text_trainer,
embedding_suffix: str=""):
"""
A combined word-and-characters representation requires some fancy footwork to do the
embedding properly.
This method assumes the input shape is (..., sentence_length, word_length + 1), where the
first integer for each word in the tensor is the word index, and the remaining word_length
entries is the character sequence. We'll first split this into two tensors, one of shape
(..., sentence_length), and one of shape (..., sentence_length, word_length), where the
first is the word sequence, and the second is the character sequence for each word. We'll
pass the word sequence through an embedding layer, as normal, and pass the character
sequence through a _separate_ embedding layer, then an encoder, to get a word vector out.
We'll then concatenate the two word vectors, returning a tensor of shape
(..., sentence_length, embedding_dim * 2).
"""
# pylint: disable=protected-access
# This is happening before any masking is done, so we don't need to worry about the
# mask_split_axis argument to VectorMatrixSplit.
words, characters = VectorMatrixSplit(split_axis=-1)(input_layer)
word_embedding = embed_function(words,
embedding_name='words' + embedding_suffix,
vocab_name='words')
character_embedding = embed_function(characters,
embedding_name='characters' + embedding_suffix,
vocab_name='characters')
# A note about masking here: we care about the character masks when encoding a character
# sequence, so we need the mask to be passed to the character encoder correctly. However,
# we _don't_ care here about whether the whole word will be masked, as the word_embedding
# will carry that information. Because of the way `Concatenate` handles masks, if you've
# done something crazy where you have a word index but your character indices are all zero,
# you will get a 0 in the mask for that word at the end of this. But assuming you're using
# this correctly, you should only get a 0 in the character-level mask in the same places
# that you have 0s in the word-level mask, so `Concatenate` further below will do the right
# thing.
# character_embedding has shape `(batch_size, ..., num_words, word_length,
# embedding_dim)`, but our encoder expects a tensor with shape `(batch_size,
# word_length, embedding_dim)` to the word encoder. Typically, we would use Keras'
# TimeDistributed layer to handle this. However, if we're using dynamic padding, we
# may not know `num_words` (which we're collapsing) or even `word_length` at runtime,
# which messes with TimeDistributed. In order to handle this correctly, we'll use
# CollapseToBatch and ExpandFromBatch instead of TimeDistributed. Those layers
# together do basically the same thing, collapsing all of the unwanted dimensions into
# the batch_size temporarily, but they can handle unknown runtime shapes.
dims_to_collapse = K.ndim(character_embedding) - 3
collapsed_character_embedding = CollapseToBatch(dims_to_collapse)(character_embedding)
word_encoder = text_trainer._get_encoder(name="word", fallback_behavior="use default params")
collapsed_word_encoding = word_encoder(collapsed_character_embedding)
word_encoding = ExpandFromBatch(dims_to_collapse)([collapsed_word_encoding, character_embedding])
# If you're embedding multiple inputs in your model, we need the final concatenation here
# to have a unique name each time. In order to get a unique name, we use the name of the
# input layer. Except sometimes Keras adds funny things to the ends of the input layer, so
# we'll strip those off.
name = 'combined_word_embedding_for_' + clean_layer_name(input_layer.name)
final_embedded_input = Concatenate(name=name)([word_embedding, word_encoding])
return final_embedded_input
@overrides
def get_sentence_shape(self, sentence_length: int, word_length: int=None) -> Tuple[int]:
return (sentence_length, word_length)
@overrides
def get_padding_lengths(self, sentence_length: int, word_length: int) -> Dict[str, int]:
return {'num_sentence_words': sentence_length, 'num_word_characters': word_length}
@overrides
def get_custom_objects(self) -> Dict[str, Any]:
return {
'CollapseToBatch': CollapseToBatch,
'EncoderWrapper': EncoderWrapper,
'ExpandFromBatch': ExpandFromBatch,
'VectorMatrixSplit': VectorMatrixSplit,
}
| deep_qa-master | deep_qa/data/tokenizers/word_and_character_tokenizer.py |
from typing import Callable, Dict, List, Tuple
from keras.layers import Layer
from overrides import overrides
from .tokenizer import Tokenizer
from ..data_indexer import DataIndexer
from ...common.params import Params
class CharacterTokenizer(Tokenizer):
"""
A CharacterTokenizer splits strings into character tokens.
Notes
-----
Note that in the code, we're still using the "words" namespace, and the "num_sentence_words"
padding key, instead of using a different "characters" namespace. This is so that the rest of
the code doesn't have to change as much to just use this different tokenizer. For example,
this is an issue when adding start and stop tokens - how is an ``Instance`` class supposed to
know if it should use the "words" or the "characters" namespace when getting a start token id?
If we just always use the "words" namespace for the top-level token namespace, it's not an
issue.
But confusingly, we'll still use the "characters" embedding key... At least the user-facing
parts all use ``characters``; it's only in writing tokenizer code that you need to be careful
about namespaces. TODO(matt): it probably makes sense to change the default namespace to
"tokens", and use that for both the words in ``WordTokenizer`` and the characters in
``CharacterTokenizer``, so the naming isn't so confusing.
"""
def __init__(self, params: Params):
super(CharacterTokenizer, self).__init__(params)
@overrides
def tokenize(self, text: str) -> List[str]:
return list(text)
@overrides
def get_words_for_indexer(self, text: str) -> Dict[str, List[str]]:
return {'words': self.tokenize(text)}
@overrides
def index_text(self,
text: str,
data_indexer: DataIndexer) -> List:
return [data_indexer.get_word_index(char) for char in self.tokenize(text)]
@overrides
def embed_input(self,
input_layer: Layer,
embed_function: Callable[[Layer, str, str], Layer],
text_trainer,
embedding_suffix: str=''):
return embed_function(input_layer,
embedding_name='characters' + embedding_suffix,
vocab_name='words')
@overrides
def get_sentence_shape(self, sentence_length: int, word_length: int) -> Tuple[int]:
return (sentence_length,)
@overrides
def get_padding_lengths(self, sentence_length: int, word_length: int) -> Dict[str, int]:
# Note that `sentence_length` here is the number of _characters_ in the sentence, because
# of how `self.index_text` works. And even though the name isn't great, we'll use
# `num_sentence_words` for the key to this, so that the rest of the code is simpler.
return {'num_sentence_words': sentence_length}
| deep_qa-master | deep_qa/data/tokenizers/character_tokenizer.py |
from collections import OrderedDict
from typing import List
from overrides import overrides
class WordFilter:
"""
A ``WordFilter`` removes words from a token list. Typically, this is for stopword removal,
though you could feasibly use it for more domain-specific removal if you want.
Word removal happens `before` stemming, so keep that in mind if you're designing a list of
words to be removed.
"""
def filter_words(self, words: List[str]) -> List[str]:
"""Filters words from the given word list"""
raise NotImplementedError
class PassThroughWordFilter(WordFilter):
"""
Does not filter words; it's a no-op. This is the default word filter.
"""
@overrides
def filter_words(self, words: List[str]) -> List[str]:
return words
class StopwordFilter(WordFilter):
"""
Uses a list of stopwords to filter.
"""
def __init__(self):
# TODO(matt): Allow this to be specified somehow, either with a file, or with parameters,
# or something.
self.stopwords = set(['I', 'a', 'aboard', 'about', 'above', 'accordance', 'according',
'across', 'after', 'against', 'along', 'alongside', 'also', 'am',
'amid', 'amidst', 'an', 'and', 'apart', 'are', 'around', 'as',
'aside', 'astride', 'at', 'atop', 'back', 'be', 'because', 'before',
'behind', 'below', 'beneath', 'beside', 'besides', 'between',
'beyond', 'but', 'by', 'concerning', 'do', 'down', 'due', 'during',
'either', 'except', 'exclusive', 'false', 'for', 'from', 'happen',
'he', 'her', 'hers', 'herself', 'him', 'himself', 'his', 'how',
'how many', 'how much', 'i', 'if', 'in', 'including', 'inside',
'instead', 'into', 'irrespective', 'is', 'it', 'its', 'itself',
'less', 'me', 'mine', 'minus', 'my', 'myself', 'neither', 'next',
'not', 'occur', 'of', 'off', 'on', 'onto', 'opposite', 'or', 'our',
'ours', 'ourselves', 'out', 'out of', 'outside', 'over', 'owing',
'per', 'prepatory', 'previous', 'prior', 'pursuant', 'regarding',
's', 'sans', 'she', 'subsequent', 'such', 'than', 'thanks', 'that',
'the', 'their', 'theirs', 'them', 'themselves', 'then', 'these',
'they', 'this', 'those', 'through', 'throughout', 'thru', 'till',
'to', 'together', 'top', 'toward', 'towards', 'true', 'under',
'underneath', 'unlike', 'until', 'up', 'upon', 'us', 'using',
'versus', 'via', 'was', 'we', 'were', 'what', 'when', 'where',
'which', 'who', 'why', 'will', 'with', 'within', 'without', 'you',
'your', 'yours', 'yourself', 'yourselves', ",", '.', ':', '!', ';',
"'", '"', '&', '$', '#', '@', '(', ')', '?'])
@overrides
def filter_words(self, words: List[str]) -> List[str]:
return [word for word in words if word not in self.stopwords]
word_filters = OrderedDict() # pylint: disable=invalid-name
word_filters['pass_through'] = PassThroughWordFilter
word_filters['stopwords'] = StopwordFilter
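# A minimal behavioral sketch; the word list is illustrative:
#
#     word_filter = word_filters['stopwords']()
#     word_filter.filter_words(['the', 'dog', 'is', 'happy'])   # -> ['dog', 'happy']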
| deep_qa-master | deep_qa/data/tokenizers/word_filter.py |
deep_qa-master | deep_qa/data/dataset_readers/__init__.py |
|
import argparse
from collections import Counter
import json
import logging
import os
import random
from typing import List, Tuple
import numpy
from tqdm import tqdm
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
random.seed(2157)
def main():
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
parser = argparse.ArgumentParser(description=("Parse a SQuAD v1.1 data file for "
"use by the SentenceSelectionInstance"))
parser.add_argument('input_filename', help='Input SQuAD json file.')
parser.add_argument('--output_directory',
help='Output directory. Make sure to end the string with a /')
parser.add_argument('--negatives', default='paragraph', help="See class docstring")
args = parser.parse_args()
reader = SquadSentenceSelectionReader(args.output_directory, args.negatives)
reader.read_file(args.input_filename)
class SquadSentenceSelectionReader():
"""
Parameters
----------
output_directory: str, optional (default=None)
If you want the output stored somewhere other than in a ``processed/`` subdirectory next to
the input, you can override the default with this parameter.
negative_sentence_selection: str, optional (default="paragraph")
A comma-separated list of methods to use to generate negative sentences in the data.
        There are five options here:
(1) "paragraph", which means to use as negative sentences all other sentences in the same
paragraph as the correct answer sentence.
(2) "random-[int]", which means to randomly select [int] sentences from all SQuAD sentences
to use as negative sentences.
(3) "pad-to-[int]", which means to randomly select sentences from all SQuAD sentences until
there are a total of [int] sentences. This will not remove any previously selected
sentences if you already have more than [int].
(4) "question", which means to use as a negative sentence the `question` itself.
(5) "questions-random-[int]", which means to select [int] random `questions` from SQuAD to
use as negative sentences (this could include the question corresponding to the
example; we don't filter out that case).
We will process these options in order, so the "pad-to-[int]" option mostly only makes
sense as the last option.
"""
def __init__(self, output_directory: str=None, negative_sentence_selection: str="paragraph"):
self.output_directory = output_directory
self.negative_sentence_selection_methods = negative_sentence_selection.split(",")
# Initializing some data structures here that will be useful when reading a file.
# Maps sentence strings to sentence indices
self.sentence_to_id = {}
# Maps sentence indices to sentence strings
self.id_to_sentence = {}
# Maps paragraph ids to lists of contained sentence ids
self.paragraph_sentences = {}
# Maps sentence ids to the containing paragraph id.
self.sentence_paragraph_map = {}
# Maps question strings to question indices
self.question_to_id = {}
# Maps question indices to question strings
self.id_to_question = {}
def _clear_state(self):
self.sentence_to_id.clear()
self.id_to_sentence.clear()
self.paragraph_sentences.clear()
self.sentence_paragraph_map.clear()
self.question_to_id.clear()
self.id_to_question.clear()
def _get_sentence_choices(self, question_id: int, answer_id: int) -> Tuple[List[str], int]:
# Because sentences and questions have different indices, we need this to hold tuples of
# ("sentence", id) or ("question", id), instead of just single ids.
negative_sentences = set()
for selection_method in self.negative_sentence_selection_methods:
if selection_method == 'paragraph':
paragraph_id = self.sentence_paragraph_map[answer_id]
paragraph_sentences = self.paragraph_sentences[paragraph_id]
negative_sentences.update(("sentence", sentence_id)
for sentence_id in paragraph_sentences
if sentence_id != answer_id)
elif selection_method.startswith("random-"):
num_to_pick = int(selection_method.split('-')[1])
num_sentences = len(self.sentence_to_id)
# We'll ignore here the small probability that we pick `answer_id`, or a
# sentence we've chosen previously.
selected_ids = numpy.random.choice(num_sentences, (num_to_pick,), replace=False)
negative_sentences.update(("sentence", sentence_id)
for sentence_id in selected_ids
if sentence_id != answer_id)
elif selection_method.startswith("pad-to-"):
desired_num_sentences = int(selection_method.split('-')[2])
# Because we want to pad to a specific number of sentences, we'll do the choice
# logic in a loop, to be sure we actually get to the right number.
while desired_num_sentences > len(negative_sentences):
num_to_pick = desired_num_sentences - len(negative_sentences)
num_sentences = len(self.sentence_to_id)
if num_to_pick > num_sentences:
raise RuntimeError("Not enough sentences to pick from")
selected_ids = numpy.random.choice(num_sentences, (num_to_pick,), replace=False)
negative_sentences.update(("sentence", sentence_id)
for sentence_id in selected_ids
if sentence_id != answer_id)
elif selection_method == "question":
negative_sentences.add(("question", question_id))
elif selection_method.startswith("questions-random-"):
num_to_pick = int(selection_method.split('-')[2])
num_questions = len(self.question_to_id)
# We'll ignore here the small probability that we pick `question_id`, or a
# question we've chosen previously.
selected_ids = numpy.random.choice(num_questions, (num_to_pick,), replace=False)
negative_sentences.update(("question", q_id) for q_id in selected_ids)
else:
raise RuntimeError("Unrecognized selection method:", selection_method)
choices = list(negative_sentences) + [("sentence", answer_id)]
random.shuffle(choices)
correct_choice = choices.index(("sentence", answer_id))
sentence_choices = []
for sentence_type, index in choices:
if sentence_type == "sentence":
sentence_choices.append(self.id_to_sentence[index])
else:
sentence_choices.append(self.id_to_question[index])
return sentence_choices, correct_choice
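        # For example, with only the "paragraph" selection method and an answer sentence whose
        # paragraph contains three other sentences, this returns those four sentence strings in
        # shuffled order, together with the index of the correct one.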
def read_file(self, input_filepath: str):
# Import is here, since it isn't necessary by default.
import nltk
self._clear_state()
# Holds tuples of (question_text, answer_sentence_id)
questions = []
logger.info("Reading file at %s", input_filepath)
with open(input_filepath) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json['data']
logger.info("Reading the dataset")
for article in tqdm(dataset):
for paragraph in article['paragraphs']:
paragraph_id = len(self.paragraph_sentences)
self.paragraph_sentences[paragraph_id] = []
context_article = paragraph["context"]
# replace newlines in the context article
cleaned_context_article = context_article.replace("\n", "")
# Split the cleaned_context_article into a list of sentences.
sentences = nltk.sent_tokenize(cleaned_context_article)
# Make a dict from span indices to sentence. The end span is
# exclusive, and the start span is inclusive.
span_to_sentence_index = {}
current_index = 0
for sentence in sentences:
sentence_id = len(self.sentence_to_id)
self.sentence_to_id[sentence] = sentence_id
self.id_to_sentence[sentence_id] = sentence
self.sentence_paragraph_map[sentence_id] = paragraph_id
self.paragraph_sentences[paragraph_id].append(sentence_id)
sentence_len = len(sentence)
# Need to add one to the end index to account for the
# trailing space after punctuation that is stripped by NLTK.
span_to_sentence_index[(current_index,
current_index + sentence_len + 1)] = sentence
current_index += sentence_len + 1
for question_answer in paragraph['qas']:
question_text = question_answer["question"].strip()
question_id = len(self.question_to_id)
self.question_to_id[question_text] = question_id
self.id_to_question[question_id] = question_text
# There may be multiple answer annotations, so pick the one
# that occurs the most.
candidate_answer_start_indices = Counter()
for answer in question_answer["answers"]:
candidate_answer_start_indices[answer["answer_start"]] += 1
answer_start_index, _ = candidate_answer_start_indices.most_common(1)[0]
# Get the full sentence corresponding to the answer.
answer_sentence = None
for span_tuple in span_to_sentence_index:
start_span, end_span = span_tuple
if start_span <= answer_start_index and answer_start_index < end_span:
answer_sentence = span_to_sentence_index[span_tuple]
break
else: # no break
raise ValueError("Index of answer start was out of bounds. "
"This should never happen, please raise "
"an issue on GitHub.")
# Now that we have the string of the full sentence, we need to
# search for it in our shuffled list to get the index.
answer_id = self.sentence_to_id[answer_sentence]
# Now we can make the string representation and add this
# to the list of processed_rows.
questions.append((question_id, answer_id))
processed_rows = []
logger.info("Processing questions into training instances")
for question_id, answer_id in tqdm(questions):
sentence_choices, correct_choice = self._get_sentence_choices(question_id, answer_id)
question_text = self.id_to_question[question_id]
row_text = (question_text + "\t" + '###'.join(sentence_choices) +
"\t" + str(correct_choice))
processed_rows.append(row_text)
logger.info("Writing output file")
input_directory, input_filename = os.path.split(input_filepath)
output_filename = "sentence_selection_" + input_filename + ".tsv"
if self.output_directory:
# Use a custom output directory.
output_filepath = os.path.join(self.output_directory, output_filename)
else:
# Make a subdirectory of the input_directory called "processed",
# and write the file there
if not os.path.exists(os.path.join(input_directory, "processed")):
os.makedirs(os.path.join(input_directory, "processed"))
output_filepath = os.path.join(input_directory, "processed",
output_filename)
with open(output_filepath, 'w') as file_handler:
for row in processed_rows:
file_handler.write("{}\n".format(row))
logger.info("Wrote output to %s", output_filepath)
return output_filepath
if __name__ == '__main__':
main()
| deep_qa-master | deep_qa/data/dataset_readers/squad_sentence_selection_reader.py |
from collections import OrderedDict
from .entailment.snli_dataset import SnliDataset
from .language_modeling.language_modeling_dataset import LanguageModelingDataset
from .dataset import Dataset, TextDataset, IndexedDataset
concrete_datasets = OrderedDict() # pylint: disable=invalid-name
concrete_datasets["text"] = TextDataset
concrete_datasets["language_modeling"] = LanguageModelingDataset
concrete_datasets["snli"] = SnliDataset
| deep_qa-master | deep_qa/data/datasets/__init__.py |
import codecs
import itertools
import logging
from typing import Dict, List
import numpy
import tqdm
from ...common.util import add_noise_to_dict_values
from ...common.params import Params
from ..data_indexer import DataIndexer
from ..instances.instance import Instance, TextInstance, IndexedInstance
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def log_label_counts(instances: List[TextInstance]):
labels = [(x.label, x) for x in instances]
labels.sort(key=lambda x: str(x[0]))
label_counts = [(label, len([x for x in group]))
for label, group in itertools.groupby(labels, lambda x: x[0])]
label_count_str = str(label_counts)
if len(label_count_str) > 100:
label_count_str = label_count_str[:100] + '...'
logger.info("Finished reading dataset; label counts: %s", label_count_str)
class Dataset:
"""
A collection of Instances.
    This base class has general methods that apply to all collections of Instances; basically, these
    are methods that operate on the collection as a whole, like merging and truncating.
"""
def __init__(self, instances: List[Instance]):
"""
A Dataset just takes a list of instances in its constructor. It's important that all
subclasses have an identical constructor to this (though possibly with different Instance
types). If you change the constructor, you also have to override all methods in this base
class that call the constructor, such as `merge()` and `truncate()`.
"""
self.instances = instances
def merge(self, other: 'Dataset') -> 'Dataset':
"""
        Combine two datasets. If you try to merge two Datasets of the same subtype, you will
end up with a Dataset of the same type (i.e., calling IndexedDataset.merge() with another
IndexedDataset will return an IndexedDataset). If the types differ, this method currently
raises an error, because the underlying Instance objects are not currently type compatible.
"""
if type(self) is type(other):
return self.__class__(self.instances + other.instances)
else:
raise RuntimeError("Cannot merge datasets with different types")
def truncate(self, max_instances: int):
"""
If there are more instances than `max_instances` in this dataset, returns a new dataset
        with the first `max_instances` instances. If there are fewer than `max_instances`
already, we just return self.
"""
if len(self.instances) <= max_instances:
return self
new_instances = [i for i in self.instances]
return self.__class__(new_instances[:max_instances])
class TextDataset(Dataset):
"""
A Dataset of TextInstances, with a few helper methods.
TextInstances aren't useful for much with Keras until they've been indexed. So this class just
has methods to read in data from a file and convert it into other kinds of Datasets.
"""
def __init__(self, instances: List[TextInstance], params: Params=None):
if params is not None:
params.assert_empty("TextDataset")
super(TextDataset, self).__init__(instances)
def to_indexed_dataset(self, data_indexer: DataIndexer) -> 'IndexedDataset':
'''
Converts the Dataset into an IndexedDataset, given a DataIndexer.
'''
indexed_instances = [instance.to_indexed_instance(data_indexer) for instance in tqdm.tqdm(self.instances)]
return IndexedDataset(indexed_instances)
@staticmethod
def read_from_file(filename: str, instance_class, params: Params=None):
with codecs.open(filename, 'r', 'utf-8') as input_file:
lines = [x.strip() for x in tqdm.tqdm(input_file.readlines())]
return TextDataset.read_from_lines(lines, instance_class, params)
@staticmethod
def read_from_lines(lines: List[str], instance_class, params: Params=None):
instances = [instance_class.read_from_line(x) for x in lines]
log_label_counts(instances)
return TextDataset(instances, params)
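# A minimal usage sketch; the file path and the ``data_indexer`` object are assumed to exist:
#
#     from deep_qa.data.instances.entailment import SnliInstance
#     dataset = TextDataset.read_from_file("/path/to/snli_data.tsv", SnliInstance)
#     indexed_dataset = dataset.to_indexed_dataset(data_indexer)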
class IndexedDataset(Dataset):
"""
A Dataset of IndexedInstances, with some helper methods.
IndexedInstances have text sequences replaced with lists of word indices, and are thus able to
be padded to consistent lengths and converted to training inputs.
"""
def __init__(self, instances: List[IndexedInstance]):
super(IndexedDataset, self).__init__(instances)
def sort_by_padding(self, sorting_keys: List[str], padding_noise: float=0.0):
"""
Sorts the ``Instances`` in this ``Dataset`` by their padding lengths, using the keys in
``sorting_keys`` (in the order in which they are provided).
"""
instances_with_lengths = []
for instance in self.instances:
padding_lengths = instance.get_padding_lengths()
if padding_noise > 0.0:
padding_lengths = add_noise_to_dict_values(padding_lengths, padding_noise)
instance_with_lengths = [padding_lengths[key] for key in sorting_keys] + [instance]
instances_with_lengths.append(instance_with_lengths)
instances_with_lengths.sort(key=lambda x: x[:-1])
self.instances = [instance_with_lengths[-1] for instance_with_lengths in instances_with_lengths]
def padding_lengths(self):
padding_lengths = {}
lengths = [instance.get_padding_lengths() for instance in self.instances]
if not lengths:
return padding_lengths
for key in lengths[0]:
padding_lengths[key] = max(x[key] if key in x else 0 for x in lengths)
return padding_lengths
def pad_instances(self, padding_lengths: Dict[str, int]=None, verbose: bool=True):
"""
Makes all of the ``IndexedInstances`` in the dataset have the same length by padding them.
This ``Dataset`` object doesn't know what things there are in the ``Instance`` to pad, but
the ``Instances`` do, and so does the model that called us, passing in a
``padding_lengths`` dictionary. The keys in that dictionary must match the lengths that
the ``Instance`` knows about.
Given that, this method does two things: (1) it asks each of the ``Instances`` what their
padding lengths are, and takes a max (using :func:`~IndexedDataset.padding_lengths()`). It
then reconciles those values with the ``padding_lengths`` we were passed as an argument to
this method, and pads the instances with :func:`IndexedInstance.pad()`. If
``padding_lengths`` has a particular key specified with a value, that value takes
precedence over whatever we computed in our data. TODO(matt): with dynamic padding, we
should probably have this be a max padding length, not a hard setting, but that requires
some API changes.
This method `modifies` the current object, it does not return a new ``IndexedDataset``.
Parameters
----------
padding_lengths: Dict[str, int]
If a key is present in this dictionary with a non-`None` value, we will pad to that
length instead of the length calculated from the data. This lets you, e.g., set a
maximum value for sentence length, or word length, if you want to throw out long
sequences.
verbose: bool, optional (default=True)
Should we output logging information when we're doing this padding? If the dataset is
large, this is nice to have, because padding a large dataset could take a long time.
But if you're doing this inside of a data generator, having all of this output per
batch is a bit obnoxious.
"""
# First we need to decide _how much_ to pad. To do that, we find the max length for all
# relevant padding decisions from the instances themselves. Then we check whether we were
# given a max length for a particular dimension. If we were, we use that instead of the
# instance-based one.
if verbose:
logger.info("Padding dataset of size %d to lengths %s", len(self.instances), str(padding_lengths))
logger.info("Getting max lengths from instances")
instance_padding_lengths = self.padding_lengths()
if verbose:
logger.info("Instance max lengths: %s", str(instance_padding_lengths))
lengths_to_use = {}
for key in instance_padding_lengths:
            if padding_lengths and padding_lengths.get(key) is not None:
lengths_to_use[key] = padding_lengths[key]
else:
lengths_to_use[key] = instance_padding_lengths[key]
if verbose:
logger.info("Now actually padding instances to length: %s", str(lengths_to_use))
for instance in tqdm.tqdm(self.instances):
instance.pad(lengths_to_use)
else:
for instance in self.instances:
instance.pad(lengths_to_use)
def as_training_data(self):
"""
Takes each ``IndexedInstance`` and converts it into (inputs, labels), according to the
Instance's as_training_data() method. Both the inputs and the labels are numpy arrays.
Note that if the ``Instances`` return tuples for their inputs, we convert the list of
tuples into a tuple of lists, before converting everything to numpy arrays.
"""
inputs = []
labels = []
instances = self.instances
for instance in instances:
instance_inputs, label = instance.as_training_data()
inputs.append(instance_inputs)
labels.append(label)
if isinstance(inputs[0], tuple):
inputs = [numpy.asarray(x) for x in zip(*inputs)]
else:
inputs = numpy.asarray(inputs)
if isinstance(labels[0], tuple):
labels = [numpy.asarray(x) for x in zip(*labels)]
else:
labels = numpy.asarray(labels)
return inputs, labels
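# A minimal end-to-end sketch; ``indexed_dataset`` is assumed to be an IndexedDataset built
# elsewhere:
#
#     indexed_dataset.sort_by_padding(['num_sentence_words'], padding_noise=0.1)
#     indexed_dataset.pad_instances(indexed_dataset.padding_lengths())
#     inputs, labels = indexed_dataset.as_training_data()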
| deep_qa-master | deep_qa/data/datasets/dataset.py |
from typing import List
import json
from overrides import overrides
from ..dataset import TextDataset, log_label_counts
from ...instances import TextInstance
from ....common.params import Params
class SnliDataset(TextDataset):
def __init__(self, instances: List[TextInstance], params: Params=None):
super(SnliDataset, self).__init__(instances, params)
@staticmethod
@overrides
def read_from_file(filename: str, instance_class, params: Params=None):
instances = []
for line in open(filename, 'r'):
example = json.loads(line)
# TODO(mark) why does this not match snli? Fix.
label = example["gold_label"]
if label == "entailment":
label = "entails"
elif label == "contradiction":
label = "contradicts"
text = example["sentence1"]
hypothesis = example["sentence2"]
instances.append(instance_class(text, hypothesis, label))
log_label_counts(instances)
return SnliDataset(instances, params)
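# Each line of the input file is expected to be a JSON object in the SNLI format; the field
# values here are illustrative:
#
#     {"gold_label": "entailment", "sentence1": "A man is outside.", "sentence2": "A person is outdoors."}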
| deep_qa-master | deep_qa/data/datasets/entailment/snli_dataset.py |
deep_qa-master | deep_qa/data/datasets/entailment/__init__.py |
|
deep_qa-master | deep_qa/data/datasets/language_modeling/__init__.py |
|
from typing import List
from overrides import overrides
from ..dataset import TextDataset, log_label_counts
from ...instances import TextInstance
from ...instances.language_modeling import SentenceInstance
from ....common.params import Params
class LanguageModelingDataset(TextDataset):
def __init__(self, instances: List[TextInstance], params: Params=None):
# TODO(Mark): We are splitting on spaces below, so this won't end up being
        # the exact sequence length. This could be solved by passing the tokenizer
# to the dataset.
        self.sequence_length = params.pop("sequence_length", 20)
super(LanguageModelingDataset, self).__init__(instances)
@staticmethod
@overrides
def read_from_file(filename: str, instance_class, params: Params=None):
sequence_length = params.get("sequence_length", 20)
with open(filename, "r") as text_file:
text = text_file.readlines()
text = " ".join([x.replace("\n", " ").strip() for x in text]).split(" ")
instances = []
for index in range(0, len(text) - sequence_length, sequence_length):
word_sequence = " ".join(text[index: index + sequence_length])
instances.append(SentenceInstance(word_sequence))
log_label_counts(instances)
return LanguageModelingDataset(instances, params)
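# A small worked example of the chunking above: with sequence_length=3, the text
# "the cat sat on the mat ." yields the SentenceInstances "the cat sat" and "on the mat";
# the trailing "." is dropped because the range stops at len(text) - sequence_length.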
| deep_qa-master | deep_qa/data/datasets/language_modeling/language_modeling_dataset.py |
"""
This module contains the base ``Instance`` classes that concrete classes
inherit from. Specifically, there are three classes:
1. ``Instance``, that just exists as a base type with no functionality
2. ``TextInstance``, which adds a ``words()`` method and a method to convert
strings to indices using a DataIndexer.
3. ``IndexedInstance``, which is a ``TextInstance`` that has had all of its
strings converted into indices.
This class has methods to deal with padding (so that sequences all have the
same length) and converting an ``Instance`` into a set of Numpy arrays
suitable for use with Keras.
As this codebase is dealing mostly with textual question answering, pretty much
all of the concrete ``Instance`` types will have both a ``TextInstance`` and a
corresponding ``IndexedInstance``, which you can see in the individual files
for each ``Instance`` type.
"""
import itertools
from typing import Any, Callable, Dict, List
from ...common.params import Params
from ..tokenizers import tokenizers
from ..data_indexer import DataIndexer
class Instance:
"""
A data instance, used either for training a neural network or for testing one.
Parameters
----------
label : Any
Any kind of label that you might want to predict in a model. Could be a class label, a
tag sequence, a character span in a passage, etc.
index : int, optional
Used for matching instances with other data, such as background
sentences.
"""
def __init__(self, label, index: int=None):
self.label = label
self.index = index
class TextInstance(Instance):
"""
An ``Instance`` that has some attached text, typically either a sentence
or a logical form. This is called a ``TextInstance`` because the
individual tokens here are encoded as strings, and we can
get a list of strings out when we ask what words show up in the instance.
We use these kinds of instances to fit a ``DataIndexer`` (i.e., deciding
which words should be mapped to an unknown token); to use them in training
or testing, we need to first convert them into ``IndexedInstances``.
In order to actually convert text into some kind of indexed sequence,
    we rely on a ``Tokenizer``. There are several ``Tokenizer`` subclasses,
    which will let you use word token sequences, character sequences, and other
    options. By default we use word tokens. You can override this by setting
    the ``tokenizer`` class variable.
"""
tokenizer = tokenizers['words'](Params({}))
def __init__(self, label, index: int=None):
super(TextInstance, self).__init__(label, index)
def _words_from_text(self, text: str) -> Dict[str, List[str]]:
return self.tokenizer.get_words_for_indexer(text)
def _index_text(self, text: str, data_indexer: DataIndexer) -> List[int]:
return self.tokenizer.index_text(text, data_indexer)
def words(self) -> Dict[str, List[str]]:
"""
Returns a list of all of the words in this instance, contained in a
namespace dictionary.
This is mainly used for computing word counts when fitting a word
vocabulary on a dataset. The namespace dictionary allows you to have
several embedding matrices with different vocab sizes, e.g., for words
and for characters (in fact, words and characters are the only use
cases I can think of for now, but this allows you to do other more
crazy things if you want). You can call the namespaces whatever you
want, but if you want the ``DataIndexer`` to work correctly without
namespace arguments, you should use the key 'words' to represent word
tokens.
Returns
-------
namespace : Dictionary of {str: List[str]}
The ``str`` key refers to vocabularies, and the ``List[str]``
should contain the tokens in that vocabulary. For example, you
should use the key ``words`` to represent word tokens, and the
            corresponding value in the dictionary would be a list of all the
words in the instance.
"""
raise NotImplementedError
def to_indexed_instance(self, data_indexer: DataIndexer) -> 'IndexedInstance':
"""
Converts the words in this ``Instance`` into indices using
the ``DataIndexer``.
Parameters
----------
data_indexer : DataIndexer
``DataIndexer`` to use in converting the ``Instance`` to
an ``IndexedInstance``.
Returns
-------
indexed_instance : IndexedInstance
A ``TextInstance`` that has had all of its strings converted into
indices.
"""
raise NotImplementedError
@classmethod
def read_from_line(cls, line: str):
"""
Reads an instance of this type from a line.
Parameters
----------
line : str
A line from a data file.
Returns
-------
indexed_instance : IndexedInstance
A ``TextInstance`` that has had all of its strings converted into
indices.
Notes
-----
We throw a ``RuntimeError`` here instead of a ``NotImplementedError``,
because it's not expected that all subclasses will implement this.
"""
# pylint: disable=unused-argument
raise RuntimeError("%s instances can't be read from a line!" % str(cls))
class IndexedInstance(Instance):
"""
An indexed data instance has all word tokens replaced with word indices,
along with some kind of label, suitable for input to a Keras model. An
``IndexedInstance`` is created from an ``Instance`` using a
``DataIndexer``, and the indices here have no recoverable meaning without
the ``DataIndexer``.
For example, we might have the following ``Instance``:
- ``TrueFalseInstance('Jamie is nice, Holly is mean', True, 25)``
After being converted into an ``IndexedInstance``, we might have
the following:
- ``IndexedTrueFalseInstance([1, 6, 7, 1, 6, 8], True, 25)``
This would mean that ``"Jamie"`` and ``"Holly"`` were OOV to the
``DataIndexer``, and the other words were given indices.
"""
@classmethod
def empty_instance(cls):
"""
Returns an empty, unpadded instance of this class. Necessary for option
padding in multiple choice instances.
"""
raise NotImplementedError
def get_padding_lengths(self) -> Dict[str, int]:
"""
Returns the length of this instance in all dimensions that require padding.
Different kinds of instances have different fields that are padded, such as sentence
length, number of background sentences, number of options, etc.
Returns
-------
padding_lengths: Dict[str, int]
A dictionary mapping padding keys (like "num_sentence_words") to lengths.
"""
raise NotImplementedError
def pad(self, padding_lengths: Dict[str, int]):
"""
Add zero-padding to make each data example of equal length for use
in the neural network.
This modifies the current object.
Parameters
----------
padding_lengths: Dict[str, int]
In this dictionary, each ``str`` refers to a type of token (e.g.
``num_sentence_words``), and the corresponding ``int`` is the value. This dictionary
            must have the same keys as are returned by
            :func:`~IndexedInstance.get_padding_lengths()`. We will use these lengths to pad the
            instance in all of the necessary dimensions to the given lengths.
"""
raise NotImplementedError
def as_training_data(self):
"""
Convert this ``IndexedInstance`` to NumPy arrays suitable for use as
training data to Keras models.
Returns
-------
train_data : (inputs, label)
            The ``IndexedInstance`` as NumPy arrays to be used in Keras.
Note that ``inputs`` might itself be a complex tuple, depending
on the ``Instance`` type.
"""
raise NotImplementedError
@staticmethod
def _get_word_sequence_lengths(word_indices: List) -> Dict[str, int]:
"""
        Because ``Tokenizers`` can return complex data structures, we might
actually have several things to pad for a single word sequence. We
check for that and handle it in a single spot here. We return a
dictionary containing 'num_sentence_words', which is the number of
words in word_indices. If the word representations also contain
characters, the dictionary additionally contains a
'num_word_characters' key, with a value corresponding to the longest
word in the sequence.
"""
padding_lengths = {'num_sentence_words': len(word_indices)}
if len(word_indices) > 0 and not isinstance(word_indices[0], int):
if isinstance(word_indices[0], list):
padding_lengths['num_word_characters'] = max([len(word) for word in word_indices])
# There might someday be other cases we're missing here, but we'll punt for now.
return padding_lengths
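    # For example, a word-token sequence like [2, 5, 3] yields {'num_sentence_words': 3}, while a
    # character-level sequence like [[2, 5], [3, 1, 4]] yields
    # {'num_sentence_words': 2, 'num_word_characters': 3}.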
@staticmethod
def pad_word_sequence(word_sequence: List[int],
padding_lengths: Dict[str, int],
truncate_from_right: bool=True) -> List:
"""
Take a list of indices and pads them.
Parameters
----------
word_sequence : List of int
A list of word indices.
padding_lengths : Dict[str, int]
In this dictionary, each ``str`` refers to a type of token (e.g.
``num_sentence_words``), and the corresponding ``int`` is the value. This dictionary
            must have the same keys as are returned by
            :func:`~IndexedInstance.get_padding_lengths()`. We will use these lengths to pad the
            instance in all of the necessary dimensions to the given lengths.
truncate_from_right : bool, default=True
If truncating the indices is necessary, this parameter dictates whether we do so on the
left or right.
Returns
-------
padded_word_sequence : List of int
A padded list of word indices.
Notes
-----
The reason we truncate from the right by default is for cases that are questions, with long
set ups. We at least want to get the question encoded, which is always at the end, even if
we've lost much of the question set up. If you want to truncate from the other direction,
you can.
TODO(matt): we should probably switch the default to truncate from the left, and clear up
the naming here - it's easy to get confused about what "truncate from right" means.
"""
default_value = lambda: 0
if 'num_word_characters' in padding_lengths:
default_value = lambda: []
padded_word_sequence = IndexedInstance.pad_sequence_to_length(
word_sequence, padding_lengths['num_sentence_words'], default_value, truncate_from_right)
if 'num_word_characters' in padding_lengths:
desired_length = padding_lengths['num_word_characters']
longest_word = max(padded_word_sequence, key=len)
if desired_length > len(longest_word):
# since we want to pad to greater than the longest word, we add a
# "dummy word" to get the speed of itertools.zip_longest
padded_word_sequence.append([0]*desired_length)
# pad the list of lists to the longest sublist, appending 0's
words_padded_to_longest = list(zip(*itertools.zip_longest(*padded_word_sequence,
fillvalue=0)))
if desired_length > len(longest_word):
# now we remove the "dummy word" if we appended one.
words_padded_to_longest.pop()
            # Now we need to truncate each word to the desired character length. Characters are
            # always chopped off from the end of the word, regardless of the truncate_from_right
            # parameter (which applies to the word sequence, not to the characters within a word).
padded_word_sequence = [list(word[:desired_length])
for word in words_padded_to_longest]
return padded_word_sequence
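    # For example:
    #
    #     IndexedInstance.pad_word_sequence([2, 5, 3], {'num_sentence_words': 5})
    #     # -> [0, 0, 2, 5, 3]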
@staticmethod
def pad_sequence_to_length(sequence: List,
desired_length: int,
default_value: Callable[[], Any]=lambda: 0,
truncate_from_right: bool=True) -> List:
"""
Take a list of indices and pads them to the desired length.
Parameters
----------
word_sequence : List of int
A list of word indices.
desired_length : int
Maximum length of each sequence. Longer sequences
are truncated to this length, and shorter ones are padded to it.
default_value: Callable, default=lambda: 0
Callable that outputs a default value (of any type) to use as
padding values.
truncate_from_right : bool, default=True
If truncating the indices is necessary, this parameter dictates
whether we do so on the left or right.
Returns
-------
padded_word_sequence : List of int
A padded or truncated list of word indices.
Notes
-----
The reason we truncate from the right by default is for
cases that are questions, with long set ups. We at least want to get
the question encoded, which is always at the end, even if we've lost
much of the question set up. If you want to truncate from the other
direction, you can.
"""
if truncate_from_right:
truncated = sequence[-desired_length:]
else:
truncated = sequence[:desired_length]
if len(truncated) < desired_length:
# If the length of the truncated sequence is less than the desired
# length, we need to pad.
padding_sequence = [default_value()] * (desired_length - len(truncated))
if truncate_from_right:
# When we truncate from the right, we add zeroes to the front.
padding_sequence.extend(truncated)
return padding_sequence
else:
# When we do not truncate from the right, we add zeroes to the end.
truncated.extend(padding_sequence)
return truncated
return truncated
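# A few illustrative calls to the padding helper above:
#
#     IndexedInstance.pad_sequence_to_length([2, 5, 3], 5)
#     # -> [0, 0, 2, 5, 3]     (padding is added to the front by default)
#     IndexedInstance.pad_sequence_to_length([2, 5, 3, 7, 9, 4], 4)
#     # -> [3, 7, 9, 4]        (truncation keeps the end of the sequence by default)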
| deep_qa-master | deep_qa/data/instances/instance.py |
from .instance import Instance, TextInstance, IndexedInstance
| deep_qa-master | deep_qa/data/instances/__init__.py |
from .pretokenized_tagging_instance import PreTokenizedTaggingInstance
from .tagging_instance import TaggingInstance, IndexedTaggingInstance
concrete_instances = { # pylint: disable=invalid-name
'PreTokenizedTaggingInstance': PreTokenizedTaggingInstance,
}
| deep_qa-master | deep_qa/data/instances/sequence_tagging/__init__.py |
from typing import Dict, List, Any
import numpy
from overrides import overrides
from ..instance import TextInstance, IndexedInstance
from ...data_indexer import DataIndexer
class TaggingInstance(TextInstance):
"""
A ``TaggingInstance`` represents a passage of text and a tag sequence over that text.
There are some sticky issues with tokenization and how exactly the label is specified. For
example, if your label is a sequence of tags, that assumes a particular tokenization, which
interacts in a funny way with our tokenization code. This is a general superclass containing
common functionality for most simple sequence tagging tasks. The specifics of reading in data
from a file and converting that data into properly-indexed tag sequences is left to subclasses.
"""
def __init__(self, text: str, label: Any, index: int=None):
super(TaggingInstance, self).__init__(label, index)
self.text = text
def __str__(self):
return "TaggedSequenceInstance(" + self.text + ", " + str(self.label) + ")"
@overrides
def words(self) -> Dict[str, List[str]]:
words = self._words_from_text(self.text)
words['tags'] = self.tags_in_label()
return words
def tags_in_label(self):
"""
Returns all of the tag words in this instance, so that we can convert them into indices.
This is called in ``self.words()``. Not necessary if you have some pre-indexed labeling
scheme.
"""
raise NotImplementedError
def _index_label(self, label: Any, data_indexer: DataIndexer) -> List[int]:
"""
Index the labels. Since we don't know what form the label takes, we leave it to subclasses
to implement this method. If you need to convert tag names into indices, use the namespace
'tags' in the ``DataIndexer``.
"""
raise NotImplementedError
def to_indexed_instance(self, data_indexer: DataIndexer):
text_indices = self._index_text(self.text, data_indexer)
label_indices = self._index_label(self.label, data_indexer)
assert len(text_indices) == len(label_indices), "Tokenization is off somehow"
return IndexedTaggingInstance(text_indices, label_indices, self.index)
class IndexedTaggingInstance(IndexedInstance):
def __init__(self, text_indices: List[int], label: List[int], index: int=None):
super(IndexedTaggingInstance, self).__init__(label, index)
self.text_indices = text_indices
@classmethod
@overrides
def empty_instance(cls):
        return IndexedTaggingInstance([], label=None, index=None)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return self._get_word_sequence_lengths(self.text_indices)
@overrides
def pad(self, padding_lengths: Dict[str, int]):
self.text_indices = self.pad_word_sequence(self.text_indices, padding_lengths,
truncate_from_right=False)
self.label = self.pad_sequence_to_length(self.label,
desired_length=padding_lengths['num_sentence_words'],
default_value=lambda: self.label[0],
truncate_from_right=False)
@overrides
def as_training_data(self):
text_array = numpy.asarray(self.text_indices, dtype='int32')
label_array = numpy.asarray(self.label, dtype='int32')
return text_array, label_array
| deep_qa-master | deep_qa/data/instances/sequence_tagging/tagging_instance.py |
from typing import List
import numpy
from overrides import overrides
from .tagging_instance import TaggingInstance
from ...data_indexer import DataIndexer
class PreTokenizedTaggingInstance(TaggingInstance):
"""
This is a ``TaggingInstance`` where the text has been pre-tokenized. Thus the ``text`` member
variable here is actually a ``List[str]``, instead of a ``str``.
When using this ``Instance``, you `must` use the ``NoOpWordSplitter`` as well, or things will
break. You probably also do not want any kind of filtering (though stemming is ok), because
only the words will get filtered, not the labels.
"""
def __init__(self, text: List[str], label: List[str], index: int=None):
super(PreTokenizedTaggingInstance, self).__init__(text, label, index)
@classmethod
@overrides
def read_from_line(cls, line: str):
"""
Reads a ``PreTokenizedTaggingInstance`` from a line. The format has one of two options:
1. [example index][token1]###[tag1][tab][token2]###[tag2][tab]...
2. [token1]###[tag1][tab][token2]###[tag2][tab]...
"""
fields = line.split("\t")
if fields[0].isdigit():
index = int(fields[0])
fields = fields[1:]
else:
index = None
tokens = []
tags = []
for field in fields:
token, tag = field.rsplit("###", 1)
tokens.append(token)
tags.append(tag)
return cls(tokens, tags, index)
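    # For example, with illustrative tags, the line "3\tThe###DET\tdog###NN\tbarks###VBZ" yields
    # PreTokenizedTaggingInstance(['The', 'dog', 'barks'], ['DET', 'NN', 'VBZ'], 3).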
@overrides
def tags_in_label(self):
return [tag for tag in self.label]
@overrides
def _index_label(self, label: List[str], data_indexer: DataIndexer) -> List[int]:
tag_indices = [data_indexer.get_word_index(tag, namespace='tags') for tag in label]
indexed_label = []
for tag_index in tag_indices:
# We subtract 2 here to account for the unknown and padding tokens that the DataIndexer
# uses.
tag_one_hot = numpy.zeros(data_indexer.get_vocab_size(namespace='tags') - 2)
tag_one_hot[tag_index - 2] = 1
indexed_label.append(tag_one_hot)
return indexed_label
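# A small worked example of the one-hot encoding above: if the 'tags' namespace has 5 entries
# (with indices 0 and 1 reserved for padding and unknown tokens) and 'DET' maps to index 2, its
# one-hot vector has length 3 and is [1, 0, 0].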
| deep_qa-master | deep_qa/data/instances/sequence_tagging/pretokenized_tagging_instance.py |
from overrides import overrides
from .sentence_pair_instance import SentencePairInstance
class SnliInstance(SentencePairInstance):
"""
An SnliInstance is a SentencePairInstance that represents a pair of (text, hypothesis) from the
Stanford Natural Language Inference (SNLI) dataset, with an associated label. The main thing
we need to add here is handling of the label, because there are a few different ways we can use
this Instance.
The label can either be a three-way decision (one of either "entails", "contradicts", or
"neutral"), or a binary decision (grouping either "entails" and "contradicts", for relevance
    decisions, or "contradicts" and "neutral", for entails/not entails decisions).
The input label must be one of the strings in the label_mapping field below. The difference
    between the ``*_softmax`` and ``*_sigmoid`` labels is just for implementation reasons. A softmax over
two dimensions is exactly equivalent to a sigmoid, but to make our lives easier in building
models, sometimes we use a sigmoid and sometimes we use a softmax over two dimensions. Having
separate labels for these cases makes it easier to use this data in whatever kind of model you
want.
It might make sense to push this difference more generally into some common place, so that we
can separate the label itself from how it's encoded for training. But that might also be
complicated to implement, and it's not needed right now. TODO(matt): if we find ourselves
doing this kind of thing in several places, we should think about making that change.
"""
label_mapping = {
"entails": [1, 0, 0],
"contradicts": [0, 1, 0],
"neutral": [0, 0, 1],
"attention_true": [1],
"attention_false": [0],
"entails_softmax": [0, 1],
"not_entails_softmax": [1, 0],
"entails_sigmoid": [1],
"not_entails_sigmoid": [0],
}
def __init__(self, text: str, hypothesis: str, label: str, index: int=None):
# This intentionally crashes if `label` is not one of the keys in `label_mapping`.
super(SnliInstance, self).__init__(text, hypothesis, self.label_mapping[label], index)
def __str__(self):
return 'SnliInstance(' + self.first_sentence + ', ' + self.second_sentence + ', ' + str(self.label) + ')'
def to_attention_instance(self):
"""
This returns a new SnliInstance with a different label.
"""
if self.label == self.label_mapping["entails"] or self.label == self.label_mapping["contradicts"]:
new_label = "attention_true"
elif self.label == self.label_mapping["neutral"]:
new_label = "attention_false"
else:
raise RuntimeError("Can't convert " + str(self.label) + " to an attention label")
return SnliInstance(self.first_sentence, self.second_sentence, new_label, self.index)
def to_entails_instance(self, activation: str):
"""
This returns a new SnliInstance with a different label. The new label will be binary
(entails / not entails), but we need to distinguish between two different label types.
Sometimes we need the label to be encoded in a single dimension (i.e., either `0` or `1`),
and sometimes we need it to be encoded in two dimensions (i.e., either `[0, 1]` or `[1,
0]`). This depends on the activation function of the final layer in our network - a
        sigmoid activation will need the former, while a softmax activation will need the latter.
So, we encode these differently, as strings, which will be converted to the right array
later, in IndexedSnliInstance.
"""
if self.label == self.label_mapping["entails"]:
new_label = "entails"
elif self.label == self.label_mapping["neutral"] or self.label == self.label_mapping["contradicts"]:
new_label = "not_entails"
else:
raise RuntimeError("Can't convert " + str(self.label) + " to an entails/not-entails label")
new_label += '_' + activation
return SnliInstance(self.first_sentence, self.second_sentence, new_label, self.index)
@classmethod
@overrides
def read_from_line(cls, line: str):
"""
Reads an SnliInstance object from a line. The format has one of two options:
(1) [example index][tab][text][tab][hypothesis][tab][label]
(2) [text][tab][hypothesis][tab][label]
[label] is assumed to be one of "entails", "contradicts", or "neutral".
"""
fields = line.split("\t")
if len(fields) == 4:
index_string, text, hypothesis, label = fields
index = int(index_string)
elif len(fields) == 3:
text, hypothesis, label = fields
index = None
else:
raise RuntimeError("Unrecognized line format: " + line)
return cls(text, hypothesis, label, index)
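# For example, with illustrative sentences, the line
# "12\tA man is outside.\tA person is outdoors.\tentails" yields
# SnliInstance("A man is outside.", "A person is outdoors.", "entails", 12), whose label is
# stored as [1, 0, 0] via label_mapping.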
| deep_qa-master | deep_qa/data/instances/entailment/snli_instance.py |
from .sentence_pair_instance import SentencePairInstance, IndexedSentencePairInstance
from .snli_instance import SnliInstance
| deep_qa-master | deep_qa/data/instances/entailment/__init__.py |
from typing import Dict, List
import numpy
from overrides import overrides
from ..instance import TextInstance, IndexedInstance
from ...data_indexer import DataIndexer
class SentencePairInstance(TextInstance):
"""
    SentencePairInstance contains a pair of sentences accompanied by a binary label. You
    could have the label represent whatever you want, such as entailment, or occurring in the same
    context.
"""
def __init__(self, first_sentence: str, second_sentence: str, label: List[int], index: int=None):
super(SentencePairInstance, self).__init__(label, index)
self.first_sentence = first_sentence
self.second_sentence = second_sentence
@overrides
def words(self) -> Dict[str, List[str]]:
words = self._words_from_text(self.first_sentence)
second_sentence_words = self._words_from_text(self.second_sentence)
for namespace in words:
words[namespace].extend(second_sentence_words[namespace])
return words
@overrides
def to_indexed_instance(self, data_indexer: DataIndexer):
first_sentence = self._index_text(self.first_sentence, data_indexer)
second_sentence = self._index_text(self.second_sentence, data_indexer)
return IndexedSentencePairInstance(first_sentence, second_sentence, self.label, self.index)
@classmethod
@overrides
def read_from_line(cls, line: str):
"""
Expected format:
[sentence1][tab][sentence2][tab][label]
"""
fields = line.split("\t")
first_sentence, second_sentence, label = fields
return cls(first_sentence, second_sentence, [int(label)])
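    # For example, with illustrative sentences, the line "A dog runs.\tAn animal moves.\t1"
    # yields SentencePairInstance("A dog runs.", "An animal moves.", [1]).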
class IndexedSentencePairInstance(IndexedInstance):
"""
    This is an indexed instance that is commonly used for labeled sentence pairs. Examples of this are
    SnliInstances, where we have a labeled pair of text and hypothesis, and sentence2vec instances, where
    the objective is to train an encoder to predict whether two sentences appear in the same context.
"""
def __init__(self, first_sentence_indices: List[int], second_sentence_indices: List[int], label: List[int],
index: int=None):
super(IndexedSentencePairInstance, self).__init__(label, index)
self.first_sentence_indices = first_sentence_indices
self.second_sentence_indices = second_sentence_indices
@classmethod
@overrides
def empty_instance(cls):
return IndexedSentencePairInstance([], [], label=None, index=None)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
first_sentence_lengths = self._get_word_sequence_lengths(self.first_sentence_indices)
second_sentence_lengths = self._get_word_sequence_lengths(self.second_sentence_indices)
lengths = {}
for key in first_sentence_lengths:
lengths[key] = max(first_sentence_lengths[key], second_sentence_lengths[key])
return lengths
@overrides
def pad(self, padding_lengths: Dict[str, int]):
self.first_sentence_indices = self.pad_word_sequence(self.first_sentence_indices, padding_lengths)
self.second_sentence_indices = self.pad_word_sequence(self.second_sentence_indices, padding_lengths)
@overrides
def as_training_data(self):
first_sentence_array = numpy.asarray(self.first_sentence_indices, dtype='int32')
second_sentence_array = numpy.asarray(self.second_sentence_indices, dtype='int32')
return (first_sentence_array, second_sentence_array), numpy.asarray(self.label)
| deep_qa-master | deep_qa/data/instances/entailment/sentence_pair_instance.py |
from typing import Dict, List, Tuple
import numpy as np
from overrides import overrides
from .question_passage_instance import IndexedQuestionPassageInstance, QuestionPassageInstance
from ...data_indexer import DataIndexer
class McQuestionPassageInstance(QuestionPassageInstance):
"""
A McQuestionPassageInstance is a QuestionPassageInstance that represents a (question,
passage, answer_options) tuple from the McQuestionPassageInstance dataset, with an
associated label indicating the index of the correct answer choice.
"""
def __init__(self,
question: str,
passage: str,
answer_options: List[str],
label: int,
index: int=None):
super(McQuestionPassageInstance, self).__init__(question, passage, label, index)
self.answer_options = answer_options
def __str__(self):
return ('McQuestionPassageInstance({}, {}, {}, {})'.format(self.question_text,
self.passage_text,
'|'.join(self.answer_options),
str(self.label)))
@overrides
def words(self) -> Dict[str, List[str]]:
words = super(McQuestionPassageInstance, self).words()
for option in self.answer_options:
option_words = self._words_from_text(option)
for namespace in words:
words[namespace].extend(option_words[namespace])
return words
@overrides
def _index_label(self, label: Tuple[int, int]) -> List[int]:
"""
Specify how to index `self.label`, which is needed to convert the
McQuestionPassageInstance into an IndexedInstance (conversion handled in superclass).
"""
return self.label
@overrides
def to_indexed_instance(self, data_indexer: DataIndexer):
question_indices = self._index_text(self.question_text, data_indexer)
passage_indices = self._index_text(self.passage_text, data_indexer)
option_indices = [self._index_text(option, data_indexer) for option in
self.answer_options]
return IndexedMcQuestionPassageInstance(question_indices, passage_indices,
option_indices, self.label, self.index)
@classmethod
@overrides
def read_from_line(cls, line: str):
"""
Reads a McQuestionPassageInstance object from a line. The format has one of two options:
(1) [example index][tab][passage][tab][question][tab][options][tab][label]
(2) [passage][tab][question][tab][options][tab][label]
The ``answer_options`` column is assumed formatted as: ``[option]###[option]###[option]...``
That is, we split on three hashes (``"###"``).
"""
fields = line.split("\t")
if len(fields) == 5:
index_string, passage, question, options, label_string = fields
index = int(index_string)
elif len(fields) == 4:
passage, question, options, label_string = fields
index = None
else:
raise RuntimeError("Unrecognized line format: " + line)
# get the answer options
answer_options = options.split("###")
label = int(label_string)
return cls(question, passage, answer_options, label, index)
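    # For example, with an illustrative passage, question, and options, the line
    # "[passage text]\tWho barked?\tthe cat###the dog###the bird\t1" yields answer_options
    # ['the cat', 'the dog', 'the bird'] with label 1 (i.e., 'the dog').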
class IndexedMcQuestionPassageInstance(IndexedQuestionPassageInstance):
def __init__(self,
question_indices: List[int],
passage_indices: List[int],
option_indices: List[List[int]],
label: List[int],
index: int=None):
super(IndexedMcQuestionPassageInstance, self).__init__(question_indices,
passage_indices,
label, index)
self.option_indices = option_indices
@classmethod
@overrides
def empty_instance(cls):
        return IndexedMcQuestionPassageInstance([], [], [[]], None)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
"""
We need to pad the answer option length (in words), the number of answer
options, the question length (in words), the passage length (in words),
and the word length (in characters) among all the questions, passages,
and answer options.
"""
option_lengths = [self._get_word_sequence_lengths(option) for option in self.option_indices]
lengths = super(IndexedMcQuestionPassageInstance, self).get_padding_lengths()
# the number of options
lengths['num_options'] = len(self.option_indices)
# the number of words in the longest option
        lengths['num_option_words'] = max([option_length['num_sentence_words'] for
                                           option_length in option_lengths])
# the length of the longest word across the passage, question, and options
if 'num_word_characters' in option_lengths[0]:
# length of longest word (in characters) in options
            max_option_word_length = max([option_length['num_word_characters'] for
                                          option_length in option_lengths])
lengths['num_word_characters'] = max(lengths['num_word_characters'],
max_option_word_length)
return lengths
@overrides
def pad(self, padding_lengths: Dict[str, int]):
"""
In this function, we pad the questions and passages (in terms of number of words in each),
as well as the individual words in the questions and passages themselves. We also pad the
number of answer options, the answer options (in terms of numbers or words in each),
as well as the individual words in the answer options.
"""
super(IndexedMcQuestionPassageInstance, self).pad(padding_lengths)
# pad the number of options
num_options = padding_lengths['num_options']
while len(self.option_indices) < num_options:
self.option_indices.append([])
self.option_indices = self.option_indices[:num_options]
# pad the number of words in the options, number of characters in each word in option
padded_options = []
for indices in self.option_indices:
padding_lengths['num_sentence_words'] = padding_lengths['num_option_words']
padded_options.append(self.pad_word_sequence(indices, padding_lengths))
self.option_indices = padded_options
@overrides
def as_training_data(self):
question_array = np.asarray(self.question_indices, dtype='int32')
passage_array = np.asarray(self.passage_indices, dtype='int32')
options_array = np.asarray(self.option_indices, dtype='int32')
if self.label is None:
label = None
else:
label = np.zeros((len(self.option_indices)))
label[self.label] = 1
return (question_array, passage_array, options_array), label
| deep_qa-master | deep_qa/data/instances/reading_comprehension/mc_question_passage_instance.py |
from typing import Dict, List, Any
import numpy as np
from overrides import overrides
from ..instance import TextInstance, IndexedInstance
from ...data_indexer import DataIndexer
class QuestionPassageInstance(TextInstance):
"""
A QuestionPassageInstance is a base class for datasets that consist primarily of a question
text and a passage, where the passage contains the answer to the question. This class should
    not be used directly, because ``_index_label`` is not implemented here; use a subclass instead.
"""
def __init__(self, question_text: str, passage_text: str, label: Any, index: int=None):
super(QuestionPassageInstance, self).__init__(label, index)
self.question_text = question_text
self.passage_text = passage_text
def __str__(self):
return ('QuestionPassageInstance(' + self.question_text +
', ' + self.passage_text + ', ' +
str(self.label) + ')')
@overrides
def words(self) -> Dict[str, List[str]]:
words = self._words_from_text(self.question_text)
passage_words = self._words_from_text(self.passage_text)
for namespace in words:
words[namespace].extend(passage_words[namespace])
return words
def _index_label(self, label: Any) -> List[int]:
"""
Index the labels. Since we don't know what form the label takes,
we leave it to subclasses to implement this method.
"""
raise NotImplementedError
@overrides
def to_indexed_instance(self, data_indexer: DataIndexer):
question_indices = self._index_text(self.question_text, data_indexer)
passage_indices = self._index_text(self.passage_text, data_indexer)
label_indices = self._index_label(self.label)
return IndexedQuestionPassageInstance(question_indices,
passage_indices, label_indices,
self.index)
class IndexedQuestionPassageInstance(IndexedInstance):
"""
This is an indexed instance that is used for (question, passage) pairs.
"""
def __init__(self,
question_indices: List[int],
passage_indices: List[int],
label: List[int],
index: int=None):
super(IndexedQuestionPassageInstance, self).__init__(label, index)
self.question_indices = question_indices
self.passage_indices = passage_indices
@classmethod
@overrides
def empty_instance(cls):
return IndexedQuestionPassageInstance([], [], label=None, index=None)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
"""
We need to pad at least the question length, the passage length, and the
word length across all the questions and passages. Subclasses that
add more arguments should also override this method to enable padding on said
arguments.
"""
question_lengths = self._get_word_sequence_lengths(self.question_indices)
passage_lengths = self._get_word_sequence_lengths(self.passage_indices)
lengths = {}
# the number of words to pad the question to
lengths['num_question_words'] = question_lengths['num_sentence_words']
# the number of words to pad the passage to
lengths['num_passage_words'] = passage_lengths['num_sentence_words']
if 'num_word_characters' in question_lengths and 'num_word_characters' in passage_lengths:
# the length of the longest word across the passage and question
lengths['num_word_characters'] = max(question_lengths['num_word_characters'],
passage_lengths['num_word_characters'])
return lengths
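    # Illustrative sketch (added comment): for a 4-word question and a 10-word
    # passage, this returns {'num_question_words': 4, 'num_passage_words': 10},
    # plus 'num_word_characters' when both texts carry character-level lengths.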
@overrides
def pad(self, padding_lengths: Dict[str, int]):
"""
In this function, we pad the questions and passages (in terms of number of words in each),
as well as the individual words in the questions and passages themselves.
"""
padding_lengths_tmp = padding_lengths.copy()
padding_lengths_tmp['num_sentence_words'] = padding_lengths_tmp['num_question_words']
self.question_indices = self.pad_word_sequence(self.question_indices, padding_lengths_tmp)
padding_lengths_tmp['num_sentence_words'] = padding_lengths_tmp['num_passage_words']
self.passage_indices = self.pad_word_sequence(self.passage_indices, padding_lengths_tmp,
truncate_from_right=False)
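    # Note (added comment): the passage is padded/truncated with
    # truncate_from_right=False, i.e. the opposite truncation direction from the
    # question, which uses the default; see pad_word_sequence for the exact semantics.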
@overrides
def as_training_data(self):
question_array = np.asarray(self.question_indices, dtype='int32')
passage_array = np.asarray(self.passage_indices, dtype='int32')
return (question_array, passage_array), np.asarray(self.label)
| deep_qa-master | deep_qa/data/instances/reading_comprehension/question_passage_instance.py |
from .character_span_instance import CharacterSpanInstance, IndexedCharacterSpanInstance
from .mc_question_passage_instance import McQuestionPassageInstance, IndexedMcQuestionPassageInstance
from .question_passage_instance import QuestionPassageInstance, IndexedQuestionPassageInstance
| deep_qa-master | deep_qa/data/instances/reading_comprehension/__init__.py |
from typing import Tuple, List
import numpy
from overrides import overrides
from .question_passage_instance import QuestionPassageInstance, IndexedQuestionPassageInstance
from ...data_indexer import DataIndexer
class CharacterSpanInstance(QuestionPassageInstance):
"""
A CharacterSpanInstance is a QuestionPassageInstance that represents a (question, passage) pair
with an associated label, which is the data given for the span prediction task. The label is a
span of characters in the passage that indicates where the answer to the question begins and
where the answer to the question ends.
The main thing this class handles over QuestionPassageInstance is in specifying the form of and
how to index the label, which is given as a span of _characters_ in the passage. The label we
are going to use in the rest of the code is a span of _tokens_ in the passage, so the mapping
from character labels to token labels depends on the tokenization we did, and the logic to
handle this is, unfortunately, a little complicated. The label conversion happens when
    converting a CharacterSpanInstance to an IndexedInstance (where character indices are generally
lost, anyway).
This class should be used to represent training instances for the SQuAD (Stanford Question
Answering) and NewsQA datasets, to name a few.
"""
# We add a special token to the end of the passage. This is because our span labels are
# end-exclusive, and we do a softmax over the passage to determine span end. So if we want to
# be able to include the last token of the passage, we need to have a special symbol at the
# end.
stop_token = "@@STOP@@"
def __init__(self, question: str, passage: str, label: Tuple[int, int], index: int=None):
super(CharacterSpanInstance, self).__init__(question, passage, label, index)
def __str__(self):
return ('CharacterSpanInstance(' + self.question_text + ', ' +
self.passage_text + ', ' + str(self.label) + ')')
@overrides
def _index_label(self, label: Tuple[int, int]) -> List[int]:
"""
Specify how to index `self.label`, which is needed to convert the CharacterSpanInstance
into an IndexedInstance (handled in superclass).
"""
if self.label is not None:
return self.tokenizer.char_span_to_token_span(self.passage_text, self.label)
return None
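    # Illustrative sketch (added comment, assuming a simple word tokenizer): for a
    # passage "The dog ate the cake" and a character-span label covering "dog",
    # char_span_to_token_span maps the label to the corresponding token-level span
    # (roughly token index 1 for "dog", with the end index following the tokenizer's
    # end-exclusive convention).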
@classmethod
@overrides
def read_from_line(cls, line: str):
"""
Reads a CharacterSpanInstance object from a line. The format has one of two options:
(1) [example index][tab][question][tab][passage][tab][label]
(2) [question][tab][passage][tab][label]
[label] is assumed to be a comma-separated pair of integers.
"""
fields = line.split("\t")
if len(fields) == 4:
index_string, question, passage, label = fields
index = int(index_string)
elif len(fields) == 3:
question, passage, label = fields
index = None
else:
raise RuntimeError("Unrecognized line format (" + str(len(fields)) + " columns): " + line)
label_fields = label.split(",")
span_begin = int(label_fields[0])
span_end = int(label_fields[1])
return cls(question, passage, (span_begin, span_end), index)
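    # Example of a line this parses (added sketch, tab-separated, no index column):
    #     "Who ate the cake?\tThe dog ate the cake.\t4,7"
    # which yields CharacterSpanInstance("Who ate the cake?",
    #                                    "The dog ate the cake.", (4, 7)).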
@overrides
def to_indexed_instance(self, data_indexer: DataIndexer):
instance = super(CharacterSpanInstance, self).to_indexed_instance(data_indexer)
stop_index = data_indexer.add_word_to_index(self.stop_token)
if isinstance(instance.passage_indices[0], list):
instance.passage_indices.append([stop_index])
else:
instance.passage_indices.append(stop_index)
return IndexedCharacterSpanInstance(instance.question_indices, instance.passage_indices,
instance.label, instance.index)
class IndexedCharacterSpanInstance(IndexedQuestionPassageInstance):
@overrides
def as_training_data(self):
input_arrays, _ = super(IndexedCharacterSpanInstance, self).as_training_data()
span_begin_label = span_end_label = None
if self.label is not None:
span_begin_label = numpy.zeros((len(self.passage_indices)))
span_end_label = numpy.zeros((len(self.passage_indices)))
span_begin_label[self.label[0]] = 1
span_end_label[self.label[1]] = 1
return input_arrays, (span_begin_label, span_end_label)
| deep_qa-master | deep_qa/data/instances/reading_comprehension/character_span_instance.py |
from .text_classification_instance import TextClassificationInstance, IndexedTextClassificationInstance
| deep_qa-master | deep_qa/data/instances/text_classification/__init__.py |
from typing import Dict, List
import numpy
from overrides import overrides
from ..instance import TextInstance, IndexedInstance
from ...data_indexer import DataIndexer
class TextClassificationInstance(TextInstance):
"""
A TextClassificationInstance is a :class:`TextInstance` that is a single passage of text,
where that passage has some associated (categorical, or possibly real-valued) label.
"""
def __init__(self, text: str, label: bool, index: int=None):
"""
text: the text of this instance, typically either a sentence or a logical form.
"""
super(TextClassificationInstance, self).__init__(label, index)
self.text = text
def __str__(self):
return 'TextClassificationInstance(' + self.text + ', ' + str(self.label) + ')'
@overrides
def words(self) -> Dict[str, List[str]]:
return self._words_from_text(self.text)
@overrides
def to_indexed_instance(self, data_indexer: DataIndexer):
indices = self._index_text(self.text, data_indexer)
return IndexedTextClassificationInstance(indices, self.label, self.index)
@classmethod
@overrides
def read_from_line(cls, line: str):
"""
Reads a TextClassificationInstance object from a line. The format has one of four options:
(1) [sentence]
(2) [sentence index][tab][sentence]
(3) [sentence][tab][label]
(4) [sentence index][tab][sentence][tab][label]
If no label is given, we use ``None`` as the label.
"""
fields = line.split("\t")
if len(fields) == 3:
index, text, label_string = fields
label = label_string == '1'
return cls(text, label, int(index))
elif len(fields) == 2:
if fields[0].isdecimal():
index, text = fields
return cls(text, None, int(index))
elif fields[1].isdecimal():
text, label_string = fields
label = label_string == '1'
return cls(text, label)
else:
raise RuntimeError("Unrecognized line format: " + line)
elif len(fields) == 1:
text = fields[0]
return cls(text, None)
else:
raise RuntimeError("Unrecognized line format: " + line)
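    # Examples of lines this parses (added sketch, tab-separated):
    #     "cats are animals"          -> TextClassificationInstance(text, None)
    #     "42\tcats are animals"      -> TextClassificationInstance(text, None, 42)
    #     "cats are animals\t1"       -> TextClassificationInstance(text, True)
    #     "7\tcats are animals\t0"    -> TextClassificationInstance(text, False, 7)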
class IndexedTextClassificationInstance(IndexedInstance):
def __init__(self, word_indices: List[int], label, index: int=None):
super(IndexedTextClassificationInstance, self).__init__(label, index)
self.word_indices = word_indices
@classmethod
@overrides
def empty_instance(cls):
return IndexedTextClassificationInstance([], label=None, index=None)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return self._get_word_sequence_lengths(self.word_indices)
@overrides
def pad(self, padding_lengths: Dict[str, int]):
self.word_indices = self.pad_word_sequence(self.word_indices, padding_lengths)
@overrides
def as_training_data(self):
word_array = numpy.asarray(self.word_indices, dtype='int32')
if self.label is True:
label = numpy.zeros((2))
label[1] = 1
elif self.label is False:
label = numpy.zeros((2))
label[0] = 1
else:
label = None
return word_array, label
| deep_qa-master | deep_qa/data/instances/text_classification/text_classification_instance.py |
from typing import Dict, List
import numpy
from overrides import overrides
from ..instance import TextInstance, IndexedInstance
from ...data_indexer import DataIndexer
class SentenceInstance(TextInstance):
"""
A ``SentenceInstance`` is a :class:`TextInstance` that is a single passage of text, with no
associated label. The label is the passage itself offset by one, because we will use this in a
language modeling context, to predict the next word in the passage given the previous words.
"""
def __init__(self, text: str, index: int=None):
super(SentenceInstance, self).__init__(None, index)
self.text = text
def __str__(self):
return 'SentenceInstance(' + self.text + ')'
@overrides
def words(self) -> Dict[str, List[str]]:
words = self._words_from_text(self.text)
words['words'].extend(['<S>', '</S>'])
        return words
@overrides
def to_indexed_instance(self, data_indexer: DataIndexer):
indices = self._index_text(self.text, data_indexer)
# We'll add start and end symbols to the indices here, then split this into an input
# sequence and an output sequence, offset by one, where the input has the start token, and
# the output has the end token.
start_index = data_indexer.get_word_index('<S>')
end_index = data_indexer.get_word_index('</S>')
if isinstance(indices[0], list):
indices = [[start_index]] + indices + [[end_index]]
else:
indices = [start_index] + indices + [end_index]
word_indices = indices[:-1]
label_indices = indices[1:]
if isinstance(label_indices[0], list):
label_indices = [x[0] for x in label_indices]
return IndexedSentenceInstance(word_indices, label_indices, self.index)
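    # Illustrative sketch (added comment): if _index_text returns [5, 7, 9], then the
    # full sequence becomes [<S>, 5, 7, 9, </S>], so word_indices == [<S>, 5, 7, 9]
    # and label_indices == [5, 7, 9, </S>] -- each input position predicts the next word.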
@classmethod
@overrides
def read_from_line(cls, line: str):
"""
Reads a SentenceInstance object from a line. The format has one of two options:
(1) [sentence]
(2) [sentence index][tab][sentence]
"""
fields = line.split("\t")
if len(fields) == 2:
index, text = fields
return cls(text, int(index))
elif len(fields) == 1:
text = fields[0]
return cls(text, None)
else:
raise RuntimeError("Unrecognized line format: " + line)
class IndexedSentenceInstance(IndexedInstance):
def __init__(self, word_indices: List[int], label_indices: List[int], index: int=None):
super(IndexedSentenceInstance, self).__init__(label_indices, index)
self.word_indices = word_indices
@classmethod
@overrides
def empty_instance(cls):
return IndexedSentenceInstance([], [], index=None)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
# len(label_indices) == len(word_indices), so we only need to return this one length.
return self._get_word_sequence_lengths(self.word_indices)
@overrides
def pad(self, padding_lengths: Dict[str, int]):
self.word_indices = self.pad_word_sequence(self.word_indices, padding_lengths)
self.label = self.pad_sequence_to_length(self.label, padding_lengths['num_sentence_words'])
@overrides
def as_training_data(self):
word_array = numpy.asarray(self.word_indices, dtype='int32')
label_array = numpy.asarray(self.label, dtype='int32')
# The expand dims here is because Keras' sparse categorical cross entropy expects tensors
# of shape (batch_size, num_words, 1).
return word_array, numpy.expand_dims(label_array, axis=2)
| deep_qa-master | deep_qa/data/instances/language_modeling/sentence_instance.py |
from .sentence_instance import SentenceInstance, IndexedSentenceInstance
| deep_qa-master | deep_qa/data/instances/language_modeling/__init__.py |
# pylint: disable=invalid-name,no-self-use
import pyhocon
from deep_qa.common.params import Params, replace_none
from deep_qa.models import concrete_models
from deep_qa.testing.test_case import DeepQaTestCase
class TestExampleExperiments(DeepQaTestCase):
def setUp(self):
super(TestExampleExperiments, self).setUp()
self.write_pretrained_vector_files()
self.example_experiments_dir = "./example_experiments"
self.entailment_dir = self.example_experiments_dir + "/entailment/"
self.reading_comprehension_dir = self.example_experiments_dir + "/reading_comprehension/"
self.sequence_tagging_dir = self.example_experiments_dir + "/sequence_tagging/"
def test_entailment_examples_can_train(self):
self.write_snli_files()
snli_decomposable_attention = self.entailment_dir + "snli_decomposable_attention.json"
self.check_experiment_type_can_train(snli_decomposable_attention)
def test_bidaf_can_train(self):
self.write_span_prediction_files()
bidaf_squad = self.reading_comprehension_dir + "bidaf_squad.json"
self.check_experiment_type_can_train(bidaf_squad)
def test_ga_reader_can_train(self):
self.write_who_did_what_files()
gareader_who_did_what = self.reading_comprehension_dir + "gareader_who_did_what.json"
self.check_experiment_type_can_train(gareader_who_did_what)
def test_as_reader_can_train(self):
self.write_who_did_what_files()
as_reader_who_did_what = self.reading_comprehension_dir + "asreader_who_did_what.json"
self.check_experiment_type_can_train(as_reader_who_did_what)
def test_simple_tagger_can_train(self):
self.write_sequence_tagging_files()
simple_tagger = self.sequence_tagging_dir + "simple_tagger.json"
self.check_experiment_type_can_train(simple_tagger)
def check_experiment_type_can_train(self, param_file):
param_dict = pyhocon.ConfigFactory.parse_file(param_file)
params = Params(replace_none(param_dict))
model_class = concrete_models[params.pop("model_class")]
        # Tests would try to create root directories because these configs use /net/efs
        # paths, so we remove the model serialization prefix here, point the
        # train/validation paths at the dummy test files, and run only one epoch to
        # speed things up.
params["model_serialization_prefix"] = None
if len(params["train_files"]) > 1:
params["train_files"] = [self.TRAIN_FILE, self.TRAIN_BACKGROUND]
params["validation_files"] = [self.VALIDATION_FILE, self.VALIDATION_BACKGROUND]
else:
params["train_files"] = [self.TRAIN_FILE]
params["validation_files"] = [self.TRAIN_FILE]
params["num_epochs"] = 1
try:
if params["embeddings"]["words"]["pretrained_file"]:
params["embeddings"]["words"]["pretrained_file"] = self.PRETRAINED_VECTORS_GZIP
except KeyError:
# No embedding/words field passed in the parameters,
# so nothing to change.
pass
model = self.get_model(model_class, params)
model.train()
| deep_qa-master | tests/example_experiments_test.py |
| deep_qa-master | tests/__init__.py |
# pylint: disable=invalid-name,no-self-use
import json
import os
import numpy
from numpy.testing import assert_almost_equal
from deep_qa.run import compute_accuracy
from deep_qa.run import run_model_from_file, load_model, evaluate_model
from deep_qa.run import score_dataset, score_dataset_with_ensemble
from deep_qa.testing.test_case import DeepQaTestCase
class TestRun(DeepQaTestCase):
# Our point here is mostly just to make sure the scripts don't crash.
def setUp(self):
super(TestRun, self).setUp()
self.write_true_false_model_files()
model_params = self.get_model_params({"model_class": "ClassificationModel",
'save_models': True})
self.param_path = os.path.join(self.TEST_DIR, "params.json")
with open(self.param_path, "w") as file_path:
json.dump(model_params.as_dict(), file_path)
def test_run_model_does_not_crash(self):
run_model_from_file(self.param_path)
def test_load_model_does_not_crash(self):
run_model_from_file(self.param_path)
loaded_model = load_model(self.param_path)
assert loaded_model.can_train()
def test_score_dataset_does_not_crash(self):
run_model_from_file(self.param_path)
score_dataset(self.param_path, [self.TEST_FILE])
    def test_evaluate_model_does_not_crash(self):
run_model_from_file(self.param_path)
evaluate_model(self.param_path, [self.TEST_FILE])
def test_score_dataset_with_ensemble_gives_same_predictions_as_score_dataset(self):
# We're just going to test something simple here: that the methods don't crash, and that we
# get the same result with an ensemble of one model that we do with `score_dataset`.
run_model_from_file(self.param_path)
predictions, _ = score_dataset(self.param_path, [self.TEST_FILE])
ensembled_predictions, _ = score_dataset_with_ensemble([self.param_path], [self.TEST_FILE])
assert_almost_equal(predictions, ensembled_predictions)
def test_compute_accuracy_computes_a_correct_metric(self):
predictions = numpy.asarray([[.5, .5, .6], [.1, .4, .0]])
labels = numpy.asarray([[1, 0, 0], [0, 1, 0]])
assert compute_accuracy(predictions, labels) == .5
| deep_qa-master | tests/run_test.py |
# pylint: disable=no-self-use
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from keras.layers import Input
from keras.models import Model
from deep_qa.layers import Overlap
class TestOverlap:
def test_batched_case(self):
tensor_a_len = 5
tensor_b_len = 4
tensor_a_input = Input(shape=(tensor_a_len,),
dtype='int32',
name="tensor_a")
tensor_b_input = Input(shape=(tensor_b_len,),
dtype='int32',
name="tensor_b")
overlap_output = Overlap()([tensor_a_input,
tensor_b_input])
model = Model([tensor_a_input,
tensor_b_input],
overlap_output)
tensor_a = numpy.array([[1, 3, 4, 8, 2], [2, 8, 1, 2, 3]])
tensor_b = numpy.array([[9, 4, 2, 5], [6, 1, 2, 2]])
expected_output = numpy.array([[[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]],
[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0]]])
# Testing the general batched case
result = model.predict([tensor_a, tensor_b])
assert_almost_equal(result, expected_output)
def test_masked_batched_case(self):
tensor_a = K.variable(numpy.array([[1, 3, 4, 8, 2], [2, 8, 1, 2, 3]]),
dtype="int32")
tensor_b = K.variable(numpy.array([[9, 4, 2, 5], [6, 1, 2, 2]]),
dtype="int32")
mask_a = K.variable(numpy.array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0]]))
mask_b = K.variable(numpy.array([[1, 1, 0, 0], [1, 1, 0, 0]]))
expected_output = numpy.array([[[1.0, 0.0], [1.0, 0.0],
[0.0, 1.0], [1.0, 0.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0],
[0.0, 1.0], [1.0, 0.0], [1.0, 0.0]]])
# Testing the masked general batched case
result = K.eval(Overlap()([tensor_a, tensor_b], mask=[mask_a, mask_b]))
assert_almost_equal(result, expected_output)
| deep_qa-master | tests/layers/overlap_test.py |
# pylint: disable=no-self-use
import numpy
from keras.layers import Input, Embedding, merge
from keras.models import Model
import keras.backend as K
from deep_qa.layers.encoders import AttentiveGru
class TestAttentiveGRU:
def test_on_unmasked_input(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
input_layer = Input(shape=(sentence_length,), dtype='int32')
attention = Input(shape=(sentence_length,), dtype='float32')
# Embedding does not mask zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim)
attentive_gru = AttentiveGru(output_dim=embedding_dim,
input_length=sentence_length,
return_sequences=True,
name='attentive_gru_test')
embedded_input = embedding(input_layer)
concat_mode = lambda layer_outs: K.concatenate([K.expand_dims(layer_outs[0], axis=2),
layer_outs[1]],
axis=2)
combined_sentence_with_attention = merge([attention, embedded_input],
mode=concat_mode,
output_shape=(5, 11))
sequence_of_outputs = attentive_gru(combined_sentence_with_attention)
model = Model(inputs=[input_layer, attention], outputs=sequence_of_outputs)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
test_input = numpy.asarray([[0, 3, 1, 7, 10]], dtype='int32')
attention_input = numpy.asarray([[1., 0., 0., 0., 0.]], dtype='float32')
        # To test this model, we check that if we pass the attentive_gru an attention
        # mask that is all zeros apart from a one in the first position, then every
        # element of the output sequence should equal the first output, because the
        # state never changes over time -- none of the memory gets added in. This is
        # not the intended use of this class, but if this works, the intended use will
        # be correct.
actual_sequence_of_outputs = numpy.squeeze(model.predict([test_input, attention_input]))
for i in range(sentence_length - 1):
assert numpy.array_equal(actual_sequence_of_outputs[i, :], actual_sequence_of_outputs[i+1, :])
| deep_qa-master | tests/layers/attentive_gru_test.py |
# pylint: disable=no-self-use
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import keras.backend as K
from keras.layers import Input
from keras.models import Model
from deep_qa.common.checks import ConfigurationError
from deep_qa.layers import OptionAttentionSum
from deep_qa.testing.test_case import DeepQaTestCase
class TestOptionAttentionSum(DeepQaTestCase):
def test_mean_mode(self):
document_probabilities_length = 6
document_indices_length = document_probabilities_length
max_num_options = 3
max_num_words_per_option = 2
document_indices_input = Input(shape=(document_indices_length,),
dtype='int32',
name="document_indices_input")
document_probabilities_input = Input(shape=(document_probabilities_length,),
dtype='float32',
name="document_probabilities_input")
options_input = Input(shape=(max_num_options, max_num_words_per_option),
dtype='int32', name="options_input")
option_attention_sum_mean = OptionAttentionSum()([document_indices_input,
document_probabilities_input,
options_input])
model = Model([document_indices_input,
document_probabilities_input,
options_input],
option_attention_sum_mean)
document_indices = np.array([[1, 2, 3, 4, 1, 2]])
document_probabilities = np.array([[.1, .2, .3, .4, 0.01, 0.03]])
# Testing the general single-batch case.
options = np.array([[[1, 2], [3, 4], [1, 2]]])
result = model.predict([document_indices, document_probabilities, options])
assert_array_almost_equal(result, np.array([[0.17, 0.35, 0.17]]))
options = np.array([[[1, 1], [3, 1], [4, 2]]])
result = model.predict([document_indices, document_probabilities, options])
assert_array_almost_equal(result, np.array([[0.11, 0.205, 0.315]]))
# Testing the general batch case.
batch_document_indices = np.array([[1, 2, 3, 4, 1, 2], [1, 2, 3, 4, 1, 2]])
batch_document_probabilities = np.array([[.1, .2, .3, .4, 0.01, 0.03],
[.1, .2, .3, .4, 0.01, 0.03]])
batch_options = np.array([[[1, 2], [3, 4], [1, 2]], [[1, 1], [3, 1], [4, 2]]])
result = model.predict([batch_document_indices, batch_document_probabilities,
batch_options])
assert_array_almost_equal(result, np.array([[0.17, 0.35, 0.17],
[0.11, 0.205, 0.315]]))
def test_mean_mode_mask(self):
# Testing the general masked batched case.
document_indices = K.variable(np.array([[1, 2, 3, 4, 1, 2]]))
document_probabilities = K.variable(np.array([[.1, .2, .3, .4, 0.01, 0.03]]))
options = K.variable(np.array([[[1, 2, 1], [3, 4, 2], [4, 1, 0]]]))
option_attention_sum_mean = K.eval(OptionAttentionSum().call([document_indices,
document_probabilities,
options]))
assert_array_almost_equal(option_attention_sum_mean,
np.array([[0.14999999, 0.31000003, 0.255]]))
options = K.variable(np.array([[[1, 2, 1], [3, 4, 2], [0, 0, 0]]]))
option_attention_sum_mean = K.eval(OptionAttentionSum().call([document_indices,
document_probabilities,
options]))
assert_array_almost_equal(option_attention_sum_mean,
np.array([[0.14999999, 0.31000003, 0.0]]))
# Testing the masked batched case where input is all 0s.
options = K.variable(np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]))
option_attention_sum_mean = K.eval(OptionAttentionSum().call([document_indices,
document_probabilities,
options]))
assert_array_almost_equal(option_attention_sum_mean,
np.array([[0, 0, 0]]))
def test_sum_mode(self):
document_probabilities_length = 6
document_indices_length = document_probabilities_length
max_num_options = 3
max_num_words_per_option = 2
document_indices_input = Input(shape=(document_indices_length,),
dtype='int32',
name="document_indices_input")
document_probabilities_input = Input(shape=(document_probabilities_length,),
dtype='float32',
name="document_probabilities_input")
options_input = Input(shape=(max_num_options, max_num_words_per_option),
dtype='int32', name="options_input")
option_attention_sum_mean = OptionAttentionSum("sum")([document_indices_input,
document_probabilities_input,
options_input])
model = Model([document_indices_input,
document_probabilities_input,
options_input],
option_attention_sum_mean)
document_indices = np.array([[1, 2, 3, 4, 1, 2]])
document_probabilities = np.array([[.1, .2, .3, .4, 0.01, 0.03]])
# Testing the general single-batch case.
options = np.array([[[1, 2], [3, 4], [1, 2]]])
result = model.predict([document_indices, document_probabilities, options])
assert_array_almost_equal(result, np.array([[0.34, 0.70, 0.34]]))
options = np.array([[[1, 1], [3, 1], [4, 2]]])
result = model.predict([document_indices, document_probabilities, options])
assert_array_almost_equal(result, np.array([[0.22, 0.41, 0.63]]))
# Testing the general batch case
batch_document_indices = np.array([[1, 2, 3, 4, 1, 2], [1, 2, 3, 4, 1, 2]])
batch_document_probabilities = np.array([[.1, .2, .3, .4, 0.01, 0.03],
[.1, .2, .3, .4, 0.01, 0.03]])
batch_options = np.array([[[1, 2], [3, 4], [1, 2]], [[1, 1], [3, 1], [4, 2]]])
result = model.predict([batch_document_indices, batch_document_probabilities,
batch_options])
assert_array_almost_equal(result, np.array([[0.34, 0.70, 0.34],
[0.22, 0.41, 0.63]]))
def test_multiword_option_mode_validation(self):
self.assertRaises(ConfigurationError, OptionAttentionSum, "summean")
def test_compute_mask(self):
option_attention_sum = OptionAttentionSum()
result = option_attention_sum.compute_mask(["_", "_",
K.variable(np.array([[[1, 2, 0], [2, 3, 3],
[0, 0, 0], [0, 0, 0]]],
dtype="int32"))])
assert_array_equal(K.eval(result), np.array([[1, 1, 0, 0]]))
result = option_attention_sum.compute_mask(["_", "_",
K.variable(np.array([[[1, 2, 0], [1, 0, 0],
[0, 0, 0], [0, 0, 0]]],
dtype="int32"))])
assert_array_equal(K.eval(result), np.array([[1, 1, 0, 0]]))
result = option_attention_sum.compute_mask(["_", "_",
K.variable(np.array([[[1, 2, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0]]],
dtype="int32"))])
assert_array_equal(K.eval(result), np.array([[1, 0, 0, 0]]))
# test batch case
result = option_attention_sum.compute_mask(["_", "_",
K.variable(np.array([[[1, 2, 0], [2, 3, 3],
[0, 0, 0], [0, 0, 0]],
[[1, 1, 0], [3, 3, 3],
[0, 0, 3], [0, 0, 0]]],
dtype="int32"))])
assert_array_equal(K.eval(result), np.array([[1, 1, 0, 0], [1, 1, 1, 0]]))
| deep_qa-master | tests/layers/test_option_attention_sum.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Embedding, Input
from keras.models import Model
from deep_qa.layers.entailment_models import MultipleChoiceTupleEntailment
class TestTupleAlignment:
def test_tuple_alignment_does_not_crash(self):
question_length = 5
num_options = 4
tuple_size = 3
num_tuples = 7
embedding_dim = 10
vocabulary_size = 15
batch_size = 32
question_input_layer = Input(shape=(question_length,), dtype='int32')
answer_input_layer = Input(shape=(num_options,), dtype='int32')
knowledge_input_layer = Input(shape=(num_tuples, tuple_size), dtype='int32')
# Embedding does not mask zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
mask_zero=True)
embedded_question = embedding(question_input_layer)
embedded_answer = embedding(answer_input_layer)
embedded_knowledge = embedding(knowledge_input_layer)
entailment_layer = MultipleChoiceTupleEntailment()
entailment_scores = entailment_layer([embedded_knowledge, embedded_question, embedded_answer])
model = Model(inputs=[knowledge_input_layer, question_input_layer, answer_input_layer],
outputs=entailment_scores)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
knowledge_input = numpy.random.randint(0, vocabulary_size, (batch_size, num_tuples, tuple_size))
question_input = numpy.random.randint(0, vocabulary_size, (batch_size, question_length))
answer_input = numpy.random.randint(0, vocabulary_size, (batch_size, num_options))
model.predict([knowledge_input, question_input, answer_input])
| deep_qa-master | tests/layers/tuple_alignment_test.py |
# pylint: disable=no-self-use
import numpy
from numpy.testing import assert_almost_equal
from keras.layers import Input
from keras.models import Model
from deep_qa.layers import BiGRUIndexSelector
class TestBiGRUIndexSelector():
def test_batched_case(self):
document_length = 5
gru_hidden_dim = 2
target = 8
word_indices_input = Input(shape=(document_length,),
dtype='int32',
name="word_indices_input")
gru_f_input = Input(shape=(document_length, gru_hidden_dim),
dtype='float32',
name="gru_f_input")
gru_b_input = Input(shape=(document_length, gru_hidden_dim),
dtype='float32',
name="gru_b_input")
index_bigru_output = BiGRUIndexSelector(target)([word_indices_input,
gru_f_input,
gru_b_input])
model = Model([word_indices_input,
gru_f_input,
gru_b_input],
index_bigru_output)
document_indices = numpy.array([[1, 3, 4, 8, 2], [2, 8, 1, 2, 3]])
gru_f_input = numpy.array([[[0.1, 0.5], [0.3, 0.4], [0.4, 0.1], [0.9, 0.2], [0.1, 0.3]],
[[0.4, 0.6], [0.7, 0.1], [0.3, 0.1], [0.9, 0.5], [0.4, 0.7]]])
gru_b_input = numpy.array([[[0.7, 0.2], [0.9, 0.1], [0.3, 0.8], [0.2, 0.6], [0.7, 0.2]],
[[0.2, 0.1], [0.3, 0.6], [0.2, 0.8], [0.3, 0.6], [0.4, 0.4]]])
expected_output = numpy.array([[0.9, 0.2, 0.2, 0.6], [0.7, 0.1, 0.3, 0.6]])
        # Testing the general batched case.
result = model.predict([document_indices, gru_f_input, gru_b_input])
assert_almost_equal(result, expected_output)
| deep_qa-master | tests/layers/bigru_index_selector_test.py |
| deep_qa-master | tests/layers/__init__.py |
# pylint: disable=no-self-use,invalid-name
from keras.layers import Input, Embedding
from keras.models import Model
import numpy as np
from deep_qa.layers.encoders import PositionalEncoder
class TestPositionalEncoder:
def test_on_unmasked_input(self):
sentence_length = 3
embedding_dim = 3
vocabulary_size = 5
# Manual embedding vectors so we can compute exact values for test.
embedding_weights = np.asarray([[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0]])
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding masks zeros
embedding = Embedding(input_dim=vocabulary_size, weights=[embedding_weights],
output_dim=embedding_dim, mask_zero=True)
encoder = PositionalEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd")
test_input = np.asarray([[1, 2, 3]])
actual_output = model.predict(test_input)[0]
expected_output = np.asarray([1.3333333, 1.6666666, 2])
np.testing.assert_array_almost_equal(expected_output, actual_output)
def test_on_masked_input(self):
sentence_length = 5
embedding_dim = 3
vocabulary_size = 5
# Manual embedding vectors so we can compute exact values for test.
embedding_weights = np.asarray([[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0]])
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding masks zeros
embedding = Embedding(input_dim=vocabulary_size, weights=[embedding_weights],
output_dim=embedding_dim, mask_zero=True)
encoder = PositionalEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd")
test_input = np.asarray([[0, 1, 2, 3, 0]])
actual_output = model.predict(test_input)[0]
expected_output = np.asarray([1.3333333, 1.6666666, 2])
np.testing.assert_array_almost_equal(expected_output, actual_output)
def test_on_completely_masked_input(self):
sentence_length = 5
embedding_dim = 3
vocabulary_size = 5
embedding_weights = np.asarray([[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0]])
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding masks zeros
embedding = Embedding(input_dim=vocabulary_size, weights=[embedding_weights],
output_dim=embedding_dim, mask_zero=True)
encoder = PositionalEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd")
test_input = np.asarray([[0, 0, 0, 0, 0]])
actual_output = model.predict(test_input)[0]
expected_output = np.asarray([0.0, 0.0, 0.0])
np.testing.assert_array_almost_equal(expected_output, actual_output)
| deep_qa-master | tests/layers/positional_encoder_test.py |
# pylint: disable=no-self-use
import numpy as np
from numpy.testing import assert_array_almost_equal
from deep_qa.layers import L1Normalize
from deep_qa.testing.test_case import DeepQaTestCase
from keras.layers import Input, Masking
from keras.models import Model
class TestL1Normalize(DeepQaTestCase):
def test_general_case(self):
input_length = 6
input_layer = Input(shape=(input_length,), dtype='float32', name="input")
l1_normalize_layer = L1Normalize()
normalized_input = l1_normalize_layer(input_layer)
model = Model([input_layer], normalized_input)
# Testing general unmasked 1D case.
unnormalized_vector = np.array([[.1, .2, .3, .4, 0.01, 0.03]])
result = model.predict([unnormalized_vector])
assert_array_almost_equal(result, np.array([[0.09615385, 0.1923077,
0.28846157, 0.38461539,
0.00961538, 0.02884615]]))
assert_array_almost_equal(np.sum(result, axis=1), np.ones(1))
# Testing general unmasked batched case.
unnormalized_matrix = np.array([[.1, .2, .3, .4, 0.01, 0.03],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
result = model.predict([unnormalized_matrix])
assert_array_almost_equal(result, np.array([[0.09615385, 0.1923077,
0.28846157, 0.38461539,
0.00961538, 0.02884615],
[1.0/21.0, 2.0/21.0, 3.0/21.0,
4.0/21.0, 5.0/21.0, 6.0/21.0]]))
assert_array_almost_equal(np.sum(result, axis=1), np.ones(2))
def test_squeeze_case_mask(self):
input_length = 4
mask_value = 3
input_layer = Input(shape=(input_length, 1), dtype='float32', name="input")
mask_layer = Masking(mask_value=mask_value)
masked_input = mask_layer(input_layer)
l1_normalize_layer = L1Normalize()
normalized_input = l1_normalize_layer(masked_input)
model = Model([input_layer], normalized_input)
unnormalized_vector = np.array([[[1.0], [2.0], [3.0], [4.0]]])
result = model.predict([unnormalized_vector])
assert_array_almost_equal(result, np.array([[0.14285715, 0.2857143,
0, 0.5714286]]))
assert_array_almost_equal(np.sum(result, axis=1), np.ones(1))
# Testing general masked batched case
unnormalized_matrix = np.array([[[1.0], [2.0], [3.0], [4.0]],
[[3.0], [2.0], [3.0], [4.0]]])
result = model.predict([unnormalized_matrix])
assert_array_almost_equal(result, np.array([[0.14285715, 0.2857143,
0, 0.5714286],
[0, 2.0/6.0, 0, 4.0/6.0]]))
assert_array_almost_equal(np.sum(result, axis=1), np.ones(2))
def test_squeeze_case(self):
input_length = 6
input_layer = Input(shape=(input_length, 1), dtype='float32', name="input")
l1_normalize_layer = L1Normalize()
normalized_input = l1_normalize_layer(input_layer)
model = Model([input_layer], normalized_input)
unnormalized_vector = np.array([[[.1], [.2], [.3], [.4], [0.01], [0.03]]])
result = model.predict([unnormalized_vector])
assert_array_almost_equal(result, np.array([[0.09615385, 0.1923077,
0.28846157, 0.38461539,
0.00961538, 0.02884615]]))
assert_array_almost_equal(np.sum(result, axis=1), np.ones(1))
# Testing general unmasked batched case.
unnormalized_matrix = np.array([[[.1], [.2], [.3], [.4], [0.01], [0.03]],
[[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]])
result = model.predict([unnormalized_matrix])
assert_array_almost_equal(result, np.array([[0.09615385, 0.1923077,
0.28846157, 0.38461539,
0.00961538, 0.02884615],
[1.0/21.0, 2.0/21.0, 3.0/21.0,
4.0/21.0, 5.0/21.0, 6.0/21.0]]))
assert_array_almost_equal(np.sum(result, axis=1), np.ones(2))
| deep_qa-master | tests/layers/test_l1_normalize.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from flaky import flaky
from keras.layers import Input
from keras.models import Model
from deep_qa.layers import ComplexConcat
class TestComplexConcatLayer:
def test_call_works_on_simple_input(self):
input_shape = (3, 4, 5, 7)
input_1 = Input(shape=input_shape[1:], dtype='float32')
input_2 = Input(shape=input_shape[1:], dtype='float32')
input_3 = Input(shape=input_shape[1:], dtype='float32')
input_4 = Input(shape=input_shape[1:], dtype='float32')
inputs = [input_1, input_2, input_3, input_4]
concatenated = ComplexConcat(combination='1,2,3,4')(inputs)
model = Model(inputs=inputs, outputs=[concatenated])
input_1_tensor = numpy.random.rand(*input_shape)
input_2_tensor = numpy.random.rand(*input_shape)
input_3_tensor = numpy.random.rand(*input_shape)
input_4_tensor = numpy.random.rand(*input_shape)
input_tensors = [input_1_tensor, input_2_tensor, input_3_tensor, input_4_tensor]
concat_tensor = model.predict(input_tensors)
assert concat_tensor.shape == (3, 4, 5, 7*4)
numpy.testing.assert_almost_equal(concat_tensor, numpy.concatenate(input_tensors, axis=-1))
@flaky
def test_call_handles_complex_combinations(self):
input_shape = (3, 4, 5, 7)
input_1 = Input(shape=input_shape[1:], dtype='float32')
input_2 = Input(shape=input_shape[1:], dtype='float32')
input_3 = Input(shape=input_shape[1:], dtype='float32')
input_4 = Input(shape=input_shape[1:], dtype='float32')
inputs = [input_1, input_2, input_3, input_4]
concatenated = ComplexConcat(combination='1-2,2*4,3/1,4+3,3', axis=1)(inputs)
model = Model(inputs=inputs, outputs=[concatenated])
input_1_tensor = numpy.random.rand(*input_shape)
input_2_tensor = numpy.random.rand(*input_shape)
input_3_tensor = numpy.random.rand(*input_shape)
input_4_tensor = numpy.random.rand(*input_shape)
input_tensors = [input_1_tensor, input_2_tensor, input_3_tensor, input_4_tensor]
concat_tensor = model.predict(input_tensors)
assert concat_tensor.shape == (3, 4*5, 5, 7)
expected_tensor = numpy.concatenate([
input_1_tensor - input_2_tensor,
input_2_tensor * input_4_tensor,
input_3_tensor / input_1_tensor,
input_4_tensor + input_3_tensor,
input_3_tensor
], axis=1)
numpy.testing.assert_almost_equal(concat_tensor, expected_tensor, decimal=3)
| deep_qa-master | tests/layers/complex_concat_test.py |
# pylint: disable=no-self-use
import numpy as np
from numpy.testing import assert_array_almost_equal
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend.add_mask import AddMask
from deep_qa.layers.subtract_minimum import SubtractMinimum
from deep_qa.testing.test_case import DeepQaTestCase
class TestSubtractMinimum(DeepQaTestCase):
def test_general_case(self):
input_layer = Input(shape=(4, 3,), dtype='float32', name="input")
subtract_minimum_layer = SubtractMinimum(axis=1)
normalized_input = subtract_minimum_layer(input_layer)
model = Model([input_layer], normalized_input)
        # Testing the general unmasked case.
unnormalized_tensor = np.array([[[0.1, 0.1, 0.1],
[0.2, 0.3, 0.4],
[0.5, 0.4, 0.6],
[0.5, 0.4, 0.6]]])
result = model.predict([unnormalized_tensor])
assert_array_almost_equal(result, np.array([[[0.0, 0.0, 0.0],
[0.1, 0.2, 0.3],
[0.4, 0.3, 0.5],
[0.4, 0.3, 0.5]]]))
# Testing masked batched case.
        # By setting the mask value to 0.1, the layer should ignore this value when deciding the minimum.
mask_layer = AddMask(mask_value=0.1)
masked_input = mask_layer(input_layer)
normalized_masked_input = subtract_minimum_layer(masked_input)
masking_model = Model([input_layer], normalized_masked_input)
masked_result = masking_model.predict([unnormalized_tensor])
assert_array_almost_equal(masked_result, np.array([[[-0.1, -0.2, -0.3],
[0.0, 0.0, 0.0],
[0.3, 0.1, 0.2],
[0.3, 0.1, 0.2]]]))
| deep_qa-master | tests/layers/test_subtract_minimum.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_array_equal
from keras.layers import Embedding, Input
from keras.models import Model
from deep_qa.layers import VectorMatrixMerge
from deep_qa.layers.wrappers import OutputMask
class TestVectorMatrixMerge:
def test_merge_works_correctly_on_word_indices(self):
vocab_size = 10
sentence_length = 10
word_length = 7
num_sentences = 7
for concat_axis in [2, -1]:
sentence_input = Input(shape=(sentence_length, word_length - 2), dtype='int32')
additional_input = Input(shape=(sentence_length,), dtype='int32')
additional_input2 = Input(shape=(sentence_length,), dtype='int32')
merge_layer = VectorMatrixMerge(concat_axis=concat_axis)
merged = merge_layer([additional_input, additional_input2, sentence_input])
model = Model(inputs=[sentence_input, additional_input, additional_input2], outputs=merged)
sentence_tensor = numpy.random.randint(0, vocab_size, (num_sentences, sentence_length, word_length))
merged_tensor = model.predict([sentence_tensor[:, :, 2:],
sentence_tensor[:, :, 0],
sentence_tensor[:, :, 1]])
assert_array_equal(sentence_tensor, merged_tensor)
def test_merge_adds_words_to_sentence_correctly(self):
# The thing to note here is that when we're adding words, we're adding rows to the mask as
# well. This test makes sure that this works correctly.
vocab_size = 10
sentence_length = 3
word_length = 3
embedding_dim = 10
sentence_input = Input(shape=(sentence_length, word_length), dtype='int32')
extra_word_input = Input(shape=(word_length,), dtype='int32')
embedding = Embedding(input_dim=vocab_size, output_dim=embedding_dim, mask_zero=True)
embedded_sentence = embedding(sentence_input) # (batch_size, sentence_length, word_length, embedding_dim)
embedded_extra_word = embedding(extra_word_input) # (batch_size, word_length, embedding_dim)
merge_layer = VectorMatrixMerge(concat_axis=1)
merged_sentence = merge_layer([embedded_extra_word, embedded_sentence])
result_mask = OutputMask()(merged_sentence)
model = Model(inputs=[sentence_input, extra_word_input], outputs=[merged_sentence, result_mask])
sentence_tensor = numpy.asarray([[[1, 3, 0], [2, 8, 7], [0, 0, 0]]])
extra_word_tensor = numpy.asarray([[9, 0, 0]])
merged_tensor, result_mask_tensor = model.predict([sentence_tensor, extra_word_tensor])
expected_mask = numpy.asarray([[[1, 0, 0], [1, 1, 0], [1, 1, 1], [0, 0, 0]]])
assert merged_tensor.shape == (1, sentence_length + 1, word_length, embedding_dim)
assert_array_equal(result_mask_tensor, expected_mask)
def test_merge_adds_dims_to_word_embedding_correctly(self):
# The thing to note here is that when we're adding dimensions to an embedding, we're not
# changing the mask. That is, the concat axis is greater than the dimensionality of the
# mask. This test makes sure that this works correctly.
vocab_size = 10
sentence_length = 6
embedding_dim = 10
for concat_axis in [2, -1]:
sentence_input = Input(shape=(sentence_length,), dtype='int32')
extra_embedding_input = Input(shape=(sentence_length,), dtype='float32')
embedding = Embedding(input_dim=vocab_size, output_dim=embedding_dim, mask_zero=True)
embedded_sentence = embedding(sentence_input) # (batch_size, sentence_length, embedding_dim)
merge_layer = VectorMatrixMerge(concat_axis=concat_axis)
merged_sentence = merge_layer([extra_embedding_input, embedded_sentence])
result_mask = OutputMask()(merged_sentence)
model = Model(inputs=[sentence_input, extra_embedding_input], outputs=[merged_sentence, result_mask])
sentence_tensor = numpy.asarray([[1, 3, 6, 2, 0, 0]])
extra_word_tensor = numpy.asarray([[1, 2, 3, 4, 5, 6]])
merged_tensor, result_mask_tensor = model.predict([sentence_tensor, extra_word_tensor])
expected_mask = numpy.asarray([[1, 1, 1, 1, 0, 0]])
assert merged_tensor.shape == (1, sentence_length, embedding_dim + 1)
assert_array_equal(merged_tensor[0, :, 0], [1, 2, 3, 4, 5, 6])
assert_array_equal(result_mask_tensor, expected_mask)
| deep_qa-master | tests/layers/vector_matrix_merge_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input, Embedding
from keras.models import Model
from deep_qa.layers.entailment_models import DecomposableAttentionEntailment
class TestDecomposableAttention:
def test_decomposable_attention_does_not_crash(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
num_sentences = 7
premise_input_layer = Input(shape=(sentence_length,), dtype='int32')
hypothesis_input_layer = Input(shape=(sentence_length,), dtype='int32')
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, mask_zero=True)
embedded_premise = embedding(premise_input_layer)
embedded_hypothesis = embedding(hypothesis_input_layer)
entailment_layer = DecomposableAttentionEntailment()
entailment_scores = entailment_layer([embedded_premise, embedded_hypothesis])
model = Model(inputs=[premise_input_layer, hypothesis_input_layer], outputs=entailment_scores)
premise_input = numpy.random.randint(0, vocabulary_size, (num_sentences, sentence_length))
hypothesis_input = numpy.random.randint(0, vocabulary_size, (num_sentences, sentence_length))
model.predict([premise_input, hypothesis_input])
| deep_qa-master | tests/layers/decomposable_attention_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input, Embedding
from keras.models import Model
from deep_qa.layers import VectorMatrixSplit
from deep_qa.layers.wrappers import OutputMask
class TestVectorMatrixSplit:
def test_split_works_correctly_on_word_indices(self):
vocabulary_size = 10
sentence_length = 10
word_length = 5
num_sentences = 7
sentence_input = Input(shape=(sentence_length, word_length), dtype='int32')
split_layer = VectorMatrixSplit(split_axis=2)
words, characters = split_layer(sentence_input)
model = Model(inputs=[sentence_input], outputs=[words, characters])
sentence_tensor = numpy.random.randint(0, vocabulary_size, (num_sentences, sentence_length, word_length))
word_tensor, character_tensor = model.predict([sentence_tensor])
assert numpy.array_equal(word_tensor, sentence_tensor[:, :, 0])
assert numpy.array_equal(character_tensor, sentence_tensor[:, :, 1:])
def test_split_works_correctly_with_negative_axis(self):
vocabulary_size = 10
sentence_length = 10
word_length = 5
num_sentences = 7
sentence_input = Input(shape=(sentence_length, word_length), dtype='int32')
split_layer = VectorMatrixSplit(split_axis=-1)
words, characters = split_layer(sentence_input)
model = Model(inputs=[sentence_input], outputs=[words, characters])
sentence_tensor = numpy.random.randint(0, vocabulary_size, (num_sentences, sentence_length, word_length))
word_tensor, character_tensor = model.predict([sentence_tensor])
assert numpy.array_equal(word_tensor, sentence_tensor[:, :, 0])
assert numpy.array_equal(character_tensor, sentence_tensor[:, :, 1:])
def test_split_works_correctly_on_word_embeddings_with_masking(self):
vocabulary_size = 10
sentence_length = 10
word_length = 5
embedding_dim = 10
num_sentences = 7
sentence_input = Input(shape=(sentence_length, word_length), dtype='int32')
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, mask_zero=True)
embedded_sentence = embedding(sentence_input) # (batch_size, sentence_length, word_length, embedding_dim)
sentence_mask = OutputMask()(embedded_sentence)
# Note that this mask_split_axis doesn't make practical sense; I'm just testing the code
# with a different axis for the mask and the input.
split_layer = VectorMatrixSplit(split_axis=2, mask_split_axis=1)
words, characters = split_layer(embedded_sentence)
word_mask = OutputMask()(words)
character_mask = OutputMask()(characters)
outputs = [embedded_sentence, words, characters, sentence_mask, word_mask, character_mask]
model = Model(inputs=[sentence_input], outputs=outputs)
sentence_tensor = numpy.random.randint(0, vocabulary_size, (num_sentences, sentence_length, word_length))
actual_outputs = model.predict([sentence_tensor])
sentence_tensor, word_tensor, character_tensor, sentence_mask, word_mask, character_mask = actual_outputs
assert numpy.array_equal(word_tensor, sentence_tensor[:, :, 0, :])
assert numpy.array_equal(character_tensor, sentence_tensor[:, :, 1:, :])
assert numpy.array_equal(word_mask, sentence_mask[:, 0, :])
assert numpy.array_equal(character_mask, sentence_mask[:, 1:, :])
| deep_qa-master | tests/layers/vector_matrix_split_test.py |
# pylint: disable=no-self-use, invalid-name
import numpy as np
from numpy.testing import assert_array_almost_equal
from deep_qa.layers import NoisyOr, BetweenZeroAndOne
from deep_qa.testing.test_case import DeepQaTestCase
from keras import backend as K
from keras.layers import Input
from keras.models import Model
class TestNoisyOr(DeepQaTestCase):
def test_general_case(self):
input_layer = Input(shape=(3, 2,), dtype='float32', name="input")
axis = 2
noisy_or_layer = NoisyOr(axis=axis)
output = noisy_or_layer(input_layer)
model = Model([input_layer], output)
# Testing general unmasked batched case.
q = K.eval(noisy_or_layer.noise_parameter)
batch_original_data = np.array([[[0.2, 0.1],
[0.5, 0.3],
[0.3, 0.7]],
[[0.4, 0.55],
[0.65, 0.8],
[0.9, 0.15]]])
batch_result = model.predict([batch_original_data])
batch_desired_result = 1.0 - np.prod(1.0 - (q * batch_original_data), axis=axis)
assert_array_almost_equal(batch_result, batch_desired_result)
# Testing the masked case.
# Here's a modified version of the batch_original_data, with extra probabilities.
batch_data_with_masks = K.variable(np.array([[[0.2, 0.1, 0.7], [0.5, 0.3, 0.3], [0.3, 0.7, 0.2]],
[[0.4, 0.55, 0.3], [0.65, 0.8, 0.1], [0.9, 0.15, 0.0]]]),
dtype="float32")
# Now here the added 3rd element is masked out, so the noisy_or probabilities resulting from the
# masked version should be the same as the unmasked one (above).
masks = K.variable(np.array([[[1, 1, 0], [1, 1, 0], [1, 1, 0]],
[[1, 1, 0], [1, 1, 0], [1, 1, 0]]]), dtype="float32")
masking_results = K.eval(noisy_or_layer.call(inputs=batch_data_with_masks, mask=masks))
assert_array_almost_equal(batch_result, masking_results)
def test_between_zero_and_one_constraint(self):
p = K.variable(np.asarray([0.35, -0.4, 1.0, 1.2]), dtype='float32')
desired_result = np.asarray([0.35, K.epsilon(), 1.0, 1.0])
result = K.eval(BetweenZeroAndOne()(p))
assert_array_almost_equal(result, desired_result)
| deep_qa-master | tests/layers/noisy_or_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.layers.encoders import BOWEncoder
from deep_qa.layers.wrappers import AddEncoderMask, OutputMask
from deep_qa.testing.test_case import DeepQaTestCase
from deep_qa.training.models import DeepQaModel
from keras.layers import Embedding, Input
class TestAddEncoderMask(DeepQaTestCase):
def test_mask_is_computed_correctly(self):
background_input = Input(shape=(None, 3), dtype='int32')
embedding = Embedding(input_dim=3, output_dim=2, mask_zero=True)
embedded_background = embedding(background_input)
encoded_background = BOWEncoder(units=2)(embedded_background)
encoded_background_with_mask = AddEncoderMask()([encoded_background, embedded_background])
mask_output = OutputMask()(encoded_background_with_mask)
model = DeepQaModel(inputs=[background_input], outputs=mask_output)
test_background = numpy.asarray([
[
[0, 0, 0],
[2, 2, 2],
[0, 0, 0],
[0, 1, 2],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[1, 1, 1],
]
])
expected_mask = numpy.asarray([[0, 1, 0, 1, 1, 0, 1, 1]])
actual_mask = model.predict([test_background])
numpy.testing.assert_array_equal(expected_mask, actual_mask)
| deep_qa-master | tests/layers/wrappers/add_encoder_mask_test.py |